buildbot success on flink-docs-release-0.10

2016-06-06 Thread buildbot
The Buildbot has detected a restored build on builder flink-docs-release-0.10. Full details are available at:
https://ci.apache.org/builders/flink-docs-release-0.10/builds/230

Buildbot URL: https://ci.apache.org/

Buildslave for this Build: bb_slave1_ubuntu

Build Reason: The Nightly scheduler named 'flink-nightly-docs-release-0.10' triggered this build
Build Source Stamp: [branch release-0.10] HEAD
Blamelist: 

Build succeeded!

Sincerely,
 -The Buildbot





[2/2] flink git commit: Revert "[FLINK-3960] ignore EventTimeWindowCheckpointingITCase for now"

2016-06-06 Thread aljoscha
Revert "[FLINK-3960] ignore EventTimeWindowCheckpointingITCase for now"

This reverts commit 98a939552e12fc699ff39111bbe877e112460ceb.


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/cfffdc87
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/cfffdc87
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/cfffdc87

Branch: refs/heads/master
Commit: cfffdc87e3a3ac8aa7e33db87223df7bb7c8aef9
Parents: ccc3e44
Author: Aljoscha Krettek 
Authored: Sat Jun 4 07:59:48 2016 +0200
Committer: Aljoscha Krettek 
Committed: Mon Jun 6 09:29:33 2016 +0200

--
 .../test/checkpointing/EventTimeWindowCheckpointingITCase.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/cfffdc87/flink-tests/src/test/java/org/apache/flink/test/checkpointing/EventTimeWindowCheckpointingITCase.java
--
diff --git a/flink-tests/src/test/java/org/apache/flink/test/checkpointing/EventTimeWindowCheckpointingITCase.java b/flink-tests/src/test/java/org/apache/flink/test/checkpointing/EventTimeWindowCheckpointingITCase.java
index 474fc60..199a6af 100644
--- a/flink-tests/src/test/java/org/apache/flink/test/checkpointing/EventTimeWindowCheckpointingITCase.java
+++ b/flink-tests/src/test/java/org/apache/flink/test/checkpointing/EventTimeWindowCheckpointingITCase.java
@@ -49,7 +49,6 @@ import org.apache.flink.util.TestLogger;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -72,7 +71,6 @@ import static org.junit.Assert.*;
  */
 @SuppressWarnings("serial")
 @RunWith(Parameterized.class)
-@Ignore("Disabled because RocksDB fails with a segmentation fault. See 
FLINK-3960")
 public class EventTimeWindowCheckpointingITCase extends TestLogger {
 
private static final int PARALLELISM = 4;
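
For context on what the reverted annotation did: a class-level JUnit 4 @Ignore skips every test method in the class and reports the given reason string, so removing it (as this revert does) re-enables the whole test class. The following is a minimal, hypothetical sketch, not Flink's actual test class; the class name and reason text are invented.

import org.junit.Ignore;
import org.junit.Test;

// While the class-level @Ignore is present, JUnit 4 skips both test methods
// and reports the reason string; removing the annotation re-enables them.
@Ignore("Disabled while investigating a native crash")
public class FlakyBackendTest {

	@Test
	public void testSmallWindows() {
		// not executed while the class is ignored
	}

	@Test
	public void testLargeWindows() {
		// not executed while the class is ignored
	}
}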



[1/2] flink git commit: [FLINK-3948] Protect RocksDB cleanup by cleanup lock

2016-06-06 Thread aljoscha
Repository: flink
Updated Branches:
  refs/heads/master ccc3e44cb -> 61d69a229


[FLINK-3948] Protect RocksDB cleanup by cleanup lock

Before, an asynchronous checkpoint could still be in progress when
cleanup was attempted. Now cleanup and asynchronous checkpointing are
both protected by a lock.
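
To illustrate the locking pattern described above, here is a minimal, self-contained Java sketch. It is not the actual Flink code: the class and member names (LockedBackend, snapshotAsync) are invented, and a plain AutoCloseable stands in for the native RocksDB handle.

import java.util.concurrent.CompletableFuture;

public class LockedBackend {

	// Stand-in for the native RocksDB handle; volatile so that a disposal on
	// one thread becomes visible to the snapshot thread.
	private volatile AutoCloseable db = () -> { };

	// Acquired both while snapshotting asynchronously and while disposing the db.
	private final Object dbCleanupLock = new Object();

	// Runs the snapshot on another thread, but only while the db is still alive.
	public CompletableFuture<Void> snapshotAsync() {
		return CompletableFuture.runAsync(() -> {
			synchronized (dbCleanupLock) {
				if (db == null) {
					return; // already disposed, nothing to snapshot
				}
				// ... iterate over the db and write the snapshot here ...
			}
		});
	}

	// Disposes the db; holding the lock guarantees that no snapshot is
	// iterating over it concurrently.
	public void dispose() throws Exception {
		synchronized (dbCleanupLock) {
			if (db != null) {
				db.close();
				db = null;
			}
		}
	}
}

The diff below applies the same idea inside RocksDBStateBackend: the db field becomes volatile and dispose() synchronizes on the newly added dbCleanupLock before disposing anything.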


Project: http://git-wip-us.apache.org/repos/asf/flink/repo
Commit: http://git-wip-us.apache.org/repos/asf/flink/commit/61d69a22
Tree: http://git-wip-us.apache.org/repos/asf/flink/tree/61d69a22
Diff: http://git-wip-us.apache.org/repos/asf/flink/diff/61d69a22

Branch: refs/heads/master
Commit: 61d69a229b40e52460f26804e4a36cf12e150004
Parents: cfffdc8
Author: Aljoscha Krettek 
Authored: Fri May 20 22:37:14 2016 +0200
Committer: Aljoscha Krettek 
Committed: Mon Jun 6 09:29:33 2016 +0200

--
 .../flink-statebackend-rocksdb/pom.xml  |   2 +-
 .../streaming/state/RocksDBStateBackend.java| 110 ---
 2 files changed, 70 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/flink/blob/61d69a22/flink-contrib/flink-statebackend-rocksdb/pom.xml
--
diff --git a/flink-contrib/flink-statebackend-rocksdb/pom.xml b/flink-contrib/flink-statebackend-rocksdb/pom.xml
index cccdc20..115752c 100644
--- a/flink-contrib/flink-statebackend-rocksdb/pom.xml
+++ b/flink-contrib/flink-statebackend-rocksdb/pom.xml
@@ -50,7 +50,7 @@ under the License.
 		<dependency>
 			<groupId>org.rocksdb</groupId>
 			<artifactId>rocksdbjni</artifactId>
-			<version>4.1.0</version>
+			<version>4.5.1</version>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.flink</groupId>

http://git-wip-us.apache.org/repos/asf/flink/blob/61d69a22/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
--
diff --git a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
index 4b7d7ee..4c44249 100644
--- a/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
+++ b/flink-contrib/flink-statebackend-rocksdb/src/main/java/org/apache/flink/contrib/streaming/state/RocksDBStateBackend.java
@@ -155,7 +155,14 @@ public class RocksDBStateBackend extends AbstractStateBackend {
 * to store state. The different k/v states that we have don't each have their own RocksDB
 * instance. They all write to this instance but to their own column family.
 */
-   protected transient RocksDB db;
+   protected volatile transient RocksDB db;
+
+   /**
+* Lock for protecting cleanup of the RocksDB db. We acquire this when doing asynchronous
+* checkpoints and when disposing the db. Otherwise, the asynchronous snapshot might try
+* iterating over a disposed db.
+*/
+   private Object dbCleanupLock;
 
/**
 * Information about the k/v states as we create them. This is used to retrieve the
@@ -282,6 +289,8 @@ public class RocksDBStateBackend extends AbstractStateBackend {
throw new RuntimeException("Error cleaning RocksDB data directory.", e);
}
 
+   dbCleanupLock = new Object();
+
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(1);
// RocksDB seems to need this...
columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
@@ -305,28 +314,44 @@ public class RocksDBStateBackend extends AbstractStateBackend {
super.dispose();
nonPartitionedStateBackend.dispose();
 
-   if (this.dbOptions != null) {
-   this.dbOptions.dispose();
-   this.dbOptions = null;
-   }
-   for (Tuple2 column: kvStateInformation.values()) {
-   column.f0.dispose();
+   // we have to lock because we might have an asynchronous checkpoint going on
+   synchronized (dbCleanupLock) {
+   if (db != null) {
+   if (this.dbOptions != null) {
+   this.dbOptions.dispose();
+   this.dbOptions = null;
+   }
+
+   for (Tuple2 column : kvStateInformation.values()) {
+   column.f0.dispose();
+