This is an automated email from the ASF dual-hosted git repository.

klund pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git

commit 8009a1203ffc30c5d3c7b0d6c7652afdaae6353b
Author: Kirk Lund <[email protected]>
AuthorDate: Wed Mar 21 13:28:03 2018 -0700

    GEODE-1279: Rename Bug37241DUnitTest as 
CleanupFailedInitWithDiskFilesRegressionTest
---
 .../geode/internal/cache/Bug37241DUnitTest.java    | 205 ---------------------
 ...eanupFailedInitWithDiskFilesRegressionTest.java | 161 ++++++++++++++++
 2 files changed, 161 insertions(+), 205 deletions(-)

diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/Bug37241DUnitTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/Bug37241DUnitTest.java
deleted file mode 100755
index 8aec85b..0000000
--- 
a/geode-core/src/test/java/org/apache/geode/internal/cache/Bug37241DUnitTest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software 
distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
- * or implied. See the License for the specific language governing permissions 
and limitations under
- * the License.
- */
-
-package org.apache.geode.internal.cache;
-
-import static org.junit.Assert.*;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Properties;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.Cache;
-import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.DataPolicy;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionAttributes;
-import org.apache.geode.cache.Scope;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.ReplyException;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-/**
- * Confirms that bug 37241 is fixed: cleanupFailedInitialization should also clean up the disk
- * files it created.
- */
-@Category(DistributedTest.class)
-public class Bug37241DUnitTest extends JUnit4DistributedTestCase {
-
-  private static Cache cache = null;
-
-  static VM server1 = null;
-
-  static VM server2 = null;
-
-  private static final String REGION_NAME = "Bug37241DUnitTest_region";
-
-  static final String expectedReplyException = ReplyException.class.getName();
-
-  static final String expectedException = 
IllegalStateException.class.getName();
-
-  @Override
-  public final void postSetUp() throws Exception {
-    final Host host = Host.getHost(0);
-    server1 = host.getVM(0);
-    server2 = host.getVM(1);
-  }
-
-  /*
-   * 1. Create persistent region on server1 with scope global. 2. Try to create persistent region
-   * with the same name on server2 with scope d-ack. 3. Region creation should fail. Check that all
-   * files created in the directory for server2 get deleted.
-   */
-  @Test
-  public void testBug37241ForNewDiskRegion() {
-    server1.invoke(() -> Bug37241DUnitTest.createRegionOnServer1());
-
-    try {
-      server2.invoke(() -> 
Bug37241DUnitTest.createRegionOnServer2(Scope.DISTRIBUTED_ACK));
-    } catch (Exception e) {
-      server2.invoke(() -> Bug37241DUnitTest.ignoreExceptionInLogs());
-      server2.invoke(() -> Bug37241DUnitTest.checkForCleanup());
-    }
-  }
-
-  @Test
-  public void testBug37241ForRecreatedDiskRegion() {
-    server1.invoke(() -> Bug37241DUnitTest.createRegionOnServer1());
-    server2.invoke(() -> 
Bug37241DUnitTest.createRegionOnServer2(Scope.GLOBAL));
-    server2.invoke(() -> Bug37241DUnitTest.closeRegion());
-    try {
-      server2.invoke(() -> 
Bug37241DUnitTest.createRegionOnServer2(Scope.DISTRIBUTED_ACK));
-    } catch (Exception e) {
-      server2.invoke(() -> Bug37241DUnitTest.ignoreExceptionInLogs());
-      server2.invoke(() -> Bug37241DUnitTest.checkForCleanupAfterRecreation());
-    }
-  }
-
-  private void createCache(Properties props) throws Exception {
-    DistributedSystem ds = getSystem(props);
-    ds.disconnect();
-    ds = getSystem(props);
-    assertNotNull(ds);
-    cache = CacheFactory.create(ds);
-    assertNotNull(cache);
-  }
-
-  public static void createRegionOnServer1() throws Exception {
-    new Bug37241DUnitTest().createCache(new Properties());
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.GLOBAL);
-    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
-
-    File[] dirs = new File[2];
-    File file1 = new File("server1_disk1");
-    File file2 = new File("server1_disk2");
-    file1.mkdir();
-    file2.mkdir();
-    dirs[0] = file1;
-    dirs[1] = file2;
-    factory.setDiskSynchronous(false);
-    factory.setDiskStoreName(
-        
cache.createDiskStoreFactory().setDiskDirs(dirs).create("Bug37241DUnitTest").getName());
-    RegionAttributes attrs = factory.create();
-    cache.createRegion(REGION_NAME, attrs);
-  }
-
-  public static void createRegionOnServer2(Scope scope) throws Exception {
-    new Bug37241DUnitTest().createCache(new Properties());
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(scope);
-    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
-
-    File[] dirs = new File[2];
-    File file1 = new File("server2_disk1");
-    File file2 = new File("server2_disk2");
-    file1.mkdir();
-    file2.mkdir();
-    dirs[0] = file1;
-    dirs[1] = file2;
-    factory.setDiskSynchronous(false);
-    factory.setDiskStoreName(
-        
cache.createDiskStoreFactory().setDiskDirs(dirs).create("Bug37241DUnitTest").getName());
-
-    // Added so that the expected IllegalStateException is not logged as a failure.
-    LogWriterUtils.getLogWriter()
-        .info("<ExpectedException action=add>" + expectedReplyException + 
"</ExpectedException>");
-    LogWriterUtils.getLogWriter()
-        .info("<ExpectedException action=add>" + expectedException + 
"</ExpectedException>");
-    cache.getLogger()
-        .info("<ExpectedException action=add>" + expectedReplyException + 
"</ExpectedException>");
-    cache.getLogger()
-        .info("<ExpectedException action=add>" + expectedException + 
"</ExpectedException>");
-
-    RegionAttributes attrs = factory.create();
-    cache.createRegion(REGION_NAME, attrs);
-
-  }
-
-  public static void checkForCleanup() {
-    try {
-      Thread.sleep(200);
-    } catch (InterruptedException ignore) {
-    }
-    cache.getLogger()
-        .info("checkForCleanup=" + Arrays.asList(new 
File("server2_disk2").listFiles()));
-    assertEquals(0, new File("server2_disk2").listFiles().length);
-  }
-
-
-  public static void checkForCleanupAfterRecreation() {
-    checkForCleanup();
-  }
-
-  public static void ignoreExceptionInLogs() {
-
-    cache.getLogger()
-        .info("<ExpectedException action=remove>" + expectedException + 
"</ExpectedException>");
-
-    cache.getLogger().info(
-        "<ExpectedException action=remove>" + expectedReplyException + 
"</ExpectedException>");
-    LogWriterUtils.getLogWriter()
-        .info("<ExpectedException action=remove>" + expectedException + 
"</ExpectedException>");
-    LogWriterUtils.getLogWriter().info(
-        "<ExpectedException action=remove>" + expectedReplyException + 
"</ExpectedException>");
-  }
-
-  public static void closeRegion() {
-    Cache cache = CacheFactory.getAnyInstance();
-    Region region = cache.getRegion("/" + REGION_NAME);
-    region.close();
-  }
-
-  public static void closeCache() {
-    if (cache != null && !cache.isClosed()) {
-      cache.close();
-      cache.getDistributedSystem().disconnect();
-    }
-  }
-
-  @Override
-  public final void preTearDown() throws Exception {
-    server1.invoke(() -> Bug37241DUnitTest.closeCache());
-    server2.invoke(() -> Bug37241DUnitTest.closeCache());
-  }
-}
diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/cache/CleanupFailedInitWithDiskFilesRegressionTest.java
 
b/geode-core/src/test/java/org/apache/geode/internal/cache/CleanupFailedInitWithDiskFilesRegressionTest.java
new file mode 100755
index 0000000..a276800
--- /dev/null
+++ 
b/geode-core/src/test/java/org/apache/geode/internal/cache/CleanupFailedInitWithDiskFilesRegressionTest.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.apache.geode.test.dunit.Host.getHost;
+import static org.apache.geode.test.dunit.IgnoredException.addIgnoredException;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.awaitility.Awaitility.await;
+
+import java.io.File;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.distributed.internal.ReplyException;
+import org.apache.geode.test.dunit.RMIException;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.CacheTestCase;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import 
org.apache.geode.test.junit.rules.serializable.SerializableTemporaryFolder;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+/**
+ * Confirms that bug 37241 is fixed: cleanupFailedInitialization should also clean up the disk
+ * files it created.
+ *
+ * <p>
+ * TRAC #37241: cleanupFailedInitialization on should also clean disk files 
created
+ */
+@Category(DistributedTest.class)
+public class CleanupFailedInitWithDiskFilesRegressionTest extends 
CacheTestCase {
+
+  private String uniqueName;
+  private File[] foldersForServer1;
+  private File[] foldersForServer2;
+  private File server2Disk2;
+
+  private VM server1;
+  private VM server2;
+
+  @Rule
+  public SerializableTemporaryFolder temporaryFolder = new 
SerializableTemporaryFolder();
+
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+
+  @Before
+  public void setUp() throws Exception {
+    server1 = getHost(0).getVM(0);
+    server2 = getHost(0).getVM(1);
+
+    uniqueName = getClass().getSimpleName() + "_" + testName.getMethodName();
+
+    File server1Disk1 = temporaryFolder.newFolder(uniqueName + 
"_server1_disk1");
+    File server1Disk2 = temporaryFolder.newFolder(uniqueName + 
"_server1_disk2");
+    foldersForServer1 = new File[] {server1Disk1, server1Disk2};
+
+    File server2Disk1 = temporaryFolder.newFolder(uniqueName + 
"_server2_disk1");
+    server2Disk2 = temporaryFolder.newFolder(uniqueName + "_server2_disk2");
+    foldersForServer2 = new File[] {server2Disk1, server2Disk2};
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    disconnectAllFromDS();
+  }
+
+  /**
+   * 1. Create persistent region server1 with scope global.
+   * <p>
+   * 2. Try to create persistent region with same name on server2 with scope 
d-ack.
+   * <p>
+   * 3. Region creation should fail. Check for all files created in the 
directory for server 2 gets
+   * deleted.
+   */
+  @Test
+  public void newDiskRegionShouldBeCleanedUp() {
+    server1.invoke(() -> createRegionOnServer1());
+
+    assertThatThrownBy(() -> server2.invoke(() -> 
createRegionOnServer2(Scope.DISTRIBUTED_ACK)))
+        
.isInstanceOf(RMIException.class).hasCauseInstanceOf(IllegalStateException.class);
+
+    addIgnoredException(IllegalStateException.class);
+    addIgnoredException(ReplyException.class);
+    server2.invoke(() -> validateCleanupOfDiskFiles());
+  }
+
+  @Test
+  public void recreatedDiskRegionShouldBeCleanedUp() {
+    server1.invoke(() -> createRegionOnServer1());
+    server2.invoke(() -> createRegionOnServer2(Scope.GLOBAL));
+    server2.invoke(() -> closeRegion());
+
+    assertThatThrownBy(() -> server2.invoke(() -> 
createRegionOnServer2(Scope.DISTRIBUTED_ACK)))
+        
.isInstanceOf(RMIException.class).hasCauseInstanceOf(IllegalStateException.class);
+
+    addIgnoredException(IllegalStateException.class);
+    addIgnoredException(ReplyException.class);
+    server2.invoke(() -> validateCleanupOfDiskFiles());
+  }
+
+  private void createRegionOnServer1() {
+    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
+    dsf.setDiskDirs(foldersForServer1);
+
+    DiskStore diskStore = dsf.create(uniqueName);
+
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.GLOBAL);
+    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+    factory.setDiskSynchronous(false);
+    factory.setDiskStoreName(diskStore.getName());
+
+    getCache().createRegion(uniqueName, factory.create());
+  }
+
+  private void createRegionOnServer2(Scope scope) {
+    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
+    dsf.setDiskDirs(foldersForServer2);
+
+    DiskStore diskStore = dsf.create(uniqueName);
+
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(scope);
+    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
+    factory.setDiskSynchronous(false);
+    factory.setDiskStoreName(diskStore.getName());
+
+    getCache().createRegion(uniqueName, factory.create());
+  }
+
+  private void closeRegion() {
+    getCache().getRegion(uniqueName).close();
+  }
+
+  private void validateCleanupOfDiskFiles() {
+    await().atMost(1, MINUTES).until(() -> 
assertThat(server2Disk2.listFiles()).hasSize(0));
+  }
+}

-- 
To stop receiving notification emails like this one, please contact
[email protected].

Reply via email to