http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
index f3d5182..79669b2 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
@@ -35,10 +35,13 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDataStore.BucketVisitor;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -115,22 +118,22 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // put().
     validateBucket2NodeBeforePutInMultiplePartitionedRegion(
         startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket2Node region of partition regions before any put() successfully validated ");
     // doing put() operation on multiple partition region
     putInMultiplePartitionedRegion(startIndexForRegion, endIndexForRegion,
         startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() operation successfully in partition regions");
     // validating bucket regions of multiple partition regions.
     validateBucketsAfterPutInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions of partition regions successfully validated");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() Successfully completed");
   }
 
@@ -168,23 +171,23 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy);
         
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
     // doing put() operation from vm0 only
     putInMultiplePartitionRegionFromOneVm(vm[0], startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() Opereration done only from one VM ");
     // validating bucket distribution ovar all the nodes
     int noBucketsExpectedOnEachNode = getNoBucketsExpectedOnEachNode();
     validateBucketsDistributionInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion, noBucketsExpectedOnEachNode);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions are equally distributed");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() successfully completed");
   }
 
@@ -226,23 +229,23 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // creating multiple partition regions on 3 nodes with localMaxMemory=200 redundancy = 0
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
     // doing put() operation from all vms
     putInMultiplePartitionedRegionFromAllVms(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() Opereration done only from one VM ");
     // validating bucket distribution ovar all the nodes
     int noBucketsExpectedOnEachNode = getNoBucketsExpectedOnEachNode() - 4;
     validateBucketsDistributionInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion, noBucketsExpectedOnEachNode);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions are equally distributed");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() successfully created");
   }
 
@@ -295,7 +298,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // doing put() in multiple partition regions from 3 nodes.
     putInMultiplePartitionedRegionFrom3Nodes(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - Put() operation successfully in partition regions on 3 Nodes");
 
@@ -313,15 +316,15 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // doing put() in multiple partition regions from 3 nodes.
     putInMultiplePartitionedRegionFrom3Nodes(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - Put() operation successfully in partition regions on 4th node");
     // validating bucket creation in the 4th node
     validateBucketsOnAllNodes(startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - buckets on all the nodes are validated");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketDistributionAfterNodeAdditionInPR() successfully created");
   }
 
@@ -367,7 +370,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         endIndexForRegion, startIndexForKey, endIndexForKey);
     validateTotalNumBuckets(prPrefix, vmList, startIndexForRegion,
         endIndexForRegion, expectedNumBuckets);
-    getLogWriter().info("testTotalNumBucketProperty() completed successfully");
+    LogWriterUtils.getLogWriter().info("testTotalNumBucketProperty() completed successfully");
 
   }
 
@@ -421,7 +424,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     
     Host host = Host.getHost(0);
     createVMs(host);
-    invokeInEveryVM(new SerializableRunnable("Create PR") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Create PR") {
       public void run() {
         getCache().createRegion(regionName, createRegionAttrs(0, 10, maxBuckets));
         
@@ -512,12 +515,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Exception during " + count, async[count].getException());
+        Assert.fail("Exception during " + count, async[count].getException());
       }
     }
   }
@@ -554,11 +557,11 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
       }
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Exception during " + count, async[count].getException());
+        Assert.fail("Exception during " + count, async[count].getException());
       }
     }
   }
@@ -587,12 +590,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
  
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during" + count, async[count].getException());
+        Assert.fail("exception during" + count, async[count].getException());
       }
     }
   }
@@ -625,12 +628,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
      }
     }
   }
@@ -697,12 +700,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -742,12 +745,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -771,12 +774,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         startIndexForRegion, endIndexForRegion));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        getLogWriter().warning("Failure in async invocation on vm " 
+        LogWriterUtils.getLogWriter().warning("Failure in async invocation on vm " 
             + vm[count]
             + " with exception " + async[count].getException());
         throw async[count].getException();
@@ -807,12 +810,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("got exception on " + count, async[count].getException());
+        Assert.fail("got exception on " + count, async[count].getException());
       }
     }
 
@@ -828,12 +831,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        getLogWriter().warning("Failure of async invocation on VM " + 
+        LogWriterUtils.getLogWriter().warning("Failure of async invocation on VM " + 
             this.vm[count] + " exception thrown " + async[count].getException());
         throw async[count].getException();
       }
@@ -862,12 +865,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Validation of bucket distribution failed on " + count,
+        Assert.fail("Validation of bucket distribution failed on " + count,
             async[count].getException());
       }
     }
@@ -948,12 +951,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
           }
           if (redundancyManageFlag == 0) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "validateRedundancy() - Redundancy not satisfied for the partition region  : "
                     + pr.getName());
           }
           else {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "validateRedundancy() - Redundancy satisfied for the partition region  : "
                     + pr.getName());
           }
@@ -1139,7 +1142,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           assertTrue(pr.getRegionAdvisor().getNumProfiles() > 0);
           assertTrue(pr.getRegionAdvisor().getNumDataStores() > 0);
           final int bucketSetSize = pr.getRegionAdvisor().getCreatedBucketsCount();
-          getLogWriter().info("BucketSet size " + bucketSetSize);
+          LogWriterUtils.getLogWriter().info("BucketSet size " + bucketSetSize);
           if (bucketSetSize != 0) {
             Set buckets = pr.getRegionAdvisor().getBucketSet();
             Iterator it  = buckets.iterator();
@@ -1153,7 +1156,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
                 numBucketsWithStorage++;
               }
             } catch (NoSuchElementException end) {
-              getLogWriter().info("BucketSet iterations " + numBucketsWithStorage);
+              LogWriterUtils.getLogWriter().info("BucketSet iterations " + numBucketsWithStorage);
             }
             fail("There should be no buckets assigned");
           }
@@ -1185,7 +1188,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           
           assertNotNull(pr.getDataStore());
           final int localBSize = pr.getDataStore().getBucketsManaged();
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "validateBucketsDistribution() - Number of bukctes for "
                   + pr.getName() + " : "  + localBSize);
 
@@ -1260,7 +1263,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           cache.createRegion(prPrefix + i,
               createRegionAttrs(redundancy, localMaxMem, numBuckets));
         }
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "createMultiplePartitionRegion() - Partition Regions Successfully Completed ");
       }
@@ -1306,7 +1309,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           Set bucketsWithStorage = pr.getRegionAdvisor().getBucketSet();
           assertEquals(expectedNumBuckets, bucketsWithStorage.size());
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Total Number of buckets validated in partition region");
       }
     };
@@ -1379,7 +1382,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     createPartitionRegion(vmList, midIndexForRegion, endIndexForNode,
         localMaxMemory, redundancyTwo);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
   }
@@ -1391,7 +1394,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
   {
     for (int i = 0; i < 4; i++) {
       if (vm[i] == null)
-        getLogWriter().fine("VM is null" + vm[i]);
+        LogWriterUtils.getLogWriter().fine("VM is null" + vm[i]);
       vm[i].invoke(calculateMemoryOfPartitionRegion(i, i + 1));
     }
   }
@@ -1427,7 +1430,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         while (sizeItr.hasNext()) {
           assertEquals(sizeItr.next(), objSize);
         }
-        getLogWriter().info("Size of partition region on each node is equal");
+        LogWriterUtils.getLogWriter().info("Size of partition region on each node is equal");
       }
     };
     vm[0].invoke(testTotalMemory);
@@ -1505,7 +1508,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     /** testing whether exception occurred */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
@@ -1530,9 +1533,9 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
             .getRegion(Region.SEPARATOR + regionName);
         for (int i = 0; i < MAX_SIZE * 2; i++) {
           pr.put(key + i, Obj);
-          getLogWriter().info("MAXSIZE : " + i);
+          LogWriterUtils.getLogWriter().info("MAXSIZE : " + i);
         }
-        getLogWriter().info("Put successfully done for vm" + key);
+        LogWriterUtils.getLogWriter().info("Put successfully done for vm" + key);
       }
     };
     return putForLocalMaxMemory;

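Every hunk in the file above applies the same mechanical migration, from helpers inherited through DistributedTestCase to the new static dunit utility classes (LogWriterUtils, ThreadUtils, Assert, Invoke). A minimal sketch of that pattern in a hypothetical helper class; the class and method names are illustrative, and only the utility calls themselves are taken from the hunks above:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class DUnitMigrationSketch {
      static void joinAndCheck(AsyncInvocation[] async) {
        // Was: getLogWriter(), DistributedTestCase.join(async, 30 * 1000, getLogWriter()),
        // and fail(msg, throwable), all inherited from the dunit base class.
        LogWriterUtils.getLogWriter().info("waiting for async invocations");
        for (int count = 0; count < async.length; count++) {
          // the log-writer argument is dropped in the new ThreadUtils.join(...)
          ThreadUtils.join(async[count], 30 * 1000);
        }
        for (int count = 0; count < async.length; count++) {
          if (async[count].exceptionOccurred()) {
            Assert.fail("Exception during " + count, async[count].getException());
          }
        }
      }
    }
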
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
index d458947..8fd6f4e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
@@ -27,10 +27,13 @@ import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test to verify the meta-data cleanUp done at the time of cache close Op. This
@@ -99,7 +102,7 @@ public class PartitionedRegionCacheCloseDUnitTest extends
             key = new Integer(k);
             pr.put(key, rName + k);
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info("VM0 Done put successfully for PR = " + rName + j);
         }
       }
@@ -120,16 +123,16 @@ public class PartitionedRegionCacheCloseDUnitTest extends
             key = new Integer(k);
             pr.put(key, rName + k);
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info("VM1 Done put successfully for PR = " + rName + j);
         }
       }
     });
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
+    ThreadUtils.join(async1, 30 * 1000);
 
    if(async0.exceptionOccurred()) {
-     fail("Exception during async0", async0.getException());
+     Assert.fail("Exception during async0", async0.getException());
    }
    
     // Here we would close cache on one of the vms.
@@ -200,7 +203,7 @@ public class PartitionedRegionCacheCloseDUnitTest extends
         for (int j = 0; j < MAX_REGIONS; j++) {
           final String regionName = "#" + rName + j;
 
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             
             private Set<Node> nodes;
 

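The cache-close test above also moves the wait helpers to their static homes: the anonymous WaitCriterion now goes through Wait.waitForCriterion, and the bare pause(...) calls elsewhere in this series become Wait.pause(...). A minimal sketch of the call shape, with a made-up condition and class name; the four-argument waitForCriterion signature is the one used later in PartitionedRegionEvictionDUnitTest:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitSketch {
      static void waitUntilEmpty(final java.util.Map<?, ?> metadata) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return metadata.isEmpty();          // hypothetical condition
          }
          public String description() {
            return "metadata still has " + metadata.size() + " entries";
          }
        };
        // wait up to 60s, polling every 1s, and throw if the criterion is never met
        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
      }
    }
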
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
index a81570b..78c70dc 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
@@ -34,10 +34,13 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 @SuppressWarnings("serial")
@@ -65,7 +68,7 @@ public class PartitionedRegionCreationDUnitTest extends
    */
   public void testSequentialCreation() throws Exception
   {
-    getLogWriter().info("*****CREATION TEST ACK STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST ACK STARTED*****");
     final String name = getUniqueName();
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -88,7 +91,7 @@ public class PartitionedRegionCreationDUnitTest extends
     vm1.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm2.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm3.invoke(getCacheSerializableRunnableForPRValidate(name));
-    getLogWriter().info("*****CREATION TEST ACK ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST ACK ENDED*****");
   }
 
   /**
@@ -101,7 +104,7 @@ public class PartitionedRegionCreationDUnitTest extends
   // 2/8/06
   public void testConcurrentCreation() throws Throwable
   {
-    getLogWriter().info("*****CREATION TEST NO_ACK STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST NO_ACK STARTED*****");
     final String name = getUniqueName();
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -121,12 +124,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -135,7 +138,7 @@ public class PartitionedRegionCreationDUnitTest extends
     vm1.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm2.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm3.invoke(getCacheSerializableRunnableForPRValidate(name));
-    getLogWriter().info("*****CREATION TEST NO_ACK ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST NO_ACK ENDED*****");
   }
 
   /**
@@ -251,13 +254,13 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     th.join(30 * 1000);
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
 
@@ -424,7 +427,7 @@ public class PartitionedRegionCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    getLogWriter().info("*****INITIALIZATION TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****INITIALIZATION TEST STARTED*****");
     int AsyncInvocationArrSize = 8;
     AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
     async[0] = vm0.invokeAsync(getCacheSerializableRunnableForPRCreate(name,
@@ -438,12 +441,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -454,15 +457,15 @@ public class PartitionedRegionCreationDUnitTest extends
     
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
   
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
-    getLogWriter().info("*****INITIALIZATION TEST ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****INITIALIZATION TEST ENDED*****");
   }
 
   /**
@@ -480,7 +483,7 @@ public class PartitionedRegionCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    getLogWriter().info("*****REGISTRATION TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****REGISTRATION TEST STARTED*****");
     int AsyncInvocationArrSize = 8;
     AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
     async[0] = vm0.invokeAsync(getCacheSerializableRunnableForPRCreate(name,
@@ -494,12 +497,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -514,15 +517,15 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
   
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
-    getLogWriter().info("*****REGISTRATION TEST ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****REGISTRATION TEST ENDED*****");
   }
   
   /**
@@ -532,7 +535,7 @@ public class PartitionedRegionCreationDUnitTest extends
    */
   public void testPartitionRegionPersistenceConflicts() throws Throwable
   {
-    addExpectedException("IllegalStateException");
+    IgnoredException.addIgnoredException("IllegalStateException");
     final String name = getUniqueName();
     // Cache cache = getCache();
     Host host = Host.getHost(0);
@@ -540,13 +543,13 @@ public class PartitionedRegionCreationDUnitTest extends
     VM dataStore1 = host.getVM(1);
     VM accessor0 = host.getVM(2);
     VM accessor1 = host.getVM(3);
-    getLogWriter().info("*****PERSISTENCE CONFLICTS TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****PERSISTENCE CONFLICTS TEST STARTED*****");
     accessor0.invoke(getCacheSerializableRunnableForPRPersistence(name, 0, false, false));
     accessor1.invoke(getCacheSerializableRunnableForPRPersistence(name, 0, true, true));
     dataStore0.invoke(getCacheSerializableRunnableForPRPersistence(name, 100, true, false));
     dataStore1.invoke(getCacheSerializableRunnableForPRPersistence(name, 100, false, true));
 
-     getLogWriter().info("*****PERSISTENCE CONFLICTS TEST ENDED*****");
+     LogWriterUtils.getLogWriter().info("*****PERSISTENCE CONFLICTS TEST ENDED*****");
   }
 
   /**
@@ -626,7 +629,7 @@ public class PartitionedRegionCreationDUnitTest extends
                 + name + " configs do not exists in  region - "
                 + root.getName());
         }
-        getLogWriter().info(" PartitionedRegionCreationTest PartionedRegionRegistrationTest() Successfully Complete ..  ");
+        LogWriterUtils.getLogWriter().info(" PartitionedRegionCreationTest PartionedRegionRegistrationTest() Successfully Complete ..  ");
       }
     };
     return (CacheSerializableRunnable)registerPrRegion;
@@ -692,11 +695,11 @@ public class PartitionedRegionCreationDUnitTest extends
             getCache().getLogger().warning(
                 "Creation caught IllegalStateException", ex);
             if (exceptionType.equals("GLOBAL"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
             if (exceptionType.equals("REDUNDANCY"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
             if (exceptionType.equals("DIFFREG"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
           }
           assertNotNull("Partitioned Region " + regionName + " not in cache",
               cache.getRegion(regionName));
@@ -730,11 +733,11 @@ public class PartitionedRegionCreationDUnitTest extends
               getCache().getLogger().warning(
                   "Creation caught IllegalStateException", ex);
               if (exceptionType.equals("GLOBAL"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
               if (exceptionType.equals("REDUNDANCY"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
               if (exceptionType.equals("DIFFREG"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
             }
             assertNotNull("Partitioned Region " + rName + " not in cache",
                 cache.getRegion(rName));
@@ -867,7 +870,7 @@ public class PartitionedRegionCreationDUnitTest extends
       RegionAttributes regionAttribs = attr.create();
       PartitionedRegion accessor = (PartitionedRegion)cache.createRegion(
           "PR1", regionAttribs);
-      getLogWriter().info("Region created in VM1.");
+      LogWriterUtils.getLogWriter().info("Region created in VM1.");
       assertEquals(accessor.getTotalNumberOfBuckets(),
           PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT);
       try {

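One further replacement shows up in the creation test above: the inherited addExpectedException(...) helper becomes a static call on IgnoredException. A minimal sketch of just that call shape; the wrapper class and method are hypothetical:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class IgnoredExceptionSketch {
      static void ignorePersistenceConflicts() {
        // Was: addExpectedException("IllegalStateException");
        IgnoredException.addIgnoredException("IllegalStateException");
      }
    }
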
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
index 498d835..cd6e980 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.internal.logging.PureLogWriter;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.standalone.DUnitLauncher;
 
@@ -91,15 +92,16 @@ public class PartitionedRegionDUnitTestCase extends CacheTestCase
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache (mainly
    * because we want to destroy any existing PartitionedRegions)
    */
-  public void tearDown2() throws Exception
-  {
-    try {
-      closeCache();
-      invokeInEveryVM(CacheTestCase.class, "closeCache");
-    } finally {
-      super.tearDown2();
-    }
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    preTearDownPartitionedRegionDUnitTest();
+    closeCache();
+    Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
   }
+  
+  protected void preTearDownPartitionedRegionDUnitTest() throws Exception {
+  }
+  
   public static void caseSetUp() {
     DUnitLauncher.launchIfNeeded();
     // this makes sure we don't have any connection left over from previous 
tests
@@ -197,10 +199,10 @@ public class PartitionedRegionDUnitTestCase extends CacheTestCase
                 prPrefix + i,
                 PartitionedRegionTestHelper.createRegionAttrsForPR(redundancy,
                     localmaxMemory, recoveryDelay));
-            getLogWriter().info("Created Region  new  --- " + prPrefix + i);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Created Region  new  --- " + prPrefix + i);
           } catch (RegionExistsException ignore) {}
         }
-        getLogWriter().info("getCreateMultiplePRregion() - Partition Regions Successfully Completed ");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("getCreateMultiplePRregion() - Partition Regions Successfully Completed ");
       }
     };
   }

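Besides the static utilities, the base-class diff above shows the other recurring change in this commit: tearDown2() overrides are replaced by the framework's pre/post tear-down hooks, with PartitionedRegionDUnitTestCase exposing its own preTearDownPartitionedRegionDUnitTest() extension point. A minimal sketch of how a subclass would now plug in cleanup, assuming the usual JUnit-3 style String-name constructor; the subclass itself is hypothetical:

    public class MyPartitionedRegionDUnitTest extends PartitionedRegionDUnitTestCase {

      public MyPartitionedRegionDUnitTest(String name) {
        super(name);
      }

      // Runs from preTearDownCacheTestCase() before the caches are closed,
      // replacing what used to live in a tearDown2() override.
      @Override
      protected void preTearDownPartitionedRegionDUnitTest() throws Exception {
        // test-specific cleanup would go here
      }
    }
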
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
index 1d89967..337e6ce 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
@@ -26,7 +26,9 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -42,11 +44,9 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
     super(name);
   }
   
-  
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         InternalResourceManager.setResourceObserver(null);
       }
@@ -54,7 +54,6 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
     InternalResourceManager.setResourceObserver(null);
   }
 
-
   public void testNoRecovery() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -156,7 +155,7 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
             fail("Redundancy recovery did not happen within 60 seconds");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
index 4733fff..e5a95cd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
@@ -28,11 +28,14 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This test aims to test the destroyRegion functionality.
@@ -77,7 +80,7 @@ public class PartitionedRegionDestroyDUnitTest extends
           cache.createRegion(PR_PREFIX + i,
               createRegionAttrsForPR(0, 200));
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Successfully created " + MAX_REGIONS + " PartitionedRegions.");
       }
     };
@@ -156,7 +159,7 @@ public class PartitionedRegionDestroyDUnitTest extends
           }
         }
         catch (RegionDestroyedException e) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
              "RegionDestroyedException occured for Region = " + PR_PREFIX + j);
         }
         getCache().getLogger().info("<ExpectedException action=remove>" + 
@@ -164,14 +167,14 @@ public class PartitionedRegionDestroyDUnitTest extends
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if(async1.exceptionOccurred()) {
-      fail("async1 failed", async1.getException());
+      Assert.fail("async1 failed", async1.getException());
     }
     final String expectedExceptions = "com.gemstone.gemfire.distributed.internal.ReplyException"; 
     addExceptionTag(expectedExceptions);
     
-    pause(1000); // give async a chance to grab the regions...
+    Wait.pause(1000); // give async a chance to grab the regions...
     
     vm0.invoke(new CacheSerializableRunnable("destroyPRRegions") {
 
@@ -225,18 +228,18 @@ public class PartitionedRegionDestroyDUnitTest extends
 
         // Assert that all PartitionedRegions are gone
         assertEquals(0, rootRegion.size());
-        getLogWriter().info("allPartitionedRegions size() =" + rootRegion.size());
+        LogWriterUtils.getLogWriter().info("allPartitionedRegions size() =" + rootRegion.size());
         assertEquals("ThePrIdToPR Map size is:"+PartitionedRegion.prIdToPR.size()+" instead of 0", MAX_REGIONS, PartitionedRegion.prIdToPR.size());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PartitionedRegion.prIdToPR.size() ="
                 + PartitionedRegion.prIdToPR.size());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "# of Subregions of root Region after destroy call = "
                 + rootRegion.subregions(false).size());
         Iterator itr = (rootRegion.subregions(false)).iterator();
         while (itr.hasNext()) {
           Region rg = (Region)itr.next();
-          getLogWriter().info("Root Region SubRegionName = " + rg.getName());
+          LogWriterUtils.getLogWriter().info("Root Region SubRegionName = " + rg.getName());
 //          assertEquals("REGION NAME FOUND:"+rg.getName(),-1, rg.getName().indexOf(
 //              PartitionedRegionHelper.BUCKET_2_NODE_TABLE_PREFIX));
           assertEquals("regionFound that should be gone!:"+rg.getName(),-1, rg.getName().indexOf(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
index dbaa433..985656d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
@@ -23,6 +23,7 @@ import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -85,7 +86,7 @@ public class PartitionedRegionEntryCountDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
index 5fe0b45..0319171 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
@@ -46,11 +46,13 @@ import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
 import com.gemstone.gemfire.internal.cache.lru.HeapLRUCapacityController;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
   public PartitionedRegionEvictionDUnitTest(final String name) {
@@ -160,7 +162,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
               return excuse;
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+          Wait.waitForCriterion(wc, 60000, 1000, true);
             
           int entriesEvicted = 0;
           
@@ -298,7 +300,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
               return excuse;
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+          Wait.waitForCriterion(wc, 60000, 1000, true);
           
           entriesEvicted = ((AbstractLRURegionMap)pr.entries)._getLruList().stats()
               .getEvictions();
@@ -362,7 +364,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -543,7 +545,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -661,7 +663,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -1723,7 +1725,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
index a787a6f..3f4edf1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
@@ -36,11 +36,13 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserver;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -102,7 +104,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           }
           assertNotNull(partitionedregion);
         } catch (InterruptedException e) {
-          fail("interrupted",e);
+          Assert.fail("interrupted",e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }
@@ -219,7 +221,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
             fail("recovery didn't happen in 60 seconds");
           }
         } catch (InterruptedException e) {
-          fail("recovery wait interrupted", e);
+          Assert.fail("recovery wait interrupted", e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }
@@ -246,14 +248,14 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
         public void run2() throws CacheException {
           getCache().getLogger().info("<ExpectedException action=add>" + 
               expectedExceptions + "</ExpectedException>");
-          getLogWriter().info("<ExpectedException action=add>" + 
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=add>" + 
                   expectedExceptions + "</ExpectedException>");
         }
       };
     SerializableRunnable removeExpectedExceptions = 
       new CacheSerializableRunnable("removeExpectedExceptions") {
         public void run2() throws CacheException {
-          getLogWriter().info("<ExpectedException action=remove>" + 
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=remove>" + 
                     expectedExceptions + "</ExpectedException>");      
           getCache().getLogger().info("<ExpectedException action=remove>" + 
               expectedExceptions + "</ExpectedException>");
@@ -271,7 +273,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           for (int k = 0; k < 10; k++) {
             pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
           }
-          getLogWriter().info("VM0 Done put successfully for PR = " + PR_PREFIX
+          LogWriterUtils.getLogWriter().info("VM0 Done put successfully for PR = " + PR_PREFIX
               + j);
         }
       }
@@ -288,7 +290,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           for (int k = 10; k < 20; k++) {
             pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
           }
-          getLogWriter().info("VM1 Done put successfully for PR = " + PR_PREFIX
+          LogWriterUtils.getLogWriter().info("VM1 Done put successfully for PR = " + PR_PREFIX
               + j);
         }
       }
@@ -297,7 +299,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
     // dataStore1.invoke(addExpectedExceptions);
     AsyncInvocation async0 = dataStore0.invokeAsync(dataStore0Puts);
     // AsyncInvocation  async1 = dataStore1.invokeAsync(dataStore1Puts);
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
     // async1.join();
     dataStore0.invoke(removeExpectedExceptions);
     // dataStore1.invoke(removeExpectedExceptions);
@@ -317,11 +319,11 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
     
     async0 = dataStore0.invokeAsync(dataStore0Puts);
     // async1 = dataStore1.invokeAsync(dataStore1Puts);
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
     // async1.join();
     
     if (async0.exceptionOccurred()) {
-      fail("async0 failed", async0.getException());
+      Assert.fail("async0 failed", async0.getException());
     }
     // assertFalse(async1.exceptionOccurred());
     
@@ -378,7 +380,7 @@ public class PartitionedRegionHADUnitTest extends 
PartitionedRegionDUnitTestCase
       // This accessor should NOT have picked up any buckets.
       assertFalse(vm3LBRsize != 0);
       int vm2B2Nsize = 
((Integer)dataStore2.invoke(validateBucketsOnNode)).intValue();
-      getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
+      LogWriterUtils.getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
       assertEquals(vm2B2Nsize, vm2LBRsize);
     }
   }
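
For reference, the join-then-verify idiom that the hunks above converge on looks
roughly like this (a minimal sketch: Assert, AsyncInvocation, LogWriterUtils,
ThreadUtils and VM are the dunit helpers from the new imports, while "vm" and
"doPuts" are illustrative placeholders rather than names from this test):

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Kick off the workload asynchronously in a remote dunit VM.
    AsyncInvocation async = vm.invokeAsync(doPuts);

    // Wait up to 30 seconds; this replaces the old
    // DistributedTestCase.join(async, 30 * 1000, getLogWriter()) overload.
    ThreadUtils.join(async, 30 * 1000);

    // Report a remote failure together with its cause; plain JUnit fail(String)
    // cannot carry a Throwable, Assert.fail(String, Throwable) can.
    if (async.exceptionOccurred()) {
      Assert.fail("async work failed in remote VM", async.getException());
    }
    LogWriterUtils.getLogWriter().info("async work completed in remote VM");

The same substitution recurs in the later test classes touched by this commit.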

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
index 726423d..69bebdf 100755
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
@@ -32,11 +32,14 @@ import 
com.gemstone.gemfire.cache30.CertifiableTestCacheListener;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.NanoTimer;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -87,7 +90,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     final int redundancy = 1;
     createPartitionRegionAsynch("testMetaDataCleanupOnSinglePRNodeFail_",
        startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - PartitionedRegion's 
created at all VM nodes");
     
@@ -97,7 +100,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     // disconnect vm0.
     DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, 
"disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnSinglePRNodeFail() - VM = " + dsMember
             + " disconnected from the distributed system ");
     
@@ -105,7 +108,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     vmArr[1].invoke(validateNodeFailMetaDataCleanUp(dsMember));
     vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember));
     vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed 
node config metadata complete");
 
@@ -114,11 +117,11 @@ public class 
PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember));
     vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed 
node bucket2Node Region metadata complete");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() Completed Successfuly 
..........");
   }
@@ -133,7 +136,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
         Cache c = getCache();
         Region rootReg = PartitionedRegionHelper.getPRRoot(c);
 //        Region allPRs = PartitionedRegionHelper.getPRConfigRegion(rootReg, 
c);
-        rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+        rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
       }
     };
   
@@ -196,7 +199,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     final int redundancy = 1;
     createPartitionRegionAsynch("testMetaDataCleanupOnMultiplePRNodeFail_",
        startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - PartitionedRegion's 
created at all VM nodes");
     
@@ -205,7 +208,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     // disconnect vm0
     DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, 
"disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember
             + " disconnected from the distributed system ");
 
@@ -228,7 +231,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     //  disconnect vm1
     DistributedMember dsMember2 = (DistributedMember)vmArr[1].invoke(this, 
"disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember2
             + " disconnected from the distributed system ");
 
@@ -251,18 +254,18 @@ public class 
PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember2));
     vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember2));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed 
nodes config metadata complete");
 
     vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2));
     vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed 
nodes bucket2Node Region metadata complete");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() Completed Successfuly 
..........");
   }
@@ -290,8 +293,8 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
         assertEquals(2, cls.length);
         CertifiableTestCacheListener ctcl = (CertifiableTestCacheListener) 
cls[1];
         
-        getLogWriter().info("Listener update (" + ctcl.updates.size() + "): " 
+ ctcl.updates) ;
-        getLogWriter().info("Listener destroy: (" + ctcl.destroys.size() + "): 
" + ctcl.destroys) ;
+        LogWriterUtils.getLogWriter().info("Listener update (" + 
ctcl.updates.size() + "): " + ctcl.updates) ;
+        LogWriterUtils.getLogWriter().info("Listener destroy: (" + 
ctcl.destroys.size() + "): " + ctcl.destroys) ;
 
         Iterator itrator = rootReg.keySet().iterator();
         for (Iterator itr = itrator; itr.hasNext();) {
@@ -378,7 +381,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
     DistributedMember dsMember = ((InternalDistributedSystem)getCache()
         .getDistributedSystem()).getDistributionManager().getId();
     getCache().getDistributedSystem().disconnect();
-    getLogWriter().info("disconnectMethod() completed ..");
+    LogWriterUtils.getLogWriter().info("disconnectMethod() completed ..");
     return dsMember;
   }
   
@@ -396,12 +399,12 @@ public class 
PartitionedRegionHAFailureAndRecoveryDUnitTest extends
           redundancy, localMaxMemory, recoveryDelay));
     }
     for (int count2 = 0; count2 < async.length; count2++) {
-        DistributedTestCase.join(async[count2], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count2], 30 * 1000);
      }
     
     for (int count2 = 0; count2 < async.length; count2++) {
       if (async[count2].exceptionOccurred()) {
-        fail("exception during " + count2, async[count2].getException());
+        Assert.fail("exception during " + count2, 
async[count2].getException());
       }
     }  
   }
@@ -446,17 +449,17 @@ public class 
PartitionedRegionHAFailureAndRecoveryDUnitTest extends
         assertEquals(bucketOwners.size(), redundantCopies + 1);
         DistributedMember bucketOwner = (DistributedMember) 
bucketOwners.iterator().next();
         assertNotNull(bucketOwner);
-        getLogWriter().info("Selected distributed member " + bucketOwner + " 
to disconnect because it hosts bucketId " + bucketId);
+        LogWriterUtils.getLogWriter().info("Selected distributed member " + 
bucketOwner + " to disconnect because it hosts bucketId " + bucketId);
         return bucketOwner;
       }
     });
     assertNotNull(bucketHost);
     
     // Disconnect the selected host 
-    Map stillHasDS = invokeInEveryVM(new SerializableCallable("Disconnect 
provided bucketHost") {
+    Map stillHasDS = Invoke.invokeInEveryVM(new 
SerializableCallable("Disconnect provided bucketHost") {
       public Object call() throws Exception {
         if (getSystem().getDistributedMember().equals(bucketHost)) {
-          getLogWriter().info("Disconnecting distributed member " + 
getSystem().getDistributedMember());
+          LogWriterUtils.getLogWriter().info("Disconnecting distributed member 
" + getSystem().getDistributedMember());
           disconnectFromDS();
           return Boolean.FALSE;
         }
@@ -491,7 +494,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
                   TimeUnit.MILLISECONDS.sleep(250);
                 }
                 catch (InterruptedException e) {
-                  fail("Interrupted, ah!", e);
+                  Assert.fail("Interrupted, ah!", e);
                 }
               }
             }
@@ -519,7 +522,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest 
extends
                     assertEquals(pr.getRedundantCopies() + 1, owners.size());
                     break; // retry loop
                   } catch (ForceReattemptException retryIt) {
-                    getLogWriter().info("Need to retry validation for bucket 
in PR " + pr, retryIt);
+                    LogWriterUtils.getLogWriter().info("Need to retry 
validation for bucket in PR " + pr, retryIt);
                   }
                 } while (true); // retry loop
               } // bucketId loop
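
Reassembled from the hunk above, the disconnect step now goes through the static
Invoke.invokeInEveryVM(...) rather than the method inherited from
DistributedTestCase. Roughly (only the closing "return Boolean.TRUE" branch is
inferred from the stillHasDS name; everything else is taken from the hunk):

    Map stillHasDS = Invoke.invokeInEveryVM(new SerializableCallable("Disconnect provided bucketHost") {
      public Object call() throws Exception {
        // Only the VM that currently hosts the selected bucket disconnects.
        if (getSystem().getDistributedMember().equals(bucketHost)) {
          LogWriterUtils.getLogWriter().info("Disconnecting distributed member " + getSystem().getDistributedMember());
          disconnectFromDS();
          return Boolean.FALSE;  // this VM no longer has a distributed system
        }
        return Boolean.TRUE;     // every other VM keeps its connection
      }
    });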

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
index 4bbdbe4..28e1bfb 100755
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
@@ -34,6 +34,7 @@ import com.gemstone.gemfire.cache.util.ObjectSizer;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.lru.Sizeable;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -177,7 +178,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest 
extends
             i++;
           }
           assertEquals(1, pr.getDataStore().localBucket2RegionMap.size());
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
           "putObjectInPartitionRegion() - Put operation done successfully");
         }
         else {
@@ -190,7 +191,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest 
extends
             fail("Bucket gets created even if no memory is available");
           }
           catch (PartitionedRegionStorageException e) {
-            getLogWriter()
+            LogWriterUtils.getLogWriter()
             .info(
             "putObjectInPartitionRegion()- got correct 
PartitionedRegionStorageException while creating bucket when no memory is 
available");
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
index a9b7619..8508587 100755
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class PartitionedRegionLocalMaxMemoryOffHeapDUnitTest 
extends Partitioned
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownPartitionedRegionDUnitTest() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class PartitionedRegionLocalMaxMemoryOffHeapDUnitTest 
extends Partitioned
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override
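
Both off-heap tests in this commit stop overriding tearDown2() (which obliged every
subclass to call super.tearDown2() in a finally block) and instead implement a final
pre-teardown hook that the base test case invokes before its own cleanup: here it is
preTearDownPartitionedRegionDUnitTest(), and in PartitionedRegionOffHeapEvictionDUnitTest
below it is preTearDownCacheTestCase(). A minimal sketch of the new shape (the body of
the orphan check is elided, since this hunk leaves it unchanged):

    @Override
    protected final void preTearDownPartitionedRegionDUnitTest() throws Exception {
      SerializableRunnable checkOrphans = new SerializableRunnable() {
        @Override
        public void run() {
          // existing off-heap orphan check, unchanged by this commit
        }
      };
      // Run the check in every remote VM and then locally. The base class calls
      // this hook itself and still performs its own teardown afterwards, so the
      // old try { ... } finally { super.tearDown2(); } wrapper is no longer needed.
      Invoke.invokeInEveryVM(checkOrphans);
      checkOrphans.run();
    }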

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
index 6ace0a5..22d1fd7 100644
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
@@ -20,9 +20,11 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -93,22 +95,22 @@ public class PartitionedRegionMultipleDUnitTest extends
     /** creationg and performing put(),get() operations on Partition Region */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully 
Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully 
Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Put() Operation done 
Successfully in Partition Regions ");
     getInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully 
Validated ");
   }
@@ -148,38 +150,38 @@ public class PartitionedRegionMultipleDUnitTest extends
      */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Partition Regions 
Successfully Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Partition Regions 
Successfully Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Put() Operation done 
Successfully in Partition Regions ");
     destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Destroy(Key) Operation done 
Successfully in Partition Regions ");
     getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion, afterPutFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Get() Operation after destoy 
keys done Successfully in Partition Regions ");
     putDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Put() Operation after 
destroy keys done Successfully in Partition Regions ");
     afterPutFlag = 1;
     getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion, afterPutFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Get() Operation after Put() 
done Successfully in Partition Regions ");
   }
@@ -210,22 +212,22 @@ public class PartitionedRegionMultipleDUnitTest extends
     /** creating Partition Regions and testing for the APIs contains() */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions 
Successfully Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions 
Successfully Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Put() Operation 
done Successfully in Partition Regions ");
     destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Destroy(Key) 
Operation done Successfully in Partition Regions ");
     async[0] = vm0.invokeAsync(validateContainsAPIForPartitionRegion(
@@ -238,16 +240,16 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion));
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 120 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 120 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
    }
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Validation of 
Contains APIs done Successfully in Partition Regions ");
   }
@@ -298,12 +300,12 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion));
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -334,12 +336,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -367,12 +369,12 @@ public class PartitionedRegionMultipleDUnitTest extends
         endIndexForRegion));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) { 
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
      
     for (int count = 0; count < AsyncInvocationArrSize; count++) { 
       if (async[count].exceptionOccurred()) {
-        fail("Failed due to exception: "+ async[count].getException(),
+        Assert.fail("Failed due to exception: "+ async[count].getException(),
             async[count].getException());
       }
     }  
@@ -406,12 +408,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
    }   
   }
@@ -464,7 +466,7 @@ public class PartitionedRegionMultipleDUnitTest extends
             }
           }
 
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - Get() Validations 
done Successfully in Partition Region "
                       + pr.getName());
@@ -479,7 +481,7 @@ public class PartitionedRegionMultipleDUnitTest extends
             }
           }
 
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - containsKey() 
Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -494,7 +496,7 @@ public class PartitionedRegionMultipleDUnitTest extends
               assertTrue(conKey);
             }
           }
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - 
containsValueForKey() Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -508,7 +510,7 @@ public class PartitionedRegionMultipleDUnitTest extends
               assertTrue(conKey);
             }
           }
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - containsValue() 
Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -543,9 +545,9 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion, afterPutFlag));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
         if (async[count].exceptionOccurred()) {
-          fail("exception during " + count, async[count].getException());
+          Assert.fail("exception during " + count, 
async[count].getException());
         }
     }
     
@@ -586,12 +588,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
index 042e1f7..002a5f6 100644
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
@@ -23,6 +23,7 @@ import 
com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import 
com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.control.OffHeapMemoryMonitor;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class PartitionedRegionOffHeapEvictionDUnitTest extends
@@ -33,7 +34,7 @@ public class PartitionedRegionOffHeapEvictionDUnitTest extends
   }  
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -43,12 +44,8 @@ public class PartitionedRegionOffHeapEvictionDUnitTest 
extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
----------------------------------------------------------------------
diff --git 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
index 496026d..f35b39a 100755
--- 
a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
+++ 
b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
@@ -23,9 +23,11 @@ import java.util.*;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.*;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -83,7 +85,7 @@ public class PartitionedRegionPRIDDUnitTest extends
     // Create 1/2 * MAX_REGIONS regions in VM 0,1,2 with scope D_ACK.
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, prPrefix);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPRIDGenerationInMultiplePartitionRegion() - Partition regions 
on 3 nodes successfully created");
 
@@ -99,7 +101,7 @@ public class PartitionedRegionPRIDDUnitTest extends
     // VM 3 contains regions from id MAX_REGIONS to 2*MAX_REGIONS only.
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, pr2_redundancy, prPrefix);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPRIDGenerationInMultiplePartitionRegion() - Partition regions 
on 4 nodes successfully created");
     // validating PRID generation for multiple partition regions    
@@ -116,12 +118,12 @@ public class PartitionedRegionPRIDDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("VM " + count 
+        Assert.fail("VM " + count 
             + " encountered this exception during async invocation", 
             async[count].getException());
       }
@@ -206,10 +208,10 @@ public class PartitionedRegionPRIDDUnitTest extends
         if (prIdPRSet.size() != PartitionedRegion.prIdToPR.size())
           fail("Duplicate PRID are generated in prIdToPR");
 
-        getLogWriter().info("Size of allPartition region : " + prIdSet.size());
-        getLogWriter()
+        LogWriterUtils.getLogWriter().info("Size of allPartition region : " + 
prIdSet.size());
+        LogWriterUtils.getLogWriter()
             .info("Size of prIdToPR region     : " + prIdPRSet.size());
-        getLogWriter().info("PRID generated successfully");
+        LogWriterUtils.getLogWriter().info("PRID generated successfully");
       }
     };
     return validatePRID;
@@ -233,12 +235,12 @@ public class PartitionedRegionPRIDDUnitTest extends
       numNodes++;
     }
     for (int i = 0; i < numNodes; i++) {
-      DistributedTestCase.join(async[i], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[i], 30 * 1000);
     }
     
     for (int i = 0; i < numNodes; i++) {
       if (async[i].exceptionOccurred()) {
-        fail("VM " + i 
+        Assert.fail("VM " + i 
             + " encountered this exception during async invocation", 
             async[i].getException());
       }

