http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java new file mode 100755 index 0000000..711500e --- /dev/null +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java @@ -0,0 +1,617 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.gemstone.gemfire.distributed.internal.locks; + +import static org.junit.Assert.*; + +import java.util.*; + +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.gemstone.gemfire.CancelCriterion; +import com.gemstone.gemfire.LogWriter; +import com.gemstone.gemfire.SystemFailure; +import com.gemstone.gemfire.internal.logging.LocalLogWriter; +import com.gemstone.gemfire.test.junit.categories.IntegrationTest; +import com.gemstone.gemfire.internal.logging.InternalLogWriter; + +import dunit.DistributedTestCase; +import dunit.DistributedTestCase.WaitCriterion; + +/** + * Tests the Collaboration Lock used internally by dlock service. + * + * @author Kirk Lund + * @since 4.1.1 + */ +@Category(IntegrationTest.class) +@Ignore("Test is broken and was named CollaborationJUnitDisabledTest") +public class CollaborationJUnitTest { + + protected LogWriter log = new LocalLogWriter(InternalLogWriter.INFO_LEVEL); + protected Collaboration collaboration; + + @Before + public void setUp() throws Exception { + this.collaboration = new Collaboration(new CancelCriterion() { + @Override + public String cancelInProgress() { + return null; + } + @Override + public RuntimeException generateCancelledException(Throwable e) { + return null; + } + }); + } + + @After + public void tearDown() throws Exception { + this.collaboration = null; + } + + protected volatile boolean flagTestBlocksUntilRelease = false; + protected volatile boolean threadBStartedTestBlocksUntilRelease = false; + + @Test + public void testBlocksUntilRelease() throws Exception { + this.log.info("[testBlocksUntilRelease]"); + Thread threadA = new Thread(group, new Runnable() { + @Override + public void run() { + collaboration.acquireUninterruptibly("topicA"); + try { + flagTestBlocksUntilRelease = true; + while(flagTestBlocksUntilRelease) { + try { + Thread.sleep(10); + } + catch (InterruptedException 
ignore) {fail("interrupted");} + } + } + finally { + collaboration.release(); + } + } + }); + + // thread one acquires + threadA.start(); + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return CollaborationJUnitTest.this.flagTestBlocksUntilRelease; + } + @Override + public String description() { + return "waiting for thread"; + } + }; + DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true); + assertTrue(this.collaboration.hasCurrentTopic(threadA)); + + // thread two blocks until one releeases + Thread threadB = new Thread(group, new Runnable() { + @Override + public void run() { + threadBStartedTestBlocksUntilRelease = true; + collaboration.acquireUninterruptibly("topicB"); + try { + flagTestBlocksUntilRelease = true; + WaitCriterion ev2 = new WaitCriterion() { + @Override + public boolean done() { + return !flagTestBlocksUntilRelease; + } + @Override + public String description() { + return "waiting for release"; + } + }; + DistributedTestCase.waitForCriterion(ev2, 20 * 1000, 200, true); + } + finally { + collaboration.release(); + } + } + }); + + // start up threadB + threadB.start(); + ev = new WaitCriterion() { + @Override + public boolean done() { + return threadBStartedTestBlocksUntilRelease; + } + @Override + public String description() { + return "waiting for thread b"; + } + }; + DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true); + + // threadA holds topic and threadB is waiting... + assertTrue(this.collaboration.hasCurrentTopic(threadA)); + assertFalse(this.collaboration.hasCurrentTopic(threadB)); + + // let threadA release so that threadB gets lock + this.flagTestBlocksUntilRelease = false; + DistributedTestCase.join(threadA, 30 * 1000, null); + + // make sure threadB is doing what it's supposed to do... 
+ ev = new WaitCriterion() { + @Override + public boolean done() { + return flagTestBlocksUntilRelease; + } + @Override + public String description() { + return "threadB"; + } + }; + DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true); + // threadB must have lock now... let threadB release + assertTrue(this.collaboration.hasCurrentTopic(threadB)); + this.flagTestBlocksUntilRelease = false; + DistributedTestCase.join(threadB, 30 * 1000, null); + + // collaboration should be free now + assertFalse(this.collaboration.hasCurrentTopic(threadA)); + assertFalse(this.collaboration.hasCurrentTopic(threadB)); + assertFalse(this.collaboration.hasCurrentTopic()); + } + + protected volatile boolean threadAFlag_TestLateComerJoinsIn = false; + protected volatile boolean threadBFlag_TestLateComerJoinsIn = false; + protected volatile boolean threadCFlag_TestLateComerJoinsIn = true; + protected volatile boolean threadDFlag_TestLateComerJoinsIn = false; + + @Test + public void testLateComerJoinsIn() throws Exception { + this.log.info("[testLateComerJoinsIn]"); + + final Object topicA = "topicA"; + final Object topicB = "topicB"; + + // threads one and two acquire + Thread threadA = new Thread(group, new Runnable() { + @Override + public void run() { + collaboration.acquireUninterruptibly(topicA); + try { + threadAFlag_TestLateComerJoinsIn = true; + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return !threadAFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + } + finally { + collaboration.release(); + } + } + }); + threadA.start(); + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return threadAFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return "wait for ThreadA"; + } + }; + DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true); + 
assertTrue(this.collaboration.hasCurrentTopic(threadA)); + assertTrue(this.collaboration.isCurrentTopic(topicA)); + + Thread threadB = new Thread(group, new Runnable() { + @Override + public void run() { + collaboration.acquireUninterruptibly(topicA); + try { + threadBFlag_TestLateComerJoinsIn = true; + WaitCriterion ev2 = new WaitCriterion() { + @Override + public boolean done() { + return !threadBFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev2, 60 * 1000, 200, true); + } + finally { + collaboration.release(); + } + } + }); + threadB.start(); + ev = new WaitCriterion() { + @Override + public boolean done() { + return threadBFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return ""; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + assertTrue(this.collaboration.hasCurrentTopic(threadB)); + + // thread three blocks for new topic + Thread threadC = new Thread(group, new Runnable() { + @Override + public void run() { + threadCFlag_TestLateComerJoinsIn = false; + collaboration.acquireUninterruptibly(topicB); + try { + threadCFlag_TestLateComerJoinsIn = true; + WaitCriterion ev2 = new WaitCriterion() { + @Override + public boolean done() { + return !threadCFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev2, 60 * 1000, 200, true); + } + finally { + collaboration.release(); + } + } + }); + threadC.start(); + ev = new WaitCriterion() { + @Override + public boolean done() { + return threadCFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + assertFalse(this.collaboration.hasCurrentTopic(threadC)); + assertFalse(this.collaboration.isCurrentTopic(topicB)); + + // thread four (lateComer) acquires current topic immediately + 
Thread threadD = new Thread(group, new Runnable() { + @Override + public void run() { + collaboration.acquireUninterruptibly(topicA); + try { + threadDFlag_TestLateComerJoinsIn = true; + while(threadDFlag_TestLateComerJoinsIn) { + try { + Thread.sleep(10); + } + catch (InterruptedException ignore) {fail("interrupted");} + } + } + finally { + collaboration.release(); + } + } + }); + threadD.start(); + ev = new WaitCriterion() { + @Override + public boolean done() { + return threadDFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + assertTrue(this.collaboration.hasCurrentTopic(threadD)); + + // release threadA + this.threadAFlag_TestLateComerJoinsIn = false; + DistributedTestCase.join(threadA, 30 * 1000, null); + assertFalse(this.collaboration.hasCurrentTopic(threadA)); + assertTrue(this.collaboration.hasCurrentTopic(threadB)); + assertFalse(this.collaboration.hasCurrentTopic(threadC)); + assertTrue(this.collaboration.hasCurrentTopic(threadD)); + assertTrue(this.collaboration.isCurrentTopic(topicA)); + assertFalse(this.collaboration.isCurrentTopic(topicB)); + + // release threadB + this.threadBFlag_TestLateComerJoinsIn = false; + DistributedTestCase.join(threadB, 30 * 1000, null); + assertFalse(this.collaboration.hasCurrentTopic(threadB)); + assertFalse(this.collaboration.hasCurrentTopic(threadC)); + assertTrue(this.collaboration.hasCurrentTopic(threadD)); + assertTrue(this.collaboration.isCurrentTopic(topicA)); + assertFalse(this.collaboration.isCurrentTopic(topicB)); + + // release threadD + this.threadDFlag_TestLateComerJoinsIn = false; + DistributedTestCase.join(threadD, 30 * 1000, null); + ev = new WaitCriterion() { + @Override + public boolean done() { + return threadCFlag_TestLateComerJoinsIn; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + 
assertTrue(this.collaboration.hasCurrentTopic(threadC)); + assertFalse(this.collaboration.hasCurrentTopic(threadD)); + assertFalse(this.collaboration.isCurrentTopic(topicA)); + assertTrue(this.collaboration.isCurrentTopic(topicB)); + + // release threadC + this.threadCFlag_TestLateComerJoinsIn = false; + DistributedTestCase.join(threadC, 30 * 1000, null); + assertFalse(this.collaboration.hasCurrentTopic(threadC)); + assertFalse(this.collaboration.isCurrentTopic(topicA)); + assertFalse(this.collaboration.isCurrentTopic(topicB)); + } + + protected List waitingList = Collections.synchronizedList(new ArrayList()); + protected List fairnessList = Collections.synchronizedList(new ArrayList()); + protected volatile boolean runTestFairnessStressfully = true; + + @Test + public void testFairnessStressfully() throws Exception { + this.log.info("[testFairnessStressfully]"); + final int numThreads = 20; + Thread threads[] = new Thread[numThreads]; + + Runnable run = new Runnable() { + public void run() { + boolean released = false; + try { + String uniqueTopic = Thread.currentThread().getName(); + while(runTestFairnessStressfully) { + waitingList.add(uniqueTopic); + collaboration.acquireUninterruptibly(uniqueTopic); + try { + released = false; + fairnessList.add(uniqueTopic); + waitingList.remove(uniqueTopic); + } + finally { + // wait for the other threads to line up... 
+ WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return !runTestFairnessStressfully || waitingList.size() >= numThreads - 1; + } + @Override + public String description() { + return "other threads lining up"; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + collaboration.release(); + released = true; + } + } + } + finally { + if (!released) { + collaboration.release(); + } + } + } + }; + + try { + // many threads loop: acquire and release with unique topic + for (int t = 0; t < threads.length; t++) { + threads[t] = new Thread(group, run, String.valueOf(t)); + threads[t].start(); + } + + log.info("Started all threads... waiting for test to complete."); + + // wait for numThreads * 10 + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return fairnessList.size() >= numThreads * 20; + } + @Override + public String description() { + return "waiting for numThreads * 10"; + } + }; + DistributedTestCase.waitForCriterion(ev, 5 * 60 * 1000, 200, true); + } + finally { + if (this.runTestFairnessStressfully) { + this.runTestFairnessStressfully = false; + } + } + + for (int t = 0; t < threads.length; t++) { + DistributedTestCase.join(threads[t], 30 * 1000, null); + } + + // assert that all topics are acquired in order + // count number of occurrences of each thread + int count[] = new int[numThreads]; + for (int i = 0; i < count.length; i++) { // shouldn't be necessary + count[i] = 0; + } + synchronized(this.fairnessList) { + for (Iterator iter = this.fairnessList.iterator(); iter.hasNext();) { + int id = Integer.valueOf((String)iter.next()).intValue(); + count[id] = count[id]+1; + } + } + + int totalLocks = 0; + int minLocks = Integer.MAX_VALUE; + int maxLocks = 0; + for (int i = 0; i < count.length; i++) { + int locks = count[i]; + this.log.fine("testFairnessStressfully thread-" + i + " acquired topic " + + locks + " times."); + if (locks < minLocks) minLocks = locks; + if (locks > 
maxLocks) maxLocks = locks; + totalLocks = totalLocks + locks; + } + + this.log.info("[testFairnessStressfully] totalLocks=" + totalLocks + + " minLocks=" + minLocks + + " maxLocks=" + maxLocks); + + int expectedLocks = (totalLocks / numThreads) + 1; + + // NOTE: if you turn on fine logs, this deviation may be too small... + // slower machines may also fail depending on thread scheduling + int deviation = (int)(expectedLocks * 0.25); + int lowThreshold = expectedLocks - deviation; + int highThreshold = expectedLocks + deviation; + + this.log.info("[testFairnessStressfully] deviation=" + deviation + + " expectedLocks=" + expectedLocks + + " lowThreshold=" + lowThreshold + + " highThreshold=" + highThreshold); + + // if these assertions keep failing we'll have to rewrite the test + // to handle scheduling of the threads... + + assertTrue("minLocks is less than lowThreshold", + minLocks >= lowThreshold); + assertTrue("maxLocks is greater than highThreshold", + maxLocks <= highThreshold); + } + + @Test + public void testHasCurrentTopic() throws Exception { + this.log.info("[testHasCurrentTopic]"); + assertTrue(!this.collaboration.hasCurrentTopic()); + this.collaboration.acquireUninterruptibly("testHasCurrentTopic"); + try { + assertTrue(this.collaboration.hasCurrentTopic()); + } + finally { + this.collaboration.release(); + } + assertTrue(!this.collaboration.hasCurrentTopic()); + } + + protected volatile boolean flagTestThreadHasCurrentTopic = false; + + @Test + public void testThreadHasCurrentTopic() throws Exception { + this.log.info("[testThreadHasCurrentTopic]"); + Thread thread = new Thread(group, new Runnable() { + @Override + public void run() { + collaboration.acquireUninterruptibly("testThreadHasCurrentTopic"); + try { + flagTestThreadHasCurrentTopic = true; + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return !flagTestThreadHasCurrentTopic; + } + @Override + public String description() { + return null; + } + }; + 
DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + } + finally { + collaboration.release(); + } + } + }); + + // before starting thread, hasCurrentTopic(thread) returns false + assertTrue(!this.collaboration.hasCurrentTopic(thread)); + thread.start(); + WaitCriterion ev = new WaitCriterion() { + @Override + public boolean done() { + return flagTestThreadHasCurrentTopic; + } + @Override + public String description() { + return null; + } + }; + DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true); + + // after starting thread, hasCurrentTopic(thread) returns true + assertTrue(this.collaboration.hasCurrentTopic(thread)); + this.flagTestThreadHasCurrentTopic = false; + DistributedTestCase.join(thread, 30 * 1000, null); + + // after thread finishes, hasCurrentTopic(thread) returns false + assertTrue(!this.collaboration.hasCurrentTopic(thread)); + } + + @Test + public void testIsCurrentTopic() throws Exception { + this.log.info("[testIsCurrentTopic]"); + Object topic = "testIsCurrentTopic"; + assertTrue(!this.collaboration.isCurrentTopic(topic)); + this.collaboration.acquireUninterruptibly(topic); + try { + assertTrue(this.collaboration.isCurrentTopic(topic)); + } + finally { + this.collaboration.release(); + } + assertTrue(!this.collaboration.isCurrentTopic(topic)); + } + + protected final ThreadGroup group = + new ThreadGroup("CollaborationJUnitTest Threads") { + @Override + public void uncaughtException(Thread t, Throwable e) + { + if (e instanceof VirtualMachineError) { + SystemFailure.setFailure((VirtualMachineError)e); // don't throw + } + String s = "Uncaught exception in thread " + t; + log.error(s, e); + fail(s); + } + }; +} +
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java deleted file mode 100644 index 67de3e3..0000000 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitDisabledTest.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.gemstone.gemfire.distributed.internal.tcpserver; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.util.Properties; -import java.util.Vector; - -import junit.framework.Assert; - -import com.gemstone.gemfire.cache.CacheException; -import com.gemstone.gemfire.cache30.CacheSerializableRunnable; -import com.gemstone.gemfire.distributed.Locator; -import com.gemstone.gemfire.distributed.internal.DistributionConfig; -import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; -import com.gemstone.gemfire.distributed.internal.tcpserver.TcpServer; -import com.gemstone.gemfire.internal.AvailablePort; -import com.gemstone.gemfire.internal.Version; - -import dunit.DistributedTestCase; -import dunit.Host; -import dunit.VM; - -/** - * This tests the rolling upgrade for locators with - * different GOSSIPVERSION. - * - * @author shobhit - * - */ -public class TcpServerBackwardCompatDUnitDisabledTest extends DistributedTestCase { - - /** - * @param name - */ - public TcpServerBackwardCompatDUnitDisabledTest(String name) { - super(name); - } - - @Override - public void setUp() throws Exception { - super.setUp(); - disconnectAllFromDS(); - invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") { - - @Override - public void run2() throws CacheException { - TcpServer.isTesting = true; - } - }); - } - - @Override - public void tearDown2() throws Exception { - invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") { - - @Override - public void run2() throws CacheException { - TcpServer.isTesting = false; - } - }); - super.tearDown2(); - } - - /** - * This test starts two locators with current GOSSIPVERSION - * and then shuts down one of them and restart it with new - * GOSSIPVERSION and verifies that it has recoverd the system - * View. Then we upgrade next locator. 
- */ - public void testGossipVersionBackwardCompatibility() { - Host host = Host.getHost(0); - final VM locator0 = host.getVM(0); - final VM locator1 = host.getVM(1); - final VM locatorRestart0 = host.getVM(2); - final VM member = host.getVM(3); - - // Create properties for locator0 - final int port0 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - final File logFile0 = new File(getUniqueName() + "-locator" + port0 + ".log"); - - // Create properties for locator1 - int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - while (port == port0) { - port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - } - final int port1 = port; - - final File logFile1 = new File(getUniqueName() + "-locator" + port1 + ".log"); - - final String locators = host.getHostName() + "[" + port0 + "]," + - host.getHostName() + "[" + port1 + "]"; - - final Properties props = new Properties(); - props.setProperty(DistributionConfig.LOCATORS_NAME, locators); - props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0"); - props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false"); - - // Start locator0 with props. - //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port0+"]"); - locator0.invoke(new CacheSerializableRunnable("Starting first locator on port " + port0) { - - @Override - public void run2() throws CacheException { - try { - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION-100, Version.CURRENT_ORDINAL); - - Locator.startLocatorAndDS(port0, logFile0, props); - } catch (IOException e) { - fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); - } - } - }); - - // Start a new member to add it to discovery set of locator0. 
- member.invoke(new CacheSerializableRunnable("Start a member") { - - @Override - public void run2() throws CacheException { - disconnectFromDS(); - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION-100, Version.CURRENT_ORDINAL); - InternalDistributedSystem.connect(props); - } - }); - - // Start locator1 with props. - //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port1+"]"); - locator1.invoke(new CacheSerializableRunnable("Starting second locator on port " + port1) { - - @Override - public void run2() throws CacheException { - try { - TcpServer.TESTVERSION -= 100; - TcpServer.OLDTESTVERSION -= 100; - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL); - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal()); - assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION); - assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION); - - Locator.startLocatorAndDS(port1, logFile1, props); - - // Start a gossip client to connect to first locator "locator0". 
- fail("this test must be fixed to work with the jgroups replacement"); - // TODO -// final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1), 500); -// client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), 5000, false); - - WaitCriterion ev = new WaitCriterion() { - public boolean done() { - try { - // TODO -// Vector members = client.getMembers("mygroup1", -// new IpAddress(InetAddress.getLocalHost(), port0), true, 5000); -// return members.size() == 2; - } - catch (Exception e) { - e.printStackTrace(); - fail("unexpected exception"); - } - return false; // NOTREACHED - } - public String description() { - return null; - } - }; - - DistributedTestCase.waitForCriterion(ev, 1000, 200, true); - fail("this test must be fixed to work with the jgroups replacement"); - // TODO -// Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port0), true, 5000); -// Assert.assertEquals(2, members.size()); -// Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port0))); -// Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port1))); - - } catch (IOException e) { - fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); - } - } - }); - - // Stop first locator currently running in locator0 VM. - locator0.invoke(new CacheSerializableRunnable("Stopping first locator") { - - @Override - public void run2() throws CacheException { - Locator.getLocator().stop(); - disconnectFromDS(); - } - }); - - // Restart first locator in new VM. 
- //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port0+"]"); - locatorRestart0.invoke(new CacheSerializableRunnable("Restarting first locator on port " + port0) { - - @Override - public void run2() throws CacheException { - try { - TcpServer.TESTVERSION -= 100; - TcpServer.OLDTESTVERSION -= 100; - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL); - TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal()); - assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION); - assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION); - - Locator.startLocatorAndDS(port0, logFile0, props); - - // A new gossip client with new GOSSIPVERSION must be able - // to connect with new locator on port1, remote locator. - // Reuse locator0 VM. - fail("this test must be fixed to work with the jgroups replacement"); - // TODO -// final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1), 500); -// Vector<IpAddress> members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), true, 5000); -// Assert.assertEquals(2, members.size()); - // As they are coming from other locator, their pid is of other locator process. 
-// getLogWriter().info(members.get(0) + " " + members.get(1)); - - // TODO -// for (IpAddress ipAddr : members) { -// int port = ipAddr.getPort(); -// String hostname = ipAddr.getIpAddress().getHostAddress(); -// int pid = ipAddr.getProcessId(); -// Assert.assertTrue(" " + ipAddr, port == port0 || port == port1); -// Assert.assertTrue(" " + ipAddr, hostname.equals(InetAddress.getLocalHost().getHostAddress())); -// Assert.assertTrue(" " + ipAddr, pid == locator1.getPid()); -// } - - } catch (IOException e) { - fail("Locator0 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); - } - } - }); - } -} http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java new file mode 100644 index 0000000..2f5b80b --- /dev/null +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.gemstone.gemfire.distributed.internal.tcpserver; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.util.Properties; +import java.util.Vector; + +import org.junit.Ignore; +import org.junit.experimental.categories.Category; + +import junit.framework.Assert; + +import com.gemstone.gemfire.cache.CacheException; +import com.gemstone.gemfire.cache30.CacheSerializableRunnable; +import com.gemstone.gemfire.distributed.Locator; +import com.gemstone.gemfire.distributed.internal.DistributionConfig; +import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; +import com.gemstone.gemfire.distributed.internal.tcpserver.TcpServer; +import com.gemstone.gemfire.internal.AvailablePort; +import com.gemstone.gemfire.internal.Version; +import com.gemstone.gemfire.test.junit.categories.DistributedTest; + +import dunit.DistributedTestCase; +import dunit.Host; +import dunit.VM; + +/** + * This tests the rolling upgrade for locators with + * different GOSSIPVERSION. 
+ * + * @author shobhit + * + */ +@Category(DistributedTest.class) +@Ignore("Test was disabled by renaming to DisabledTest") +public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase { + + /** + * @param name + */ + public TcpServerBackwardCompatDUnitTest(String name) { + super(name); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + disconnectAllFromDS(); + invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") { + + @Override + public void run2() throws CacheException { + TcpServer.isTesting = true; + } + }); + } + + @Override + public void tearDown2() throws Exception { + invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") { + + @Override + public void run2() throws CacheException { + TcpServer.isTesting = false; + } + }); + super.tearDown2(); + } + + /** + * This test starts two locators with current GOSSIPVERSION + * and then shuts down one of them and restart it with new + * GOSSIPVERSION and verifies that it has recoverd the system + * View. Then we upgrade next locator. 
+ */ + public void testGossipVersionBackwardCompatibility() { + Host host = Host.getHost(0); + final VM locator0 = host.getVM(0); + final VM locator1 = host.getVM(1); + final VM locatorRestart0 = host.getVM(2); + final VM member = host.getVM(3); + + // Create properties for locator0 + final int port0 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + final File logFile0 = new File(getUniqueName() + "-locator" + port0 + ".log"); + + // Create properties for locator1 + int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + while (port == port0) { + port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + } + final int port1 = port; + + final File logFile1 = new File(getUniqueName() + "-locator" + port1 + ".log"); + + final String locators = host.getHostName() + "[" + port0 + "]," + + host.getHostName() + "[" + port1 + "]"; + + final Properties props = new Properties(); + props.setProperty(DistributionConfig.LOCATORS_NAME, locators); + props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0"); + props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false"); + + // Start locator0 with props. + //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port0+"]"); + locator0.invoke(new CacheSerializableRunnable("Starting first locator on port " + port0) { + + @Override + public void run2() throws CacheException { + try { + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION-100, Version.CURRENT_ORDINAL); + + Locator.startLocatorAndDS(port0, logFile0, props); + } catch (IOException e) { + fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); + } + } + }); + + // Start a new member to add it to discovery set of locator0. 
+ member.invoke(new CacheSerializableRunnable("Start a member") { + + @Override + public void run2() throws CacheException { + disconnectFromDS(); + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION-100, Version.CURRENT_ORDINAL); + InternalDistributedSystem.connect(props); + } + }); + + // Start locator1 with props. + //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port1+"]"); + locator1.invoke(new CacheSerializableRunnable("Starting second locator on port " + port1) { + + @Override + public void run2() throws CacheException { + try { + TcpServer.TESTVERSION -= 100; + TcpServer.OLDTESTVERSION -= 100; + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL); + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal()); + assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION); + assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION); + + Locator.startLocatorAndDS(port1, logFile1, props); + + // Start a gossip client to connect to first locator "locator0". 
+ fail("this test must be fixed to work with the jgroups replacement"); + // TODO +// final GossipClient client = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1), 500); +// client.register("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), 5000, false); + + WaitCriterion ev = new WaitCriterion() { + public boolean done() { + try { + // TODO +// Vector members = client.getMembers("mygroup1", +// new IpAddress(InetAddress.getLocalHost(), port0), true, 5000); +// return members.size() == 2; + } + catch (Exception e) { + e.printStackTrace(); + fail("unexpected exception"); + } + return false; // NOTREACHED + } + public String description() { + return null; + } + }; + + DistributedTestCase.waitForCriterion(ev, 1000, 200, true); + fail("this test must be fixed to work with the jgroups replacement"); + // TODO +// Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port0), true, 5000); +// Assert.assertEquals(2, members.size()); +// Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port0))); +// Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port1))); + + } catch (IOException e) { + fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); + } + } + }); + + // Stop first locator currently running in locator0 VM. + locator0.invoke(new CacheSerializableRunnable("Stopping first locator") { + + @Override + public void run2() throws CacheException { + Locator.getLocator().stop(); + disconnectFromDS(); + } + }); + + // Restart first locator in new VM. 
+ //props.setProperty(DistributionConfig.START_LOCATOR_NAME, host.getHostName() + "["+port0+"]"); + locatorRestart0.invoke(new CacheSerializableRunnable("Restarting first locator on port " + port0) { + + @Override + public void run2() throws CacheException { + try { + TcpServer.TESTVERSION -= 100; + TcpServer.OLDTESTVERSION -= 100; + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.TESTVERSION, Version.CURRENT_ORDINAL); + TcpServer.getGossipVersionMapForTestOnly().put(TcpServer.OLDTESTVERSION, Version.GFE_57.ordinal()); + assertEquals("Gossip Version and Test version are not same", TcpServer.GOSSIPVERSION, TcpServer.TESTVERSION); + assertEquals("Previous Gossip Version and Test version are not same", TcpServer.OLDGOSSIPVERSION, TcpServer.OLDTESTVERSION); + + Locator.startLocatorAndDS(port0, logFile0, props); + + // A new gossip client with new GOSSIPVERSION must be able + // to connect with new locator on port1, remote locator. + // Reuse locator0 VM. + fail("this test must be fixed to work with the jgroups replacement"); + // TODO +// final GossipClient client2 = new GossipClient(new IpAddress(InetAddress.getLocalHost(), port1), 500); +// Vector<IpAddress> members = client2.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port1), true, 5000); +// Assert.assertEquals(2, members.size()); + // As they are coming from other locator, their pid is of other locator process. 
+// getLogWriter().info(members.get(0) + " " + members.get(1)); + + // TODO +// for (IpAddress ipAddr : members) { +// int port = ipAddr.getPort(); +// String hostname = ipAddr.getIpAddress().getHostAddress(); +// int pid = ipAddr.getProcessId(); +// Assert.assertTrue(" " + ipAddr, port == port0 || port == port1); +// Assert.assertTrue(" " + ipAddr, hostname.equals(InetAddress.getLocalHost().getHostAddress())); +// Assert.assertTrue(" " + ipAddr, pid == locator1.getPid()); +// } + + } catch (IOException e) { + fail("Locator0 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e); + } + } + }); + } +} http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java deleted file mode 100755 index ffd5726..0000000 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitDisabledTest.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.gemstone.gemfire.internal.cache; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Map; - -import com.gemstone.gemfire.cache.EvictionAlgorithm; -import com.gemstone.gemfire.internal.cache.lru.HeapEvictor; -import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController; - -public class EvictionDUnitDisabledTest extends EvictionTestBase { - private static final long serialVersionUID = 270073077723092256L; - - public EvictionDUnitDisabledTest(String name) { - super(name); - } - - public void testDummyInlineNCentralizedEviction() { - prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); - putData("PR1", 50, 1); - - final int expectedEviction1 = getExpectedEvictionRatioOnVm(dataStore1); - final int expectedEviction2 = getExpectedEvictionRatioOnVm(dataStore2); - - raiseFakeNotification(dataStore1, "PR1", expectedEviction1); - raiseFakeNotification(dataStore2, "PR1", expectedEviction2); - validateNoOfEvictions("PR1", expectedEviction1 + expectedEviction2); - - putData("PR1", 4, 1); - validateNoOfEvictions("PR1", 4 + expectedEviction1 + expectedEviction2); - } - - public void testThreadPoolSize() { - prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); - putData("PR1", 50, 1); - raiseFakeNotification(dataStore1, "PR1", getExpectedEvictionRatioOnVm(dataStore1)); - verifyThreadPoolTaskCount(HeapEvictor.MAX_EVICTOR_THREADS); - } - - public void testCentralizedEvictionnForDistributedRegionWithDummyEvent() { - prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); - createDistributedRegion(); - putDataInDistributedRegion(50, 1); - raiseFakeNotification(dataStore1, "DR1", getExpectedEvictionRatioOnVm(dataStore1)); - } - - /** - * Test Case Description: 2 VM's. 2 PR's. 4 buckets each PR. PR1 has action - * -Local destroy and PR2 has action - Overflow To Disk. 
- * - * Test Case verifies:If naturally Eviction up and eviction Down events are - * raised. Centralized and Inline eviction are happening.All this verificatio - * is done thorugh logs. It also verifies that during eviction, if one node - * goes down and then comes up again causing GII to take place, the system - * doesnot throw an OOME. - */ - public void testEvictionWithNodeDown() { - prepareScenario2(EvictionAlgorithm.LRU_HEAP, "PR3", "PR4"); - putDataInDataStore3("PR3", 100, 1); - fakeNotification(); - print("PR3"); - killVm(); - bringVMBackToLife(); - assertEquals(100, getPRSize("PR3")); - assertEquals(0, getPRSize("PR4")); - } - - public void testEntryLruEvictions() { - int extraEntries=1; - createCache(); - maxEnteries=3; - createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries); - - final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); - getLogWriter().info( - "PR- " +pr.getEvictionAttributes().getMaximum()); - - for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) { - pr.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - - assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); - } - - - public void testEntryLru() { - createCache(); - maxEnteries=12; - createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries); - - final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); - getLogWriter().info( - "PR- " +pr.getEvictionAttributes().getMaximum()); - for (int i = 0; i < 3; i++) { - // assume mod-based hashing for bucket creation - pr.put(new Integer(i), "value0"); - pr.put(new Integer(i - + pr.getPartitionAttributes().getTotalNumBuckets()), "value1"); - pr.put(new Integer(i - + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2), - "value2"); - } - pr.put(new Integer(3), "value0"); - - for (int i = 0; i < 2; i++) { - pr.put(new Integer(i - + 
pr.getPartitionAttributes().getTotalNumBuckets())*3, "value1"); - } - assertEquals(0,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); - } - - public void testCheckEntryLruEvictionsIn1DataStore() { - int extraEntries=10; - createCache(); - maxEnteries=20; - createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000,maxEnteries); - - final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); - getLogWriter().info( - "PR- " +pr.getEvictionAttributes().getMaximum()); - - for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) { - pr.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - - assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); - - for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i - .hasNext();) { - final Map.Entry entry = (Map.Entry)i.next(); - final BucketRegion bucketRegion = (BucketRegion)entry.getValue(); - if (bucketRegion == null) { - continue; - } - getLogWriter().info( - "FINAL bucket= " + bucketRegion.getFullPath() + "size= " - + bucketRegion.size() + " count= "+bucketRegion.entryCount()); - assertEquals(4,bucketRegion.size()); - } - } - - public void testCheckEntryLruEvictionsIn2DataStore() { - maxEnteries=20; - prepareScenario1(EvictionAlgorithm.LRU_ENTRY,maxEnteries); - putData("PR1", 60, 1); - validateNoOfEvictions("PR1", 20); - } - - - public void testMemLruForPRAndDR() { - createCache(); - createPartitionedRegion(true, EvictionAlgorithm.LRU_MEMORY, "PR1", 4, 1, 1000,40); - createDistRegionWithMemEvictionAttr(); - PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); - DistributedRegion dr = (DistributedRegion)cache.getRegion("DR1"); - - assertEquals(pr.getLocalMaxMemory(), pr.getEvictionAttributes().getMaximum()); - assertEquals(MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES, dr.getEvictionAttributes().getMaximum()); - - for (int i = 0; i < 41; i++) { - pr.put(new Integer(i), 
new byte[1 * 1024 * 1024]); - } - - assertTrue(1<=((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); - assertTrue(((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()<=2); - - for (int i = 0; i < 11; i++) { - dr.put(new Integer(i), new byte[1 * 1024 * 1024]); - } - - assertTrue(1<=((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()); - assertTrue(((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()<=2); - } - - public void testEachTaskSize() { - createCache(); - createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR1", 6, 1, - 1000, 40); - createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR2", 10, 1, - 1000, 40); - createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR3", 15, 1, - 1000, 40); - createDistRegion(); - - ArrayList<Integer> taskSetSizes = getTestTaskSetSizes(); - if (taskSetSizes != null) { - for (Integer size : taskSetSizes) { - assertEquals(8, size.intValue()); - } - } - - /* - final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1"); - final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2"); - final PartitionedRegion pr3 = (PartitionedRegion)cache.getRegion("PR3"); - final DistributedRegion dr1 = (DistributedRegion)cache.getRegion("DR1"); - - for (int counter = 1; counter <= 18; counter++) { - pr1.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - getLogWriter().info("Size of PR1 before eviction = "+ pr1.size()); - - for (int counter = 1; counter <= 30; counter++) { - pr2.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - getLogWriter().info("Size of PR2 before eviction = "+ pr2.size()); - - for (int counter = 1; counter <= 45; counter++) { - pr3.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - getLogWriter().info("Size of PR3 before eviction = "+ pr3.size()); - - for (int counter = 1; counter <= 150; counter++) { - dr1.put(new Integer(counter), new byte[1 * 1024 * 1024]); - } - 
getLogWriter().info("Size of DR1 before eviction = "+ dr1.size()); - - - getLogWriter().info("Size of PR1 after eviction = "+ pr1.size()); - getLogWriter().info("Size of PR2 after eviction = "+ pr2.size()); - getLogWriter().info("Size of PR3 after eviction = "+ pr3.size()); - getLogWriter().info("Size of PR4 after eviction = "+ dr1.size());*/ - } -} http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java new file mode 100755 index 0000000..33807b7 --- /dev/null +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java @@ -0,0 +1,246 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.gemstone.gemfire.internal.cache; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; + +import org.junit.Ignore; +import org.junit.experimental.categories.Category; + +import com.gemstone.gemfire.cache.EvictionAlgorithm; +import com.gemstone.gemfire.internal.cache.lru.HeapEvictor; +import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController; +import com.gemstone.gemfire.test.junit.categories.DistributedTest; + +@Category(DistributedTest.class) +@Ignore("Test was disabled by renaming to DisabledTest") +public class EvictionDUnitTest extends EvictionTestBase { + private static final long serialVersionUID = 270073077723092256L; + + public EvictionDUnitTest(String name) { + super(name); + } + + public void testDummyInlineNCentralizedEviction() { + prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); + putData("PR1", 50, 1); + + final int expectedEviction1 = getExpectedEvictionRatioOnVm(dataStore1); + final int expectedEviction2 = getExpectedEvictionRatioOnVm(dataStore2); + + raiseFakeNotification(dataStore1, "PR1", expectedEviction1); + raiseFakeNotification(dataStore2, "PR1", expectedEviction2); + validateNoOfEvictions("PR1", expectedEviction1 + expectedEviction2); + + putData("PR1", 4, 1); + validateNoOfEvictions("PR1", 4 + expectedEviction1 + expectedEviction2); + } + + public void testThreadPoolSize() { + prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); + putData("PR1", 50, 1); + raiseFakeNotification(dataStore1, "PR1", getExpectedEvictionRatioOnVm(dataStore1)); + verifyThreadPoolTaskCount(HeapEvictor.MAX_EVICTOR_THREADS); + } + + public void testCentralizedEvictionnForDistributedRegionWithDummyEvent() { + prepareScenario1(EvictionAlgorithm.LRU_HEAP,0); + createDistributedRegion(); + putDataInDistributedRegion(50, 1); + raiseFakeNotification(dataStore1, "DR1", getExpectedEvictionRatioOnVm(dataStore1)); + } + + /** + * Test Case Description: 2 VM's. 2 PR's. 4 buckets each PR. 
PR1 has action + * -Local destroy and PR2 has action - Overflow To Disk. + * + * Test Case verifies: if natural eviction-up and eviction-down events are + * raised, Centralized and Inline eviction are happening. All this verification + * is done through logs. It also verifies that during eviction, if one node + * goes down and then comes up again causing GII to take place, the system + * does not throw an OOME. + */ + public void testEvictionWithNodeDown() { + prepareScenario2(EvictionAlgorithm.LRU_HEAP, "PR3", "PR4"); + putDataInDataStore3("PR3", 100, 1); + fakeNotification(); + print("PR3"); + killVm(); + bringVMBackToLife(); + assertEquals(100, getPRSize("PR3")); + assertEquals(0, getPRSize("PR4")); + } + + public void testEntryLruEvictions() { + int extraEntries=1; + createCache(); + maxEnteries=3; + createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries); + + final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); + getLogWriter().info( + "PR- " +pr.getEvictionAttributes().getMaximum()); + + for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) { + pr.put(new Integer(counter), new byte[1 * 1024 * 1024]); + } + + assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); + } + + + public void testEntryLru() { + createCache(); + maxEnteries=12; + createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries); + + final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); + getLogWriter().info( + "PR- " +pr.getEvictionAttributes().getMaximum()); + for (int i = 0; i < 3; i++) { + // assume mod-based hashing for bucket creation + pr.put(new Integer(i), "value0"); + pr.put(new Integer(i + + pr.getPartitionAttributes().getTotalNumBuckets()), "value1"); + pr.put(new Integer(i + + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2), + "value2"); + } + pr.put(new Integer(3), "value0"); + + for (int i = 0; i < 2; i++) { + 
pr.put(new Integer(i + + pr.getPartitionAttributes().getTotalNumBuckets())*3, "value1"); + } + assertEquals(0,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); + } + + public void testCheckEntryLruEvictionsIn1DataStore() { + int extraEntries=10; + createCache(); + maxEnteries=20; + createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000,maxEnteries); + + final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); + getLogWriter().info( + "PR- " +pr.getEvictionAttributes().getMaximum()); + + for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) { + pr.put(new Integer(counter), new byte[1 * 1024 * 1024]); + } + + assertEquals(extraEntries,((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); + + for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i + .hasNext();) { + final Map.Entry entry = (Map.Entry)i.next(); + final BucketRegion bucketRegion = (BucketRegion)entry.getValue(); + if (bucketRegion == null) { + continue; + } + getLogWriter().info( + "FINAL bucket= " + bucketRegion.getFullPath() + "size= " + + bucketRegion.size() + " count= "+bucketRegion.entryCount()); + assertEquals(4,bucketRegion.size()); + } + } + + public void testCheckEntryLruEvictionsIn2DataStore() { + maxEnteries=20; + prepareScenario1(EvictionAlgorithm.LRU_ENTRY,maxEnteries); + putData("PR1", 60, 1); + validateNoOfEvictions("PR1", 20); + } + + + public void testMemLruForPRAndDR() { + createCache(); + createPartitionedRegion(true, EvictionAlgorithm.LRU_MEMORY, "PR1", 4, 1, 1000,40); + createDistRegionWithMemEvictionAttr(); + PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1"); + DistributedRegion dr = (DistributedRegion)cache.getRegion("DR1"); + + assertEquals(pr.getLocalMaxMemory(), pr.getEvictionAttributes().getMaximum()); + assertEquals(MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES, dr.getEvictionAttributes().getMaximum()); + + for (int i = 0; i < 41; i++) { + 
pr.put(new Integer(i), new byte[1 * 1024 * 1024]); + } + + assertTrue(1<=((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()); + assertTrue(((AbstractLRURegionMap)pr.entries)._getLruList().stats().getEvictions()<=2); + + for (int i = 0; i < 11; i++) { + dr.put(new Integer(i), new byte[1 * 1024 * 1024]); + } + + assertTrue(1<=((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()); + assertTrue(((AbstractLRURegionMap)dr.entries)._getLruList().stats().getEvictions()<=2); + } + + public void testEachTaskSize() { + createCache(); + createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR1", 6, 1, + 1000, 40); + createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR2", 10, 1, + 1000, 40); + createPartitionedRegion(true, EvictionAlgorithm.LRU_HEAP, "PR3", 15, 1, + 1000, 40); + createDistRegion(); + + ArrayList<Integer> taskSetSizes = getTestTaskSetSizes(); + if (taskSetSizes != null) { + for (Integer size : taskSetSizes) { + assertEquals(8, size.intValue()); + } + } + + /* + final PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1"); + final PartitionedRegion pr2 = (PartitionedRegion)cache.getRegion("PR2"); + final PartitionedRegion pr3 = (PartitionedRegion)cache.getRegion("PR3"); + final DistributedRegion dr1 = (DistributedRegion)cache.getRegion("DR1"); + + for (int counter = 1; counter <= 18; counter++) { + pr1.put(new Integer(counter), new byte[1 * 1024 * 1024]); + } + getLogWriter().info("Size of PR1 before eviction = "+ pr1.size()); + + for (int counter = 1; counter <= 30; counter++) { + pr2.put(new Integer(counter), new byte[1 * 1024 * 1024]); + } + getLogWriter().info("Size of PR2 before eviction = "+ pr2.size()); + + for (int counter = 1; counter <= 45; counter++) { + pr3.put(new Integer(counter), new byte[1 * 1024 * 1024]); + } + getLogWriter().info("Size of PR3 before eviction = "+ pr3.size()); + + for (int counter = 1; counter <= 150; counter++) { + dr1.put(new Integer(counter), new byte[1 * 1024 * 
1024]); + } + getLogWriter().info("Size of DR1 before eviction = "+ dr1.size()); + + + getLogWriter().info("Size of PR1 after eviction = "+ pr1.size()); + getLogWriter().info("Size of PR2 after eviction = "+ pr2.size()); + getLogWriter().info("Size of PR3 after eviction = "+ pr3.size()); + getLogWriter().info("Size of PR4 after eviction = "+ dr1.size());*/ + } +} http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java index 386f8ce..060aea7 100644 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java @@ -34,7 +34,7 @@ import dunit.VM; * Performs eviction dunit tests for off-heap memory. 
* @author rholmes */ -public class OffHeapEvictionDUnitTest extends EvictionDUnitDisabledTest { +public class OffHeapEvictionDUnitTest extends EvictionDUnitTest { public OffHeapEvictionDUnitTest(String name) { super(name); } http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java index 0ee9d4f..5ec4af8 100755 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java +++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionPerfJUnitPerformanceTest.java @@ -21,6 +21,7 @@ import java.util.Arrays; import org.junit.After; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -31,14 +32,15 @@ import com.gemstone.gemfire.cache.Scope; import com.gemstone.gemfire.internal.cache.DiskRegionHelperFactory; import com.gemstone.gemfire.internal.cache.DiskRegionProperties; import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase; -import com.gemstone.gemfire.test.junit.categories.IntegrationTest; +import com.gemstone.gemfire.test.junit.categories.PerformanceTest; /** * Consolidated Disk Region Perftest. Overflow, Persist, OverflowWithPersist * modes are tested for Sync, AsyncWithBuffer and AsyncWithoutBufer writes. 
* */ -@Category(IntegrationTest.class) +@Category(PerformanceTest.class) +@Ignore("Tests have no assertions") public class DiskRegionPerfJUnitPerformanceTest extends DiskRegionTestingBase { LogWriter log = null; http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueStartStopJUnitDisabledTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueStartStopJUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueStartStopJUnitDisabledTest.java deleted file mode 100755 index 4cfc9ba..0000000 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueStartStopJUnitDisabledTest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.gemstone.gemfire.internal.cache.ha; - -import java.io.IOException; -import java.util.Properties; - -import com.gemstone.gemfire.cache.Cache; -import com.gemstone.gemfire.cache.CacheException; -import com.gemstone.gemfire.cache.CacheFactory; -import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; -import com.gemstone.gemfire.internal.cache.RegionQueue; -import com.gemstone.gemfire.distributed.DistributedSystem; -import com.gemstone.gemfire.internal.Assert; - -import junit.framework.TestCase; - -/** - * @author Mitul Bid - * - */ -public class HARegionQueueStartStopJUnitDisabledTest extends TestCase -{ - - /** - * Creates the cache instance for the test - * - * @return the cache instance - * @throws CacheException - - * thrown if any exception occurs in cache creation - */ - private Cache createCache() throws CacheException - { - return CacheFactory.create(DistributedSystem.connect(new Properties())); - } - - /** - * Creates HA region-queue object - * - * @return HA region-queue object - * @throws IOException - * @throws ClassNotFoundException - * @throws CacheException - * @throws InterruptedException - */ - private RegionQueue createHARegionQueue(String name, Cache cache) - throws IOException, ClassNotFoundException, CacheException, InterruptedException - { - RegionQueue regionqueue =HARegionQueue.getHARegionQueueInstance(name, cache,HARegionQueue.NON_BLOCKING_HA_QUEUE, false); - return regionqueue; - } - - public void testStartStop() - { - try { - boolean exceptionOccured = false; - Cache cache = createCache(); - createHARegionQueue("test", cache); - Assert - .assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting() != null); - HARegionQueue.stopHAServices(); - try { - HARegionQueue.getDispatchedMessagesMapForTesting(); - } - catch (NullPointerException e) { - exceptionOccured = true; - } - if (!exceptionOccured) { - fail("Expected exception to occur but did not occur"); - } - 
HARegionQueue.startHAServices((GemFireCacheImpl)cache); - Assert - .assertTrue(HARegionQueue.getDispatchedMessagesMapForTesting() != null); - cache.close(); - try { - HARegionQueue.getDispatchedMessagesMapForTesting(); - } - catch (NullPointerException e) { - exceptionOccured = true; - } - if (!exceptionOccured) { - fail("Expected exception to occur but did not occur"); - } - - cache = createCache(); - - try { - HARegionQueue.getDispatchedMessagesMapForTesting(); - } - catch (NullPointerException e) { - exceptionOccured = true; - } - if (!exceptionOccured) { - fail("Expected exception to occur but did not occur"); - } - - } - catch (Exception e) { - e.printStackTrace(); - fail("Test failed due to " + e); - } - - } - - -} http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c5efb805/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java ---------------------------------------------------------------------- diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java deleted file mode 100644 index 6b957e8..0000000 --- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitDisabledTest.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.gemstone.gemfire.internal.cache.ha; - -import java.util.Iterator; -import java.util.Properties; - -import com.gemstone.gemfire.cache.AttributesFactory; -import com.gemstone.gemfire.cache.Cache; -import com.gemstone.gemfire.cache.CacheFactory; -import com.gemstone.gemfire.cache.DataPolicy; -import com.gemstone.gemfire.cache.Region; -import com.gemstone.gemfire.cache.RegionAttributes; -import com.gemstone.gemfire.cache.Scope; -import com.gemstone.gemfire.cache.client.internal.PoolImpl; -import com.gemstone.gemfire.cache.server.CacheServer; -import com.gemstone.gemfire.cache30.ClientServerTestCase; -import com.gemstone.gemfire.distributed.DistributedSystem; -import com.gemstone.gemfire.internal.AvailablePort; - -import dunit.DistributedTestCase; -import dunit.Host; -import dunit.VM; - -/** - * This is Dunit test for bug 36109. This test has a cache-client having a primary - * and a secondary cache-server as its endpoint. Primary does some operations - * and is stopped, the client fails over to secondary and does some operations - * and it is verified that the 'invalidates' stats at the client is same as the - * total number of operations done by both primary and secondary. The bug was - * appearing because invalidate stats was part of Endpoint which used to get - * closed during fail over , with the failed endpoint getting closed. This bug - * has been fixed by moving the invalidate stat to be part of our implementation. 
- * - * @author Dinesh Patel - * - */ -public class StatsBugDUnitDisabledTest extends DistributedTestCase -{ - /** primary cache server */ - VM primary = null; - - /** secondary cache server */ - VM secondary = null; - - /** the cache client */ - VM client1 = null; - - /** the cache */ - private static Cache cache = null; - - /** port for the primary cache server */ - private static int PORT1; - - /** port for the secondary cache server */ - private static int PORT2; - - /** name of the test region */ - private static final String REGION_NAME = "StatsBugDUnitTest_Region"; - - /** brige-writer instance( used to get connection proxy handle) */ - private static PoolImpl pool = null; - - /** total number of cache servers */ - private static final int TOTAL_SERVERS = 2; - - /** number of puts done by each server */ - private static final int PUTS_PER_SERVER = 10; - - /** prefix added to the keys of events generated on primary */ - private static final String primaryPrefix = "primary_"; - - /** prefix added to the keys of events generated on secondary */ - private static final String secondaryPrefix = "secondary_"; - - /** - * Constructor - * - * @param name - - * name for this test instance - */ - public StatsBugDUnitDisabledTest(String name) { - super(name); - } - - /** - * Creates the primary and the secondary cache servers - * - * @throws Exception - - * thrown if any problem occurs in initializing the test - */ - public void setUp() throws Exception - { - disconnectAllFromDS(); - super.setUp(); - final Host host = Host.getHost(0); - primary = host.getVM(0); - secondary = host.getVM(1); - client1 = host.getVM(2); - PORT1 = ((Integer)primary.invoke(StatsBugDUnitDisabledTest.class, - "createServerCache")).intValue(); - PORT2 = ((Integer)secondary.invoke(StatsBugDUnitDisabledTest.class, - "createServerCache")).intValue(); - } - - /** - * Create the cache - * - * @param props - - * properties for DS - * @return the cache instance - * @throws Exception - - * thrown if any 
problem occurs in cache creation - */ - private Cache createCache(Properties props) throws Exception - { - DistributedSystem ds = getSystem(props); - ds.disconnect(); - ds = getSystem(props); - Cache cache = null; - cache = CacheFactory.create(ds); - if (cache == null) { - throw new Exception("CacheFactory.create() returned null "); - } - return cache; - } - - /** - * close the cache instances in server and client during tearDown - * - * @throws Exception - * thrown if any problem occurs in closing cache - */ - public void tearDown2() throws Exception - { - super.tearDown2(); - // close client - client1.invoke(StatsBugDUnitDisabledTest.class, "closeCache"); - - // close server - primary.invoke(StatsBugDUnitDisabledTest.class, "closeCache"); - secondary.invoke(StatsBugDUnitDisabledTest.class, "closeCache"); - } - - /** - * This test does the following:<br> - * 1)Create and populate the client<br> - * 2)Do some operations from the primary cache-server<br> - * 3)Stop the primary cache-server<br> - * 4)Wait some time to allow client to failover to secondary and do some - * operations from secondary<br> - * 5)Verify that the invalidates stats at the client accounts for the - * operations done by both, primary and secondary. 
- * - * @throws Exception - - * thrown if any problem occurs in test execution - */ - public void testBug36109() throws Exception - { - getLogWriter().info("testBug36109 : BEGIN"); - client1.invoke(StatsBugDUnitDisabledTest.class, "createClientCacheForInvalidates", new Object[] { - getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) }); - client1.invoke(StatsBugDUnitDisabledTest.class, "prepopulateClient"); - primary.invoke(StatsBugDUnitDisabledTest.class, "doEntryOperations", - new Object[] { primaryPrefix }); - pause(3000); - primary.invoke(StatsBugDUnitDisabledTest.class, "stopServer"); - try { - Thread.sleep(5000); - } - catch (InterruptedException ignore) { - fail("interrupted"); - } - - secondary.invoke(StatsBugDUnitDisabledTest.class, "doEntryOperations", - new Object[] { secondaryPrefix }); - try { - Thread.sleep(5000); - } - catch (InterruptedException ignore) { - fail("interrupted"); - } - - client1.invoke(StatsBugDUnitDisabledTest.class, "verifyNumInvalidates"); - getLogWriter().info("testBug36109 : END"); - } - - /** - * Creates and starts the cache-server - * - * @return - the port on which cache-server is running - * @throws Exception - - * thrown if any problem occurs in cache/server creation - */ - public static Integer createServerCache() throws Exception - { - StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp"); - Properties props = new Properties(); - cache = test.createCache(props); - AttributesFactory factory = new AttributesFactory(); - factory.setScope(Scope.DISTRIBUTED_ACK); - factory.setDataPolicy(DataPolicy.REPLICATE); - - RegionAttributes attrs = factory.create(); - - cache.createRegion(REGION_NAME, attrs); - CacheServer server = cache.addCacheServer(); - int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - server.setPort(port); - server.setNotifyBySubscription(false); - server.setSocketBufferSize(32768); - server.start(); - getLogWriter().info("Server started at PORT = " + 
port); - return new Integer(port); - } - - /** - * Initializes the cache client - * - * @param port1 - - * port for the primary cache-server - * @param port2-port - * for the secondary cache-server - * @throws Exception-thrown - * if any problem occurs in initializing the client - */ - public static void createClientCache(String host, Integer port1, Integer port2) - throws Exception - { - StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp"); - cache = test.createCache(createProperties1()); - AttributesFactory factory = new AttributesFactory(); - factory.setScope(Scope.DISTRIBUTED_ACK); - pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null); - RegionAttributes attrs = factory.create(); - Region region = cache.createRegion(REGION_NAME, attrs); - region.registerInterest("ALL_KEYS"); - getLogWriter().info("Client cache created"); - } - - /** - * Initializes the cache client - * - * @param port1 - - * port for the primary cache-server - * @param port2-port - * for the secondary cache-server - * @throws Exception-thrown - * if any problem occurs in initializing the client - */ - public static void createClientCacheForInvalidates(String host, Integer port1, Integer port2) - throws Exception - { - StatsBugDUnitDisabledTest test = new StatsBugDUnitDisabledTest("temp"); - cache = test.createCache(createProperties1()); - AttributesFactory factory = new AttributesFactory(); - factory.setScope(Scope.DISTRIBUTED_ACK); - pool = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, host, new int[] {port1.intValue(),port2.intValue()}, true, -1, 3, null); - RegionAttributes attrs = factory.create(); - Region region = cache.createRegion(REGION_NAME, attrs); - region.registerInterest("ALL_KEYS", false, false); - getLogWriter().info("Client cache created"); - } - - /** - * Verify that the invalidates stats at the client accounts for the operations - * done by both, 
primary and secondary. - * - */ - public static void verifyNumInvalidates() - { - long invalidatesRecordedByStats = pool.getInvalidateCount(); - getLogWriter().info( - "invalidatesRecordedByStats = " + invalidatesRecordedByStats); - - int expectedInvalidates = TOTAL_SERVERS * PUTS_PER_SERVER; - getLogWriter().info("expectedInvalidates = " + expectedInvalidates); - - if (invalidatesRecordedByStats != expectedInvalidates) { - fail("Invalidates received by client(" + invalidatesRecordedByStats - + ") does not match with the number of operations(" - + expectedInvalidates + ") done at server"); - } - } - - /** - * Stops the cache server - * - */ - public static void stopServer() - { - try { - Iterator iter = cache.getCacheServers().iterator(); - if (iter.hasNext()) { - CacheServer server = (CacheServer)iter.next(); - server.stop(); - } - } - catch (Exception e) { - fail("failed while stopServer()" + e); - } - } - - /** - * create properties for a loner VM - */ - private static Properties createProperties1() - { - Properties props = new Properties(); - props.setProperty("mcast-port", "0"); - props.setProperty("locators", ""); - return props; - } - - - /** - * Do PUT operations - * - * @param keyPrefix - - * string prefix for the keys for all the entries do be done - * @throws Exception - - * thrown if any exception occurs in doing PUTs - */ - public static void doEntryOperations(String keyPrefix) throws Exception - { - Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME); - for (int i = 0; i < PUTS_PER_SERVER; i++) { - r1.put(keyPrefix + i, keyPrefix + "val-" + i); - } - } - - /** - * Prepopulate the client with the entries that will be done by cache-servers - * - * @throws Exception - */ - public static void prepopulateClient() throws Exception - { - doEntryOperations(primaryPrefix); - doEntryOperations(secondaryPrefix); - } - - /** - * Close the cache - * - */ - public static void closeCache() - { - if (cache != null && !cache.isClosed()) { - cache.close(); - 
cache.getDistributedSystem().disconnect(); - } - } -}