Scott Jewell created GEODE-1560:
-----------------------------------

     Summary: CI Failure: LocatorDUnitTest.testMultipleLocatorsRestartingAtSameTime NPE
         Key: GEODE-1560
         URL: https://issues.apache.org/jira/browse/GEODE-1560
     Project: Geode
  Issue Type: Bug
  Components: membership
    Reporter: Scott Jewell
This may be caused by a tainted environment, or by multiple tests running concurrently and interfering with each other. The bottom of the log shows this test experiencing multiple failures.

Error Message

java.lang.NullPointerException

Stacktrace

java.lang.NullPointerException
    at com.gemstone.gemfire.distributed.LocatorDUnitTest.testMultipleLocatorsRestartingAtSameTime(LocatorDUnitTest.java:1579)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
    at org.junit.rules.RunRules.evaluate(RunRules.java:20)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
    at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
    at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecuter.runTestClass(JUnitTestClassExecuter.java:112)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecuter.execute(JUnitTestClassExecuter.java:56)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassProcessor.processTestClass(JUnitTestClassProcessor.java:66)
    at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.processTestClass(SuiteTestClassProcessor.java:51)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
    at org.gradle.messaging.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:32)
    at org.gradle.messaging.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:93)
    at com.sun.proxy.$Proxy2.processTestClass(Unknown Source)
    at org.gradle.api.internal.tasks.testing.worker.TestWorker.processTestClass(TestWorker.java:109)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
    at org.gradle.messaging.remote.internal.hub.MessageHub$Handler.run(MessageHub.java:360)
    at org.gradle.internal.concurrent.ExecutorPolicy$CatchAndRecordFailures.onExecute(ExecutorPolicy.java:54)
    at org.gradle.internal.concurrent.StoppableExecutorImpl$1.run(StoppableExecutorImpl.java:40)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
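The source at LocatorDUnitTest.java:1579 is not shown here, but given the mcast-port rejection recorded at the bottom of the log, one plausible failure mode is that a restarting member never rejoins and the test then dereferences a handle that is still null. A minimal hardening sketch, assuming (hypothetically) that the test holds a DistributedSystem reference after the restart; it would turn the NPE into a descriptive assertion failure rather than fix the underlying race:

{code:java}
import static org.junit.Assert.assertNotNull;

import com.gemstone.gemfire.distributed.DistributedSystem;

// Hypothetical guard, for illustration only: fail with a clear message
// instead of an NPE when the restarted member never produced a system.
static void assertRejoined(DistributedSystem system) {
  assertNotNull("restarted member has no DistributedSystem; it may have been "
      + "rejected while rejoining (see the mcast-port rejection in the log)", system);
}
{code}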
Standard Output

Executing [/home/jenkins/jenkins-slave/tools/hudson.model.JDK/latest1.8/jre/bin/java, -classpath, /home/jenkins/jenkins-slave/workspace/Geode-nightly/geode-core/build/classes/test:/home/jenkins/jenkins-slave/workspace/Geode-nightly/geode-core/build/resources/test:/home/jenkins/jenkins-slave/workspace/Geode-nightly/geode-core/build/classes/main:/home/jenkins/jenkins-slave/workspace/Geode-nightly/geode-core/build/resources/main:/home/jenkins/jenkins-slave/workspace/Geode-nightly/ <snipped classpath> ...[truncated 252355 chars]... nordered connection
[vm_0][info 2016/06/17 09:20:14.221 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=2 port=60365> tid=0x4c] Performing final check for suspect member 67.195.81.149(2109)<v5>:1030 reason=member unexpectedly shut down shared, unordered connection
[vm_3][info 2016/06/17 09:20:14.223 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=4 port=55419> tid=0x43] Performing final check for suspect member 67.195.81.149(2109)<v5>:1030 reason=member unexpectedly shut down shared, unordered connection
[info 2016/06/17 09:20:14.226 UTC <Geode Failure Detection Server thread 0> tid=0x53] GMSHealthMonitor server thread exiting
[vm_2][info 2016/06/17 09:20:14.228 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=5 port=47101> tid=0x4c] Performing final check for suspect member 67.195.81.149(2109)<v5>:1030 reason=member unexpectedly shut down shared, unordered connection
[vm_3][info 2016/06/17 09:20:14.228 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=4 port=55419> tid=0x43] Final check failed - requesting removal of suspect member 67.195.81.149(2109)<v5>:1030
[vm_1][info 2016/06/17 09:20:14.228 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=1 port=41689> tid=0x4a] Final check failed - requesting removal of suspect member 67.195.81.149(2109)<v5>:1030
[vm_4][info 2016/06/17 09:20:14.228 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=3 port=49179> tid=0x42] Final check failed - requesting removal of suspect member 67.195.81.149(2109)<v5>:1030
[vm_4][info 2016/06/17 09:20:14.230 UTC <unicast receiver,asf905-46898> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2213)<v3>:1028 reason=member unexpectedly shut down shared, unordered connection
[vm_2][info 2016/06/17 09:20:14.230 UTC <unicast receiver,asf905-29718> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2483)<v4>:1029 reason=member unexpectedly shut down shared, unordered connection
[vm_0][info 2016/06/17 09:20:14.230 UTC <unicast receiver,asf905-22535> tid=0x2b] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2213)<v3>:1028 reason=member unexpectedly shut down shared, unordered connection
[vm_2][info 2016/06/17 09:20:14.232 UTC <unicast receiver,asf905-29718> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2213)<v3>:1028 reason=member unexpectedly shut down shared, unordered connection
[vm_3][info 2016/06/17 09:20:14.231 UTC <unicast receiver,asf905-50992> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2191:locator)<ec><v1>:1026 reason=member unexpectedly shut down shared, unordered connection
[vm_3][info 2016/06/17 09:20:14.232 UTC <unicast receiver,asf905-50992> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2483)<v4>:1029 reason=member unexpectedly shut down shared, unordered connection
[vm_4][info 2016/06/17 09:20:14.232 UTC <unicast receiver,asf905-46898> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2191:locator)<ec><v1>:1026 reason=member unexpectedly shut down shared, unordered connection
[vm_2][info 2016/06/17 09:20:14.232 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=5 port=47101> tid=0x4c] Final check failed - requesting removal of suspect member 67.195.81.149(2109)<v5>:1030
[vm_0][info 2016/06/17 09:20:14.231 UTC <unicast receiver,asf905-22535> tid=0x2b] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2483)<v4>:1029 reason=member unexpectedly shut down shared, unordered connection
[vm_0][info 2016/06/17 09:20:14.233 UTC <unicast receiver,asf905-22535> tid=0x2b] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2191:locator)<ec><v1>:1026 reason=member unexpectedly shut down shared, unordered connection
[vm_2][info 2016/06/17 09:20:14.233 UTC <unicast receiver,asf905-29718> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2191:locator)<ec><v1>:1026 reason=member unexpectedly shut down shared, unordered connection
[vm_1][info 2016/06/17 09:20:14.230 UTC <unicast receiver,asf905-1766> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2213)<v3>:1028 reason=member unexpectedly shut down shared, unordered connection
[vm_1][info 2016/06/17 09:20:14.231 UTC <unicast receiver,asf905-1766> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2483)<v4>:1029 reason=member unexpectedly shut down shared, unordered connection
[info 2016/06/17 09:20:14.229 UTC <Geode Failure Detection Server thread 0> tid=0x53] GMSHealthMonitor server socket closed.
[vm_0][info 2016/06/17 09:20:14.234 UTC <P2P message reader for 67.195.81.149(2109)<v5>:1030 shared unordered uid=2 port=60365> tid=0x4c] Final check passed for suspect member 67.195.81.149(2109)<v5>:1030
[vm_1][info 2016/06/17 09:20:14.235 UTC <unicast receiver,asf905-1766> tid=0x2a] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2209:locator)<ec><v2>:1027 reason=member unexpectedly shut down shared, unordered connection
[info 2016/06/17 09:20:14.236 UTC <Test worker> tid=0x13] GMSHealthMonitor serverSocketExecutor is terminated
[vm_0][info 2016/06/17 09:20:14.236 UTC <unicast receiver,asf905-22535> tid=0x2b] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2209:locator)<ec><v2>:1027 reason=member unexpectedly shut down shared, unordered connection
[vm_3][info 2016/06/17 09:20:14.236 UTC <unicast receiver,asf905-50992> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2209:locator)<ec><v2>:1027 reason=member unexpectedly shut down shared, unordered connection
[vm_4][info 2016/06/17 09:20:14.236 UTC <unicast receiver,asf905-46898> tid=0x29] Membership received a request to remove 67.195.81.149(2109)<v5>:1030 from 67.195.81.149(2209:locator)<ec><v2>:1027 reason=member unexpectedly shut down shared, unordered connection
[vm_0][info 2016/06/17 09:20:14.247 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/605129648.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/605129648@3c685d9c
[vm_0][info 2016/06/17 09:20:14.248 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_0] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/605129648.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/605129648@3c685d9c (took 0 ms)
[vm_1][info 2016/06/17 09:20:14.251 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/770909585.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/770909585@59dc200e
[vm_1][info 2016/06/17 09:20:14.252 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_1] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/770909585.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/770909585@59dc200e (took 0 ms)
[vm_2][info 2016/06/17 09:20:14.254 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/571500128.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/571500128@74e866aa
[vm_2][info 2016/06/17 09:20:14.254 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_2] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/571500128.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/571500128@74e866aa (took 0 ms)
[vm_3][info 2016/06/17 09:20:14.257 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/1955872437.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/1955872437@7403f00f
[vm_3][info 2016/06/17 09:20:14.258 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_3] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/1955872437.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$8/1955872437@7403f00f (took 0 ms)
[vm_4][info 2016/06/17 09:20:14.269 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$6/259561976.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$6/259561976@ad5f76
[vm_4][info 2016/06/17 09:20:14.270 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_4] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$6/259561976.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$6/259561976@ad5f76 (took 0 ms)
[info 2016/06/17 09:20:14.345 UTC <Test worker> tid=0x13] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[info 2016/06/17 09:20:14.346 UTC <Test worker> tid=0x13] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_0][info 2016/06/17 09:20:14.352 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove"
[vm_0][info 2016/06/17 09:20:14.353 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_0][info 2016/06/17 09:20:14.353 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_0][info 2016/06/17 09:20:14.353 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_0][info 2016/06/17 09:20:14.353 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_0] from com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove" (took 0 ms)
[vm_1][info 2016/06/17 09:20:14.359 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove"
[vm_1][info 2016/06/17 09:20:14.359 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_1][info 2016/06/17 09:20:14.359 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_1][info 2016/06/17 09:20:14.359 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_1][info 2016/06/17 09:20:14.360 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_1] from com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove" (took 0 ms)
[vm_2][info 2016/06/17 09:20:14.364 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove"
[vm_2][info 2016/06/17 09:20:14.364 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_2][info 2016/06/17 09:20:14.365 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_2][info 2016/06/17 09:20:14.365 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_2][info 2016/06/17 09:20:14.365 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_2] from com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove" (took 0 ms)
[vm_3][info 2016/06/17 09:20:14.370 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove"
[vm_3][info 2016/06/17 09:20:14.370 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_3][info 2016/06/17 09:20:14.370 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_3][info 2016/06/17 09:20:14.370 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_3][info 2016/06/17 09:20:14.370 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_3] from com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove" (took 0 ms)
[vm_4][info 2016/06/17 09:20:14.380 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove"
[vm_4][info 2016/06/17 09:20:14.381 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_4][info 2016/06/17 09:20:14.382 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_4][info 2016/06/17 09:20:14.382 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] <ExpectedException action=remove>Removing shunned member</ExpectedException>
[vm_4][info 2016/06/17 09:20:14.383 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_4] from com.gemstone.gemfire.test.dunit.IgnoredException$1.run with 0 args on object: "IgnoredException remove" (took 2 ms)
[vm_0][info 2016/06/17 09:20:14.387 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/2045631151.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/2045631151@6673d4a1
[vm_0][info 2016/06/17 09:20:14.419 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GemFireCache[id = 1505642518; isClosing = true; isShutDownAll = false; created = Fri Jun 17 09:19:49 UTC 2016; server = false; copyOnRead = false; lockLease = 120; lockTimeout = 60]: Now closing.
[vm_0][info 2016/06/17 09:20:14.469 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@1a102134> tid=0x4f] Stopping Distribution Locator on asf905.gq1.ygridcore.net[22591]
[vm_0][info 2016/06/17 09:20:14.484 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] locator shutting down
[vm_0][info 2016/06/17 09:20:14.487 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] Closing locator's cache
[vm_0][info 2016/06/17 09:20:14.487 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@1a102134> tid=0x4f] Distribution Locator on asf905.gq1.ygridcore.net[0] is stopped
[vm_0][info 2016/06/17 09:20:14.490 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Shutting down DistributionManager 67.195.81.149(2186:locator)<ec><v0>:1025.
[vm_3][info 2016/06/17 09:20:14.493 UTC <Pooled High Priority Message Processor 1> tid=0x3f] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_2][info 2016/06/17 09:20:14.493 UTC <Pooled High Priority Message Processor 2> tid=0x43] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_1][info 2016/06/17 09:20:14.493 UTC <Pooled High Priority Message Processor 1> tid=0x3f] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_1][info 2016/06/17 09:20:14.494 UTC <Pooled High Priority Message Processor 1> tid=0x3f] This member is becoming the membership coordinator with address 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_3][info 2016/06/17 09:20:14.494 UTC <Pooled High Priority Message Processor 1> tid=0x3f] Member at 67.195.81.149(2186:locator)<ec><v0>:1025 gracefully left the distributed cache: shutdown message received
[vm_4][info 2016/06/17 09:20:14.494 UTC <Pooled High Priority Message Processor 1> tid=0x43] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_2][info 2016/06/17 09:20:14.495 UTC <Pooled High Priority Message Processor 2> tid=0x43] Member at 67.195.81.149(2186:locator)<ec><v0>:1025 gracefully left the distributed cache: shutdown message received
[vm_4][info 2016/06/17 09:20:14.496 UTC <Pooled High Priority Message Processor 1> tid=0x43] Member at 67.195.81.149(2186:locator)<ec><v0>:1025 gracefully left the distributed cache: shutdown message received
[vm_1][info 2016/06/17 09:20:14.498 UTC <Pooled High Priority Message Processor 1> tid=0x3f] ViewCreator starting on:67.195.81.149(2191:locator)<ec><v1>:1026
[vm_1][info 2016/06/17 09:20:14.499 UTC <Pooled High Priority Message Processor 1> tid=0x3f] Member at 67.195.81.149(2186:locator)<ec><v0>:1025 gracefully left the distributed cache: shutdown message received
[vm_1][info 2016/06/17 09:20:14.499 UTC <Geode Membership View Creator> tid=0x4d] View Creator thread is starting
[vm_1][info 2016/06/17 09:20:14.500 UTC <Geode Membership View Creator> tid=0x4d] 67.195.81.149(2109)<v5>:1030 had a weight of 10
[vm_1][info 2016/06/17 09:20:14.501 UTC <Geode Membership View Creator> tid=0x4d] preparing new view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1]failure detection ports: 18067 4457 8640 12514
[vm_1][info 2016/06/17 09:20:14.505 UTC <Geode Membership View Creator> tid=0x4d] finished waiting for responses to view preparation
[vm_1][info 2016/06/17 09:20:14.506 UTC <Geode Membership View Creator> tid=0x4d] received new view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1]old view is: View[67.195.81.149(2186:locator)<ec><v0>:1025|5] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029, 67.195.81.149(2109)<v5>:1030]
[vm_1][info 2016/06/17 09:20:14.506 UTC <Geode Membership View Creator> tid=0x4d] 67.195.81.149(2109)<v5>:1030 had a weight of 10
[vm_1][info 2016/06/17 09:20:14.506 UTC <Geode Membership View Creator> tid=0x4d] Peer locator received new membership view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1][info 2016/06/17 09:20:14.508 UTC <Geode Membership View Creator> tid=0x4d] sending new view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1]failure detection ports: 18067 4457 8640 12514
[vm_1][info 2016/06/17 09:20:14.510 UTC <View Message Processor> tid=0x42] I, 67.195.81.149(2191:locator)<ec><v1>:1026, am the elder.
[vm_4][info 2016/06/17 09:20:14.511 UTC <unicast receiver,asf905-46898> tid=0x29] received new view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_4]old view is: View[67.195.81.149(2186:locator)<ec><v0>:1025|5] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029, 67.195.81.149(2109)<v5>:1030]
[vm_3][info 2016/06/17 09:20:14.511 UTC <unicast receiver,asf905-50992> tid=0x29] received new view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_3]old view is: View[67.195.81.149(2186:locator)<ec><v0>:1025|5] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029, 67.195.81.149(2109)<v5>:1030]
[vm_2][info 2016/06/17 09:20:14.511 UTC <unicast receiver,asf905-29718> tid=0x2a] received new view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_2]old view is: View[67.195.81.149(2186:locator)<ec><v0>:1025|5] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029, 67.195.81.149(2109)<v5>:1030]
[vm_2][info 2016/06/17 09:20:14.511 UTC <unicast receiver,asf905-29718> tid=0x2a] Peer locator received new membership view: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_0][info 2016/06/17 09:20:14.531 UTC <Geode Membership View Creator> tid=0x31] View Creator is processing 1 requests for the next membership view
[vm_0][info 2016/06/17 09:20:14.532 UTC <Geode Membership View Creator> tid=0x31] 67.195.81.149(2109)<v5>:1030 had a weight of 10
[vm_0][info 2016/06/17 09:20:14.533 UTC <Geode Membership View Creator> tid=0x31] preparing new view View[67.195.81.149(2186:locator)<ec><v0>:1025|6] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_0]failure detection ports: 57887 18067 4457 8640 12514
[vm_3][info 2016/06/17 09:20:14.534 UTC <unicast receiver,asf905-50992> tid=0x29] Ignoring the view View[67.195.81.149(2186:locator)<ec><v0>:1025|6] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] crashed: [67.195.81.149(2109)<v5>:1030] from member 67.195.81.149(2186:locator)<ec><v0>:1025, which is not in my current view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1][info 2016/06/17 09:20:14.534 UTC <unicast receiver,asf905-1766> tid=0x2a] Ignoring the view View[67.195.81.149(2186:locator)<ec><v0>:1025|6] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] crashed: [67.195.81.149(2109)<v5>:1030] from member 67.195.81.149(2186:locator)<ec><v0>:1025, which is not in my current view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_2][info 2016/06/17 09:20:14.535 UTC <unicast receiver,asf905-29718> tid=0x2a] Ignoring the view View[67.195.81.149(2186:locator)<ec><v0>:1025|6] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] crashed: [67.195.81.149(2109)<v5>:1030] from member 67.195.81.149(2186:locator)<ec><v0>:1025, which is not in my current view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_4][info 2016/06/17 09:20:14.535 UTC <unicast receiver,asf905-46898> tid=0x29] Ignoring the view View[67.195.81.149(2186:locator)<ec><v0>:1025|6] members: [67.195.81.149(2186:locator)<ec><v0>:1025, 67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] crashed: [67.195.81.149(2109)<v5>:1030] from member 67.195.81.149(2186:locator)<ec><v0>:1025, which is not in my current view View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_0][info 2016/06/17 09:20:14.598 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Now closing distribution for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_0][info 2016/06/17 09:20:14.599 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Stopping membership services
[vm_2][info 2016/06/17 09:20:14.601 UTC <unicast receiver,asf905-29718> tid=0x2a] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_4][info 2016/06/17 09:20:14.601 UTC <unicast receiver,asf905-46898> tid=0x29] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_0][info 2016/06/17 09:20:14.601 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor server socket is closed in stopServices().
[vm_1][info 2016/06/17 09:20:14.601 UTC <unicast receiver,asf905-1766> tid=0x2a] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_0][info 2016/06/17 09:20:14.601 UTC <Geode Failure Detection Server thread 0> tid=0x2f] GMSHealthMonitor server thread exiting
[vm_3][info 2016/06/17 09:20:14.601 UTC <unicast receiver,asf905-50992> tid=0x29] received leave request from 67.195.81.149(2186:locator)<ec><v0>:1025 for 67.195.81.149(2186:locator)<ec><v0>:1025
[vm_0][info 2016/06/17 09:20:14.602 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor serverSocketExecutor is terminated
[vm_0][info 2016/06/17 09:20:14.613 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] DistributionManager stopped in 123ms.
[vm_0][info 2016/06/17 09:20:14.613 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Marking DistributionManager 67.195.81.149(2186:locator)<ec><v0>:1025 as closed.
[vm_0][info 2016/06/17 09:20:14.665 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_0] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/2045631151.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/2045631151@6673d4a1 (took 277 ms)
[vm_1][info 2016/06/17 09:20:14.667 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/1842040959.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/1842040959@19e3269f
[vm_1][info 2016/06/17 09:20:14.695 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GemFireCache[id = 489661411; isClosing = true; isShutDownAll = false; created = Fri Jun 17 09:19:51 UTC 2016; server = false; copyOnRead = false; lockLease = 120; lockTimeout = 60]: Now closing.
[vm_1][info 2016/06/17 09:20:14.747 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@1b90f4e4> tid=0x51] Stopping Distribution Locator on asf905.gq1.ygridcore.net[23732]
[vm_1][info 2016/06/17 09:20:14.750 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] locator shutting down
[vm_1][info 2016/06/17 09:20:14.752 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] Closing locator's cache
[vm_1][info 2016/06/17 09:20:14.752 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@1b90f4e4> tid=0x51] Distribution Locator on asf905.gq1.ygridcore.net[0] is stopped
[vm_1][info 2016/06/17 09:20:14.754 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Shutting down DistributionManager 67.195.81.149(2191:locator)<ec><v1>:1026.
[vm_3][info 2016/06/17 09:20:14.756 UTC <Pooled High Priority Message Processor 1> tid=0x3f] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_2][info 2016/06/17 09:20:14.756 UTC <Pooled High Priority Message Processor 2> tid=0x43] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_4][info 2016/06/17 09:20:14.756 UTC <Pooled High Priority Message Processor 1> tid=0x43] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_2][info 2016/06/17 09:20:14.757 UTC <Pooled High Priority Message Processor 2> tid=0x43] This member is becoming the membership coordinator with address 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_4][info 2016/06/17 09:20:14.757 UTC <Pooled High Priority Message Processor 1> tid=0x43] Member at 67.195.81.149(2191:locator)<ec><v1>:1026 gracefully left the distributed cache: shutdown message received
[vm_3][info 2016/06/17 09:20:14.757 UTC <Pooled High Priority Message Processor 1> tid=0x3f] Member at 67.195.81.149(2191:locator)<ec><v1>:1026 gracefully left the distributed cache: shutdown message received
[vm_2][info 2016/06/17 09:20:14.760 UTC <Pooled High Priority Message Processor 2> tid=0x43] ViewCreator starting on:67.195.81.149(2209:locator)<ec><v2>:1027
[vm_2][info 2016/06/17 09:20:14.760 UTC <Pooled High Priority Message Processor 2> tid=0x43] Member at 67.195.81.149(2191:locator)<ec><v1>:1026 gracefully left the distributed cache: shutdown message received
[vm_2][info 2016/06/17 09:20:14.761 UTC <Geode Membership View Creator> tid=0x52] View Creator thread is starting
[vm_2][info 2016/06/17 09:20:14.762 UTC <Geode Membership View Creator> tid=0x52] preparing new view View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_2]failure detection ports: 4457 8640 12514
[vm_2][info 2016/06/17 09:20:14.765 UTC <Geode Membership View Creator> tid=0x52] finished waiting for responses to view preparation
[vm_2][info 2016/06/17 09:20:14.765 UTC <Geode Membership View Creator> tid=0x52] received new view: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_2]old view is: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_2][info 2016/06/17 09:20:14.765 UTC <Geode Membership View Creator> tid=0x52] Peer locator received new membership view: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_2][info 2016/06/17 09:20:14.767 UTC <Geode Membership View Creator> tid=0x52] sending new view View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_2]failure detection ports: 4457 8640 12514
[vm_2][info 2016/06/17 09:20:14.768 UTC <View Message Processor> tid=0x46] I, 67.195.81.149(2209:locator)<ec><v2>:1027, am the elder.
[vm_3][info 2016/06/17 09:20:14.769 UTC <unicast receiver,asf905-50992> tid=0x29] received new view: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_4][info 2016/06/17 09:20:14.769 UTC <unicast receiver,asf905-46898> tid=0x29] received new view: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_4]old view is: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_3]old view is: View[67.195.81.149(2191:locator)<ec><v1>:1026|19] members: [67.195.81.149(2191:locator)<ec><v1>:1026, 67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2186:locator)<ec><v0>:1025] crashed: [67.195.81.149(2109)<v5>:1030]
[vm_1][info 2016/06/17 09:20:14.857 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Now closing distribution for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_1][info 2016/06/17 09:20:14.858 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Stopping membership services
[vm_4][info 2016/06/17 09:20:14.860 UTC <unicast receiver,asf905-46898> tid=0x29] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_2][info 2016/06/17 09:20:14.860 UTC <unicast receiver,asf905-29718> tid=0x2a] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_1][info 2016/06/17 09:20:14.860 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor server socket is closed in stopServices().
[vm_1][info 2016/06/17 09:20:14.860 UTC <Geode Failure Detection Server thread 0> tid=0x2e] GMSHealthMonitor server thread exiting
[vm_3][info 2016/06/17 09:20:14.860 UTC <unicast receiver,asf905-50992> tid=0x29] received leave request from 67.195.81.149(2191:locator)<ec><v1>:1026 for 67.195.81.149(2191:locator)<ec><v1>:1026
[vm_1][info 2016/06/17 09:20:14.861 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor serverSocketExecutor is terminated
[vm_1][info 2016/06/17 09:20:14.870 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] DistributionManager stopped in 116ms.
[vm_1][info 2016/06/17 09:20:14.870 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Marking DistributionManager 67.195.81.149(2191:locator)<ec><v1>:1026 as closed.
[vm_1][info 2016/06/17 09:20:14.920 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_1] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/1842040959.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/1842040959@19e3269f (took 252 ms)
[vm_2][info 2016/06/17 09:20:14.923 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/191678626.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/191678626@6f827b31
[vm_2][info 2016/06/17 09:20:14.945 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GemFireCache[id = 1961479738; isClosing = true; isShutDownAll = false; created = Fri Jun 17 09:19:54 UTC 2016; server = false; copyOnRead = false; lockLease = 120; lockTimeout = 60]: Now closing.
[vm_2][info 2016/06/17 09:20:14.984 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@6e462170> tid=0x55] Stopping Distribution Locator on asf905.gq1.ygridcore.net[29658]
[vm_2][info 2016/06/17 09:20:14.987 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] locator shutting down
[vm_2][info 2016/06/17 09:20:14.989 UTC <Distribution Locator on asf905.gq1.ygridcore.net[null]> tid=0x1d] Closing locator's cache
[vm_2][info 2016/06/17 09:20:14.989 UTC <com.gemstone.gemfire.distributed.internal.InternalLocator$1@6e462170> tid=0x55] Distribution Locator on asf905.gq1.ygridcore.net[0] is stopped
[vm_2][info 2016/06/17 09:20:14.991 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Shutting down DistributionManager 67.195.81.149(2209:locator)<ec><v2>:1027.
[vm_4][info 2016/06/17 09:20:14.993 UTC <Pooled High Priority Message Processor 1> tid=0x43] received leave request from 67.195.81.149(2209:locator)<ec><v2>:1027 for 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_3][info 2016/06/17 09:20:14.993 UTC <Pooled High Priority Message Processor 1> tid=0x3f] received leave request from 67.195.81.149(2209:locator)<ec><v2>:1027 for 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_3][info 2016/06/17 09:20:14.994 UTC <Pooled High Priority Message Processor 1> tid=0x3f] This member is becoming the membership coordinator with address 67.195.81.149(2213)<v3>:1028
[vm_4][info 2016/06/17 09:20:14.994 UTC <Pooled High Priority Message Processor 1> tid=0x43] Member at 67.195.81.149(2209:locator)<ec><v2>:1027 gracefully left the distributed cache: shutdown message received
[vm_3][info 2016/06/17 09:20:14.998 UTC <Pooled High Priority Message Processor 1> tid=0x3f] ViewCreator starting on:67.195.81.149(2213)<v3>:1028
[vm_3][info 2016/06/17 09:20:14.998 UTC <Geode Membership View Creator> tid=0x4b] View Creator thread is starting
[vm_3][info 2016/06/17 09:20:14.999 UTC <Pooled High Priority Message Processor 1> tid=0x3f] Member at 67.195.81.149(2209:locator)<ec><v2>:1027 gracefully left the distributed cache: shutdown message received
[vm_3][info 2016/06/17 09:20:15.000 UTC <Geode Membership View Creator> tid=0x4b] preparing new view View[67.195.81.149(2213)<v3>:1028|37] members: [67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2209:locator)<ec><v2>:1027]
[vm_3]failure detection ports: 8640 12514
[vm_3][info 2016/06/17 09:20:15.003 UTC <Geode Membership View Creator> tid=0x4b] finished waiting for responses to view preparation
[vm_3][info 2016/06/17 09:20:15.004 UTC <Geode Membership View Creator> tid=0x4b] received new view: View[67.195.81.149(2213)<v3>:1028|37] members: [67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2209:locator)<ec><v2>:1027]
[vm_3]old view is: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_3][info 2016/06/17 09:20:15.005 UTC <Geode Membership View Creator> tid=0x4b] sending new view View[67.195.81.149(2213)<v3>:1028|37] members: [67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2209:locator)<ec><v2>:1027]
[vm_3]failure detection ports: 8640 12514
[vm_3][info 2016/06/17 09:20:15.006 UTC <View Message Processor> tid=0x3c] I, 67.195.81.149(2213)<v3>:1028, am the elder.
[vm_4][info 2016/06/17 09:20:15.007 UTC <unicast receiver,asf905-46898> tid=0x29] received new view: View[67.195.81.149(2213)<v3>:1028|37] members: [67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2209:locator)<ec><v2>:1027]
[vm_4]old view is: View[67.195.81.149(2209:locator)<ec><v2>:1027|26] members: [67.195.81.149(2209:locator)<ec><v2>:1027, 67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2191:locator)<ec><v1>:1026]
[vm_2][info 2016/06/17 09:20:15.094 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Now closing distribution for 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_2][info 2016/06/17 09:20:15.094 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Stopping membership services
[vm_3][info 2016/06/17 09:20:15.096 UTC <unicast receiver,asf905-50992> tid=0x29] received leave request from 67.195.81.149(2209:locator)<ec><v2>:1027 for 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_4][info 2016/06/17 09:20:15.096 UTC <unicast receiver,asf905-46898> tid=0x29] received leave request from 67.195.81.149(2209:locator)<ec><v2>:1027 for 67.195.81.149(2209:locator)<ec><v2>:1027
[vm_2][info 2016/06/17 09:20:15.095 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor server socket is closed in stopServices().
[vm_2][info 2016/06/17 09:20:15.096 UTC <Geode Failure Detection Server thread 0> tid=0x2e] GMSHealthMonitor server thread exiting
[vm_2][info 2016/06/17 09:20:15.096 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor serverSocketExecutor is terminated
[vm_2][info 2016/06/17 09:20:15.103 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] DistributionManager stopped in 112ms.
[vm_2][info 2016/06/17 09:20:15.103 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Marking DistributionManager 67.195.81.149(2209:locator)<ec><v2>:1027 as closed.
[vm_2][info 2016/06/17 09:20:15.151 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_2] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/191678626.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/191678626@6f827b31 (took 228 ms)
[vm_3][info 2016/06/17 09:20:15.154 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/465380541.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/465380541@35024eff
[vm_3][info 2016/06/17 09:20:15.155 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Shutting down DistributionManager 67.195.81.149(2213)<v3>:1028.
[vm_4][info 2016/06/17 09:20:15.158 UTC <Pooled High Priority Message Processor 1> tid=0x43] received leave request from 67.195.81.149(2213)<v3>:1028 for 67.195.81.149(2213)<v3>:1028
[vm_4][info 2016/06/17 09:20:15.159 UTC <Pooled High Priority Message Processor 1> tid=0x43] This member is becoming the membership coordinator with address 67.195.81.149(2483)<v4>:1029
[vm_4][info 2016/06/17 09:20:15.163 UTC <Pooled High Priority Message Processor 1> tid=0x43] ViewCreator starting on:67.195.81.149(2483)<v4>:1029
[vm_4][info 2016/06/17 09:20:15.163 UTC <Geode Membership View Creator> tid=0x4e] View Creator thread is starting
[vm_4][info 2016/06/17 09:20:15.164 UTC <Geode Membership View Creator> tid=0x4e] received new view: View[67.195.81.149(2483)<v4>:1029|49] members: [67.195.81.149(2483)<v4>:1029{lead}] shutdown: [67.195.81.149(2213)<v3>:1028]
[vm_4]old view is: View[67.195.81.149(2213)<v3>:1028|37] members: [67.195.81.149(2213)<v3>:1028{lead}, 67.195.81.149(2483)<v4>:1029] shutdown: [67.195.81.149(2209:locator)<ec><v2>:1027]
[vm_4][info 2016/06/17 09:20:15.164 UTC <Pooled High Priority Message Processor 1> tid=0x43] Member at 67.195.81.149(2213)<v3>:1028 gracefully left the distributed cache: shutdown message received
[vm_4][info 2016/06/17 09:20:15.165 UTC <Geode Membership View Creator> tid=0x4e] no recipients for new view aside from myself
[vm_4][info 2016/06/17 09:20:15.166 UTC <View Message Processor> tid=0x40] 67.195.81.149(2483)<v4>:1029 is the elder and the only member.
[vm_3][info 2016/06/17 09:20:15.261 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Now closing distribution for 67.195.81.149(2213)<v3>:1028
[vm_3][info 2016/06/17 09:20:15.261 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Stopping membership services
[vm_3][info 2016/06/17 09:20:15.263 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor server socket is closed in stopServices().
[vm_4][info 2016/06/17 09:20:15.263 UTC <unicast receiver,asf905-46898> tid=0x29] received leave request from 67.195.81.149(2213)<v3>:1028 for 67.195.81.149(2213)<v3>:1028
[vm_3][info 2016/06/17 09:20:15.263 UTC <Geode Failure Detection Server thread 0> tid=0x2d] GMSHealthMonitor server thread exiting
[vm_3][info 2016/06/17 09:20:15.264 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor serverSocketExecutor is terminated
[vm_3][info 2016/06/17 09:20:15.273 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] DistributionManager stopped in 117ms.
[vm_3][info 2016/06/17 09:20:15.274 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Marking DistributionManager 67.195.81.149(2213)<v3>:1028 as closed.
[vm_3][info 2016/06/17 09:20:15.366 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_3] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/465380541.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$9/465380541@35024eff (took 211 ms)
[vm_4][info 2016/06/17 09:20:15.373 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$7/1904510272.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$7/1904510272@371b8f87
[vm_4][info 2016/06/17 09:20:15.375 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Shutting down DistributionManager 67.195.81.149(2483)<v4>:1029.
[vm_4][info 2016/06/17 09:20:15.481 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Now closing distribution for 67.195.81.149(2483)<v4>:1029
[vm_4][info 2016/06/17 09:20:15.481 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Stopping membership services
[vm_4][info 2016/06/17 09:20:15.483 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor server socket is closed in stopServices().
[vm_4][info 2016/06/17 09:20:15.484 UTC <Geode Failure Detection Server thread 0> tid=0x2d] GMSHealthMonitor server thread exiting
[vm_4][info 2016/06/17 09:20:15.484 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] GMSHealthMonitor serverSocketExecutor is terminated
[vm_4][info 2016/06/17 09:20:15.493 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] DistributionManager stopped in 118ms.
[vm_4][info 2016/06/17 09:20:15.493 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Marking DistributionManager 67.195.81.149(2483)<v4>:1029 as closed.
[vm_4][info 2016/06/17 09:20:15.590 UTC <RMI TCP Connection(1)-67.195.81.149> tid=0x1b] Got result: null
[vm_4] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$7/1904510272.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$7/1904510272@371b8f87 (took 216 ms)
[locator][info 2016/06/17 09:20:15.611 UTC <RMI TCP Connection(2)-67.195.81.149> tid=0x1b] Received method: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$5/28871561.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$5/28871561@60944f82
[locator][info 2016/06/17 09:20:15.616 UTC <RMI TCP Connection(2)-67.195.81.149> tid=0x1b] Got result: null
[locator] from com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$5/28871561.run with 0 args on object: com.gemstone.gemfire.test.dunit.internal.JUnit4DistributedTestCase$$Lambda$5/28871561@60944f82 (took 4 ms)

Standard Error

Suspicious strings were written to the log during this run. Fix the strings or use IgnoredException.addIgnoredException to ignore. The suspect strings follow below.
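If the deserialization errors below are judged to be benign fallout of the concurrent locator restarts rather than a product bug, the test could register them up front using the IgnoredException API named in the message above. A minimal sketch; where exactly this belongs in the test is an assumption:

{code:java}
import com.gemstone.gemfire.test.dunit.IgnoredException;

// Register the suspect string before the noisy restart phase so the
// dunit log scanner does not fail the run, then stop ignoring afterwards.
IgnoredException ie =
    IgnoredException.addIgnoredException("Exception deserializing message payload");
try {
  // ... restart the locators concurrently ...
} finally {
  ie.remove();
}
{code}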
-----------------------------------------------------------------------
Found suspect string in log4j at line 2738

[error 2016/06/17 09:20:00.793 UTC <Geode UDP INT-1,asf905-22535> tid=0x42] Exception deserializing message payload: [dst: 67.195.81.149<v0>:1025, src: 67.195.81.149<v5>:1030 (2 headers), size=189 bytes, flags=OOB|NO_RELIABILITY|INTERNAL]
java.io.IOException: Unknown header byte: 0
    at com.gemstone.gemfire.internal.InternalDataSerializer.basicReadObject(InternalDataSerializer.java:2979)
    at com.gemstone.gemfire.DataSerializer.readObject(DataSerializer.java:3271)
    at com.gemstone.gemfire.distributed.internal.membership.gms.messenger.JGroupsMessenger.readJGMessage(JGroupsMessenger.java:849)
    at com.gemstone.gemfire.distributed.internal.membership.gms.messenger.JGroupsMessenger$JGroupsReceiver.receive(JGroupsMessenger.java:1043)
    at org.jgroups.JChannel.invokeCallback(JChannel.java:816)
    at org.jgroups.JChannel.up(JChannel.java:741)
    at org.jgroups.stack.ProtocolStack.up(ProtocolStack.java:1030)
    at org.jgroups.protocols.FRAG2.up(FRAG2.java:165)
    at org.jgroups.protocols.FlowControl.up(FlowControl.java:381)
    at org.jgroups.protocols.UNICAST3.up(UNICAST3.java:435)
    at com.gemstone.gemfire.distributed.internal.membership.gms.messenger.StatRecorder.up(StatRecorder.java:75)
    at com.gemstone.gemfire.distributed.internal.membership.gms.messenger.AddressManager.up(AddressManager.java:75)
    at org.jgroups.protocols.TP.passMessageUp(TP.java:1567)
    at org.jgroups.protocols.TP$SingleMessageHandler.run(TP.java:1783)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

-----------------------------------------------------------------------
The same "Exception deserializing message payload ... java.io.IOException: Unknown header byte: 0" error, with an identical stack trace and the same dst/src members, recurs at log4j lines 2758 (09:20:03.500 UTC, Geode UDP INT-2), 2778 (09:20:06.824 UTC, INT-1), 2798 (09:20:09.835 UTC, INT-2), and 2818 (09:20:10.116 UTC, INT-1).

-----------------------------------------------------------------------
Found suspect string in log4j at line 2858

[fatal 2016/06/17 09:20:14.091 UTC <Test worker> tid=0x13] Rejected new system node 67.195.81.149(2109)<v5>:1030 because mcast was enabled which does not match the distributed system it is attempting to join. To fix this make sure the "mcast-port" gemfire property is set the same on all members of the same distributed system.
com.gemstone.gemfire.IncompatibleSystemException: Rejected new system node 67.195.81.149(2109)<v5>:1030 because mcast was enabled which does not match the distributed system it is attempting to join. To fix this make sure the "mcast-port" gemfire property is set the same on all members of the same distributed system.
    at com.gemstone.gemfire.distributed.internal.DistributionManager.sendStartupMessage(DistributionManager.java:2678)
    at com.gemstone.gemfire.distributed.internal.DistributionManager.create(DistributionManager.java:557)
    at com.gemstone.gemfire.distributed.internal.InternalDistributedSystem.initialize(InternalDistributedSystem.java:616)
    at com.gemstone.gemfire.distributed.internal.InternalDistributedSystem.newInstance(InternalDistributedSystem.java:241)
    at com.gemstone.gemfire.distributed.DistributedSystem.connect(DistributedSystem.java:238)
    at com.gemstone.gemfire.distributed.LocatorDUnitTest.testMultipleLocatorsRestartingAtSameTime(LocatorDUnitTest.java:1483)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
    at org.junit.rules.RunRules.evaluate(RunRules.java:20)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
    at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
    at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecuter.runTestClass(JUnitTestClassExecuter.java:112)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassExecuter.execute(JUnitTestClassExecuter.java:56)
    at org.gradle.api.internal.tasks.testing.junit.JUnitTestClassProcessor.processTestClass(JUnitTestClassProcessor.java:66)
    at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.processTestClass(SuiteTestClassProcessor.java:51)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
    at org.gradle.messaging.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:32)
    at org.gradle.messaging.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:93)
    at com.sun.proxy.$Proxy2.processTestClass(Unknown Source)
    at org.gradle.api.internal.tasks.testing.worker.TestWorker.processTestClass(TestWorker.java:109)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:35)
    at org.gradle.messaging.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
    at org.gradle.messaging.remote.internal.hub.MessageHub$Handler.run(MessageHub.java:360)
    at org.gradle.internal.concurrent.ExecutorPolicy$CatchAndRecordFailures.onExecute(ExecutorPolicy.java:54)
    at org.gradle.internal.concurrent.StoppableExecutorImpl$1.run(StoppableExecutorImpl.java:40)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
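The fatal rejection above is self-describing: the joining member had multicast enabled, while the system it attempted to join did not. A minimal sketch of the consistency the message asks for, assuming a locator-based system where multicast should be disabled (the locator address is a placeholder, not taken from this test):

    import java.util.Properties;
    import com.gemstone.gemfire.distributed.DistributedSystem;

    public class ConnectWithoutMcast {
        public static void main(String[] args) {
            Properties props = new Properties();
            // "mcast-port" must be identical on every member of the system;
            // 0 disables multicast so the member joins via locators instead.
            props.setProperty("mcast-port", "0");
            props.setProperty("locators", "localhost[10334]"); // placeholder
            DistributedSystem ds = DistributedSystem.connect(props);
            ds.disconnect();
        }
    }

Since the test restarts several locators at the same time, a stale or mismatched mcast-port in one VM's connect properties would explain the rejection and may be related to the NPE at LocatorDUnitTest.java:1579.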