agingade commented on a change in pull request #7440:
URL: https://github.com/apache/geode/pull/7440#discussion_r824955351



##########
File path: 
geode-core/src/distributedTest/java/org/apache/geode/internal/cache/RebalanceWhileCreatingRegionDistributedTest.java
##########
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import org.apache.logging.log4j.Logger;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.partitioned.RemoveBucketMessage;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.DistributedBlackboard;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+public class RebalanceWhileCreatingRegionDistributedTest implements 
Serializable {
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule();
+
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+
+  @Rule
+  public DistributedBlackboard blackboard = new DistributedBlackboard();
+
+  private static final Logger logger = LogService.getLogger();
+
+  public static final String BEFORE_REMOVE_BUCKET_MESSAGE = 
"Before_RemoveBucketMessage";
+
+  public static final String AFTER_CREATE_PROXY_REGION = 
"After_CreateProxyRegion";
+
+  @Test
+  public void testRebalanceDuringRegionCreation() throws Exception {
+    // Init Blackboard
+    blackboard.initBlackboard();
+
+    // Start Locator
+    MemberVM locator = cluster.startLocatorVM(0);
+
+    // Start servers
+    int locatorPort = locator.getPort();
+    MemberVM server1 = cluster.startServerVM(1, locatorPort);
+    MemberVM server2 = cluster.startServerVM(2, locatorPort);
+    MemberVM accessor = cluster.startServerVM(4, locatorPort);
+
+    // Add DistributionMessageObserver
+    String regionName = testName.getMethodName();
+    Stream.of(server1, server2, accessor)
+        .forEach(server -> server.invoke(() -> 
addDistributionMessageObserver(regionName)));
+
+    // Create regions in each server
+    server1.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+    server2.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+
+    // Asynchronously wait to create the proxy region in the accessor
+    accessor.invokeAsync(() -> waitToCreateProxyRegion(regionName));
+
+    // Connect client1
+    ClientVM client1 =
+        cluster.startClientVM(5, c -> 
c.withServerConnection(server1.getPort(), server2.getPort()));
+
+    // Do puts
+    client1.invoke(() -> {
+      Region<Integer, Integer> region =
+          ClusterStartupRule.clientCacheRule.createProxyRegion(regionName);
+      IntStream.range(0, 3).forEach(i -> region.put(i, i));
+    });
+
+    // Start server3
+    MemberVM server3 = cluster.startServerVM(3, locatorPort);
+
+    // Create region in server3
+    server3.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+
+    // Add DistributionMessageObserver to server3
+    server3.invoke(() -> addDistributionMessageObserver(regionName));
+
+    // Rebalance
+    server1.invoke(() -> 
ClusterStartupRule.getCache().getResourceManager().createRebalanceFactory()
+        .start().getResults());
+
+    // Stop server3
+    server3.invoke(() -> ClusterStartupRule.getCache().close());
+
+    // Connect client to accessor
+    ClientVM client2 =
+        cluster.startClientVM(6, c -> 
c.withServerConnection(accessor.getPort())
+            .withCacheSetup(cf -> cf.setPoolReadTimeout(20000)));
+
+    // Do puts
+    client2.invoke(() -> {
+      Region<Integer, Integer> region =
+          ClusterStartupRule.clientCacheRule.createProxyRegion(regionName);
+      IntStream.range(0, 3).forEach(i -> region.put(i, i));
+    });

Review comment:
       It would be good to add validation for some of the expected state:
   - Wait for the accessor to finish and verify that its proxy-region creation
completed successfully. Then access that region on the accessor node and perform
cache operations (say, a put and a get) to verify there is no issue with the
region and that all the expected data is present.

##########
File path: 
geode-core/src/distributedTest/java/org/apache/geode/internal/cache/RebalanceWhileCreatingRegionDistributedTest.java
##########
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import org.apache.logging.log4j.Logger;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionFactory;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.DistributionMessage;
+import org.apache.geode.distributed.internal.DistributionMessageObserver;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import 
org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.partitioned.RemoveBucketMessage;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.DistributedBlackboard;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
+
+public class RebalanceWhileCreatingRegionDistributedTest implements 
Serializable {
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule();
+
+  @Rule
+  public SerializableTestName testName = new SerializableTestName();
+
+  @Rule
+  public DistributedBlackboard blackboard = new DistributedBlackboard();
+
+  private static final Logger logger = LogService.getLogger();
+
+  public static final String BEFORE_REMOVE_BUCKET_MESSAGE = 
"Before_RemoveBucketMessage";
+
+  public static final String AFTER_CREATE_PROXY_REGION = 
"After_CreateProxyRegion";
+
+  @Test
+  public void testRebalanceDuringRegionCreation() throws Exception {
+    // Init Blackboard
+    blackboard.initBlackboard();
+
+    // Start Locator
+    MemberVM locator = cluster.startLocatorVM(0);
+
+    // Start servers
+    int locatorPort = locator.getPort();
+    MemberVM server1 = cluster.startServerVM(1, locatorPort);
+    MemberVM server2 = cluster.startServerVM(2, locatorPort);
+    MemberVM accessor = cluster.startServerVM(4, locatorPort);
+
+    // Add DistributionMessageObserver
+    String regionName = testName.getMethodName();
+    Stream.of(server1, server2, accessor)
+        .forEach(server -> server.invoke(() -> 
addDistributionMessageObserver(regionName)));
+
+    // Create regions in each server
+    server1.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+    server2.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+
+    // Asynchronously wait to create the proxy region in the accessor
+    accessor.invokeAsync(() -> waitToCreateProxyRegion(regionName));
+
+    // Connect client1
+    ClientVM client1 =
+        cluster.startClientVM(5, c -> 
c.withServerConnection(server1.getPort(), server2.getPort()));
+
+    // Do puts
+    client1.invoke(() -> {
+      Region<Integer, Integer> region =
+          ClusterStartupRule.clientCacheRule.createProxyRegion(regionName);
+      IntStream.range(0, 3).forEach(i -> region.put(i, i));
+    });
+
+    // Start server3
+    MemberVM server3 = cluster.startServerVM(3, locatorPort);
+
+    // Create region in server3
+    server3.invoke(() -> createRegion(regionName, RegionShortcut.PARTITION));
+
+    // Add DistributionMessageObserver to server3
+    server3.invoke(() -> addDistributionMessageObserver(regionName));
+
+    // Rebalance
+    server1.invoke(() -> 
ClusterStartupRule.getCache().getResourceManager().createRebalanceFactory()
+        .start().getResults());
+
+    // Stop server3
+    server3.invoke(() -> ClusterStartupRule.getCache().close());
+
+    // Connect client to accessor
+    ClientVM client2 =
+        cluster.startClientVM(6, c -> 
c.withServerConnection(accessor.getPort())
+            .withCacheSetup(cf -> cf.setPoolReadTimeout(20000)));
+
+    // Do puts
+    client2.invoke(() -> {
+      Region<Integer, Integer> region =
+          ClusterStartupRule.clientCacheRule.createProxyRegion(regionName);
+      IntStream.range(0, 3).forEach(i -> region.put(i, i));
+    });
+  }
+
+  @Test
+  public void testMoveSingleBucketDuringRegionCreation() throws Exception {
+    // Init Blackboard
+    blackboard.initBlackboard();
+
+    // Start Locator
+    MemberVM locator = cluster.startLocatorVM(0);
+
+    // Start servers
+    int locatorPort = locator.getPort();
+    MemberVM server1 = cluster.startServerVM(1, locatorPort);
+    MemberVM server2 = cluster.startServerVM(2, locatorPort);
+    MemberVM accessor = cluster.startServerVM(3, locatorPort);
+
+    // Add DistributionMessageObserver
+    String regionName = testName.getMethodName();
+    Stream.of(server1, server2, accessor)
+        .forEach(server -> server.invoke(() -> 
addDistributionMessageObserver(regionName)));
+
+    // Create regions in each server
+    InternalDistributedMember source = server1.invoke(() -> {
+      createSingleBucketRegion(regionName, RegionShortcut.PARTITION);
+      Region<Integer, Integer> region =
+          ClusterStartupRule.getCache().getRegion(regionName);
+      region.put(123, 123);
+      PartitionedRegionDataStore partitionedRegionDataStore =
+          ((PartitionedRegion) region).getDataStore();
+      // Make sure server1 has the primary bucket
+      assertThat(partitionedRegionDataStore).isNotNull();
+      
assertThat(partitionedRegionDataStore.getNumberOfPrimaryBucketsManaged()).isEqualTo(1);
+      return InternalDistributedSystem.getAnyInstance().getDistributedMember();
+    });
+
+    InternalDistributedMember destination = server2.invoke(() -> {
+      createSingleBucketRegion(regionName, RegionShortcut.PARTITION);
+      Region<Integer, Integer> region =
+          ClusterStartupRule.getCache().getRegion(regionName);
+      PartitionedRegionDataStore partitionedRegionDataStore =
+          ((PartitionedRegion) region).getDataStore();
+      // Make sure server2 does not have primary bucket
+      assertThat(partitionedRegionDataStore).isNotNull();
+      
assertThat(partitionedRegionDataStore.getNumberOfPrimaryBucketsManaged()).isEqualTo(0);
+      return InternalDistributedSystem.getAnyInstance().getDistributedMember();
+    });
+
+    // Asynchronously wait to create the proxy region in the accessor
+    AsyncInvocation asyncInvocation = accessor.invokeAsync(() -> {
+      waitToCreateSingleBucketProxyRegion(regionName);
+    });
+
+    // Move the primary bucket from server1 to server2 and close the cache in 
the end
+    server2.invoke(() -> {
+      PartitionedRegion partitionedRegion =
+          (PartitionedRegion) 
ClusterStartupRule.getCache().getRegion(regionName);
+      PartitionedRegionDataStore partitionedRegionDataStore = 
partitionedRegion.getDataStore();
+      partitionedRegionDataStore.moveBucket(0, source, true);
+      ClusterStartupRule.getCache().close();
+    });
+
+    asyncInvocation.get();
+
+    // Make sure the accessor knows that the primary bucket has moved to 
server2
+    accessor.invoke(() -> {
+      PartitionedRegion pr =
+          (PartitionedRegion) 
ClusterStartupRule.getCache().getRegion(regionName);
+      
assertThat(pr.getRegionAdvisor().getBucket(0).getBucketAdvisor().getProfile(source))

Review comment:
       Adding a check to see whether the bucket has moved to the destination will
confirm that the bucket was moved successfully.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java
##########
@@ -180,19 +180,23 @@ public void processProfilesQueuedDuringInitialization() {
               logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
                   "applying queued profile removal for all buckets for {}", 
qbp.memberId);
             }
-            for (int i = 0; i < buckets.length; i++) {
-              BucketAdvisor ba = buckets[i].getBucketAdvisor();
-              int serial = qbp.serials[i];
-              if (serial != ILLEGAL_SERIAL) {
-                ba.removeIdWithSerial(qbp.memberId, serial, qbp.destroyed);
-              }
-            } // for
+            if (qbp.serials.length == 1) {
+              
buckets[qbp.bucketId].getBucketAdvisor().removeIdWithSerial(qbp.memberId,
+                  qbp.serials[0], qbp.destroyed);
+            } else {
+              for (int i = 0; i < buckets.length; i++) {
+                BucketAdvisor ba = buckets[i].getBucketAdvisor();
+                int serial = qbp.serials[i];
+                if (serial != ILLEGAL_SERIAL) {
+                  ba.removeIdWithSerial(qbp.memberId, serial, qbp.destroyed);
+                }
+              } // for

Review comment:
       Please create a new ticket to refactor `QueuedBucketProfile`.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
##########
@@ -2544,4 +2544,9 @@ void superTxApplyPut(Operation putOp, Object key, Object 
wrappedNewValue, boolea
         versionTag, tailKey);
   }
 
+  @Override
+  Set<InternalDistributedMember> getDestroyRegionRecipients() {
+    return 
getSystem().getDistributionManager().getOtherDistributionManagerIds();

Review comment:
       Can we add a comment explaining why all distributed members are fetched here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to