DonalEvans commented on a change in pull request #7431:
URL: https://github.com/apache/geode/pull/7431#discussion_r828520584



##########
File path: geode-for-redis/src/main/java/org/apache/geode/redis/internal/data/RedisList.java
##########
@@ -148,7 +175,7 @@ public long lpush(List<byte[]> elementsToAdd, Region<RedisKey, RedisData> region
    */
   public byte[] lpop(Region<RedisKey, RedisData> region, RedisKey key) {
     byte[] popped = elementRemove(0);
-    RemoveElementsByIndex removed = new RemoveElementsByIndex();
+    RemoveElementsByIndex removed = new RemoveElementsByIndex(incrementAndGetVersion());

Review comment:
       I think this change will conflict with work that's being done for RPOP, 
since that's also modifying `lpop()` to use versioning properly. This PR might 
need to wait for RPOP to be merged, then rebase to pick up the changes.

##########
File path: geode-for-redis/src/test/java/org/apache/geode/redis/internal/data/RedisListTest.java
##########
@@ -110,6 +118,58 @@ public void equals_returnsTrue_givenDifferentEmptyLists() {
     assertThat(list2).isEqualTo(list1);
   }
 
+  @Test
+  public void lrem_storesStableDelta_inOrderRemove() {
+    Region<RedisKey, RedisData> region = uncheckedCast(mock(PartitionedRegion.class));
+    when(region.put(any(), any())).thenAnswer(this::validateDeltaSerialization);
+
+    byte[] element = new byte[] {1};
+    RedisList list = createRedisListWithDuplicateElements();
+
+    list.lrem(2, element, region, null);
+
+    verify(region).put(any(), any());
+    assertThat(list.hasDelta()).isFalse();
+  }
+
+  @Test
+  public void lrem_storesStableDelta_reverseOrderRemove() {

Review comment:
       Since in-order removing and reverse-order removing now both use the same 
Delta type, this test is redundant.

##########
File path: geode-for-redis/src/main/java/org/apache/geode/redis/internal/data/RedisList.java
##########
@@ -168,8 +195,10 @@ public void applyAddByteArrayDelta(byte[] bytes) {
 
   @Override
   public void applyRemoveElementsByIndex(List<Integer> indexes) {
-    for (int index : indexes) {
-      elementRemove(index);
+    if (indexes.size() == 1) {
+      elementRemove(indexes.get(0));
+    } else {
+      elementList.removeIndexes(indexes);

Review comment:
       I think this line needs to be synchronized, to prevent concurrent 
modification/reading of the contents of `elementList` by other methods. For 
consistency, maybe `elementList.removeIndexes()` could be wrapped in a 
synchronized method in `RedisList`.
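
As a rough sketch of what that could look like (the method name
`elementsRemoveByIndexes` is hypothetical, and the assumption that the other
`element*` helpers in `RedisList` are synchronized this way would need checking
against the actual class):

```java
  // Hypothetical synchronized wrapper in RedisList, mirroring the existing
  // element* helpers; the name and locking granularity are assumptions.
  private synchronized void elementsRemoveByIndexes(List<Integer> indexes) {
    elementList.removeIndexes(indexes);
  }

  @Override
  public void applyRemoveElementsByIndex(List<Integer> indexes) {
    if (indexes.size() == 1) {
      elementRemove(indexes.get(0));
    } else {
      elementsRemoveByIndexes(indexes);
    }
  }
```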

##########
File path: geode-for-redis/src/distributedTest/java/org/apache/geode/redis/internal/commands/executor/list/LRemDUnitTest.java
##########
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.redis.internal.commands.executor.list;
+
+import static org.apache.geode.test.dunit.rules.RedisClusterStartupRule.BIND_ADDRESS;
+import static org.apache.geode.test.dunit.rules.RedisClusterStartupRule.REDIS_CLIENT_TIMEOUT;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.JedisCluster;
+
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.dunit.rules.RedisClusterStartupRule;
+import org.apache.geode.test.junit.rules.ExecutorServiceRule;
+
+public class LRemDUnitTest {
+  private static final int LIST_SIZE_FOR_BUCKET_TEST = 10000;
+  private static final int UNIQUE_ELEMENTS = 5000;
+  private static final int COUNT_OF_UNIQUE_ELEMENT = 2; // How many times a unique element is
+                                                        // repeated in list
+
+  @Rule
+  public RedisClusterStartupRule clusterStartUp = new RedisClusterStartupRule();
+
+  @Rule
+  public ExecutorServiceRule executor = new ExecutorServiceRule();
+
+  private static JedisCluster jedis;
+
+  @Before
+  public void testSetup() {
+    MemberVM locator = clusterStartUp.startLocatorVM(0);
+    clusterStartUp.startRedisVM(1, locator.getPort());
+    clusterStartUp.startRedisVM(2, locator.getPort());
+    clusterStartUp.startRedisVM(3, locator.getPort());
+    int redisServerPort = clusterStartUp.getRedisPort(1);
+    jedis = new JedisCluster(new HostAndPort(BIND_ADDRESS, redisServerPort), REDIS_CLIENT_TIMEOUT);
+    clusterStartUp.flushAll();
+  }
+
+  @After
+  public void tearDown() {
+    jedis.close();
+  }
+
+  @Test
+  public void shouldDistributeDataAmongCluster_andRetainDataAfterServerCrash() {
+    String key = makeListKeyWithHashtag(1, clusterStartUp.getKeyOnServer("lrem", 1));
+
+    // Create initial list and push it
+    final int initialListSize = 30;
+    final int uniqueElements = 3;
+    List<String> elementList = new ArrayList<>();
+    for (int i = 0; i < initialListSize; i++) {
+      elementList.add(makeElementString(key, i % uniqueElements));
+    }
+    jedis.lpush(key, elementList.toArray(new String[] {}));
+
+    // Remove all elements except for ELEMENT_TO_CHECK
+    final int uniqueElementsCount = initialListSize / uniqueElements;
+    assertThat(jedis.lrem(key, 0, makeElementString(key, 0))).isEqualTo(uniqueElementsCount);
+    assertThat(jedis.lrem(key, -uniqueElementsCount, makeElementString(key, 1)))
+        .isEqualTo(uniqueElementsCount);
+
+    clusterStartUp.crashVM(1); // kill primary server
+
+    assertThat(jedis.llen(key)).isEqualTo(uniqueElementsCount);
+    assertThat(jedis.lrem(key, uniqueElementsCount, makeElementString(key, 2)))
+        .isEqualTo(uniqueElementsCount);
+    assertThat(jedis.exists(key)).isFalse();
+  }
+
+
+  @Test
+  public void givenBucketsMoveDuringLrem_thenOperationsAreNotLost() throws Exception {
+    AtomicBoolean running = new AtomicBoolean(true);
+
+    List<String> listHashtags = makeListHashtags();
+    String key1 = makeListKeyWithHashtag(1, listHashtags.get(0));
+    String key2 = makeListKeyWithHashtag(2, listHashtags.get(1));
+    String key3 = makeListKeyWithHashtag(3, listHashtags.get(2));
+
+    List<String> elementList1 = makeListWithRepeatingElements(key1);
+    List<String> elementList2 = makeListWithRepeatingElements(key2);
+    List<String> elementList3 = makeListWithRepeatingElements(key3);
+
+    jedis.lpush(key1, elementList1.toArray(new String[] {}));
+    jedis.lpush(key2, elementList2.toArray(new String[] {}));
+    jedis.lpush(key3, elementList3.toArray(new String[] {}));
+
+    Future<Integer> future1 =
+        executor.submit(() -> performLremAndVerify(key1, running, elementList1));
+    Future<Integer> future2 =
+        executor.submit(() -> performLremAndVerify(key2, running, elementList2));
+    Future<Integer> future3 =
+        executor.submit(() -> performLremAndVerify(key3, running, elementList3));
+
+    for (int i = 0; i < 50; i++) {
+      clusterStartUp.moveBucketForKey(listHashtags.get(i % listHashtags.size()));
+      Thread.sleep(500);
+    }
+
+    running.set(false);
+
+    verifyLremResult(key1, future1.get());
+    verifyLremResult(key2, future2.get());
+    verifyLremResult(key3, future3.get());
+  }
+
+  private void verifyLremResult(String key, int iterationCount) {
+    for (int i = UNIQUE_ELEMENTS - 1; i >= iterationCount; i--) {
+      String element = makeElementString(key, i);
+      assertThat(jedis.lrem(key, COUNT_OF_UNIQUE_ELEMENT, element))
+          .isEqualTo(COUNT_OF_UNIQUE_ELEMENT);
+    }
+    assertThat(jedis.exists(key)).isFalse();
+  }
+
+  private Integer performLremAndVerify(String key, AtomicBoolean isRunning, List<String> list) {
+    assertThat(jedis.llen(key)).isEqualTo(LIST_SIZE_FOR_BUCKET_TEST);
+    int count = COUNT_OF_UNIQUE_ELEMENT;
+
+    int iterationCount = 0;
+    while (isRunning.get()) {
+      count = -count;
+      String element = makeElementString(key, iterationCount);
+      assertThat(jedis.lrem(key, count, element)).isEqualTo(COUNT_OF_UNIQUE_ELEMENT);

Review comment:
       Could we have some more assertions here, confirming that the contents of
the list are correct after each LREM? Maybe checking that the size of the list is
as expected, and that the list doesn't contain the elements that have just been
removed, using `lrange()`? That way, if something goes wrong, we can fail fast
and report an error immediately, rather than relying on a final verification step
once all the operations have finished.
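
For illustration, something along these lines could follow the `lrem` call
inside the loop. This is only a sketch: the expected-size arithmetic assumes
`iterationCount` is incremented later in the loop, which isn't visible in this
excerpt.

```java
      // Sketch of extra per-iteration checks (assumes iterationCount is bumped later in the loop):
      // the list should have shrunk by exactly COUNT_OF_UNIQUE_ELEMENT entries...
      long expectedSize = LIST_SIZE_FOR_BUCKET_TEST - (long) (iterationCount + 1) * COUNT_OF_UNIQUE_ELEMENT;
      assertThat(jedis.llen(key)).isEqualTo(expectedSize);
      // ...and the element that was just removed should no longer be present.
      assertThat(jedis.lrange(key, 0, -1)).doesNotContain(element);
```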

##########
File path: geode-for-redis/src/main/java/org/apache/geode/redis/internal/data/collections/SizeableByteArrayList.java
##########
@@ -31,6 +32,57 @@
       roundUpSize(getObjectHeaderSize() + 3 * getReferenceSize());
   private int memberOverhead;
 
+  /**
+   * @param o element to remove from the list
+   * @param count number of elements that match object o to remove from the list.
+   *        Count that is equal to 0 removes all matching elements from the list.
+   * @return list of indexes that were removed in order.
+   */
+  public List<Integer> remove(Object o, int count) {
+    if (0 <= count) {
+      count = count == 0 ? this.size() : count;
+      return removeObjectsStartingAtHead(o, count);
+    } else {
+      return removeObjectsStartingAtTail(o, -count);
+    }
+  }
+
+  private List<Integer> removeObjectsStartingAtHead(Object o, int count) {
+    int index = 0;
+    ListIterator<byte[]> iterator = this.listIterator(index);
+    List<Integer> indexesRemoved = new LinkedList<>();
+
+    while (iterator.hasNext() && count != indexesRemoved.size()) {
+      byte[] element = iterator.next();
+      if (Arrays.equals(element, (byte[]) o)) {
+        iterator.remove();
+        memberOverhead -= calculateByteArrayOverhead(element);
+        indexesRemoved.add(index);
+      }
+
+      index++;
+    }
+    return indexesRemoved;
+  }
+
+  private List<Integer> removeObjectsStartingAtTail(Object o, int count) {
+    int index = this.size() - 1;
+    ListIterator<byte[]> descendingIterator = this.listIterator(this.size());

Review comment:
       Small nitpick, but the uses of `this` on these lines are unnecessary. 
Removing them helps tidy up the code a little. Also, instead of manually 
keeping track of `index` in a variable in this method, and in the method above, 
you could use the `nextIndex()` or `previousIndex()` methods on `ListIterator` 
to determine the index of the element being removed.
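
For what it's worth, the tail-side loop could look roughly like this with those
changes. This is only a sketch: the loop body below is extrapolated from the
head-side method, since the rest of this method isn't shown in the diff, and on
the head side `previousIndex()` would report indexes in the already-mutated
list, so the index bookkeeping there would need a closer look.

```java
  private List<Integer> removeObjectsStartingAtTail(Object o, int count) {
    ListIterator<byte[]> descendingIterator = listIterator(size());
    List<Integer> indexesRemoved = new LinkedList<>();

    while (descendingIterator.hasPrevious() && count != indexesRemoved.size()) {
      byte[] element = descendingIterator.previous();
      if (Arrays.equals(element, (byte[]) o)) {
        // nextIndex() is the index of the element just returned by previous(),
        // so no manually tracked index variable is needed.
        indexesRemoved.add(descendingIterator.nextIndex());
        descendingIterator.remove();
        memberOverhead -= calculateByteArrayOverhead(element);
      }
    }
    return indexesRemoved;
  }
```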



