adoroszlai commented on a change in pull request #1147:
URL: https://github.com/apache/hadoop-ozone/pull/1147#discussion_r456456312



##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
##########
@@ -43,6 +44,7 @@
   private final Lock lock = new ReentrantLock();
   private static ContainerCache cache;
   private static final float LOAD_FACTOR = 0.75f;
+  private final Striped<Lock> rocksDBLock = Striped.lazyWeakLock(1024);

Review comment:
       Would be nice to make the number of stripes configurable (later).

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
##########
@@ -63,6 +70,8 @@ public void testContainerCacheEviction() throws Exception {
     conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
 
     ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+    Assert.assertTrue(cache.size() == 0);

Review comment:
```suggestion
    Assert.assertEquals(0, cache.size());
```

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
##########
@@ -123,4 +132,47 @@ public void testContainerCacheEviction() throws Exception {
     thrown.expect(IllegalArgumentException.class);
     db5.close();
   }
+
+  @Test
+  public void testConcurrentDBGet() throws Exception {
+    File root = new File(testRoot);
+    root.mkdirs();
+    root.deleteOnExit();
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+    Assert.assertTrue(cache.size() == 0);

Review comment:
```suggestion
    Assert.assertEquals(0, cache.size());
```

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
##########
@@ -123,4 +132,47 @@ public void testContainerCacheEviction() throws Exception {
     thrown.expect(IllegalArgumentException.class);
     db5.close();
   }
+
+  @Test
+  public void testConcurrentDBGet() throws Exception {
+    File root = new File(testRoot);
+    root.mkdirs();
+    root.deleteOnExit();
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+    Assert.assertTrue(cache.size() == 0);
+    File containerDir = new File(root, "cont1");
+    createContainerDB(conf, containerDir);
+    ExecutorService executorService = Executors.newFixedThreadPool(2);
+    Runnable task = () -> {
+      try {
+        ReferenceCountedDB db1 = cache.getDB(1, "RocksDB",
+            containerDir.getPath(), conf);
+        Assert.assertTrue(db1 != null);
+      } catch (IOException e) {
+        Assert.fail("Should get the DB instance");
+      }
+    };
+    List<Future> futureList = new ArrayList<>();
+    futureList.add(executorService.submit(task));
+    futureList.add(executorService.submit(task));
+    for (Future future: futureList) {
+      try {
+        future.get();
+      } catch (InterruptedException| ExecutionException e) {
+        Assert.fail("Should get the DB instance");
+      }
+    }
+
+    ReferenceCountedDB db = cache.getDB(1, "RocksDB",
+        containerDir.getPath(), conf);
+    db.close();
+    db.close();
+    db.close();
+    Assert.assertTrue(cache.size() == 1);

Review comment:
```suggestion
    Assert.assertEquals(1, cache.size());
```

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
##########
@@ -219,4 +220,68 @@ public void testContainerReader() throws Exception {
           keyValueContainerData.getNumPendingDeletionBlocks());
     }
   }
+
+  @Test
+  public void testMultipleContainerReader() throws Exception {
+    final int volumeNum = 10;
+    StringBuffer datanodeDirs = new StringBuffer();
+    File[] volumeDirs = new File[volumeNum];
+    for (int i = 0; i < volumeNum; i++) {
+      volumeDirs[i] = tempDir.newFolder();
+      datanodeDirs = datanodeDirs.append(volumeDirs[i]).append(",");
+    }
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
+        datanodeDirs.toString());
+    MutableVolumeSet volumeSets =
+        new MutableVolumeSet(datanodeId.toString(), conf);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+
+    RoundRobinVolumeChoosingPolicy policy =
+        new RoundRobinVolumeChoosingPolicy();
+
+    final int containerCount = 100;
+    blockCount = containerCount;
+    for (int i = 0; i < containerCount; i++) {
+      KeyValueContainerData keyValueContainerData =
+          new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK,
+              (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+              datanodeId.toString());
+
+      KeyValueContainer keyValueContainer =
+          new KeyValueContainer(keyValueContainerData,
+              conf);
+      keyValueContainer.create(volumeSets, policy, scmId);
+
+      List<Long> blkNames;
+      if (i % 2 == 0) {
+        blkNames = addBlocks(keyValueContainer, true);
+        markBlocksForDelete(keyValueContainer, true, blkNames, i);
+      } else {
+        blkNames = addBlocks(keyValueContainer, false);
+        markBlocksForDelete(keyValueContainer, false, blkNames, i);
+      }
+    }
+
+    List<HddsVolume> hddsVolumes = volumeSets.getVolumesList();
+    ContainerReader[] containerReaders = new ContainerReader[volumeNum];
+    Thread[] threads = new Thread[volumeNum];
+    for (int i = 0; i < volumeNum; i++) {
+      containerReaders[i] = new ContainerReader(volumeSets,
+          hddsVolumes.get(i), containerSet, conf);
+      threads[i] = new Thread(containerReaders[i]);
+    }
+    long startTime = System.currentTimeMillis();
+    for (int i = 0; i < volumeNum; i++) {
+      threads[i].start();
+    }
+    for (int i = 0; i < volumeNum; i++) {
+      threads[i].join();
+    }
+    System.out.println("Open " + volumeNum + " Volume with " + containerCount +
+        " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
+    Assert.assertTrue(
+        containerSet.getContainerMap().entrySet().size() == containerCount);

Review comment:
```suggestion
    Assert.assertEquals(containerCount,
        containerSet.getContainerMap().entrySet().size());
```

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
##########
@@ -219,4 +220,68 @@ public void testContainerReader() throws Exception {
           keyValueContainerData.getNumPendingDeletionBlocks());
     }
   }
+
+  @Test
+  public void testMultipleContainerReader() throws Exception {
+    final int volumeNum = 10;
+    StringBuffer datanodeDirs = new StringBuffer();
+    File[] volumeDirs = new File[volumeNum];
+    for (int i = 0; i < volumeNum; i++) {
+      volumeDirs[i] = tempDir.newFolder();
+      datanodeDirs = datanodeDirs.append(volumeDirs[i]).append(",");
+    }
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
+        datanodeDirs.toString());
+    MutableVolumeSet volumeSets =
+        new MutableVolumeSet(datanodeId.toString(), conf);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+
+    RoundRobinVolumeChoosingPolicy policy =
+        new RoundRobinVolumeChoosingPolicy();
+
+    final int containerCount = 100;
+    blockCount = containerCount;
+    for (int i = 0; i < containerCount; i++) {
+      KeyValueContainerData keyValueContainerData =
+          new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK,
+              (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+              datanodeId.toString());
+
+      KeyValueContainer keyValueContainer =
+          new KeyValueContainer(keyValueContainerData,
+              conf);
+      keyValueContainer.create(volumeSets, policy, scmId);
+
+      List<Long> blkNames;
+      if (i % 2 == 0) {
+        blkNames = addBlocks(keyValueContainer, true);
+        markBlocksForDelete(keyValueContainer, true, blkNames, i);
+      } else {
+        blkNames = addBlocks(keyValueContainer, false);
+        markBlocksForDelete(keyValueContainer, false, blkNames, i);
+      }
+    }
+
+    List<HddsVolume> hddsVolumes = volumeSets.getVolumesList();
+    ContainerReader[] containerReaders = new ContainerReader[volumeNum];
+    Thread[] threads = new Thread[volumeNum];
+    for (int i = 0; i < volumeNum; i++) {
+      containerReaders[i] = new ContainerReader(volumeSets,
+          hddsVolumes.get(i), containerSet, conf);
+      threads[i] = new Thread(containerReaders[i]);
+    }
+    long startTime = System.currentTimeMillis();
+    for (int i = 0; i < volumeNum; i++) {
+      threads[i].start();
+    }
+    for (int i = 0; i < volumeNum; i++) {
+      threads[i].join();
+    }
+    System.out.println("Open " + volumeNum + " Volume with " + containerCount +
+        " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
+    Assert.assertTrue(
+        containerSet.getContainerMap().entrySet().size() == containerCount);
+    Assert.assertTrue(cache.size() == containerCount);

Review comment:
```suggestion
    Assert.assertEquals(containerCount, cache.size());
```

##########
File path: 
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
##########
@@ -123,4 +132,47 @@ public void testContainerCacheEviction() throws Exception {
     thrown.expect(IllegalArgumentException.class);
     db5.close();
   }
+
+  @Test
+  public void testConcurrentDBGet() throws Exception {
+    File root = new File(testRoot);
+    root.mkdirs();
+    root.deleteOnExit();
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    cache.clear();
+    Assert.assertTrue(cache.size() == 0);
+    File containerDir = new File(root, "cont1");
+    createContainerDB(conf, containerDir);
+    ExecutorService executorService = Executors.newFixedThreadPool(2);
+    Runnable task = () -> {
+      try {
+        ReferenceCountedDB db1 = cache.getDB(1, "RocksDB",
+            containerDir.getPath(), conf);
+        Assert.assertTrue(db1 != null);

Review comment:
```suggestion
        Assert.assertNotNull(db1);
```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org

Reply via email to