Hexiaoqiao commented on a change in pull request #4085: URL: https://github.com/apache/hadoop/pull/4085#discussion_r832238320
########## File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java ########## @@ -602,6 +605,54 @@ public void run() {} + "volumeMap.", 0, totalNumReplicas); } + @Test(timeout = 30000) + public void testCurrentWriteAndDeleteBlock() throws Exception { Review comment: testCurrentXXX -> testConcurrentXXX ? ########## File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java ########## @@ -602,6 +605,54 @@ public void run() {} + "volumeMap.", 0, totalNumReplicas); } + @Test(timeout = 30000) + public void testCurrentWriteAndDeleteBlock() throws Exception { + // Feed FsDataset with block metadata. + final int numBlocks = 1000; + final int threadCount = 10; + // Generate data blocks. + ExecutorService pool = Executors.newFixedThreadPool(threadCount); + List<Future<?>> futureList = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + Thread thread = new Thread() { + @Override + public void run() { + try { + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; Review comment: The `numBlocks` and `BLOCK_POOL_IDS.length` are both static, so bpid is always the same value. Did you mean to randomize it? ########## File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java ########## @@ -602,6 +605,54 @@ public void run() {} + "volumeMap.", 0, totalNumReplicas); } + @Test(timeout = 30000) + public void testCurrentWriteAndDeleteBlock() throws Exception { + // Feed FsDataset with block metadata. + final int numBlocks = 1000; + final int threadCount = 10; + // Generate data blocks. 
+ ExecutorService pool = Executors.newFixedThreadPool(threadCount); + List<Future<?>> futureList = new ArrayList<>(); + for (int i = 0; i < threadCount; i++) { + Thread thread = new Thread() { + @Override + public void run() { + try { + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; + ExtendedBlock eb = new ExtendedBlock(bpid, i); + ReplicaHandler replica = null; + try { + replica = dataset.createRbw(StorageType.DEFAULT, null, eb, + false); + if (i % 2 > 0) { + dataset.invalidate(bpid, new Block[]{eb.getLocalBlock()}); + } + } finally { + if (replica != null) { + replica.close(); + } + } + } + } catch (Exception e) { + e.printStackTrace(); + } + } + }; + thread.setName("AddBlock" + i); + futureList.add(pool.submit(thread)); + } + // Wait for data generation + for (Future<?> f : futureList) { + f.get(); + } + int totalNumReplicas = 0; Review comment: Suggest verifying each block pool's block count separately here. Summing them will lose some key information, such as the issue raised in the comment above. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-issues-help@hadoop.apache.org