timmylicheng commented on a change in pull request #28: HDDS-1569 Support creating multiple pipelines with same datanode
URL: https://github.com/apache/hadoop-ozone/pull/28#discussion_r337896461
##########
File path: hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
##########
@@ -147,10 +152,45 @@ private void initializePipelineState() throws IOException {
     }
   }
 
+  private boolean exceedPipelineNumberLimit(ReplicationFactor factor) {
+    if (factor != ReplicationFactor.THREE) {
+      // Only put limits for Factor THREE pipelines.
+      return false;
+    }
+    // Per datanode limit
+    if (heavyNodeCriteria > 0) {
+      return (stateManager.getPipelines(ReplicationType.RATIS, factor).size() -
+          stateManager.getPipelines(ReplicationType.RATIS, factor,
+              Pipeline.PipelineState.CLOSED).size()) > heavyNodeCriteria *
+          nodeManager.getNodeCount(HddsProtos.NodeState.HEALTHY) /
+          factor.getNumber();
+    }
+
+    // Global limit
+    if (pipelineNumberLimit > 0) {
+      return (stateManager.getPipelines(ReplicationType.RATIS,
+          ReplicationFactor.THREE).size() - stateManager.getPipelines(
+          ReplicationType.RATIS, ReplicationFactor.THREE,
+          Pipeline.PipelineState.CLOSED).size()) >
+          (pipelineNumberLimit - stateManager.getPipelines(
+          ReplicationType.RATIS, ReplicationFactor.ONE).size());
+    }
+
+    return false;
+  }
+
   @Override
   public synchronized Pipeline createPipeline(
       ReplicationType type, ReplicationFactor factor) throws IOException {
     lock.writeLock().lock();
+    if (type == ReplicationType.RATIS && exceedPipelineNumberLimit(factor)) {

Review comment:
   Good point. Updating.
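For reviewers who want to sanity-check the limit arithmetic, here is a minimal, self-contained sketch that recomputes both comparisons from exceedPipelineNumberLimit with plain integers. The class and method names below are hypothetical illustrations, not SCM code; it only mirrors the two branches in the diff above (both use Java integer division and subtraction semantics).

// Hypothetical sketch only: mirrors the two comparisons in the patch so the
// bounds can be checked with concrete numbers. Not part of SCMPipelineManager.
public class PipelineLimitSketch {

  // Per-datanode branch: open (non-CLOSED) Ratis/THREE pipelines must not
  // exceed heavyNodeCriteria * healthyNodes / factor (integer division).
  static boolean exceedsPerDatanodeLimit(int openFactorThree,
      int heavyNodeCriteria, int healthyNodes, int factor) {
    return heavyNodeCriteria > 0
        && openFactorThree > heavyNodeCriteria * healthyNodes / factor;
  }

  // Global branch: open Ratis/THREE pipelines must not exceed the overall
  // pipeline limit minus the number of Ratis/ONE pipelines.
  static boolean exceedsGlobalLimit(int openFactorThree,
      int pipelineNumberLimit, int factorOnePipelines) {
    return pipelineNumberLimit > 0
        && openFactorThree > pipelineNumberLimit - factorOnePipelines;
  }

  public static void main(String[] args) {
    // 9 healthy datanodes, 2 pipelines allowed per datanode, factor 3:
    // the bound is 2 * 9 / 3 = 6 open Factor-THREE pipelines.
    System.out.println(exceedsPerDatanodeLimit(6, 2, 9, 3)); // false (at limit)
    System.out.println(exceedsPerDatanodeLimit(7, 2, 9, 3)); // true  (over limit)

    // Global limit of 10 with 4 Factor-ONE pipelines leaves room for 6.
    System.out.println(exceedsGlobalLimit(6, 10, 4));        // false
    System.out.println(exceedsGlobalLimit(7, 10, 4));        // true
  }
}

One consequence worth double-checking in the patch itself: because the per-datanode branch returns before the global branch is reached, setting heavyNodeCriteria > 0 means pipelineNumberLimit is never consulted for Factor-THREE pipelines.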