adoroszlai commented on a change in pull request #1826:
URL: https://github.com/apache/ozone/pull/1826#discussion_r590580909



##########
File path: 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/BaseGenerator.java
##########
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon.containergenerator;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.ozone.freon.BaseFreonGenerator;
+
+import picocli.CommandLine.Option;
+
+public abstract class BaseGenerator extends BaseFreonGenerator implements
+    Callable<Void> {
+
+  @Option(names = {"-u", "--user"},
+      description = "Owner of the files",
+      defaultValue = "ozone")
+  private static String userId;
+
+  @Option(names = {"--key-size"},
+      description = "Size of the generated keys (in bytes) in each of the "
+          + "containers",
+      defaultValue = "16000000")
+  private int keySize;
+
+  @Option(names = {"--size"},
+      description = "Size of generated containers",
+      defaultValue = "5000000000")

Review comment:
       Should it respect `ozone.scm.container.size`?  (The `compose` clusters 
have their container size set to 1GB.)

##########
File path: 
hadoop-ozone/dist/src/main/compose/upgrade/delete-and-regenerate-data.sh
##########
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script can be run only if the cluster is already started
+# once, and initialized (but no data is written yet).
+
+set -e
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+cd "${COMPOSE_DIR}"
+
+# shellcheck source=/dev/null
+source "${COMPOSE_DIR}/../testlib.sh"
+
+#read OZONE_VOLUME from here
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR"/.env
+
+rm -rf "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
+mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
+
+
+#During the first start, all the required VERSION and metadata files will be 
created
+start_docker_env
+
+#data generation requires offline cluster
+docker-compose stop
+
+#generate metadata (-n1 means: only one container is generated)
+docker-compose run scm ozone freon cgscm -n 1
+docker-compose run om ozone freon cgom -n 1

Review comment:
       Should it set `-u hadoop` to match the user of the runner image?
   
   Without it, `/vol1` volume is not shown by `ozone sh volume list`.

##########
File path: 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon.containergenerator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.SplittableRandom;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Container generator for datanode metadata/data.
+ */
+@Command(name = "cgdn",
+    description = "Offline container metadata generator for Ozone Datanodes.",
+    optionListHeading =
+        "\nExecute this command with different parameters for each datanodes. "
+            + "For example if you have 10 datanodes, use "
+            + "'ozone freon cgdn --index=1 --datanodes=10', 'ozone freon"
+            + " cgdn --index=2 --datanodes=10', 'ozone freon cgdn "
+            + "--index=3 --datanodes=10', ...\n\n",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class GeneratorDatanode extends BaseGenerator {
+
+  @Option(names = {"--datanodes"},
+      description = "Number of datanodes (to generate only subsequent of the "

Review comment:
       ```suggestion
         description = "Number of datanodes (to generate only a subset of the "
   ```

##########
File path: 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon.containergenerator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.SplittableRandom;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Container generator for datanode metadata/data.
+ */
+@Command(name = "cgdn",
+    description = "Offline container metadata generator for Ozone Datanodes.",
+    optionListHeading =
+        "\nExecute this command with different parameters for each datanodes. "
+            + "For example if you have 10 datanodes, use "
+            + "'ozone freon cgdn --index=1 --datanodes=10', 'ozone freon"
+            + " cgdn --index=2 --datanodes=10', 'ozone freon cgdn "
+            + "--index=3 --datanodes=10', ...\n\n",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class GeneratorDatanode extends BaseGenerator {
+
+  @Option(names = {"--datanodes"},
+      description = "Number of datanodes (to generate only subsequent of the "
+          + "required containers).",
+      defaultValue = "3")
+  private int datanodes;
+
+  @Option(names = {"--index"},
+      description = "Index of the datanode. For example datanode #3 should "
+          + "have only every 3rd container in a 10 node cluster.).",
+      defaultValue = "1")
+  private int datanodeIndex;
+
+  @Option(names = {"--zero"},
+      description = "User zero bytes instead of random data.",

Review comment:
       ```suggestion
         description = "Use zero bytes instead of random data.",
   ```

##########
File path: 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon.containergenerator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.SplittableRandom;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Container generator for datanode metadata/data.
+ */
+@Command(name = "cgdn",
+    description = "Offline container metadata generator for Ozone Datanodes.",
+    optionListHeading =
+        "\nExecute this command with different parameters for each datanodes. "
+            + "For example if you have 10 datanodes, use "
+            + "'ozone freon cgdn --index=1 --datanodes=10', 'ozone freon"
+            + " cgdn --index=2 --datanodes=10', 'ozone freon cgdn "
+            + "--index=3 --datanodes=10', ...\n\n",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class GeneratorDatanode extends BaseGenerator {
+
+  @Option(names = {"--datanodes"},
+      description = "Number of datanodes (to generate only subsequent of the "
+          + "required containers).",
+      defaultValue = "3")
+  private int datanodes;
+
+  @Option(names = {"--index"},
+      description = "Index of the datanode. For example datanode #3 should "
+          + "have only every 3rd container in a 10 node cluster.).",
+      defaultValue = "1")
+  private int datanodeIndex;
+
+  @Option(names = {"--zero"},
+      description = "User zero bytes instead of random data.",
+      defaultValue = "false")
+  private boolean zero;
+
+  private ChunkManager chunkManager;
+
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+
+  private MutableVolumeSet volumeSet;
+
+  private Checksum checksum;
+
+  private ConfigurationSource config;
+
+  private Timer timer;
+
+  //Simulate ratis log index (incremented for each chunk write)
+  private int logCounter;
+  private String datanodeId;
+  private String scmId;
+  private int numberOfPipelines;
+  private int currentPipeline;
+
+  @Override
+  public Void call() throws Exception {
+    init();
+
+    numberOfPipelines = datanodes / 3;
+
+    //generate only containers for one datanodes
+    setTestNo(getTestNo() / numberOfPipelines);
+
+    currentPipeline = (datanodeIndex - 1) % numberOfPipelines;
+
+    config = createOzoneConfiguration();
+
+    BlockManager blockManager = new BlockManagerImpl(config);
+    chunkManager = ChunkManagerFactory
+        .createChunkManager(config, blockManager);
+
+    final Collection<String> storageDirs =
+        MutableVolumeSet.getDatanodeStorageDirs(config);
+
+    String firstStorageDir =
+        StorageLocation.parse(storageDirs.iterator().next())
+            .getUri().getPath();
+
+    final Path hddsDir = Paths.get(firstStorageDir, "hdds");
+    if (!Files.exists(hddsDir)) {
+      throw new NoSuchFieldException(hddsDir
+          + " doesn't exist. Please start a real cluster to initialize the "
+          + "VERSION descriptors, and re-start this generator after the files"
+          + " are created (but after cluster is stopped).");
+    }
+
+    final Path scmSpecificDir = Files.list(hddsDir)
+        .filter(Files::isDirectory)
+        .findFirst().get().getFileName();
+    if (scmSpecificDir == null) {

Review comment:
       Won't `findFirst()` return an empty optional if there is no directory?
In that case, calling `get()` on it results in `NoSuchElementException`.

##########
File path: 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon.containergenerator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.SplittableRandom;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import 
org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Container generator for datanode metadata/data.
+ */
+@Command(name = "cgdn",
+    description = "Offline container metadata generator for Ozone Datanodes.",
+    optionListHeading =
+        "\nExecute this command with different parameters for each datanodes. "
+            + "For example if you have 10 datanodes, use "
+            + "'ozone freon cgdn --index=1 --datanodes=10', 'ozone freon"
+            + " cgdn --index=2 --datanodes=10', 'ozone freon cgdn "
+            + "--index=3 --datanodes=10', ...\n\n",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class GeneratorDatanode extends BaseGenerator {
+
+  @Option(names = {"--datanodes"},
+      description = "Number of datanodes (to generate only subsequent of the "
+          + "required containers).",
+      defaultValue = "3")
+  private int datanodes;
+
+  @Option(names = {"--index"},
+      description = "Index of the datanode. For example datanode #3 should "
+          + "have only every 3rd container in a 10 node cluster.).",
+      defaultValue = "1")
+  private int datanodeIndex;
+
+  @Option(names = {"--zero"},
+      description = "User zero bytes instead of random data.",
+      defaultValue = "false")
+  private boolean zero;
+
+  private ChunkManager chunkManager;
+
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+
+  private MutableVolumeSet volumeSet;
+
+  private Checksum checksum;
+
+  private ConfigurationSource config;
+
+  private Timer timer;
+
+  //Simulate ratis log index (incremented for each chunk write)
+  private int logCounter;
+  private String datanodeId;
+  private String scmId;
+  private int numberOfPipelines;
+  private int currentPipeline;
+
+  @Override
+  public Void call() throws Exception {
+    init();
+
+    numberOfPipelines = datanodes / 3;
+
+    //generate only containers for one datanodes
+    setTestNo(getTestNo() / numberOfPipelines);
+
+    currentPipeline = (datanodeIndex - 1) % numberOfPipelines;
+
+    config = createOzoneConfiguration();
+
+    BlockManager blockManager = new BlockManagerImpl(config);
+    chunkManager = ChunkManagerFactory
+        .createChunkManager(config, blockManager);
+
+    final Collection<String> storageDirs =
+        MutableVolumeSet.getDatanodeStorageDirs(config);
+
+    String firstStorageDir =
+        StorageLocation.parse(storageDirs.iterator().next())
+            .getUri().getPath();
+
+    final Path hddsDir = Paths.get(firstStorageDir, "hdds");
+    if (!Files.exists(hddsDir)) {
+      throw new NoSuchFieldException(hddsDir

Review comment:
       `NoSuchFieldException` seems strange in this context.  Intended 
`NoSuchFileException`?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to