This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new cb83827806 HDDS-9393. Introduce data unit support for objects creation
in freon (#5519)
cb83827806 is described below
commit cb838278061a362e802a80bc34b1e2ed04926474
Author: tanvipenumudy <[email protected]>
AuthorDate: Wed Nov 22 22:46:18 2023 +0530
HDDS-9393. Introduce data unit support for objects creation in freon (#5519)
---
.../org/apache/hadoop/hdds/conf/StorageSize.java | 11 +++++++
.../hadoop/ozone/freon/TestDataValidate.java | 2 +-
.../freon/TestFreonWithDatanodeFastRestart.java | 2 +-
.../ozone/freon/TestFreonWithPipelineDestroy.java | 2 +-
.../ozone/freon/TestHadoopDirTreeGenerator.java | 31 ++++++++++--------
.../ozone/freon/TestOmBucketReadWriteFileOps.java | 10 +++---
.../ozone/freon/TestOmBucketReadWriteKeyOps.java | 10 +++---
.../hadoop/ozone/freon/TestRandomKeyGenerator.java | 6 ++--
.../ozone/freon/AbstractOmBucketReadWriteOps.java | 16 +++++----
.../hadoop/ozone/freon/HadoopDirTreeGenerator.java | 14 ++++----
.../hadoop/ozone/freon/HadoopFsGenerator.java | 12 ++++---
.../hadoop/ozone/freon/OmMetadataGenerator.java | 19 +++++++----
.../ozone/freon/OzoneClientKeyGenerator.java | 15 +++++----
.../hadoop/ozone/freon/RandomKeyGenerator.java | 21 +++++++-----
.../hadoop/ozone/freon/RangeKeysGenerator.java | 14 ++++----
.../hadoop/ozone/freon/StorageSizeConverter.java | 38 ++++++++++++++++++++++
.../hadoop/ozone/freon/StreamingGenerator.java | 13 +++++---
17 files changed, 156 insertions(+), 80 deletions(-)
diff --git
a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
index 15016be40a..9b67953bad 100644
---
a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
+++
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
@@ -91,6 +91,14 @@ public class StorageSize {
}
+ public static StorageSize parse(String value, StorageUnit defaultUnit) {
+ try {
+ return parse(value);
+ } catch (IllegalArgumentException e) {
+ return new StorageSize(defaultUnit, Double.parseDouble(value));
+ }
+ }
+
public StorageUnit getUnit() {
return unit;
}
@@ -99,4 +107,7 @@ public class StorageSize {
return value;
}
+ public long toBytes() {
+ return (long) unit.toBytes(value);
+ }
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
index b9eb231ff8..ae7f7772dd 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -70,7 +70,7 @@ public abstract class TestDataValidate {
cmd.execute("--num-of-volumes", "1",
"--num-of-buckets", "1",
"--num-of-keys", "1",
- "--key-size", "20971520",
+ "--key-size", "20MB",
"--factor", "THREE",
"--type", "RATIS",
"--validate-writes"
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
index 71c89e2e6b..8844dbbb91 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
@@ -121,7 +121,7 @@ public class TestFreonWithDatanodeFastRestart {
cmd.execute("--num-of-volumes", "1",
"--num-of-buckets", "1",
"--num-of-keys", "1",
- "--key-size", "20971520",
+ "--key-size", "20MB",
"--factor", "THREE",
"--type", "RATIS",
"--validate-writes"
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
index 50f49713bf..cf4a7ae543 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
@@ -108,7 +108,7 @@ public class TestFreonWithPipelineDestroy {
cmd.execute("--num-of-volumes", "1",
"--num-of-buckets", "1",
"--num-of-keys", "1",
- "--key-size", "20971520",
+ "--key-size", "20MB",
"--factor", "THREE",
"--type", "RATIS",
"--validate-writes"
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index 01fd60cfd2..f99ee1a42e 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -111,25 +113,25 @@ public class TestHadoopDirTreeGenerator {
out.close();
verifyDirTree("vol1", "bucket1", 1,
- 1, 1, 0);
+ 1, 1, "0");
verifyDirTree("vol2", "bucket1", 1,
- 5, 1, 5);
+ 5, 1, "5B");
verifyDirTree("vol3", "bucket1", 2,
- 5, 3, 1);
+ 5, 3, "1B");
verifyDirTree("vol4", "bucket1", 3,
- 2, 4, 2);
+ 2, 4, "2B");
verifyDirTree("vol5", "bucket1", 5,
- 4, 1, 0);
+ 4, 1, "0");
// default page size is Constants.LISTING_PAGE_SIZE = 1024
verifyDirTree("vol6", "bucket1", 2,
- 1, 1100, 0);
+ 1, 1100, "0");
} finally {
shutdown();
}
}
private void verifyDirTree(String volumeName, String bucketName, int depth,
- int span, int fileCount, int perFileSizeInBytes)
+ int span, int fileCount, String perFileSize)
throws IOException {
store.createVolume(volumeName);
@@ -140,7 +142,7 @@ public class TestHadoopDirTreeGenerator {
new Freon().execute(
new String[]{"-conf", confPath, "dtsg", "-d", depth + "", "-c",
fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
- "-g", perFileSizeInBytes + ""});
+ "-g", perFileSize});
// verify the directory structure
LOG.info("Started verifying the directory structure...");
FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
@@ -152,8 +154,9 @@ public class TestHadoopDirTreeGenerator {
// verify the num of peer directories, expected span count is 1
// as it has only one dir at root.
verifyActualSpan(1, fileStatuses);
- int actualDepth = traverseToLeaf(fileSystem, fileStatus.getPath(),
- 1, depth, span, fileCount, perFileSizeInBytes);
+ int actualDepth =
+ traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span,
+ fileCount, StorageSize.parse(perFileSize, StorageUnit.BYTES));
Assert.assertEquals("Mismatch depth in a path",
depth, actualDepth);
}
@@ -161,7 +164,7 @@ public class TestHadoopDirTreeGenerator {
private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
int expectedDepth, int expectedSpanCnt,
- int expectedFileCnt, int perFileSizeInBytes)
+ int expectedFileCnt, StorageSize perFileSize)
throws IOException {
FileStatus[] fileStatuses = fs.listStatus(dirPath);
// check the num of peer directories except root and leaf as both
@@ -175,10 +178,10 @@ public class TestHadoopDirTreeGenerator {
if (fileStatus.isDirectory()) {
++depth;
return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth,
- expectedSpanCnt, expectedFileCnt, perFileSizeInBytes);
+ expectedSpanCnt, expectedFileCnt, perFileSize);
} else {
- Assert.assertEquals("Mismatches file len",
- perFileSizeInBytes, fileStatus.getLen());
+ Assert.assertEquals("Mismatches file len", perFileSize.toBytes(),
+ fileStatus.getLen());
String fName = fileStatus.getPath().getName();
Assert.assertFalse("actualNumFiles:" + actualNumFiles +
", fName:" + fName + ", expectedFileCnt:" +
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
index 39ff29c623..8cb8edd01c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
@@ -129,7 +129,7 @@ public class TestOmBucketReadWriteFileOps {
.setPrefixFilePath("/dir1/").setTotalThreadCount(10)
.setNumOfReadOperations(5).setNumOfWriteOperations(3)
.setFileCountForRead(5).setFileCountForWrite(3).
- setFileSizeInBytes(64).setBufferSize(16));
+ setFileSize("64B").setBufferSize(16));
verifyFreonCommand(
new ParameterBuilder().setVolumeName("vol5").setBucketName("bucket1")
.setPrefixFilePath("/dir1/dir2/dir3").setTotalThreadCount(10)
@@ -157,7 +157,7 @@ public class TestOmBucketReadWriteFileOps {
new String[]{"-conf", confPath, "obrwf", "-P", rootPath,
"-r", String.valueOf(parameterBuilder.fileCountForRead),
"-w", String.valueOf(parameterBuilder.fileCountForWrite),
- "-g", String.valueOf(parameterBuilder.fileSizeInBytes),
+ "-g", parameterBuilder.fileSize,
"--buffer", String.valueOf(parameterBuilder.bufferSize),
"-l", String.valueOf(parameterBuilder.length),
"-c", String.valueOf(parameterBuilder.totalThreadCount),
@@ -257,7 +257,7 @@ public class TestOmBucketReadWriteFileOps {
private String prefixFilePath = "/dir1/dir2";
private int fileCountForRead = 100;
private int fileCountForWrite = 10;
- private long fileSizeInBytes = 256;
+ private String fileSize = "256B";
private int bufferSize = 64;
private int length = 10;
private int totalThreadCount = 100;
@@ -290,8 +290,8 @@ public class TestOmBucketReadWriteFileOps {
return this;
}
- private ParameterBuilder setFileSizeInBytes(long fileSizeInBytesParam) {
- fileSizeInBytes = fileSizeInBytesParam;
+ private ParameterBuilder setFileSize(String fileSizeParam) {
+ fileSize = fileSizeParam;
return this;
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
index f32eb47e44..7a1df34814 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
@@ -129,7 +129,7 @@ public class TestOmBucketReadWriteKeyOps {
new ParameterBuilder().setVolumeName("vol4").setBucketName("bucket1")
.setTotalThreadCount(10).setNumOfReadOperations(5)
.setNumOfWriteOperations(3).setKeyCountForRead(5)
- .setKeyCountForWrite(3).setKeySizeInBytes(64)
+ .setKeyCountForWrite(3).setKeySize("64B")
.setBufferSize(16));
verifyFreonCommand(
new ParameterBuilder().setVolumeName("vol5").setBucketName("bucket1")
@@ -161,7 +161,7 @@ public class TestOmBucketReadWriteKeyOps {
"-b", parameterBuilder.bucketName,
"-k", String.valueOf(parameterBuilder.keyCountForRead),
"-w", String.valueOf(parameterBuilder.keyCountForWrite),
- "-g", String.valueOf(parameterBuilder.keySizeInBytes),
+ "-g", parameterBuilder.keySize,
"--buffer", String.valueOf(parameterBuilder.bufferSize),
"-l", String.valueOf(parameterBuilder.length),
"-c", String.valueOf(parameterBuilder.totalThreadCount),
@@ -242,7 +242,7 @@ public class TestOmBucketReadWriteKeyOps {
private String bucketName = "bucket1";
private int keyCountForRead = 100;
private int keyCountForWrite = 10;
- private long keySizeInBytes = 256;
+ private String keySize = "256B";
private int bufferSize = 64;
private int length = 10;
private int totalThreadCount = 100;
@@ -270,8 +270,8 @@ public class TestOmBucketReadWriteKeyOps {
return this;
}
- private ParameterBuilder setKeySizeInBytes(long keySizeInBytesParam) {
- keySizeInBytes = keySizeInBytesParam;
+ private ParameterBuilder setKeySize(String keySizeParam) {
+ keySize = keySizeParam;
return this;
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
index ad9c742fce..20c1727938 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
@@ -128,7 +128,7 @@ public class TestRandomKeyGenerator {
"--num-of-buckets", "1",
"--num-of-keys", "10",
"--num-of-threads", "10",
- "--key-size", "10240",
+ "--key-size", "10KB",
"--factor", "THREE",
"--type", "RATIS"
);
@@ -147,7 +147,7 @@ public class TestRandomKeyGenerator {
"--num-of-buckets", "1",
"--num-of-keys", "10",
"--num-of-threads", "10",
- "--key-size", "10240",
+ "--key-size", "10KB",
"--factor", "THREE",
"--type", "RATIS"
);
@@ -166,7 +166,7 @@ public class TestRandomKeyGenerator {
"--num-of-buckets", "1",
"--num-of-keys", "1",
"--num-of-threads", "1",
- "--key-size", String.valueOf(10L + Integer.MAX_VALUE),
+ "--key-size", "2.01GB",
"--factor", "THREE",
"--type", "RATIS",
"--validate-writes"
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
index c5b1cb229d..154f8e1ca9 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.freon;
import com.codahale.metrics.Timer;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.ozone.OzoneConsts;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -46,10 +47,11 @@ public abstract class AbstractOmBucketReadWriteOps extends
BaseFreonGenerator
LoggerFactory.getLogger(AbstractOmBucketReadWriteOps.class);
@Option(names = {"-g", "--size"},
- description = "Generated data size (in bytes) of each key/file to be " +
- "written.",
- defaultValue = "256")
- private long sizeInBytes;
+ description = "Generated data size of each key/file to be written. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "256B",
+ converter = StorageSizeConverter.class)
+ private StorageSize size;
@Option(names = {"--buffer"},
description = "Size of buffer used for generating the key/file content.",
@@ -103,7 +105,7 @@ public abstract class AbstractOmBucketReadWriteOps extends
BaseFreonGenerator
writeThreadCount = totalThreadCount - readThreadCount;
display();
- print("SizeInBytes: " + sizeInBytes);
+ print("SizeInBytes: " + size.toBytes());
print("bufferSize: " + bufferSize);
print("totalThreadCount: " + totalThreadCount);
print("readThreadPercentage: " + readThreadPercentage);
@@ -114,7 +116,7 @@ public abstract class AbstractOmBucketReadWriteOps extends
BaseFreonGenerator
print("numOfWriteOperations: " + numOfWriteOperations);
ozoneConfiguration = createOzoneConfiguration();
- contentGenerator = new ContentGenerator(sizeInBytes, bufferSize);
+ contentGenerator = new ContentGenerator(size.toBytes(), bufferSize);
timer = getMetrics().timer("om-bucket-read-write-ops");
initialize(ozoneConfiguration);
@@ -223,6 +225,6 @@ public abstract class AbstractOmBucketReadWriteOps extends
BaseFreonGenerator
protected abstract OutputStream create(String pathName) throws IOException;
protected long getSizeInBytes() {
- return sizeInBytes;
+ return size.toBytes();
}
}
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
index bc3353e22a..91d78234ba 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
@@ -67,11 +68,12 @@ public class HadoopDirTreeGenerator extends
BaseFreonGenerator
private int fileCount;
@Option(names = {"-g", "--file-size", "--fileSize"},
- description = "Generated data size(in bytes) of each file to be " +
- "written in each directory. Full name --fileSize will be removed " +
- "in later versions.",
- defaultValue = "4096")
- private long fileSizeInBytes;
+ description = "Generated data size of each file to be " +
+ "written in each directory. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "4KB",
+ converter = StorageSizeConverter.class)
+ private StorageSize fileSize;
@Option(names = {"-b", "--buffer"},
description = "Size of buffer used to generated the file content.",
@@ -113,7 +115,7 @@ public class HadoopDirTreeGenerator extends
BaseFreonGenerator
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
- contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize);
+ contentGenerator = new ContentGenerator(fileSize.toBytes(), bufferSize);
timer = getMetrics().timer("file-create");
runTests(this::createDir);
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
index 7dd66937db..aced39c47b 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import com.codahale.metrics.Timer;
+import org.apache.hadoop.hdds.conf.StorageSize;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
@@ -49,9 +50,11 @@ public class HadoopFsGenerator extends BaseFreonGenerator
private String rootPath;
@Option(names = {"-s", "--size"},
- description = "Size of the generated files (in bytes)",
- defaultValue = "10240")
- private long fileSize;
+ description = "Size of the generated files. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "10KB",
+ converter = StorageSizeConverter.class)
+ private StorageSize fileSize;
@Option(names = {"--buffer"},
description = "Size of buffer used store the generated key content",
@@ -95,7 +98,8 @@ public class HadoopFsGenerator extends BaseFreonGenerator
}
contentGenerator =
- new ContentGenerator(fileSize, bufferSize, copyBufferSize, flushOrSync);
+ new ContentGenerator(fileSize.toBytes(), bufferSize, copyBufferSize,
+ flushOrSync);
timer = getMetrics().timer("file-create");
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
index 106bf9d2ec..b214b0968a 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
@@ -38,6 +38,7 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -96,9 +97,11 @@ public class OmMetadataGenerator extends BaseFreonGenerator
private String bucketName;
@Option(names = {"-s", "--size"},
- description = "The size in byte of a file for the Create File/Key op.",
- defaultValue = "0")
- private int dataSize;
+ description = "The size in byte of a file for the Create File/Key op. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "0",
+ converter = StorageSizeConverter.class)
+ private StorageSize dataSize;
@Option(names = {"--buffer"},
description = "Size of buffer used to generated the key content.",
@@ -165,7 +168,7 @@ public class OmMetadataGenerator extends BaseFreonGenerator
mixedOperation = true;
}
init();
- contentGenerator = new ContentGenerator(dataSize, bufferSize);
+ contentGenerator = new ContentGenerator(dataSize.toBytes(), bufferSize);
omKeyArgsBuilder = ThreadLocal.withInitial(this::createKeyArgsBuilder);
OzoneConfiguration conf = createOzoneConfiguration();
replicationConfig = ReplicationConfig.getDefault(conf);
@@ -323,7 +326,8 @@ public class OmMetadataGenerator extends BaseFreonGenerator
case CREATE_KEY:
keyName = getPath(counter);
getMetrics().timer(operation.name()).time(() -> {
- try (OutputStream stream = bucket.createKey(keyName, dataSize)) {
+ try (OutputStream stream = bucket.createKey(keyName,
+ dataSize.toBytes())) {
contentGenerator.write(stream);
}
return null;
@@ -377,8 +381,9 @@ public class OmMetadataGenerator extends BaseFreonGenerator
case CREATE_FILE:
keyName = getPath(counter);
getMetrics().timer(operation.name()).time(() -> {
- try (OutputStream stream = bucket.createFile(
- keyName, dataSize, replicationConfig, true, false)) {
+ try (
+ OutputStream stream = bucket.createFile(keyName, dataSize.toBytes(),
+ replicationConfig, true, false)) {
contentGenerator.write(stream);
}
return null;
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
index b119e27ea6..4a1958247a 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
@@ -24,6 +24,7 @@ import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -60,9 +61,11 @@ public class OzoneClientKeyGenerator extends
BaseFreonGenerator
private String bucketName;
@Option(names = {"-s", "--size"},
- description = "Size of the generated key (in bytes)",
- defaultValue = "10240")
- private long keySize;
+ description = "Size of the generated key. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "10KB",
+ converter = StorageSizeConverter.class)
+ private StorageSize keySize;
@Option(names = {"--buffer"},
description = "Size of buffer used to generated the key content.",
@@ -97,7 +100,7 @@ public class OzoneClientKeyGenerator extends
BaseFreonGenerator
OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
- contentGenerator = new ContentGenerator(keySize, bufferSize);
+ contentGenerator = new ContentGenerator(keySize.toBytes(), bufferSize);
metadata = new HashMap<>();
replicationConfig = replication.fromParamsOrConfig(ozoneConfiguration);
@@ -123,7 +126,7 @@ public class OzoneClientKeyGenerator extends
BaseFreonGenerator
final String key = generateObjectName(counter);
timer.time(() -> {
- try (OutputStream stream = bucket.createKey(key, keySize,
+ try (OutputStream stream = bucket.createKey(key, keySize.toBytes(),
replicationConfig, metadata)) {
contentGenerator.write(stream);
stream.flush();
@@ -139,7 +142,7 @@ public class OzoneClientKeyGenerator extends
BaseFreonGenerator
timer.time(() -> {
try (OzoneDataStreamOutput stream = bucket.createStreamKey(
- key, keySize, conf, metadata)) {
+ key, keySize.toBytes(), conf, metadata)) {
contentGenerator.write(stream);
}
return null;
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 87608250f9..c964676f26 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -145,10 +146,12 @@ public final class RandomKeyGenerator implements
Callable<Void> {
@Option(
names = {"--key-size", "--keySize"},
description = "Specifies the size of Key in bytes to be created. Full" +
- " name --keySize will be removed in later versions.",
- defaultValue = "10240"
+ " name --keySize will be removed in later versions. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "10KB",
+ converter = StorageSizeConverter.class
)
- private long keySize = 10240;
+ private StorageSize keySize;
@Option(
names = {"--validate-writes", "--validateWrites"},
@@ -301,7 +304,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
// Compute the common initial digest for all keys without their UUID
if (validateWrites) {
commonInitialMD = DigestUtils.getDigest(DIGEST_ALGORITHM);
- for (long nrRemaining = keySize; nrRemaining > 0;
+ for (long nrRemaining = keySize.toBytes(); nrRemaining > 0;
nrRemaining -= bufferSize) {
int curSize = (int)Math.min(bufferSize, nrRemaining);
commonInitialMD.update(keyValueBuffer, 0, curSize);
@@ -319,7 +322,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
LOG.info("Number of Volumes: {}.", numOfVolumes);
LOG.info("Number of Buckets per Volume: {}.", numOfBuckets);
LOG.info("Number of Keys per Bucket: {}.", numOfKeys);
- LOG.info("Key size: {} bytes", keySize);
+ LOG.info("Key size: {} bytes", keySize.toBytes());
LOG.info("Buffer size: {} bytes", bufferSize);
LOG.info("validateWrites : {}", validateWrites);
LOG.info("Number of Validate Threads: {}", numOfValidateThreads);
@@ -804,7 +807,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
try {
try (AutoCloseable scope = TracingUtil.createActivatedSpan("createKey"))
{
long keyCreateStart = System.nanoTime();
- try (OzoneOutputStream os = bucket.createKey(keyName, keySize,
+ try (OzoneOutputStream os = bucket.createKey(keyName, keySize.toBytes(),
replicationConfig, new HashMap<>())) {
long keyCreationDuration = System.nanoTime() - keyCreateStart;
histograms.get(FreonOps.KEY_CREATE.ordinal())
@@ -814,7 +817,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
try (AutoCloseable writeScope = TracingUtil
.createActivatedSpan("writeKeyData")) {
long keyWriteStart = System.nanoTime();
- for (long nrRemaining = keySize;
+ for (long nrRemaining = keySize.toBytes();
nrRemaining > 0; nrRemaining -= bufferSize) {
int curSize = (int) Math.min(bufferSize, nrRemaining);
os.write(keyValueBuffer, 0, curSize);
@@ -824,7 +827,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
histograms.get(FreonOps.KEY_WRITE.ordinal())
.update(keyWriteDuration);
keyWriteTime.getAndAdd(keyWriteDuration);
- totalBytesWritten.getAndAdd(keySize);
+ totalBytesWritten.getAndAdd(keySize.toBytes());
numberOfKeysAdded.getAndIncrement();
}
}
@@ -958,7 +961,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
this.numOfBuckets = RandomKeyGenerator.this.numOfBuckets;
this.numOfKeys = RandomKeyGenerator.this.numOfKeys;
this.numOfThreads = RandomKeyGenerator.this.numOfThreads;
- this.keySize = RandomKeyGenerator.this.keySize;
+ this.keySize = RandomKeyGenerator.this.keySize.toBytes();
this.bufferSize = RandomKeyGenerator.this.bufferSize;
this.jobStartTime =
Time.formatTime(RandomKeyGenerator.this.jobStartTime);
replicationType = replicationConfig.getReplicationType().name();
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java
index 5e01da7b4a..b826651a6f 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.ozone.freon;
import com.codahale.metrics.Timer;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.slf4j.Logger;
@@ -79,10 +80,11 @@ public class RangeKeysGenerator extends BaseFreonGenerator
private String encodeFormat;
@CommandLine.Option(names = {"-g", "--size"},
- description = "Generated object size (in bytes) " +
- "to be written.",
- defaultValue = "1")
- private long objectSizeInBytes;
+ description = "Generated object size. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "1B",
+ converter = StorageSizeConverter.class)
+ private StorageSize objectSize;
@CommandLine.Option(names = {"--buffer"},
description = "Size of buffer used to generate object content.",
@@ -113,7 +115,7 @@ public class RangeKeysGenerator extends BaseFreonGenerator
ensureVolumeAndBucketExist(ozoneClients[0], volumeName, bucketName);
contentGenerator =
- new ContentGenerator(objectSizeInBytes, bufferSize);
+ new ContentGenerator(objectSize.toBytes(), bufferSize);
timer = getMetrics().timer("key-read-write");
kg = new KeyGeneratorUtil();
@@ -159,7 +161,7 @@ public class RangeKeysGenerator extends BaseFreonGenerator
keyNameGeneratorfunc.apply(i);
try (OzoneOutputStream out = client.getProxy().
createKey(volumeName, bucketName, keyName,
- objectSizeInBytes, null, new HashMap())) {
+ objectSize.toBytes(), null, new HashMap())) {
contentGenerator.write(out);
}
}
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StorageSizeConverter.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StorageSizeConverter.java
new file mode 100644
index 0000000000..2588004e58
--- /dev/null
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StorageSizeConverter.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.StorageSize;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import picocli.CommandLine.ITypeConverter;
+
+/**
+ * A Picocli custom converter for parsing command line string values into
+ * StorageSize objects.
+ */
+
+public class StorageSizeConverter implements ITypeConverter<StorageSize> {
+
+ public static final String STORAGE_SIZE_DESCRIPTION = "You can specify the " +
+ "size using data units like 'GB', 'MB', 'KB', etc. Size is in base 2 " +
+ "binary.";
+
+ @Override
+ public StorageSize convert(String value) {
+ return StorageSize.parse(value, StorageUnit.BYTES);
+ }
+}
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
index 35580fdefa..dd6e3e99c2 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.freon;
import com.codahale.metrics.Timer;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.ozone.container.stream.DirectoryServerDestination;
import org.apache.hadoop.ozone.container.stream.DirectoryServerSource;
import org.apache.hadoop.ozone.container.stream.StreamingClient;
@@ -61,9 +62,11 @@ public class StreamingGenerator extends BaseFreonGenerator
private int numberOfFiles;
@CommandLine.Option(names = {"--size"},
- description = "Size of the generated files.",
- defaultValue = "104857600")
- private long fileSize;
+ description = "Size of the generated files. " +
+ StorageSizeConverter.STORAGE_SIZE_DESCRIPTION,
+ defaultValue = "100MB",
+ converter = StorageSizeConverter.class)
+ private StorageSize fileSize;
private static final String SUB_DIR_NAME = "dir1";
@@ -91,8 +94,8 @@ public class StreamingGenerator extends BaseFreonGenerator
}
Path subDir = sourceDir.resolve(SUB_DIR_NAME);
Files.createDirectories(subDir);
- ContentGenerator contentGenerator = new ContentGenerator(fileSize,
- 1024);
+ ContentGenerator contentGenerator =
+ new ContentGenerator(fileSize.toBytes(), 1024);
for (int i = 0; i < numberOfFiles; i++) {
try (FileOutputStream out = new FileOutputStream(
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]