This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 9f67f4b1b5 HDDS-9611. Avoid unnecessary buffer conversion in KeyValueHandler.validateChunkChecksumData (#5542)
9f67f4b1b5 is described below
commit 9f67f4b1b59544493c02ce56a70250bfb7c5af78
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Mon Nov 6 03:57:07 2023 -0800
HDDS-9611. Avoid unnecessary buffer conversion in KeyValueHandler.validateChunkChecksumData (#5542)
---
.../org/apache/hadoop/ozone/common/Checksum.java | 7 ++++++-
.../ozone/common/ChunkBufferImplWithByteBuffer.java | 3 +--
.../common/ChunkBufferImplWithByteBufferList.java | 9 ++++++---
.../hadoop/ozone/common/utils/BufferUtils.java | 21 +++++++++++++++------
.../ozone/container/keyvalue/KeyValueHandler.java | 3 +--
.../container/keyvalue/helpers/ChunkUtils.java | 12 ++++++++++++
.../container/keyvalue/impl/BlockManagerImpl.java | 11 ++++++++++-
.../keyvalue/impl/FilePerBlockStrategy.java | 14 ++++----------
.../keyvalue/impl/FilePerChunkStrategy.java | 2 +-
.../container/keyvalue/interfaces/ChunkManager.java | 15 +++++++++++----
.../common/impl/TestContainerPersistence.java | 9 ++++-----
11 files changed, 71 insertions(+), 35 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
index 939527a5e2..059ed650f3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
@@ -231,6 +231,11 @@ public class Checksum {
return verifyChecksum(ByteBuffer.wrap(data), checksumData, 0);
}
+  private static boolean verifyChecksum(ByteBuffer data,
+      ChecksumData checksumData, int startIndex) throws OzoneChecksumException {
+    return verifyChecksum(ChunkBuffer.wrap(data), checksumData, startIndex);
+  }
+
/**
* Computes the ChecksumData for the input data and verifies that it
* matches with that of the input checksumData.
@@ -240,7 +245,7 @@ public class Checksum {
* data's computed checksum.
* @throws OzoneChecksumException is thrown if checksums do not match
*/
- private static boolean verifyChecksum(ByteBuffer data,
+ public static boolean verifyChecksum(ChunkBuffer data,
ChecksumData checksumData,
int startIndex) throws OzoneChecksumException {
ChecksumType checksumType = checksumData.getChecksumType();
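The hunk above makes the ChunkBuffer overload of verifyChecksum public, with a private ByteBuffer overload delegating to it via ChunkBuffer.wrap, so callers can verify chunk data without first converting it to a ByteString. A minimal sketch of the new call pattern, assuming the existing Checksum(ChecksumType, bytesPerChecksum) constructor and byte-array computeChecksum overload (the checksum type and data values are illustrative, not from this commit):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
    import org.apache.hadoop.ozone.common.Checksum;
    import org.apache.hadoop.ozone.common.ChecksumData;
    import org.apache.hadoop.ozone.common.ChunkBuffer;
    import org.apache.hadoop.ozone.common.OzoneChecksumException;

    public class VerifyChecksumSketch {
      public static void main(String[] args) throws OzoneChecksumException {
        byte[] bytes = {1, 2, 3};
        // Compute a checksum for the data, then verify it through the
        // ChunkBuffer directly -- no ByteString conversion needed any more.
        ChecksumData checksumData =
            new Checksum(ChecksumType.CRC32, 1024).computeChecksum(bytes);
        ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(bytes));
        Checksum.verifyChecksum(data, checksumData, 0);
      }
    }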
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
index ef85a60f16..0cf49681cb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.common;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -125,7 +124,7 @@ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer {
@Override
public List<ByteString> toByteStringListImpl(
Function<ByteBuffer, ByteString> f) {
- return Arrays.asList(f.apply(buffer));
+ return Collections.singletonList(f.apply(buffer));
}
@Override
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java
index f3f31739c6..f6a7f60b0a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.common;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
+
+import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
@@ -37,7 +39,8 @@ import java.util.function.Function;
*/
public class ChunkBufferImplWithByteBufferList implements ChunkBuffer {
- private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);
+ private static final List<ByteBuffer> EMPTY_BUFFER
+ = Collections.singletonList(ByteBuffer.allocate(0));
/** Buffer list backing the ChunkBuffer. */
private final List<ByteBuffer> buffers;
@@ -50,7 +53,7 @@ public class ChunkBufferImplWithByteBufferList implements ChunkBuffer {
Preconditions.checkArgument(buffers != null, "buffer == null");
this.buffers = !buffers.isEmpty() ? ImmutableList.copyOf(buffers) :
- ImmutableList.of(EMPTY_BUFFER);
+ EMPTY_BUFFER;
this.limit = buffers.stream().mapToInt(ByteBuffer::limit).sum();
findCurrent();
@@ -177,10 +180,10 @@ public class ChunkBufferImplWithByteBufferList implements ChunkBuffer {
return new ChunkBufferImplWithByteBufferList(duplicates);
}
- @Override
/**
* Returns the next buffer in the list irrespective of the bufferSize.
*/
+ @Override
public Iterable<ByteBuffer> iterate(int bufferSize) {
return () -> new Iterator<ByteBuffer>() {
@Override
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
index 323a80f201..8bfb7490c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
@@ -39,7 +39,7 @@ public final class BufferUtils {
* @param totalLen total length of all ByteBuffers
* @param bufferCapacity max capacity of each ByteBuffer
*/
- public static ByteBuffer[] assignByteBuffers(int totalLen,
+ public static ByteBuffer[] assignByteBuffers(long totalLen,
int bufferCapacity) {
Preconditions.checkArgument(totalLen > 0, "Buffer Length should be a " +
"positive integer.");
@@ -49,16 +49,16 @@ public final class BufferUtils {
int numBuffers = getNumberOfBins(totalLen, bufferCapacity);
ByteBuffer[] dataBuffers = new ByteBuffer[numBuffers];
- int buffersAllocated = 0;
+ long allocatedLen = 0;
// For each ByteBuffer (except the last) allocate bufferLen of capacity
for (int i = 0; i < numBuffers - 1; i++) {
dataBuffers[i] = ByteBuffer.allocate(bufferCapacity);
- buffersAllocated += bufferCapacity;
+ allocatedLen += bufferCapacity;
}
// For the last ByteBuffer, allocate as much space as is needed to fit
// remaining bytes
dataBuffers[numBuffers - 1] = ByteBuffer.allocate(
- (totalLen - buffersAllocated));
+ Math.toIntExact(totalLen - allocatedLen));
return dataBuffers;
}
@@ -124,8 +124,17 @@ public final class BufferUtils {
* @param maxElementsPerBin max number of elements per bin
* @return number of bins
*/
- public static int getNumberOfBins(long numElements, long maxElementsPerBin) {
- return (int) Math.ceil((double) numElements / (double) maxElementsPerBin);
+ public static int getNumberOfBins(long numElements, int maxElementsPerBin) {
+ Preconditions.checkArgument(numElements >= 0);
+ Preconditions.checkArgument(maxElementsPerBin > 0);
+ final long n = 1 + (numElements - 1) / maxElementsPerBin;
+ if (n > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("Integer overflow: n = " + n
+ + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE
+ + ", numElements = " + numElements
+ + ", maxElementsPerBin = " + maxElementsPerBin);
+ }
+ return Math.toIntExact(n);
}
public static void clearBuffers(ByteBuffer[] byteBuffers) {
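The getNumberOfBins rewrite above replaces floating-point Math.ceil with exact integer ceiling division, 1 + (numElements - 1) / maxElementsPerBin, and rejects results that would overflow an int. A small standalone sketch of the arithmetic (values are illustrative):

    public class CeilDivSketch {
      // Same ceiling-division formula as the patched getNumberOfBins.
      static long bins(long numElements, int maxElementsPerBin) {
        return 1 + (numElements - 1) / maxElementsPerBin;
      }

      public static void main(String[] args) {
        System.out.println(bins(10, 4));  // 3 bins: 4 + 4 + 2
        System.out.println(bins(8, 4));   // 2 bins: exact fit, no extra bin
        // The old (double) Math.ceil form can lose precision for very large
        // longs; the pure-integer form stays exact up to Long.MAX_VALUE.
        System.out.println(bins(Long.MAX_VALUE, Integer.MAX_VALUE));
      }
    }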
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 042ed97842..cfc5a280d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -778,8 +778,7 @@ public class KeyValueHandler extends Handler {
throws StorageContainerException {
if (validateChunkChecksumData) {
try {
- Checksum.verifyChecksum(data.toByteString(byteBufferToByteString),
- info.getChecksumData(), 0);
+ Checksum.verifyChecksum(data, info.getChecksumData(), 0);
} catch (OzoneChecksumException ex) {
throw ChunkUtils.wrapInStorageContainerException(ex);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 73d1f9070b..85f3e21422 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -32,6 +32,7 @@ import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
import java.util.EnumSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.ChunkBuffer;
+import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.util.Time;
@@ -59,6 +61,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure;
+import org.apache.ratis.util.function.CheckedConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -180,6 +183,15 @@ public final class ChunkUtils {
}
}
+ public static ChunkBuffer readData(long len, int bufferCapacity,
+ CheckedConsumer<ByteBuffer[], StorageContainerException> readMethod)
+ throws StorageContainerException {
+ final ByteBuffer[] buffers = BufferUtils.assignByteBuffers(len,
+ bufferCapacity);
+ readMethod.accept(buffers);
+ return ChunkBuffer.wrap(Arrays.asList(buffers));
+ }
+
/**
* Reads data from an existing chunk file into a list of ByteBuffers.
*
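The new readData overload above centralizes the allocate-read-wrap pattern: it sizes a ByteBuffer[] via BufferUtils.assignByteBuffers, lets the caller fill it through the CheckedConsumer, and returns the filled buffers wrapped as one ChunkBuffer. A hedged usage fragment, where chunkFile, offset, len, bufferCapacity and volume are the call-site variables taken from the FilePerBlockStrategy hunk further below:

    // Allocate buffers, fill them from the chunk file, wrap as a ChunkBuffer.
    ChunkBuffer chunk = ChunkUtils.readData(len, bufferCapacity,
        buffers -> ChunkUtils.readData(chunkFile, buffers, offset, len, volume));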
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index ff0f25bff6..449fe46ae0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -69,10 +69,19 @@ public class BlockManagerImpl implements BlockManager {
public BlockManagerImpl(ConfigurationSource conf) {
Preconditions.checkNotNull(conf, "Config cannot be null");
this.config = conf;
- this.defaultReadBufferCapacity = (int) config.getStorageSize(
+ final double size = config.getStorageSize(
ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY,
ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_DEFAULT,
StorageUnit.BYTES);
+ if (size <= 0) {
+ throw new IllegalArgumentException(
+ ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY + " <= 0");
+ } else if (size > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException(
+ ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY
+ + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
+ }
+ this.defaultReadBufferCapacity = (int) size;
}
@Override
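The constructor change above validates the configured default read buffer size before the narrowing cast: a non-positive value, or one above Integer.MAX_VALUE, now fails fast with IllegalArgumentException instead of being silently cast to int. A sketch of exercising the check, assuming OzoneConfiguration as the ConfigurationSource (the key name comes from the diff; the "4MB" and "0" values are illustrative):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;
    import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;

    public class ReadBufferConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set(ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY, "4MB");
        BlockManagerImpl ok = new BlockManagerImpl(conf);   // accepted
        conf.set(ScmConfigKeys.OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY, "0");
        new BlockManagerImpl(conf);   // throws IllegalArgumentException (<= 0)
      }
    }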
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 88b75ed039..ccab7f35e8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -22,14 +22,12 @@ import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
-import com.google.common.collect.Lists;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.common.ChunkBuffer;
-import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
@@ -62,6 +60,7 @@ import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersi
import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA;
import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.readData;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.validateChunkForOverwrite;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.verifyChunkFileExists;
@@ -189,17 +188,12 @@ public class FilePerBlockStrategy implements ChunkManager {
File chunkFile = getChunkFile(container, blockID, info);
- int len = (int) info.getLen();
+ final long len = info.getLen();
long offset = info.getOffset();
int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
defaultReadBufferCapacity);
-
- ByteBuffer[] dataBuffers = BufferUtils.assignByteBuffers(len,
- bufferCapacity);
-
- ChunkUtils.readData(chunkFile, dataBuffers, offset, len, volume);
-
- return ChunkBuffer.wrap(Lists.newArrayList(dataBuffers));
+ return readData(len, bufferCapacity,
+ array -> readData(chunkFile, array, offset, len, volume));
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 8c751ef90e..3ee331c2b6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -229,7 +229,7 @@ public class FilePerChunkStrategy implements ChunkManager {
possibleFiles.add(finalChunkFile);
}
- int len = (int) info.getLen();
+ final long len = info.getLen();
int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
defaultReadBufferCapacity);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
index 9f4de60e8e..151c15f356 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
@@ -119,10 +119,10 @@ public interface ChunkManager {
static int getBufferCapacityForChunkRead(ChunkInfo chunkInfo,
int defaultReadBufferCapacity) {
- int bufferCapacity = 0;
+ long bufferCapacity = 0;
if (chunkInfo.isReadDataIntoSingleBuffer()) {
// Older client - read all chunk data into one single buffer.
- bufferCapacity = (int) chunkInfo.getLen();
+ bufferCapacity = chunkInfo.getLen();
} else {
// Set buffer capacity to checksum boundary size so that each buffer
// corresponds to one checksum. If checksum is NONE, then set buffer
@@ -140,9 +140,16 @@ public interface ChunkManager {
}
// If the buffer capacity is 0, set all the data into one ByteBuffer
if (bufferCapacity == 0) {
- bufferCapacity = (int) chunkInfo.getLen();
+ bufferCapacity = chunkInfo.getLen();
}
- return bufferCapacity;
+ if (bufferCapacity > Integer.MAX_VALUE) {
+ throw new IllegalStateException("Integer overflow:"
+ + " bufferCapacity = " + bufferCapacity
+ + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE
+ + ", defaultReadBufferCapacity=" + defaultReadBufferCapacity
+ + ", chunkInfo=" + chunkInfo);
+ }
+ return Math.toIntExact(bufferCapacity);
}
}
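The guard above matters mostly on the isReadDataIntoSingleBuffer path, where an older client asks for the whole chunk in one buffer: for a chunk longer than Integer.MAX_VALUE bytes, the old (int) cast would silently wrap to a negative capacity, while the patched code throws. A tiny sketch of the difference (the 3 GB length is illustrative):

    public class CapacityOverflowSketch {
      public static void main(String[] args) {
        long bufferCapacity = 3_000_000_000L;      // e.g. one 3 GB chunk
        System.out.println((int) bufferCapacity);  // -1294967296: silent wrap
        // Patched behavior: fail fast before the narrowing conversion.
        if (bufferCapacity > Integer.MAX_VALUE) {
          throw new IllegalStateException(
              "Integer overflow: bufferCapacity = " + bufferCapacity);
        }
      }
    }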
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 300980fde9..1b186c9e79 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -95,7 +95,6 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
-import org.junit.jupiter.api.Assertions;
import org.junit.rules.ExpectedException;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
@@ -381,9 +380,9 @@ public class TestContainerPersistence {
Table<String, Long> metadataTable = store.getMetadataTable();
// Block data and metadata tables should have data.
- Assertions.assertNotNull(blockTable
+ Assert.assertNotNull(blockTable
.getIfExist(containerData.getBlockKey(testBlock.getLocalID())));
- Assertions.assertNotNull(metadataTable
+ Assert.assertNotNull(metadataTable
.getIfExist(containerData.getBlockCountKey()));
}
}
@@ -403,9 +402,9 @@ public class TestContainerPersistence {
Table<String, Long> metadataTable = store.getMetadataTable();
// Block data and metadata tables should have data.
- Assertions.assertNull(blockTable
+ Assert.assertNull(blockTable
.getIfExist(containerData.getBlockKey(testBlock.getLocalID())));
- Assertions.assertNull(metadataTable
+ Assert.assertNull(metadataTable
.getIfExist(containerData.getBlockCountKey()));
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]