This is an automated email from the ASF dual-hosted git repository.
ritesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f39f82d125 HDDS-9114. HDDS-9115. ETag support (#5162)
f39f82d125 is described below
commit f39f82d125df3e9e8479735ddd5313cc2989043f
Author: Slava Tutrinov <[email protected]>
AuthorDate: Tue Oct 31 23:09:48 2023 +0300
HDDS-9114. HDDS-9115. ETag support (#5162)
---
.../client/io/BlockDataStreamOutputEntryPool.java | 8 +-
.../client/io/BlockOutputStreamEntryPool.java | 12 +-
.../ozone/client/io/CipherOutputStreamOzone.java | 54 +++++++++
.../hadoop/ozone/client/io/ECKeyOutputStream.java | 9 +-
.../ozone/client/io/KeyDataStreamOutput.java | 9 +-
.../hadoop/ozone/client/io/KeyMetadataAware.java | 30 +++++
.../hadoop/ozone/client/io/KeyOutputStream.java | 8 +-
.../ozone/client/io/OzoneDataStreamOutput.java | 17 ++-
.../hadoop/ozone/client/io/OzoneOutputStream.java | 22 +++-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 76 +++++++------
...OzoneManagerProtocolClientSideTranslatorPB.java | 2 +
.../dist/src/main/smoketest/s3/commonawslib.robot | 5 +
.../dist/src/main/smoketest/s3/objectputget.robot | 53 +++++++++
.../ozone/om/request/key/OMKeyCommitRequest.java | 3 +
.../om/request/key/OMKeyCommitRequestWithFSO.java | 3 +
.../S3MultipartUploadCommitPartRequest.java | 3 +
.../S3MultipartUploadCompleteRequest.java | 21 +++-
.../hadoop/ozone/s3/endpoint/EndpointBase.java | 13 ++-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 123 ++++++++++++++-------
.../ozone/s3/endpoint/ObjectEndpointStreaming.java | 57 +++++++---
.../hadoop/ozone/client/OzoneBucketStub.java | 70 +++++++++++-
.../ozone/s3/endpoint/TestPermissionCheck.java | 5 +-
22 files changed, 495 insertions(+), 108 deletions(-)
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index e51242cc10..10b16f800d 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -38,12 +38,13 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
+import java.util.Map;
/**
* This class manages the stream entries list and handles block allocation
* from OzoneManager.
*/
-public class BlockDataStreamOutputEntryPool {
+public class BlockDataStreamOutputEntryPool implements KeyMetadataAware {
public static final Logger LOG =
LoggerFactory.getLogger(BlockDataStreamOutputEntryPool.class);
@@ -287,4 +288,9 @@ public class BlockDataStreamOutputEntryPool {
}
return totalDataLen;
}
+
+ @Override
+ public Map<String, String> getMetadata() {
+ return this.keyArgs.getMetadata();
+ }
}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 8a1796c3f0..7b0259e379 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
+import java.util.Map;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -51,7 +52,7 @@ import org.slf4j.LoggerFactory;
* entries that represent a writing channel towards DataNodes are the main
* responsibility of this class.
*/
-public class BlockOutputStreamEntryPool {
+public class BlockOutputStreamEntryPool implements KeyMetadataAware {
public static final Logger LOG =
LoggerFactory.getLogger(BlockOutputStreamEntryPool.class);
@@ -428,4 +429,13 @@ public class BlockOutputStreamEntryPool {
boolean isEmpty() {
return streamEntries.isEmpty();
}
+
+ @Override
+ public Map<String, String> getMetadata() {
+ if (keyArgs != null) {
+ return this.keyArgs.getMetadata();
+ }
+ return null;
+ }
+
}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/CipherOutputStreamOzone.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/CipherOutputStreamOzone.java
new file mode 100644
index 0000000000..37098034cb
--- /dev/null
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/CipherOutputStreamOzone.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client.io;
+
+import javax.crypto.Cipher;
+import javax.crypto.CipherOutputStream;
+
+import java.io.OutputStream;
+import java.util.Map;
+
+/**
+ * Wrap javax.crypto.CipherOutputStream with the method to return wrapped
+ * output stream.
+ */
+public class CipherOutputStreamOzone extends CipherOutputStream
+ implements KeyMetadataAware {
+
+ private OutputStream output;
+
+ public CipherOutputStreamOzone(OutputStream output, Cipher cipher) {
+ super(output, cipher);
+ this.output = output;
+ }
+
+ protected CipherOutputStreamOzone(OutputStream output) {
+ super(output);
+ this.output = output;
+ }
+
+ public OutputStream getWrappedStream() {
+ return output;
+ }
+
+ @Override
+ public Map<String, String> getMetadata() {
+ return ((KeyMetadataAware)getWrappedStream()).getMetadata();
+ }
+}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
index 5ca9890cbe..242b2606f8 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
@@ -22,6 +22,7 @@ import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
@@ -62,7 +63,8 @@ import org.slf4j.LoggerFactory;
* ECKeyOutputStream handles the EC writes by writing the data into underlying
* block output streams chunk by chunk.
*/
-public final class ECKeyOutputStream extends KeyOutputStream {
+public final class ECKeyOutputStream extends KeyOutputStream
+ implements KeyMetadataAware {
private OzoneClientConfig config;
private ECChunkBuffers ecChunkBufferCache;
private final BlockingQueue<ECChunkBuffers> ecStripeQueue;
@@ -608,6 +610,11 @@ public final class ECKeyOutputStream extends
KeyOutputStream {
return blockOutputStreamEntryPool.getExcludeList();
}
+ @Override
+ public Map<String, String> getMetadata() {
+ return this.blockOutputStreamEntryPool.getMetadata();
+ }
+
/**
* Builder class of ECKeyOutputStream.
*/
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index edc76066b5..a6331151e3 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -43,6 +43,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
/**
@@ -54,7 +55,8 @@ import java.util.UUID;
*
* TODO : currently not support multi-thread access.
*/
-public class KeyDataStreamOutput extends AbstractDataStreamOutput {
+public class KeyDataStreamOutput extends AbstractDataStreamOutput
+ implements KeyMetadataAware {
private OzoneClientConfig config;
@@ -400,6 +402,11 @@ public class KeyDataStreamOutput extends
AbstractDataStreamOutput {
return blockDataStreamOutputEntryPool.getExcludeList();
}
+ @Override
+ public Map<String, String> getMetadata() {
+ return this.blockDataStreamOutputEntryPool.getMetadata();
+ }
+
/**
* Builder class of KeyDataStreamOutput.
*/
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyMetadataAware.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyMetadataAware.java
new file mode 100644
index 0000000000..b8c26c8f81
--- /dev/null
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyMetadataAware.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client.io;
+
+import java.util.Map;
+
+/**
+ * Interface to provide methods related to key metadata.
+ */
+public interface KeyMetadataAware {
+
+ Map<String, String> getMetadata();
+
+}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 2eea676561..fa23f88544 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -65,7 +65,8 @@ import org.slf4j.LoggerFactory;
*
* TODO : currently not support multi-thread access.
*/
-public class KeyOutputStream extends OutputStream implements Syncable {
+public class KeyOutputStream extends OutputStream
+ implements Syncable, KeyMetadataAware {
private OzoneClientConfig config;
private final ReplicationConfig replication;
@@ -570,6 +571,11 @@ public class KeyOutputStream extends OutputStream
implements Syncable {
return blockOutputStreamEntryPool.getExcludeList();
}
+ @Override
+ public Map<String, String> getMetadata() {
+ return this.blockOutputStreamEntryPool.getMetadata();
+ }
+
/**
* Builder class of KeyOutputStream.
*/
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
index ee3f98ae78..d8cb06eccc 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
@@ -23,11 +23,13 @@ import
org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
+import java.util.Map;
/**
* OzoneDataStreamOutput is used to write data into Ozone.
*/
-public class OzoneDataStreamOutput extends ByteBufferOutputStream {
+public class OzoneDataStreamOutput extends ByteBufferOutputStream
+ implements KeyMetadataAware {
private final ByteBufferStreamOutput byteBufferStreamOutput;
@@ -70,6 +72,13 @@ public class OzoneDataStreamOutput extends
ByteBufferOutputStream {
return ((KeyDataStreamOutput) wrappedStream)
.getCommitUploadPartInfo();
}
+ } else if (outputStream instanceof CipherOutputStreamOzone) {
+ OutputStream wrappedStream =
+ ((CipherOutputStreamOzone) outputStream).getWrappedStream();
+ if (wrappedStream instanceof KeyDataStreamOutput) {
+ return ((KeyDataStreamOutput) wrappedStream)
+ .getCommitUploadPartInfo();
+ }
}
} else if (byteBufferStreamOutput instanceof KeyDataStreamOutput) {
return ((KeyDataStreamOutput)
@@ -82,4 +91,10 @@ public class OzoneDataStreamOutput extends
ByteBufferOutputStream {
public ByteBufferStreamOutput getByteBufStreamOutput() {
return byteBufferStreamOutput;
}
+
+ @Override
+ public Map<String, String> getMetadata() {
+ return ((KeyMetadataAware)this.byteBufferStreamOutput).getMetadata();
+ }
+
}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
index b16a0506c5..093b31b7a5 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
@@ -23,13 +23,15 @@ import
org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import java.io.IOException;
import java.io.OutputStream;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* OzoneOutputStream is used to write data into Ozone.
*/
-public class OzoneOutputStream extends ByteArrayStreamOutput {
+public class OzoneOutputStream extends ByteArrayStreamOutput
+ implements KeyMetadataAware {
private final OutputStream outputStream;
private final Syncable syncable;
@@ -129,6 +131,12 @@ public class OzoneOutputStream extends
ByteArrayStreamOutput {
if (wrappedStream instanceof KeyOutputStream) {
return ((KeyOutputStream) wrappedStream).getCommitUploadPartInfo();
}
+ } else if (outputStream instanceof CipherOutputStreamOzone) {
+ OutputStream wrappedStream =
+ ((CipherOutputStreamOzone) outputStream).getWrappedStream();
+ if (wrappedStream instanceof KeyOutputStream) {
+ return ((KeyOutputStream)wrappedStream).getCommitUploadPartInfo();
+ }
}
// Otherwise return null.
return null;
@@ -137,4 +145,16 @@ public class OzoneOutputStream extends
ByteArrayStreamOutput {
public OutputStream getOutputStream() {
return outputStream;
}
+
+ @Override
+ public Map<String, String> getMetadata() {
+ if (outputStream instanceof CryptoOutputStream) {
+ return ((KeyMetadataAware)((CryptoOutputStream) outputStream)
+ .getWrappedStream()).getMetadata();
+ } else if (outputStream instanceof CipherOutputStreamOzone) {
+ return ((KeyMetadataAware)((CipherOutputStreamOzone) outputStream)
+ .getWrappedStream()).getMetadata();
+ }
+ return ((KeyMetadataAware) outputStream).getMetadata();
+ }
}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index fc91335771..ad8ced95d1 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -18,34 +18,16 @@
package org.apache.hadoop.ozone.client.rpc;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
import javax.annotation.Nonnull;
import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
-import javax.crypto.CipherOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.URI;
-import java.security.InvalidKeyException;
-import java.security.PrivilegedExceptionAction;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;
@@ -69,6 +51,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.MultipartInputStream;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.io.ByteBufferPool;
@@ -94,12 +77,12 @@ import org.apache.hadoop.ozone.client.TenantArgs;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory;
import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl;
+import org.apache.hadoop.ozone.client.io.CipherOutputStreamOzone;
import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
import org.apache.hadoop.ozone.client.io.KeyInputStream;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.hdds.scm.storage.MultipartInputStream;
import org.apache.hadoop.ozone.client.io.OzoneCryptoInputStream;
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -108,8 +91,8 @@ import
org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.DeleteTenantState;
import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
@@ -157,13 +140,33 @@ import
org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse;
import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.ratis.protocol.ClientId;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.security.InvalidKeyException;
+import java.security.PrivilegedExceptionAction;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.stream.Collectors;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY;
@@ -175,11 +178,6 @@ import static
org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH
import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
-import org.apache.ratis.protocol.ClientId;
-import org.jetbrains.annotations.NotNull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Ozone RPC Client Implementation, it connects to OM, SCM and DataNode
* to execute client calls. This uses RPC protocol for communication
@@ -2275,7 +2273,7 @@ public class RpcClient implements ClientProtocol {
final GDPRSymmetricKey gk = getGDPRSymmetricKey(
openKey.getKeyInfo().getMetadata(), Cipher.ENCRYPT_MODE);
if (gk != null) {
- return new OzoneOutputStream(new CipherOutputStream(
+ return new OzoneOutputStream(new CipherOutputStreamOzone(
keyOutputStream, gk.getCipher()), syncable, enableHsync);
}
} catch (Exception ex) {
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 567fbdad17..9daa39db20 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -808,6 +808,7 @@ public final class
OzoneManagerProtocolClientSideTranslatorPB
.setBucketName(args.getBucketName())
.setKeyName(args.getKeyName())
.setDataSize(args.getDataSize())
+ .addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata()))
.addAllKeyLocations(locationInfoList.stream()
// TODO use OM version?
.map(info -> info.getProtobuf(ClientVersion.CURRENT_VERSION))
@@ -1585,6 +1586,7 @@ public final class
OzoneManagerProtocolClientSideTranslatorPB
.setIsMultipartKey(omKeyArgs.getIsMultipartKey())
.setMultipartNumber(omKeyArgs.getMultipartUploadPartNumber())
.setDataSize(omKeyArgs.getDataSize())
+ .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata()))
.addAllKeyLocations(locationInfoList.stream()
// TODO use OM version?
.map(info -> info.getProtobuf(ClientVersion.CURRENT_VERSION))
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index 98691876e1..afcec38e47 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -49,6 +49,11 @@ Execute AWSS3Cli
${output} = Execute aws s3 --endpoint-url
${ENDPOINT_URL} ${command}
[return] ${output}
+Execute AWSS3CliDebug
+ [Arguments] ${command}
+ ${output} = Execute aws --debug s3 --endpoint
${ENDPOINT_URL} ${command}
+ [return] ${output}
+
Install aws cli
${rc} ${output} = Run And Return Rc And
Output which aws
Return From Keyword If '${rc}' == '0'
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
index 2a81d2caee..1b44360d6b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
@@ -205,3 +205,56 @@ Create file with user defined metadata size larger than 2
KB
${custom_metadata_value} = Execute printf 'v%.0s'
{1..3000}
${result} = Execute AWSS3APICli and ignore error
put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/custom-metadata/key2
--body /tmp/testfile2 --metadata="custom-key1=${custom_metadata_value}"
Should not contain
${result} custom-key1: ${custom_metadata_value}
+
+Create small file and expect ETag (MD5) in a response header
+ Execute head -c 1MB
</dev/urandom > /tmp/small_file
+ ${file_md5_checksum} = Execute md5sum
/tmp/small_file | awk '{print $1}'
+ ${result} = Execute AWSS3CliDebug cp /tmp/small_file
s3://${BUCKET}
+ Should Match Regexp ${result}
(?i)Response header.*ETag':\ '"${file_md5_checksum}"'
+
+# The next two test cases depend on the previous one
+Download small file and expect ETag (MD5) in a response header
+ ${file_md5_checksum} = Execute md5sum
/tmp/small_file | awk '{print $1}'
+ ${result} = Execute AWSS3CliDebug cp
s3://${BUCKET}/small_file /tmp/small_file_downloaded
+ Should Match Regexp ${result}
(?is)HEAD /${BUCKET}/small_file.*?Response headers.*?ETag':
'"${file_md5_checksum}"'
+ Should Match Regexp ${result}
(?is)GET /${BUCKET}/small_file.*?Response headers.*?ETag':\
'"${file_md5_checksum}"'
+ # clean up
+ Execute AWSS3Cli rm
s3://${BUCKET}/small_file
+ Execute rm
/tmp/small_file_downloaded
+
+Create key with custom etag metadata and expect it won't conflict with ETag
response header of HEAD request
+ ${file_md5_checksum} Execute
md5sum /tmp/small_file | awk '{print $1}'
+ Execute AWSS3CliDebug
cp --metadata "ETag=custom-etag-value" /tmp/small_file s3://${BUCKET}/test_file
+ ${result} Execute AWSS3CliDebug
cp s3://${BUCKET}/test_file /tmp/test_file_downloaded
+ ${match} ${ETag} ${etagCustom} Should Match Regexp
${result} HEAD /${BUCKET}/test_file\ .*?Response headers.*?ETag':\
'"(.*?)"'.*?x-amz-meta-etag':\ '(.*?)' flags=DOTALL
+ Should Be Equal As Strings
${ETag} ${file_md5_checksum}
+ Should Be Equal As Strings
${etagCustom} custom-etag-value
+ Should Not Be Equal As Strings
${ETag} ${etagCustom}
+ # clean up
+ Execute AWSS3Cli
rm s3://${BUCKET}/test_file
+ Execute
rm -rf /tmp/small_file
+ Execute
rm -rf /tmp/test_file_downloaded
+
+Create&Download big file by multipart upload and expect ETag in a file
download response
+ Execute head -c 10MB
</dev/urandom > /tmp/big_file
+ ${result} Execute AWSS3CliDebug cp /tmp/big_file
s3://${BUCKET}/
+ ${match} ${etag1} Should Match Regexp ${result}
(?is)POST /${BUCKET}/big_file\\?uploadId=.*?Response body.*?ETag>"(.*?-2)"
+ ${file_download_result} Execute AWSS3CliDebug cp
s3://${BUCKET}/big_file /tmp/big_file_downloaded
+ ${match} ${etag2} Should Match Regexp
${file_download_result} (?is)GET /${BUCKET}/big_file.*?Response
headers.*?ETag':\ '"(.*?-2)"'
+ Should Be Equal As Strings ${etag1}
${etag2}
+ # clean up
+ Execute AWSS3Cli rm
s3://${BUCKET}/big_file
+ Execute rm -rf /tmp/big_file
+
+Create key twice with different content and expect different ETags
+ Execute head -c 1MiB
</dev/urandom > /tmp/file1
+ Execute head -c 1MiB
</dev/urandom > /tmp/file2
+ ${file1UploadResult} Execute AWSS3CliDebug cp /tmp/file1
s3://${BUCKET}/test_key_to_check_etag_differences
+ ${match} ${etag1} Should Match Regexp
${file1UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\
.*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL
+ ${file2UploadResult} Execute AWSS3CliDebug cp /tmp/file2
s3://${BUCKET}/test_key_to_check_etag_differences
+ ${match} ${etag2} Should Match Regexp
${file2UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\
.*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL
+ Should Not Be Equal As Strings ${etag1}
${etag2}
+ # clean up
+ Execute AWSS3Cli rm
s3://${BUCKET}/test_key_to_check_etag_differences
+ Execute rm -rf /tmp/file1
+ Execute rm -rf /tmp/file2
\ No newline at end of file
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 2994077763..a9953aca07 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -243,6 +244,8 @@ public class OMKeyCommitRequest extends OMKeyRequest {
throw new OMException("Failed to " + action + " key, as " + dbOpenKey +
"entry is not found in the OpenKey table", KEY_NOT_FOUND);
}
+ omKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf(
+ commitKeyArgs.getMetadataList()));
if (isHSync) {
omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID,
String.valueOf(commitKeyRequest.getClientID()));
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index 5cee1e5958..bb1bda117e 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -163,6 +164,8 @@ public class OMKeyCommitRequestWithFSO extends
OMKeyCommitRequest {
dbOpenFileKey + "entry is not found in the OpenKey table",
KEY_NOT_FOUND);
}
+ omKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf(
+ commitKeyArgs.getMetadataList()));
if (isHSync) {
omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID,
String.valueOf(commitKeyRequest.getClientID()));
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index b2d794b1d7..ac1d7c9b32 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -164,6 +165,8 @@ public class S3MultipartUploadCommitPartRequest extends
OMKeyRequest {
openKey + "entry is not found in the openKey table",
KEY_NOT_FOUND);
}
+ omKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf(
+ keyArgs.getMetadataList()));
// set the data size and location info list
omKeyInfo.setDataSize(keyArgs.getDataSize());
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 34ff8d6ef6..d3f3fa518e 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -252,7 +253,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
.setVolume(requestedVolume)
.setBucket(requestedBucket)
.setKey(keyName)
- .setHash(DigestUtils.sha256Hex(keyName)));
+ .setHash(omKeyInfo.getMetadata().get("ETag")));
long volumeId = omMetadataManager.getVolumeId(volumeName);
long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
@@ -390,7 +391,9 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
.setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
.setOmKeyLocationInfos(
Collections.singletonList(keyLocationInfoGroup))
- .setAcls(dbOpenKeyInfo.getAcls());
+ .setAcls(dbOpenKeyInfo.getAcls())
+ .addMetadata("ETag",
+ multipartUploadedKeyHash(partKeyInfoMap));
// Check if db entry has ObjectID. This check is required because
// it is possible that between multipart key uploads and complete,
// we had an upgrade.
@@ -418,6 +421,8 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
omKeyInfo.updateLocationInfoList(partLocationInfos, true, true);
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
omKeyInfo.setDataSize(dataSize);
+ omKeyInfo.getMetadata().put("ETag",
+ multipartUploadedKeyHash(partKeyInfoMap));
}
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
return omKeyInfo;
@@ -633,4 +638,16 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
}
return req;
}
+
+ private String multipartUploadedKeyHash(
+ OmMultipartKeyInfo.PartKeyInfoMap partsList) {
+ StringBuffer keysConcatenated = new StringBuffer();
+ for (PartKeyInfo partKeyInfo: partsList) {
+ keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo
+ .getPartKeyInfo().getMetadataList()).get("ETag"));
+ }
+ return DigestUtils.md5Hex(keysConcatenated.toString()) + "-"
+ + partsList.size();
+ }
+
}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 97027abd7c..9473467b8b 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -73,6 +73,10 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
*/
public abstract class EndpointBase implements Auditor {
+ protected static final String ETAG = "ETag";
+
+ protected static final String ETAG_CUSTOM = "etag-custom";
+
@Inject
private OzoneClient client;
@Inject
@@ -311,8 +315,15 @@ public abstract class EndpointBase implements Auditor {
Map<String, String> metadata = key.getMetadata();
for (Map.Entry<String, String> entry : metadata.entrySet()) {
+ if (entry.getKey().equals(ETAG)) {
+ continue;
+ }
+ String metadataKey = entry.getKey();
+ if (metadataKey.equals(ETAG_CUSTOM)) {
+ metadataKey = ETAG.toLowerCase();
+ }
responseBuilder
- .header(CUSTOM_METADATA_HEADER_PREFIX + entry.getKey(),
+ .header(CUSTOM_METADATA_HEADER_PREFIX + metadataKey,
entry.getValue());
}
}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 5ef67f3c6f..9503c53cfd 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.ozone.s3.endpoint;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import javax.ws.rs.Consumes;
@@ -40,20 +42,10 @@ import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.StreamingOutput;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.UnsupportedEncodingException;
-import java.text.ParseException;
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.util.Map;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.OptionalLong;
-
+import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -66,6 +58,7 @@ import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -86,15 +79,28 @@ import org.apache.hadoop.ozone.s3.util.S3StorageType;
import org.apache.hadoop.ozone.s3.util.S3Utils;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;
+import org.apache.http.HttpStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.DigestInputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.text.ParseException;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.OptionalLong;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.annotations.VisibleForTesting;
import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH;
import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
-import org.apache.commons.io.IOUtils;
-
-import org.apache.commons.lang3.tuple.Pair;
-
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
@@ -127,10 +133,6 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode;
-import org.apache.http.HttpStatus;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Key level rest endpoints.
*/
@@ -140,6 +142,18 @@ public class ObjectEndpoint extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(ObjectEndpoint.class);
+ private static final ThreadLocal<MessageDigest> E_TAG_PROVIDER;
+
+ static {
+ E_TAG_PROVIDER = ThreadLocal.withInitial(() -> {
+ try {
+ return MessageDigest.getInstance("Md5");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
@Context
private ContainerRequestContext context;
@@ -262,26 +276,47 @@ public class ObjectEndpoint extends EndpointBase {
// Normal put object
Map<String, String> customMetadata =
getCustomMetadataFromHeaders(headers.getRequestHeaders());
+ if (customMetadata.containsKey(ETAG)
+ || customMetadata.containsKey(ETAG.toLowerCase())) {
+ String customETag = customMetadata.get(ETAG) != null ?
+ customMetadata.get(ETAG) : customMetadata.get(ETAG.toLowerCase());
+ customMetadata.remove(ETAG);
+ customMetadata.remove(ETAG.toLowerCase());
+ customMetadata.put(ETAG_CUSTOM, customETag);
+ }
if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
.equals(headers.getHeaderString("x-amz-content-sha256"))) {
- body = new SignedChunksInputStream(body);
+ body = new DigestInputStream(new SignedChunksInputStream(body),
+ E_TAG_PROVIDER.get());
+ } else {
+ body = new DigestInputStream(body, E_TAG_PROVIDER.get());
}
- long putLength = 0;
+
+ long putLength;
+ String eTag = null;
if (datastreamEnabled && !enableEC && length > datastreamMinLength) {
getMetrics().updatePutKeyMetadataStats(startNanos);
- putLength = ObjectEndpointStreaming
+ Pair<String, Long> keyWriteResult = ObjectEndpointStreaming
.put(bucket, keyPath, length, replicationConfig, chunkSize,
- customMetadata, body);
+ customMetadata, (DigestInputStream) body);
+ eTag = keyWriteResult.getKey();
+ putLength = keyWriteResult.getValue();
} else {
output = getClientProtocol().createKey(volume.getName(), bucketName,
keyPath, length, replicationConfig, customMetadata);
getMetrics().updatePutKeyMetadataStats(startNanos);
putLength = IOUtils.copyLarge(body, output);
+ eTag = DatatypeConverter.printHexBinary(
+ ((DigestInputStream) body).getMessageDigest().digest())
+ .toLowerCase();
+ output.getMetadata().put(ETAG, eTag);
}
getMetrics().incPutKeySuccessLength(putLength);
- return Response.ok().status(HttpStatus.SC_OK)
+ return Response.ok()
+ .header(ETAG, wrapInQuotes(eTag))
+ .status(HttpStatus.SC_OK)
.build();
} catch (OMException ex) {
auditSuccess = false;
@@ -421,8 +456,9 @@ public class ObjectEndpoint extends EndpointBase {
responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal);
}
- responseBuilder.header(ACCEPT_RANGE_HEADER,
- RANGE_HEADER_SUPPORTED_UNIT);
+ responseBuilder
+ .header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG)))
+ .header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);
// if multiple query parameters having same name,
// Only the first parameters will be recognized
@@ -535,7 +571,7 @@ public class ObjectEndpoint extends EndpointBase {
}
ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK)
- .header("ETag", "" + key.getModificationTime())
+ .header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG)))
.header("Content-Length", key.getDataSize())
.header("Content-Type", "binary/octet-stream");
addLastModifiedDate(response, key);
@@ -765,8 +801,8 @@ public class ObjectEndpoint extends EndpointBase {
new CompleteMultipartUploadResponse();
completeMultipartUploadResponse.setBucket(bucket);
completeMultipartUploadResponse.setKey(key);
- completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo
- .getHash());
+ completeMultipartUploadResponse.setETag(
+ wrapInQuotes(omMultipartUploadCompleteInfo.getHash()));
// Location also setting as bucket name.
completeMultipartUploadResponse.setLocation(bucket);
AUDIT.logWriteSuccess(
@@ -820,7 +856,10 @@ public class ObjectEndpoint extends EndpointBase {
if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
.equals(headers.getHeaderString("x-amz-content-sha256"))) {
- body = new SignedChunksInputStream(body);
+ body = new DigestInputStream(new SignedChunksInputStream(body),
+ E_TAG_PROVIDER.get());
+ } else {
+ body = new DigestInputStream(body, E_TAG_PROVIDER.get());
}
copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
@@ -841,7 +880,7 @@ public class ObjectEndpoint extends EndpointBase {
getMetrics().updatePutKeyMetadataStats(startNanos);
return ObjectEndpointStreaming
.createMultipartKey(ozoneBucket, key, length, partNumber,
- uploadID, chunkSize, body);
+ uploadID, chunkSize, (DigestInputStream) body);
}
ozoneOutputStream = getClientProtocol().createMultipartKey(
volume.getName(), bucket, key, length, partNumber, uploadID);
@@ -893,6 +932,10 @@ public class ObjectEndpoint extends EndpointBase {
} else {
getMetrics().updatePutKeyMetadataStats(startNanos);
long putLength = IOUtils.copyLarge(body, ozoneOutputStream);
+ ((KeyMetadataAware)ozoneOutputStream.getOutputStream())
+ .getMetadata().put("ETag", DatatypeConverter.printHexBinary(
+ ((DigestInputStream) body).getMessageDigest().digest())
+ .toLowerCase());
getMetrics().incPutKeySuccessLength(putLength);
}
} finally {
@@ -911,8 +954,7 @@ public class ObjectEndpoint extends EndpointBase {
return Response.ok(new CopyPartResult(eTag)).build();
} else {
getMetrics().updateCreateMultipartKeySuccessStats(startNanos);
- return Response.ok().header("ETag",
- eTag).build();
+ return Response.ok().header(ETAG, eTag).build();
}
} catch (OMException ex) {
@@ -1014,7 +1056,7 @@ public class ObjectEndpoint extends EndpointBase {
replication.getReplicationType() == EC) &&
srcKeyLen > datastreamMinLength) {
copyLength = ObjectEndpointStreaming
- .putKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen,
+ .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen,
chunkSize, replication, metadata, src);
} else {
try (OzoneOutputStream dest = getClientProtocol()
@@ -1190,4 +1232,9 @@ public class ObjectEndpoint extends EndpointBase {
public boolean isDatastreamEnabled() {
return datastreamEnabled;
}
+
+ private String wrapInQuotes(String value) {
+ return "\"" + value + "\"";
+ }
+
}
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
index 5175bf8450..ef87ad450d 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java
@@ -17,8 +17,12 @@
*/
package org.apache.hadoop.ozone.s3.endpoint;
+import javax.ws.rs.core.Response;
+import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
@@ -28,10 +32,10 @@ import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
+import java.security.DigestInputStream;
import java.util.Map;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
@@ -49,10 +53,11 @@ final class ObjectEndpointStreaming {
private ObjectEndpointStreaming() {
}
- public static long put(OzoneBucket bucket, String keyPath,
- long length, ReplicationConfig replicationConfig,
- int chunkSize, Map<String, String> keyMetadata,
- InputStream body)
+ public static Pair<String, Long> put(
+ OzoneBucket bucket, String keyPath,
+ long length, ReplicationConfig replicationConfig,
+ int chunkSize, Map<String, String> keyMetadata,
+ DigestInputStream body)
throws IOException, OS3Exception {
try {
@@ -80,14 +85,35 @@ final class ObjectEndpointStreaming {
}
}
- public static long putKeyWithStream(OzoneBucket bucket,
- String keyPath,
- long length,
- int bufferSize,
- ReplicationConfig replicationConfig,
- Map<String, String> keyMetadata,
- InputStream body)
+ public static Pair<String, Long> putKeyWithStream(
+ OzoneBucket bucket,
+ String keyPath,
+ long length,
+ int bufferSize,
+ ReplicationConfig replicationConfig,
+ Map<String, String> keyMetadata,
+ DigestInputStream body)
throws IOException {
+ long writeLen;
+ String eTag;
+ try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath,
+ length, replicationConfig, keyMetadata)) {
+ writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length);
+ eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest())
+ .toLowerCase();
+ ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag);
+ }
+ return Pair.of(eTag, writeLen);
+ }
+
+ public static long copyKeyWithStream(
+ OzoneBucket bucket,
+ String keyPath,
+ long length,
+ int bufferSize,
+ ReplicationConfig replicationConfig,
+ Map<String, String> keyMetadata,
+ InputStream body) throws IOException {
long writeLen = 0;
try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath,
length, replicationConfig, keyMetadata)) {
@@ -117,16 +143,19 @@ final class ObjectEndpointStreaming {
public static Response createMultipartKey(OzoneBucket ozoneBucket, String key,
long length, int partNumber,
String uploadID, int chunkSize,
- InputStream body)
+ DigestInputStream body)
throws IOException, OS3Exception {
OzoneDataStreamOutput streamOutput = null;
- String eTag = "";
+ String eTag;
S3GatewayMetrics metrics = S3GatewayMetrics.create();
try {
streamOutput = ozoneBucket
.createMultipartStreamKey(key, length, partNumber, uploadID);
long putLength =
writeToStreamOutput(streamOutput, body, chunkSize, length);
+ eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest())
+ .toLowerCase();
+ ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag);
metrics.incPutKeySuccessLength(putLength);
} catch (OMException ex) {
if (ex.getResult() ==
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index 460b073b39..d546afe2c7 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -176,7 +177,7 @@ public class OzoneBucketStub extends OzoneBucket {
}
ReplicationConfig finalReplicationCon = repConfig;
ByteArrayOutputStream byteArrayOutputStream =
- new ByteArrayOutputStream((int) size) {
+ new KeyMetadataAwareOutputStream(metadata) {
@Override
public void close() throws IOException {
keyContents.put(key, toByteArray());
@@ -193,6 +194,7 @@ public class OzoneBucketStub extends OzoneBucket {
super.close();
}
};
+
return new OzoneOutputStream(byteArrayOutputStream, null);
}
@@ -255,7 +257,7 @@ public class OzoneBucketStub extends OzoneBucket {
throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
} else {
ByteBufferStreamOutput byteBufferStreamOutput =
- new ByteBufferStreamOutput() {
+ new KeyMetadataAwareByteBufferStreamOutput(new HashMap<>()) {
private final ByteBuffer buffer = ByteBuffer.allocate(1024 * 1024);
@Override
@@ -283,9 +285,6 @@ public class OzoneBucketStub extends OzoneBucket {
buffer.put(bytes);
}
- @Override
- public void flush() throws IOException {
- }
};
return new OzoneDataStreamOutputStub(byteBufferStreamOutput, key + size);
@@ -422,7 +421,7 @@ public class OzoneBucketStub extends OzoneBucket {
throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
} else {
ByteArrayOutputStream byteArrayOutputStream =
- new ByteArrayOutputStream((int) size) {
+ new KeyMetadataAwareOutputStream((int) size, new HashMap<>()) {
@Override
public void close() throws IOException {
Part part = new Part(key + size,
@@ -600,4 +599,63 @@ public class OzoneBucketStub extends OzoneBucket {
new ArrayList<>(), replicationConfig, new HashMap<>(), null,
() -> readKey(keyName), false));
}
+
+ /**
+ * ByteArrayOutputStream stub with metadata.
+ */
+ public static class KeyMetadataAwareOutputStream extends ByteArrayOutputStream
+ implements KeyMetadataAware {
+ private Map<String, String> metadata;
+
+ public KeyMetadataAwareOutputStream(Map<String, String> metadata) {
+ super();
+ this.metadata = metadata;
+ }
+
+ public KeyMetadataAwareOutputStream(int size,
+ Map<String, String> metadata) {
+ super(size);
+ this.metadata = metadata;
+ }
+
+ @Override
+ public Map<String, String> getMetadata() {
+ return metadata;
+ }
+ }
+
+ /**
+ * ByteBufferOutputStream stub with metadata.
+ */
+ public static class KeyMetadataAwareByteBufferStreamOutput
+ implements KeyMetadataAware, ByteBufferStreamOutput {
+
+ private Map<String, String> metadata;
+
+ public KeyMetadataAwareByteBufferStreamOutput(
+ Map<String, String> metadata) {
+ this.metadata = metadata;
+ }
+
+ @Override
+ public void write(ByteBuffer buffer, int off, int len) throws IOException {
+
+ }
+
+ @Override
+ public void flush() throws IOException {
+
+ }
+
+ @Override
+ public void close() throws IOException {
+
+ }
+
+ @Override
+ public Map<String, String> getMetadata() {
+ return metadata;
+ }
+ }
+
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index a04af4be6b..264f5486d1 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -35,6 +35,8 @@ import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.HttpHeaders;
+
+import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -286,7 +288,8 @@ public class TestPermissionCheck {
objectEndpoint.setOzoneConfiguration(conf);
try {
- objectEndpoint.put("bucketName", "keyPath", 1024, 0, null, null);
+ objectEndpoint.put("bucketName", "keyPath", 1024, 0, null,
+ new ByteArrayInputStream(new byte[]{}));
Assert.fail("Should fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof OS3Exception);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]