shameersss1 commented on code in PR #6884:
URL: https://github.com/apache/hadoop/pull/6884#discussion_r1830437071
##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java:
##########
@@ -266,6 +271,150 @@ public void testEncryptionEnabledAndDisabledFS() throws Exception {
}
}
+ /**
+ * Test that unencrypted objects can be read when V1 client compatibility
+ * is enabled.
+ * @throws Exception failure
+ */
+ @Test
+ public void testUnencryptedObjectReadWithV1CompatibilityConfig() throws Exception {
+ maybeSkipTest();
+ // create a base configuration without client-side encryption.
+ Configuration conf = new Configuration(getConfiguration());
+ removeBaseAndBucketOverrides(getTestBucketName(conf),
+ conf,
+ S3_ENCRYPTION_ALGORITHM,
+ S3_ENCRYPTION_KEY,
+ SERVER_SIDE_ENCRYPTION_ALGORITHM,
+ SERVER_SIDE_ENCRYPTION_KEY);
+
+ Path file = methodPath();
+
+ try (S3AFileSystem nonCseFs = createTestFileSystem(conf)) {
+ nonCseFs.initialize(getFileSystem().getUri(), conf);
+
+ // write unencrypted file
+ ContractTestUtils.writeDataset(nonCseFs, file, new byte[SMALL_FILE_SIZE],
+ SMALL_FILE_SIZE, SMALL_FILE_SIZE, true);
+ }
+
+ Configuration cseConf = new Configuration(getConfiguration());
+ cseConf.setBoolean(S3_ENCRYPTION_CSE_V1_COMPATIBILITY_ENABLED, true);
+
+ // create filesystem with cse enabled and v1 compatibility.
+ try (S3AFileSystem cseFs = createTestFileSystem(cseConf)) {
+ cseFs.initialize(getFileSystem().getUri(), cseConf);
+
+ // read unencrypted file. It should not throw any exception.
+ try (FSDataInputStream in = cseFs.open(file)) {
+ in.read(new byte[SMALL_FILE_SIZE]);
+ }
+ }
+ }
+
+ /**
+ * Tests the size reported for an encrypted object when V1 compatibility
+ * is enabled and a custom unencrypted content length header is set.
+ *
+ * @throws Exception If any error occurs during the test execution.
+ */
+ @Test
+ public void testSizeOfEncryptedObjectFromHeaderWithV1Compatibility() throws Exception {
+ maybeSkipTest();
+ Configuration cseConf = new Configuration(getConfiguration());
+ cseConf.setBoolean(S3_ENCRYPTION_CSE_V1_COMPATIBILITY_ENABLED, true);
+ try (S3AFileSystem fs = createTestFileSystem(cseConf)) {
+ fs.initialize(getFileSystem().getUri(), cseConf);
+
+ Path filePath = methodPath();
+ Path file = new Path(filePath, "file");
+ String key = fs.pathToKey(file);
+
+ // write an object whose metadata declares a custom unencrypted content length
+ Map<String, String> metadata = new HashMap<>();
+ metadata.put(AWSHeaders.UNENCRYPTED_CONTENT_LENGTH, "10");
+ try (AuditSpan span = span()) {
+ RequestFactory factory = RequestFactoryImpl.builder()
+ .withBucket(fs.getBucket())
+ .build();
+ PutObjectRequest.Builder putObjectRequestBuilder =
+ factory.newPutObjectRequestBuilder(key,
+ null, SMALL_FILE_SIZE, false);
+
+ putObjectRequestBuilder.contentLength((long) SMALL_FILE_SIZE);
+ putObjectRequestBuilder.metadata(metadata);
+ fs.putObjectDirect(putObjectRequestBuilder.build(),
+ PutObjectOptions.deletingDirs(),
+ new S3ADataBlocks.BlockUploadData(new byte[SMALL_FILE_SIZE], null),
+ null);
+
+ // the reported length must match the header value, not the bytes written
+ long contentLength = fs.getFileStatus(file).getLen();
+ assertEquals("content length does not match", 10, contentLength);
Review Comment:
Thanks for suggesting this. It is much better.
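
For context on the logic these tests exercise: with client-side encryption the
stored object is larger than the plaintext (AES-GCM appends a 16-byte auth
tag), so in V1 compatibility mode the client has to derive the unencrypted
length either from the unencrypted content length metadata header (which the
second test forges) or by stripping the padding. A minimal sketch of that
derivation, assuming the header name and padding size; the helper below is
illustrative, not Hadoop's actual implementation:

    import java.util.Map;

    final class CseLengthSketch {
      // Assumed AES-GCM padding: a 16-byte authentication tag per object.
      private static final long CSE_PADDING_LENGTH = 16;

      // Metadata key written by the V1 encryption client (assumption; the
      // test above references it as AWSHeaders.UNENCRYPTED_CONTENT_LENGTH).
      private static final String UNENCRYPTED_CONTENT_LENGTH =
          "x-amz-unencrypted-content-length";

      static long unencryptedLength(long objectSize,
          Map<String, String> metadata) {
        String declared = metadata.get(UNENCRYPTED_CONTENT_LENGTH);
        if (declared != null) {
          // Prefer the declared plaintext length, which is why the test
          // above expects getFileStatus() to report 10, not SMALL_FILE_SIZE.
          return Long.parseLong(declared);
        }
        // Otherwise fall back to stripping the encryption padding.
        return Math.max(0, objectSize - CSE_PADDING_LENGTH);
      }
    }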
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]