This is an automated email from the ASF dual-hosted git repository.
daim pushed a commit to branch 1.22
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git
The following commit(s) were added to refs/heads/1.22 by this push:
new d3aacc9fa7 OAK-12062 : fixed NPE while uploading metadata for AWS S3 (#2688) (#2706)
d3aacc9fa7 is described below
commit d3aacc9fa74a46aad62b9d494b801cd7b9edaff9
Author: Rishabh Kumar <[email protected]>
AuthorDate: Thu Jan 29 18:22:03 2026 +0530
OAK-12062 : fixed NPE while uploading metadata for AWS S3 (#2688) (#2706)
* OAK-12062 : fixed NPE while uploading metadata for AWS S3 (#2688)
* OAK-12062 : fixed NPE while uploading metadata for AWS S3
* OAK-12062 : removed temp file in finally method
* OAK-12062 : removed unnecessary comment
---
.../jackrabbit/oak/blob/cloud/s3/S3Backend.java | 83 +++++++++++-----------
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
index 75ef22df76..4c92afdf0b 100644
--- a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
+++ b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
@@ -20,13 +20,13 @@ package org.apache.jackrabbit.oak.blob.cloud.s3;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
+import java.nio.file.Files;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
@@ -521,6 +521,7 @@ public class S3Backend extends AbstractSharedBackend {
// Executor required to handle reading from the InputStream on a separate thread so the main upload is not blocked.
final ExecutorService executor = Executors.newSingleThreadExecutor();
+ File tempFile = null;
try {
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
final PutObjectRequest.Builder builder = PutObjectRequest.builder()
@@ -528,8 +529,32 @@ public class S3Backend extends AbstractSharedBackend {
.contentType("application/octet-stream")
.key(addMetaKeyPrefix(name));
- // Specify `null` for the content length when you don't know the content length.
- final AsyncRequestBody body = getRequestBody(input, executor, builder);
+ InputStream uploadStream = input;
+ final long length;
+
+ if (input instanceof FileInputStream) {
+ // if the file is modified after opening, the size may not reflect the latest changes
+ FileInputStream fis = (FileInputStream) input;
+ length = fis.getChannel().size();
+ } else if (input instanceof ByteArrayInputStream) {
+ length = input.available();
+ } else if (input.markSupported()) {
+ // in case the inputStream supports mark & reset
+ input.mark(Integer.MAX_VALUE);
+ length = IOUtils.consume(input);
+ input.reset();
+ } else {
+ // we have to read all the stream to get the actual length
+ // last else block: store to temp file and re-read
+ tempFile = File.createTempFile("s3backend-", ".tmp");
+ try (OutputStream out = Files.newOutputStream(tempFile.toPath())) {
+ IOUtils.copy(input, out);
+ }
+ length = tempFile.length();
+ uploadStream = Files.newInputStream(tempFile.toPath());
+ }
+
+ final AsyncRequestBody body = getRequestBody(uploadStream, length, executor, builder);
final Upload upload = tmx.upload(uploadReq ->
uploadReq.requestBody(body).
putObjectRequest(
@@ -537,12 +562,19 @@ public class S3Backend extends AbstractSharedBackend {
.build());
upload.completionFuture().join();
} catch (Exception e) {
- LOG.error("Exception in uploading {}", e.getMessage());
+ LOG.error("Exception in uploading metadata file", e);
throw new DataStoreException("Error in uploading metadata file", e);
} finally {
if (contextClassLoader != null) {
Thread.currentThread().setContextClassLoader(contextClassLoader);
}
+ if (tempFile != null) {
+ try {
+ Files.deleteIfExists(tempFile.toPath());
+ } catch (IOException e) {
+ LOG.warn("Failed to delete temp file {}", tempFile, e);
+ }
+ }
executor.shutdown();
try {
if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
@@ -1339,44 +1371,11 @@ public class S3Backend extends AbstractSharedBackend {
}
@NotNull
- private AsyncRequestBody getRequestBody(final InputStream input, final ExecutorService executor,
- final PutObjectRequest.Builder builder) throws IOException {
- final AsyncRequestBody body;
- if (Objects.equals(RemoteStorageMode.S3, properties.get(S3Constants.MODE))) {
- body = AsyncRequestBody.fromInputStream(input, null, executor);
- } else {
- // for GCP we need to know the length in advance, else it won't work.
- final long length;
- if (input instanceof FileInputStream) {
- final FileInputStream fis = (FileInputStream) input;
- // if the file is modified after opening, the size may not reflect the latest changes
- length = fis.getChannel().size();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else if (input instanceof ByteArrayInputStream) {
- length = input.available();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else if (input.markSupported()) {
- // in case the inputStream supports mark & reset
- input.mark(Integer.MAX_VALUE);
- length = IOUtils.consume(input);
- input.reset();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else {
- // we have to read all the stream to get the actual length
- // last else block: store to temp file and re-read
- final File tempFile = File.createTempFile("inputstream-", ".tmp");
- tempFile.deleteOnExit(); // Clean up after JVM exits
-
- try (OutputStream out = new FileOutputStream(tempFile)) {
- IOUtils.copy(input, out); // Copy all bytes to file
- }
- // Get length from file
- length = tempFile.length();
- // Re-create InputStream from temp file
- body = AsyncRequestBody.fromInputStream(new FileInputStream(tempFile), length, executor);
- }
- builder.contentLength(length);
- }
+ private AsyncRequestBody getRequestBody(final InputStream input, final long length, final ExecutorService executor,
+ final PutObjectRequest.Builder builder) {
+ // for both AWS/GCP we need to know the length in advance, else it won't work.
+ AsyncRequestBody body = AsyncRequestBody.fromInputStream(input, length, executor);
+ builder.contentLength(length);
return body;
}
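
For readers following the diff in isolation: the core of the fix is that a concrete content length is now resolved for every stream type before the stream is handed to AsyncRequestBody.fromInputStream (the old AWS branch passed null for the length). Below is a minimal, self-contained sketch of the same length-detection strategy for experimentation outside Oak. This is a paraphrase of the technique, not the committed code: the class name LengthKnownStream, its field names, and the temp-file prefix are invented here, and the JDK's InputStream.transferTo (Java 9+) stands in for Oak's IOUtils helpers.

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;

// Illustrative sketch only: names are not taken from the Oak sources.
final class LengthKnownStream {
    final InputStream stream; // stream to hand to the upload
    final long length;        // exact byte count the stream will yield
    final File tempFile;      // non-null only when we spilled to disk

    private LengthKnownStream(InputStream stream, long length, File tempFile) {
        this.stream = stream;
        this.length = length;
        this.tempFile = tempFile;
    }

    static LengthKnownStream of(InputStream input) throws IOException {
        if (input instanceof FileInputStream) {
            // channel size is the file's size right now; writes made to the
            // file after this point are not reflected
            long len = ((FileInputStream) input).getChannel().size();
            return new LengthKnownStream(input, len, null);
        }
        if (input instanceof ByteArrayInputStream) {
            // available() is exact for a purely in-memory stream
            return new LengthKnownStream(input, input.available(), null);
        }
        if (input.markSupported()) {
            // count the bytes, then rewind to the mark
            input.mark(Integer.MAX_VALUE);
            long len = 0;
            byte[] buf = new byte[8192];
            for (int n; (n = input.read(buf)) != -1; ) {
                len += n;
            }
            input.reset();
            return new LengthKnownStream(input, len, null);
        }
        // last resort: copy the stream to a temp file and re-read it;
        // the caller owns tempFile and must delete it after the upload
        File tempFile = File.createTempFile("length-probe-", ".tmp");
        try (OutputStream out = Files.newOutputStream(tempFile.toPath())) {
            input.transferTo(out); // JDK 9+; stands in for IOUtils.copy
        }
        return new LengthKnownStream(Files.newInputStream(tempFile.toPath()),
                tempFile.length(), tempFile);
    }
}

A caller would do roughly: LengthKnownStream lks = LengthKnownStream.of(input); pass lks.stream and lks.length to AsyncRequestBody.fromInputStream(...); and delete lks.tempFile in a finally block, mirroring the patch's Files.deleteIfExists cleanup. Note that the mark/reset branch forces a stream such as BufferedInputStream to buffer the entire content in memory, which is why the temp-file spill remains the fallback for arbitrary streams.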