reschke commented on code in PR #2409:
URL: https://github.com/apache/jackrabbit-oak/pull/2409#discussion_r2517754760


##########
oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java:
##########
@@ -546,142 +463,182 @@ public void deleteRecord(DataIdentifier identifier) throws DataStoreException {
 
     @Override
     public void addMetadataRecord(InputStream input, String name) throws DataStoreException {
-        if (null == input) {
-            throw new NullPointerException("input");
-        }
-        if (StringUtils.isEmpty(name)) {
-            throw new IllegalArgumentException("name");
-        }
-        long start = System.currentTimeMillis();
+        Objects.requireNonNull(input, "input must not be null");
+        Validate.checkArgument(StringUtils.isNotEmpty(name), "name should not be empty");
+        Stopwatch stopwatch = Stopwatch.createStarted();
         ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
         try {
             Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
 
-            addMetadataRecordImpl(input, name, -1L);
-            LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start));
+            addMetadataRecordImpl(input, name, -1);
+            LOG.debug("Metadata record added. metadataName={} duration={}", name, stopwatch.elapsed(TimeUnit.MILLISECONDS));
         }
         finally {
-            if (null != contextClassLoader) {
+            if (contextClassLoader != null) {
                 Thread.currentThread().setContextClassLoader(contextClassLoader);
             }
         }
     }
 
     @Override
-    public void addMetadataRecord(File input, String name) throws DataStoreException {
-        if (null == input) {
-            throw new NullPointerException("input");
-        }
-        if (StringUtils.isEmpty(name)) {
-            throw new IllegalArgumentException("name");
-        }
-        long start = System.currentTimeMillis();
+    public void addMetadataRecord(File inputFile, String name) throws DataStoreException {
+        Objects.requireNonNull(inputFile, "input must not be null");
+        Validate.checkArgument(StringUtils.isNoneEmpty(name), "name should not be empty");
+
+        Stopwatch stopwatch = Stopwatch.createStarted();
         ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
         try {
             Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
-
-            addMetadataRecordImpl(new FileInputStream(input), name, input.length());
-            LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start));
-        }
-        catch (FileNotFoundException e) {
+            try (InputStream input = new FileInputStream(inputFile)) {
+                addMetadataRecordImpl(input, name, inputFile.length());
+            }
+            LOG.debug("Metadata record added. metadataName={} duration={}", name, stopwatch.elapsed(TimeUnit.MILLISECONDS));
+        } catch (IOException e) {
             throw new DataStoreException(e);
-        }
-        finally {
-            if (null != contextClassLoader) {
+        } finally {
+            if (contextClassLoader != null) {
                 Thread.currentThread().setContextClassLoader(contextClassLoader);
             }
         }
     }
 
+    private BlockBlobClient getMetaBlobClient(String name) throws DataStoreException {
+        return getAzureContainer().getBlobClient(AzureConstants.AZURE_BlOB_META_DIR_NAME + "/" + name).getBlockBlobClient();
+    }
+
     private void addMetadataRecordImpl(final InputStream input, String name, long recordLength) throws DataStoreException {
         try {
-            CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
-            CloudBlockBlob blob = metaDir.getBlockBlobReference(name);
-            addLastModified(blob);
-            blob.upload(input, recordLength);
-        }
-        catch (StorageException e) {
+            BlockBlobClient blockBlobClient = getMetaBlobClient(name);
+
+            // If length is unknown (-1), use a file buffer for the stream first
+            // This is necessary because Azure SDK requires a known length for upload
+            // and loading the entire stream into memory is too risky
+            if (recordLength < 0) {
+                LOG.debug("Metadata record length unknown. metadataName={}. Saving to temporary file before upload", name);
+                File tempFile = createTempFileFromStream(input, name);
Review Comment:
   I would strongly recommend against using this. The `TransientFileFactory` was created ages ago:
   
   - `deleteOnExit` only helps when the VM is terminated normally
   - it attempts to use GC to clean up
   - it only hides bugs in the caller; debugging a stream/file leak will be much harder than with regular code (see the sketch below)
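   
   For illustration, a minimal sketch of the "regular code" alternative: the caller creates the buffer file, uses it, and deletes it in one `finally` block, so a leak shows up at the allocation site instead of being deferred to GC or VM exit. The class name, helper name, and temp-file prefix are made up for this example; `BlockBlobClient.upload(InputStream, long, boolean)` and the `java.nio.file` calls are standard API.
   
   ```java
   import java.io.IOException;
   import java.io.InputStream;
   import java.nio.file.Files;
   import java.nio.file.Path;
   import java.nio.file.StandardCopyOption;
   
   import com.azure.storage.blob.specialized.BlockBlobClient;
   
   class ExplicitTempFileUpload {
       // Hypothetical helper: buffer a stream of unknown length to a temp file,
       // upload it with the now-known length, and always delete the file afterwards.
       static void uploadViaTempFile(BlockBlobClient blob, InputStream input) throws IOException {
           Path tempFile = Files.createTempFile("oak-azure-meta-", ".tmp");
           try {
               // Files.copy returns the number of bytes written, i.e. the record length
               long length = Files.copy(input, tempFile, StandardCopyOption.REPLACE_EXISTING);
               try (InputStream buffered = Files.newInputStream(tempFile)) {
                   blob.upload(buffered, length, /* overwrite */ true);
               }
           } finally {
               // deterministic cleanup in the caller; no deleteOnExit, no reliance on GC
               Files.deleteIfExists(tempFile);
           }
       }
   }
   ```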



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
