[ https://issues.apache.org/jira/browse/HADOOP-18340?focusedWorklogId=796052&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-796052 ]

ASF GitHub Bot logged work on HADOOP-18340:
-------------------------------------------

                Author: ASF GitHub Bot
            Created on: 28/Jul/22 12:11
            Start Date: 28/Jul/22 12:11
    Worklog Time Spent: 10m 
      Work Description: steveloughran commented on code in PR #4608:
URL: https://github.com/apache/hadoop/pull/4608#discussion_r932130167


##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ADeleteOnExit.java:
##########
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import static org.apache.hadoop.fs.s3a.Constants.FS_S3A;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Date;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;

Review Comment:
   nit: all non-static org.apache imports should go in their own block below this one.
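   For illustration, a sketch of the layout the comment is asking for, with the non-static org.apache imports pulled into their own block below the third-party one (the grouping is inferred from the comment, not quoted from a checkstyle rule):

{code:java}
import static org.apache.hadoop.fs.s3a.Constants.FS_S3A;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.net.URI;
import java.util.Date;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;

// non-static org.apache imports in their own block
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
{code}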



##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java:
##########
@@ -3800,6 +3819,32 @@ UploadResult waitForUploadCompletion(String key, UploadInfo uploadInfo)
     }
   }
 
+
+  /**
+   * Delete all paths that were marked as delete-on-exit. This recursively
+   * deletes all files and directories in the specified paths. It checks
+   * neither whether a file exists nor whether the filesystem is closed.
+   *
+   * The time to process this operation is {@code O(paths)}, with the actual
+   * time dependent on the time for existence and deletion operations to
+   * complete, successfully or not.
+   */
+  @Override
+  protected void processDeleteOnExit() {
+    Set<Path> deleteOnExit = getDeleteOnExitSet();
+    synchronized (deleteOnExit) {
+      for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) {
+        Path path = iter.next();
+        try {
+          deleteWithoutCloseCheck(path, true);
+        } catch (IOException e) {
+          LOG.info("Ignoring failure to deleteOnExit for path {}, exception {}", path, e);

Review Comment:
   Could be very noisy if there are connectivity problems. How about logging e.toString() at info, and the full stack at debug?
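   One possible shape for that change, as a sketch only (the message wording is illustrative, not taken from the patch): a single line at info via e.toString(), with the full stack trace kept at debug:

{code:java}
        } catch (IOException e) {
          // keep the common case to one line so connectivity failures
          // do not flood the log at close time
          LOG.info("Ignoring failure to deleteOnExit for path {}: {}",
              path, e.toString());
          // full stack trace only when debug logging is enabled
          LOG.debug("deleteOnExit failure for path {}", path, e);
        }
{code}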





Issue Time Tracking
-------------------

    Worklog Id:     (was: 796052)
    Time Spent: 1h 10m  (was: 1h)

> deleteOnExit does not work with S3AFileSystem
> ---------------------------------------------
>
>                 Key: HADOOP-18340
>                 URL: https://issues.apache.org/jira/browse/HADOOP-18340
>             Project: Hadoop Common
>          Issue Type: Bug
>          Components: fs/s3
>    Affects Versions: 3.3.3
>            Reporter: Huaxiang Sun
>            Priority: Minor
>              Labels: pull-request-available
>          Time Spent: 1h 10m
>  Remaining Estimate: 0h
>
> When deleteOnExit is set on some paths, they are not removed when the file system
> object is closed. The following exception is logged at info level:
> {code:java}
> 2022-07-15 19:29:12,552 [main] INFO  fs.FileSystem (FileSystem.java:processDeleteOnExit(1810)) - Ignoring failure to deleteOnExit for path /file, exception {}
> java.io.IOException: s3a://mock-bucket: FileSystem is closed!
>         at org.apache.hadoop.fs.s3a.S3AFileSystem.checkNotClosed(S3AFileSystem.java:3887)
>         at org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2333)
>         at org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2355)
>         at org.apache.hadoop.fs.s3a.S3AFileSystem.exists(S3AFileSystem.java:4402)
>         at org.apache.hadoop.fs.FileSystem.processDeleteOnExit(FileSystem.java:1805)
>         at org.apache.hadoop.fs.FileSystem.close(FileSystem.java:2669)
>         at org.apache.hadoop.fs.s3a.S3AFileSystem.close(S3AFileSystem.java:3830)
>         at org.apache.hadoop.fs.s3a.TestS3AGetFileStatus.testFile(TestS3AGetFileStatus.java:87)
>         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>         at java.lang.reflect.Method.invoke(Method.java:498)
>         at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
>         at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
>         at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
>         at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
>         at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
>         at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
>         at org.junit.rules.ExpectedException$ExpectedExceptionStatement.evaluate(ExpectedException.java:258)
>         at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
>         at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
>         at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
>         at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
>         at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
>         at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
>         at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
>         at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
>         at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
>         at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
>         at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
>         at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
>         at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:365)
>         at org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:273)
>         at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:238)
>         at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:159)
>         at org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:384)
>         at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:345)
>         at org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:126)
>         at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:418)
>  {code}
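For context, a minimal self-contained sketch of the reported behaviour; the bucket name and path are placeholders, and a reachable s3a filesystem is assumed:

{code:java}
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // placeholder bucket; any S3AFileSystem instance shows the same behaviour
    FileSystem fs = FileSystem.newInstance(URI.create("s3a://example-bucket/"), conf);
    Path path = new Path("/file");

    fs.create(path).close();   // object that should be cleaned up at close()
    fs.deleteOnExit(path);     // registered for delete-on-exit

    // close() marks the S3A instance as closed before the base class runs
    // processDeleteOnExit(); its exists() probe then throws
    // "FileSystem is closed!" and the path is never deleted.
    fs.close();
  }
}
{code}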



