[ https://issues.apache.org/jira/browse/HADOOP-16721?focusedWorklogId=563114&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-563114 ]

ASF GitHub Bot logged work on HADOOP-16721:
-------------------------------------------

                Author: ASF GitHub Bot
            Created on: 09/Mar/21 14:57
            Start Date: 09/Mar/21 14:57
    Worklog Time Spent: 10m 
      Work Description: steveloughran commented on a change in pull request #2742:
URL: https://github.com/apache/hadoop/pull/2742#discussion_r590443239



##########
File path: hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.impl;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import com.amazonaws.AmazonClientException;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
+
+import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
+import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
+import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
+import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit;
+import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
+
+/**
+ * HADOOP-16721: race condition with delete and rename underneath the same
+ * destination directory.
+ * This test suite recreates the failure using semaphores to guarantee that
+ * the failure condition is encountered, then verifies that the rename
+ * operation is successful.
+ */
+public class ITestRenameDeleteRace extends AbstractS3ATestBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestRenameDeleteRace.class);
+
+
+  /** Thread count for the parallel-operation executor: {@value}. */
+  public static final int EXECUTOR_THREAD_COUNT = 2;
+
+  /**
+   * For submitting work.
+   */
+  private static final ListeningExecutorService EXECUTOR =
+      BlockingThreadPoolExecutorService.newInstance(
+          EXECUTOR_THREAD_COUNT,
+          EXECUTOR_THREAD_COUNT * 2,
+          30, TimeUnit.SECONDS,
+          "test-operations");
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+    String bucketName = getTestBucketName(conf);
+
+    removeBaseAndBucketOverrides(bucketName, conf,
+        S3_METADATA_STORE_IMPL,
+        DIRECTORY_MARKER_POLICY);
+    // use the delete policy so that parent directory markers must be
+    // recreated after a delete; that recreation step is where the race arises
+    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE);
+    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+
+    return conf;
+  }
+
+  @Test
+  public void testDeleteRenameRaceCondition() throws Throwable {
+    describe("verify no race between delete and rename");
+    final S3AFileSystem fs = getFileSystem();
+    final Path path = path(getMethodName());
+    Path srcDir = new Path(path, "src");
+
+    // this dir must exist throughout the rename
+    Path destDir = new Path(path, "dest");
+    // this dir tree will be deleted in a thread which does not
+    // complete until after the rename has finished
+    Path destSubdir1 = new Path(destDir, "subdir1");
+    Path subfile1 = new Path(destSubdir1, "subfile1");
+
+    // this is the directory we want to copy over under the dest dir
+    Path srcSubdir2 = new Path(srcDir, "subdir2");
+    Path srcSubfile = new Path(srcSubdir2, "subfile2");
+    Path destSubdir2 = new Path(destDir, "subdir2");
+
+    // creates subfile1 and all parents
+    ContractTestUtils.touch(fs, subfile1);
+    assertIsDirectory(destDir);
+
+    // source subfile
+    ContractTestUtils.touch(fs, srcSubfile);
+
+    final BlockingFakeDirMarkerFS blockingFS
+        = new BlockingFakeDirMarkerFS();
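+    // the blocking FS is a second client of the same bucket, initialized
+    // from the production filesystem's URI and configuration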
+    blockingFS.initialize(fs.getUri(), fs.getConf());
+    // get the semaphore; this ensures that the next attempt to create
+    // a fake marker blocks
+    try {
+      blockingFS.blockBeforeCreatingMarker.acquire();
+      final CompletableFuture<Path> future = submit(EXECUTOR, () -> {
+        LOG.info("deleting {}", destSubdir1);
+        blockingFS.delete(destSubdir1, true);
+        return destSubdir1;
+      });
+
+      // wait for the delete to complete the deletion phase
+      blockingFS.signalCreatingFakeParentDirectory.acquire();
+
+      // there is now no destination directory
+      assertPathDoesNotExist("should have been implicitly deleted", destDir);
+
+      try {
+        // Now attempt the rename in the normal FS.
+        LOG.info("renaming {} to {}", srcSubdir2, destSubdir2);
+        Assertions.assertThat(fs.rename(srcSubdir2, destSubdir2))
+            .describedAs("rename(%s, %s)", srcSubdir2, destSubdir2)
+            .isTrue();
+      } finally {
+        blockingFS.blockBeforeCreatingMarker.release();
+      }
+
+      // now let the delete complete
+      LOG.info("Waiting for delete {} to finish", destSubdir1);
+      waitForCompletion(future);
+      assertPathExists("must now exist", destDir);
+      assertPathExists("must now exist", new Path(destSubdir2, "subfile2"));
+      assertPathDoesNotExist("Src dir deleted", srcSubdir2);
+
+    } finally {
+      cleanupWithLogger(LOG, blockingFS);
+    }
+
+  }
+
+  /**
+   * Subclass of S3A FS whose execution of maybeCreateFakeParentDirectory
+   * can be choreographed with another thread so as to reliably
+   * create the delete/rename race condition.
+   */
+  private final class BlockingFakeDirMarkerFS extends S3AFileSystem {
+
+    /**
+     * Semaphore released on entry to maybeCreateFakeParentDirectory();
+     * the test thread acquires it to learn that the deletion phase
+     * has completed.
+     */
+    private final Semaphore signalCreatingFakeParentDirectory = new Semaphore(
+        1);
+
+    /**
+     * Semaphore to acquire before the marker can be listed/created.
+     */
+    private final Semaphore blockBeforeCreatingMarker = new Semaphore(1);
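
The diff excerpt is truncated at this point. As a sketch only: the override
of maybeCreateFakeParentDirectory() would need to choreograph the two
semaphores roughly as below; this body is an assumption for illustration,
not the code under review.

    @Override
    protected void maybeCreateFakeParentDirectory(Path path)
        throws IOException, AmazonClientException {
      // signal the test thread that the deletion phase has completed
      signalCreatingFakeParentDirectory.release();
      // block until the test thread permits marker creation, holding
      // the delete open while the rename runs (assumed choreography)
      blockBeforeCreatingMarker.acquireUninterruptibly();
      try {
        super.maybeCreateFakeParentDirectory(path);
      } finally {
        blockBeforeCreatingMarker.release();
      }
    }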

Review comment:
       done. My inability to type well surfaces again





Issue Time Tracking
-------------------

    Worklog Id:     (was: 563114)
    Time Spent: 2h  (was: 1h 50m)

> Improve S3A rename resilience
> -----------------------------
>
>                 Key: HADOOP-16721
>                 URL: https://issues.apache.org/jira/browse/HADOOP-16721
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/s3
>    Affects Versions: 3.2.0
>            Reporter: Steve Loughran
>            Assignee: Steve Loughran
>            Priority: Blocker
>              Labels: pull-request-available
>          Time Spent: 2h
>  Remaining Estimate: 0h
>
> h3. race condition in delete/rename overlap
> If you have multiple threads on a system doing rename operations, then one
> thread doing a delete(dest/subdir) may delete the last file under a subdir
> and, before it has listed and recreated any parent dir marker, other threads
> may conclude there is no destination directory and fail.
> This is most likely on an overloaded system with many threads executing
> rename operations: with parallel copying taking place, there are many
> threads to schedule and HTTPS connections to pool.
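> An illustrative interleaving of the failure (reconstructed for clarity,
> not taken from a trace):
> {code}
> // thread A: delete(dest/subdir)     |  thread B: rename(src2, dest/subdir2)
> // delete last file under subdir     |
> //   (dest/ now has no children      |
> //    and no marker of its own)      |  probe for dest/ -> nothing found
> //                                   |  rename fails or returns false
> // recreate parent dir marker dest/  |
> {code}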
> h3. failure reporting
> the classic {{rename(source, dest)}} operation returns {{false}} on certain
> failures, which, while somewhat consistent with the POSIX APIs, turns out to
> be useless for identifying the cause of problems. Applications tend to have
> code which goes
> {code}
> if (!fs.rename(src, dest)) throw new IOException("rename failed");
> {code}
> While ultimately the rename/3 call needs to be made public (HADOOP-11452),
> it would then need adoption across applications. We can do this in the
> Hadoop modules, but for Hive, Spark etc. it will take a long time.
> Proposed: a switch to tell S3A to stop downgrading certain failures (source 
> is dir, dest is file, src==dest, etc) into "false". This can be turned on 
> when trying to diagnose why things like Hive are failing.
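> For example, the switch might look like this in a configuration file (the
> property name here is purely illustrative; no name is settled in this issue):
> {code}
> <property>
>   <!-- hypothetical option: raise exceptions instead of returning false -->
>   <name>fs.s3a.rename.raises.exceptions</name>
>   <value>true</value>
> </property>
> {code}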
> Production code: trivial 
> * change in rename(), 
> * new option
> * docs.
> Test code: 
> * need to clear this option for rename contract tests
> * need to create a new FS with this set to verify the various failure modes 
> trigger it.
>  
> If this works we should do the same for ABFS, GCS. Hey, maybe even HDFS


