Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c1855a33e -> e402371b6
  refs/heads/branch-2.8 475a277e6 -> f1236c5d7
  refs/heads/trunk 307ec80ac -> d251e5541


HADOOP-12851. S3AFileSystem Uptake of ProviderUtils.excludeIncompatibleCredentialProviders. Contributed by Larry McCay.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d251e554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d251e554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d251e554

Branch: refs/heads/trunk
Commit: d251e55415f1fab085159b9eb3b43214d100b6a8
Parents: 307ec80
Author: Chris Nauroth <cnaur...@apache.org>
Authored: Mon Feb 29 20:03:42 2016 -0800
Committer: Chris Nauroth <cnaur...@apache.org>
Committed: Mon Feb 29 21:59:52 2016 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 47 +++++++++++---------
 .../hadoop/fs/s3a/TestS3AConfiguration.java     | 34 ++++++++++++++
 3 files changed, 63 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
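For context: the core of this change is in S3AFileSystem's credential lookup. Before the access and secret keys are read through the credential provider API, the configuration is first passed through ProviderUtils.excludeIncompatibleCredentialProviders(conf, S3AFileSystem.class), which drops any entry in hadoop.security.credential.provider.path that is itself stored on an s3a URI. That avoids a circular dependency in which initializing S3A would require reading a secret that can only be fetched through S3A. Below is a minimal sketch of the pattern using the same Hadoop APIs as the diff; the wrapper class, method name, and the choice to return the trimmed key as a String are illustrative only, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.S3AFileSystem;
    import org.apache.hadoop.security.ProviderUtils;

    public class S3ACredentialLookupSketch {
      /** Resolve the S3A access key without consulting s3a-hosted providers. */
      public static String lookupAccessKey(Configuration conf) throws IOException {
        // Drop providers from hadoop.security.credential.provider.path that are
        // themselves stored on the filesystem being initialized (s3a), so the
        // secret lookup cannot recurse back into S3A initialization.
        Configuration filtered = ProviderUtils.excludeIncompatibleCredentialProviders(
            conf, S3AFileSystem.class);
        // "fs.s3a.access.key" is Constants.ACCESS_KEY in the hadoop-aws module.
        char[] key = filtered.getPassword("fs.s3a.access.key");
        return key == null ? null : new String(key).trim();
      }
    }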


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d251e554/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b84131b..65767f6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1772,6 +1772,10 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12813. Migrate TestRPC and related codes to rebase on
     ProtobufRpcEngine. (Kai Zheng via wheat9)
 
+    HADOOP-12851. S3AFileSystem Uptake of
+    ProviderUtils.excludeIncompatibleCredentialProviders.
+    (Larry McCay via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d251e554/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index b9590ea..7ab6c79 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
@@ -118,16 +119,16 @@ public class S3AFileSystem extends FileSystem {
     bucket = name.getHost();
 
     ClientConfiguration awsConf = new ClientConfiguration();
-    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, 
+    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS,
       DEFAULT_MAXIMUM_CONNECTIONS));
     boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS,
         DEFAULT_SECURE_CONNECTIONS);
     awsConf.setProtocol(secureConnections ?  Protocol.HTTPS : Protocol.HTTP);
-    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, 
+    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES,
       DEFAULT_MAX_ERROR_RETRIES));
     awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT,
         DEFAULT_ESTABLISH_TIMEOUT));
-    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, 
+    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT,
       DEFAULT_SOCKET_TIMEOUT));
     String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
     if(!signerOverride.isEmpty()) {
@@ -263,9 +264,9 @@ public class S3AFileSystem extends FileSystem {
   }
 
   private void initMultipartUploads(Configuration conf) {
-    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, 
+    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
       DEFAULT_PURGE_EXISTING_MULTIPART);
-    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE, 
+    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
       DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
 
     if (purgeExistingMultipart) {
@@ -297,9 +298,11 @@ public class S3AFileSystem extends FileSystem {
         accessKey = userInfo;
       }
     }
+    Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
+          conf, S3AFileSystem.class);
     if (accessKey == null) {
       try {
-        final char[] key = conf.getPassword(ACCESS_KEY);
+        final char[] key = c.getPassword(ACCESS_KEY);
         if (key != null) {
           accessKey = (new String(key)).trim();
         }
@@ -309,7 +312,7 @@ public class S3AFileSystem extends FileSystem {
     }
     if (secretKey == null) {
       try {
-        final char[] pass = conf.getPassword(SECRET_KEY);
+        final char[] pass = c.getPassword(SECRET_KEY);
         if (pass != null) {
           secretKey = (new String(pass)).trim();
         }
@@ -390,7 +393,7 @@ public class S3AFileSystem extends FileSystem {
       throw new FileNotFoundException("Can't open " + f + " because it is a directory");
     }
 
-    return new FSDataInputStream(new S3AInputStream(bucket, pathToKey(f), 
+    return new FSDataInputStream(new S3AInputStream(bucket, pathToKey(f),
       fileStatus.getLen(), s3, statistics));
   }
 
@@ -425,7 +428,7 @@ public class S3AFileSystem extends FileSystem {
     }
     // We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
     return new FSDataOutputStream(new S3AOutputStream(getConf(), transfers, this,
-      bucket, key, progress, cannedACL, statistics, 
+      bucket, key, progress, cannedACL, statistics,
       serverSideEncryptionAlgorithm), null);
   }
 
@@ -436,7 +439,7 @@ public class S3AFileSystem extends FileSystem {
    * @param progress for reporting progress if it is not null.
    * @throws IOException indicating that append is not supported.
    */
-  public FSDataOutputStream append(Path f, int bufferSize, 
+  public FSDataOutputStream append(Path f, int bufferSize,
     Progressable progress) throws IOException {
     throw new IOException("Not supported");
   }
@@ -446,8 +449,8 @@ public class S3AFileSystem extends FileSystem {
    * Renames Path src to Path dst.  Can take place on local fs
    * or remote DFS.
    *
-   * Warning: S3 does not support renames. This method does a copy which can 
-   * take S3 some time to execute with large files and directories. Since 
+   * Warning: S3 does not support renames. This method does a copy which can
+   * take S3 some time to execute with large files and directories. Since
    * there is no Progressable passed in, this can time out jobs.
    *
    * Note: This implementation differs with other S3 drivers. Specifically:
@@ -560,7 +563,7 @@ public class S3AFileSystem extends FileSystem {
         return false;
       }
 
-      List<DeleteObjectsRequest.KeyVersion> keysToDelete = 
+      List<DeleteObjectsRequest.KeyVersion> keysToDelete =
         new ArrayList<>();
       if (dstStatus != null && dstStatus.isEmptyDirectory()) {
         // delete unnecessary fake directory.
@@ -666,7 +669,7 @@ public class S3AFileSystem extends FileSystem {
       }
 
       if (!recursive && !status.isEmptyDirectory()) {
-        throw new IOException("Path is a folder: " + f + 
+        throw new IOException("Path is a folder: " + f +
                               " and it is not an empty directory");
       }
 
@@ -697,7 +700,7 @@ public class S3AFileSystem extends FileSystem {
         //request.setDelimiter("/");
         request.setMaxKeys(maxKeys);
 
-        List<DeleteObjectsRequest.KeyVersion> keys = 
+        List<DeleteObjectsRequest.KeyVersion> keys =
           new ArrayList<>();
         ObjectListing objects = s3.listObjects(request);
         statistics.incrementReadOps(1);
@@ -801,7 +804,7 @@ public class S3AFileSystem extends FileSystem {
               LOG.debug("Adding: fd: " + keyPath);
             }
           } else {
-            result.add(new S3AFileStatus(summary.getSize(), 
+            result.add(new S3AFileStatus(summary.getSize(),
                 dateToLong(summary.getLastModified()), keyPath,
                 getDefaultBlockSize(f.makeQualified(uri, workingDir))));
             if (LOG.isDebugEnabled()) {
@@ -869,7 +872,7 @@ public class S3AFileSystem extends FileSystem {
    * @param f path to create
    * @param permission to apply to f
    */
-  // TODO: If we have created an empty file at /foo/bar and we then call 
+  // TODO: If we have created an empty file at /foo/bar and we then call
   // mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     if (LOG.isDebugEnabled()) {
@@ -892,7 +895,7 @@ public class S3AFileSystem extends FileSystem {
           FileStatus fileStatus = getFileStatus(fPart);
           if (fileStatus.isFile()) {
             throw new FileAlreadyExistsException(String.format(
-                "Can't make directory for path '%s' since it is a file.", 
+                "Can't make directory for path '%s' since it is a file.",
                 fPart));
           }
         } catch (FileNotFoundException fnfe) {
@@ -998,9 +1001,9 @@ public class S3AFileSystem extends FileSystem {
       if (!objects.getCommonPrefixes().isEmpty()
           || objects.getObjectSummaries().size() > 0) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Found path as directory (with /): " + 
-            objects.getCommonPrefixes().size() + "/" + 
-            objects.getObjectSummaries().size());
+          LOG.debug("Found path as directory (with /): " +
+              objects.getCommonPrefixes().size() + "/" +
+              objects.getObjectSummaries().size());
 
           for (S3ObjectSummary summary : objects.getObjectSummaries()) {
             LOG.debug("Summary: " + summary.getKey() + " " + 
summary.getSize());
@@ -1046,7 +1049,7 @@ public class S3AFileSystem extends FileSystem {
    * @param dst path
    */
   @Override
-  public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
+  public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
     Path dst) throws IOException {
     String key = pathToKey(dst);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d251e554/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java
----------------------------------------------------------------------
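The new test below exercises the same filtering from the configuration side. As a hedged illustration of what this means for a deployment (the property value and keystore paths here are made up, not taken from the patch): when the credential provider path mixes an s3a-hosted keystore with a local one, S3A's own key lookup now ignores the s3a entry, while other consumers of the same Configuration still see both providers. The snippet assumes the same imports as the sketch near the top of this message.

    // Hypothetical setup mirroring testExcludingS3ACredentialProvider below.
    Configuration conf = new Configuration();
    conf.set("hadoop.security.credential.provider.path",
        "jceks://s3a/secrets/s3.jceks,jceks://file/etc/security/local.jceks");

    // S3AFileSystem applies this filter before calling getPassword() on its keys.
    Configuration filtered = ProviderUtils.excludeIncompatibleCredentialProviders(
        conf, S3AFileSystem.class);
    // filtered's provider path now lists only jceks://file/..., so S3A
    // initialization never has to read a secret through S3A itself.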
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java
index 3db84db..e74ebca 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AConfiguration.java
@@ -29,6 +29,7 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -318,4 +319,37 @@ public class TestS3AConfiguration {
     assertEquals("AccessKey incorrect.", EXAMPLE_ID, creds.getAccessKey());
     assertEquals("SecretKey incorrect.", EXAMPLE_KEY, creds.getAccessSecret());
   }
+
+  @Test
+  public void testExcludingS3ACredentialProvider() throws Exception {
+    // set up conf to have a cred provider
+    final Configuration conf = new Configuration();
+    final File file = tempDir.newFile("test.jks");
+    final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
+        file.toURI());
+    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+        "jceks://s3a/foobar," + jks.toString());
+
+    // first make sure that the s3a based provider is removed
+    Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
+        conf, S3AFileSystem.class);
+    String newPath = conf.get(
+        CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH);
+    assertFalse("Provider Path incorrect", newPath.contains("s3a://"));
+
+    // now let's make sure the new path is created by the S3AFileSystem
+    // and the integration still works. Let's provision the keys through
+    // the altered configuration instance and then try and access them
+    // using the original config with the s3a provider in the path.
+    provisionAccessKeys(c);
+
+    S3AFileSystem s3afs = new S3AFileSystem();
+    conf.set(Constants.ACCESS_KEY, EXAMPLE_ID + "LJM");
+    URI uriWithUserInfo = new URI("s3a://123:456@foobar");
+    S3AFileSystem.AWSAccessKeys creds =
+        s3afs.getAWSAccessKeys(uriWithUserInfo, conf);
+    assertEquals("AccessKey incorrect.", "123", creds.getAccessKey());
+    assertEquals("SecretKey incorrect.", "456", creds.getAccessSecret());
+
+  }
 }
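Reading the new test as a whole: it first asserts that the s3a-backed provider ("jceks://s3a/foobar") is dropped from the provider path, then provisions keys through the filtered configuration and reads them back via getAWSAccessKeys() using the original configuration, confirming that S3AFileSystem applies the same filtering internally. The final assertions also show that credentials embedded in the URI userinfo ("s3a://123:456@foobar") still take precedence over the access key set in the configuration.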
