[hadoop] branch trunk updated: HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu Yao.

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d072d33  HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu 
Yao.
d072d33 is described below

commit d072d3304ce3fe33e22bb703839b41ab5107ad42
Author: Xiaoyu Yao 
AuthorDate: Wed Aug 28 08:56:33 2019 -0700

HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu Yao.

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  32 +++--
 .../hadoop/hdds/scm/XceiverClientManager.java  |  28 -
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   9 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java |  26 +---
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  60 ++---
 .../hadoop/hdds/security/x509/SecurityConfig.java  | 137 -
 .../x509/certificate/client/CertificateClient.java |   6 +
 .../client/DefaultCertificateClient.java   |  31 -
 .../common/src/main/resources/ozone-default.xml|  27 
 .../container/common/helpers/ContainerMetrics.java |   9 +-
 .../common/transport/server/XceiverServerGrpc.java |  16 +--
 .../transport/server/ratis/XceiverServerRatis.java |   4 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   1 +
 .../apache/hadoop/hdds/server/ProfileServlet.java  |   1 -
 .../hadoop/hdds/server/TestProfileServlet.java |  11 +-
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |   5 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |   2 +
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   3 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |   9 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java  |  15 +--
 .../hdds/scm/pipeline/SCMPipelineManager.java  |  15 ++-
 .../hdds/scm/server/StorageContainerManager.java   |  13 +-
 .../container/TestCloseContainerEventHandler.java  |   2 +-
 .../scm/container/TestSCMContainerManager.java |   2 +-
 .../hdds/scm/node/TestContainerPlacement.java  |  10 +-
 .../scm/pipeline/MockRatisPipelineProvider.java|   2 +-
 .../safemode/TestHealthyPipelineSafeModeRule.java  |   6 +-
 .../TestOneReplicaPipelineSafeModeRule.java|   2 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |  10 +-
 .../ozone/client/OzoneMultipartUploadList.java |   1 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  54 ++--
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   1 -
 .../ozone/om/helpers/OmMultipartKeyInfo.java   |   3 -
 .../ozone/om/helpers/OmMultipartUploadList.java|   3 -
 .../om/helpers/OmMultipartUploadListParts.java |   1 -
 ...MultipartUploadList.java => ServiceInfoEx.java} |  30 ++---
 .../ozone/om/protocol/OzoneManagerProtocol.java|   9 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  21 +++-
 .../src/main/proto/OzoneManagerProtocol.proto  |   3 +
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |  15 ++-
 .../hadoop/ozone/TestStorageContainerManager.java  |  23 ++--
 .../ozone/client/CertificateClientTestImpl.java|   7 +-
 .../rpc/TestContainerReplicationEndToEnd.java  |  10 +-
 .../ozoneimpl/TestOzoneContainerWithTLS.java   | 104 +---
 .../hadoop/ozone/scm/TestXceiverClientManager.java |   6 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   1 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  11 ++
 .../protocolPB/OzoneManagerRequestHandler.java |   6 +-
 48 files changed, 375 insertions(+), 428 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index d8daaa7..b31da05 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -52,8 +52,8 @@ import 
org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
 import java.io.IOException;
+import java.security.cert.X509Certificate;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -80,6 +80,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   private boolean closed = false;
   private SecurityConfig secConfig;
   private final boolean topologyAwareRead;
+  private X509Certificate caCert;
 
   /**
* Constructs a client that can communicate with the Container framework on
@@ -87,8 +88,10 @@ public class XceiverClientGrpc extends XceiverClientSpi {
*
* @param pipeline - Pipeline that defines the machines.
* @param config   -- Ozone Config
+   * @param caCert   - SCM ca certificate.
*/
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
+  public 
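
A note on the change itself: with mTLS removed, XceiverClientGrpc only needs the
SCM CA certificate (the new caCert field above) to verify the server; it no
longer loads a client key and certificate. A minimal sketch of the resulting
one-way TLS context, using the Ratis-shaded Netty SslContextBuilder from the
imports above (the class and method below are illustrative, not the commit's
code):

    import java.security.cert.X509Certificate;
    import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContext;
    import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;

    final class OneWayTlsSketch {
      // One-way TLS: trust the SCM CA, present no client certificate.
      // The absence of any keyManager(...) call is the point of the change.
      static SslContext clientContext(X509Certificate caCert) throws Exception {
        SslContextBuilder builder = SslContextBuilder.forClient();
        if (caCert != null) {
          builder.trustManager(caCert);
        }
        return builder.build();
      }
    }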

[hadoop] branch trunk updated: HDDS-2156. Fix alignment issues in HDDS doc pages

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9be448b  HDDS-2156. Fix alignment issues in HDDS doc pages
9be448b is described below

commit 9be448b3368088967064305e78ec17ffaaeaedb2
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:41:00 2019 -0700

HDDS-2156. Fix alignment issues in HDDS doc pages

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/docs/content/security/SecurityAcls.md  |  3 +-
 hadoop-hdds/docs/content/security/_index.md|  2 +-
 .../themes/ozonedoc/layouts/_default/section.html  | 69 +-
 .../themes/ozonedoc/layouts/_default/single.html   |  2 +
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |  3 +
 5 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md 
b/hadoop-hdds/docs/content/security/SecurityAcls.md
index b85dcca..31bbb0a 100644
--- a/hadoop-hdds/docs/content/security/SecurityAcls.md
+++ b/hadoop-hdds/docs/content/security/SecurityAcls.md
@@ -2,7 +2,8 @@
 title: "Ozone ACLs"
 date: "2019-April-03"
 weight: 6
-summary: Native ACL support provides ACL functionality without Ranger 
integration.
+summary: Native Ozone Authorizer provides Access Control List (ACL) support 
for Ozone without Ranger integration.
+icon: transfer
 ---
 
 
 {{}}
-  Ozone is an enterprise class, secure storage system. There many
+  Ozone is an enterprise class, secure storage system. There are many
   optional security features in Ozone. Following pages discuss how
   you can leverage the security features of Ozone.
 {{}}
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
index 4150d07..5c01241 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
@@ -18,42 +18,53 @@
 
 
 
-  {{ partial "navbar.html" . }}
+{{ partial "navbar.html" . }}
 
-  
+
 
-  {{ partial "sidebar.html" . }}
-  
-{{ .Title }}
-
-  {{ .Content }}
-{{.Params.card}}
-  {{ if not (eq .Params.cards "false")}}
-  {{ range .Pages }}
-
-  
-
-  
-
-  {{ with .Params.Icon}}
-
-{{end}}
-  {{ .LinkTitle }}
-
-{{.Summary}}
-{{.LinkTitle}}
-  
+{{ partial "sidebar.html" . }}
+
+
+{{ .Title }}
 
-  
+
+{{ .Content }}
+{{.Params.card}}
+{{ if not (eq .Params.cards "false")}}
+{{ range $page_index, $page_val := .Pages }}
+
+{{ $page_count := len .Pages }}
+{{if (eq (mod $page_index 2) 0)}}
+
+{{end}}
+
+
+
+
+{{ with .Params.Icon}}
+
+{{end}}
+{{ .LinkTitle }}
+
+{{.Summary}}
+{{.LinkTitle}}
+
+
+
 
-  {{ end }}
-  {{end}}
+{{if (or (eq (mod $page_index 2) 1) (eq $page_index (sub 
$page_count 1)))}}
+
+{{end}}
+{{ end }}
+{{end}}
+
 
-  
 
-  
+
 
-  {{ partial "footer.html" . }}
+{{ partial "footer.html" . }}
 
 
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html 
b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
index 31125ba..3679ddb 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
@@ -36,7 +36,9 @@
 
   
 
+  
 {{.Title}}
+  
 
   {{ .Content }}
 
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css 
b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
index e004da0..6f812c8 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
+++ b/hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
@@ -160,6 +160,9 @@ h4 {
   padding: 30px;
 }
 
+h1 {
+  margin-bottom: 20px;
+}
 
 .card {
   padding: 20px;
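
The heart of the section.html rewrite above is the two-cards-per-row grouping:
a row container opens whenever the page index is even and closes after an odd
index or after the last page. The same arithmetic as a small Java sketch
(renderCard and the page titles are stand-ins for the Hugo template context):

    public class CardRowSketch {
      static String renderCard(String title) {
        return "  <div class=\"card\">" + title + "</div>";
      }

      static void renderRows(String[] pages) {
        int pageCount = pages.length;
        for (int i = 0; i < pageCount; i++) {
          if (i % 2 == 0) {
            System.out.println("<div class=\"row\">"); // open a row on even index
          }
          System.out.println(renderCard(pages[i]));
          if (i % 2 == 1 || i == pageCount - 1) {
            System.out.println("</div>"); // close after second card or last page
          }
        }
      }

      public static void main(String[] args) {
        renderRows(new String[] {"Overview", "Security", "Tools"});
      }
    }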



[hadoop] branch branch-3.1 updated: HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed by luhuachao.

2019-09-19 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 6ef3204  HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL 
in ZKDelegationTokenSecretManager using principal with Schema /_HOST. 
Contributed by luhuachao.
6ef3204 is described below

commit 6ef3204d54c84e6544287fbb2c11ce9741776db6
Author: Takanobu Asanuma 
AuthorDate: Fri Sep 20 13:55:34 2019 +0900

HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in 
ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed 
by luhuachao.

(cherry picked from commit 298f07abceb4bb854630c9058e1697d4ef3d2ae2)
---
 .../security/token/delegation/ZKDelegationTokenSecretManager.java   | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index dca9e2f..cce124e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
 import org.apache.zookeeper.CreateMode;
@@ -221,6 +222,7 @@ public abstract class 
ZKDelegationTokenSecretManager

[hadoop] branch branch-3.2 updated: HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed by luhuachao.

2019-09-19 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new b207244  HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL 
in ZKDelegationTokenSecretManager using principal with Schema /_HOST. 
Contributed by luhuachao.
b207244 is described below

commit b2072444ab471125ab63592b50106961166bde7a
Author: Takanobu Asanuma 
AuthorDate: Fri Sep 20 13:55:34 2019 +0900

HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in 
ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed 
by luhuachao.

(cherry picked from commit 298f07abceb4bb854630c9058e1697d4ef3d2ae2)
---
 .../security/token/delegation/ZKDelegationTokenSecretManager.java   | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index dca9e2f..cce124e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
 import org.apache.zookeeper.CreateMode;
@@ -221,6 +222,7 @@ public abstract class 
ZKDelegationTokenSecretManager

[hadoop] branch trunk updated: HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed by luhuachao.

2019-09-19 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 298f07a  HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL 
in ZKDelegationTokenSecretManager using principal with Schema /_HOST. 
Contributed by luhuachao.
298f07a is described below

commit 298f07abceb4bb854630c9058e1697d4ef3d2ae2
Author: Takanobu Asanuma 
AuthorDate: Fri Sep 20 13:55:34 2019 +0900

HADOOP-16069. Support configure ZK_DTSM_ZK_KERBEROS_PRINCIPAL in 
ZKDelegationTokenSecretManager using principal with Schema /_HOST. Contributed 
by luhuachao.
---
 .../security/token/delegation/ZKDelegationTokenSecretManager.java   | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index dca9e2f..cce124e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
 import org.apache.zookeeper.CreateMode;
@@ -221,6 +222,7 @@ public abstract class 
ZKDelegationTokenSecretManager
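
The new SecurityUtil import is the heart of this change: the principal
configured in ZK_DTSM_ZK_KERBEROS_PRINCIPAL may now contain the _HOST
placeholder, which Hadoop expands to a concrete hostname. A sketch of the
expansion (the principal and hostname values are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.security.SecurityUtil;

    public class HostPrincipalSketch {
      public static void main(String[] args) throws IOException {
        // _HOST in the configured principal is replaced with the hostname.
        String configured = "zkdt/_HOST@EXAMPLE.COM"; // hypothetical principal
        String resolved =
            SecurityUtil.getServerPrincipal(configured, "node1.example.com");
        System.out.println(resolved); // zkdt/node1.example.com@EXAMPLE.COM
      }
    }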

[hadoop] branch trunk updated: HDDS-2101. Ozone filesystem provider doesn't exist (#1473)

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7ae8a9  HDDS-2101. Ozone filesystem provider doesn't exist (#1473)
b7ae8a9 is described below

commit b7ae8a96cde5d78c7c73653e09b6e4b130b4d74b
Author: Vivek Ratnavel Subramanian 
AuthorDate: Thu Sep 19 16:28:29 2019 -0700

HDDS-2101. Ozone filesystem provider doesn't exist (#1473)
---
 .../src/main/compose/ozone-mr/hadoop27/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop31/docker-config |  1 -
 .../src/main/compose/ozone-mr/hadoop32/docker-config |  1 -
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 .../META-INF/services/org.apache.hadoop.fs.FileSystem| 16 
 5 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
index 9e9cc04..fccdace 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzoneFileSystem
 
CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
index f826c75..d7ead21 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config
@@ -14,6 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
 
MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
diff --git 
a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 000..0368002
--- /dev/null
+++ 
b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git 
a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 000..39ca348
--- /dev/null
+++ 
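
What the two new META-INF/services files accomplish: they register
OzoneFileSystem with the JDK ServiceLoader, which is why the explicit
fs.o3fs.impl entries can be dropped from the docker-configs above. Discovery
can be verified with a sketch along these lines:

    import java.util.ServiceLoader;
    import org.apache.hadoop.fs.FileSystem;

    public class ProviderCheck {
      public static void main(String[] args) {
        // Prints every FileSystem implementation registered through
        // META-INF/services/org.apache.hadoop.fs.FileSystem on the classpath.
        for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
          System.out.println(fs.getClass().getName());
        }
      }
    }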

[hadoop] branch branch-2 updated: YARN-7410. Cleanup FixedValueResource to avoid dependency to ResourceUtils. (wangda)

2019-09-19 Thread jhung
This is an automated email from the ASF dual-hosted git repository.

jhung pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 727cbcb  YARN-7410. Cleanup FixedValueResource to avoid dependency to 
ResourceUtils. (wangda)
727cbcb is described below

commit 727cbcb41dd1c6e85f6a451b4fc5057b6c87628b
Author: Wangda Tan 
AuthorDate: Mon Nov 6 15:26:49 2017 -0800

YARN-7410. Cleanup FixedValueResource to avoid dependency to ResourceUtils. 
(wangda)

(cherry picked from commit df27824b80b8eca03d02837efe2da31f089e67ec)
---
 .../apache/hadoop/yarn/api/records/Resource.java   |  2 +-
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  1 +
 .../hadoop/yarn/util/resource/Resources.java   | 59 ++
 3 files changed, 61 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index f7c699f..ce3ea13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -374,7 +374,7 @@ public abstract class Resource implements 
Comparable<Resource> {
 }
   }
 
-  private void throwExceptionWhenArrayOutOfBound(int index) {
+  protected void throwExceptionWhenArrayOutOfBound(int index) {
 String exceptionMsg = String.format(
 "Trying to access ResourceInformation for given index=%d. "
 + "Acceptable index range is [0,%d), please check double check "
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index a359ad1..4c65cae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -290,6 +290,7 @@ public class ResourceUtils {
 updateKnownResources();
 updateResourceTypeIndex();
 initializedResources = true;
+numKnownResourceTypes = resourceTypes.size();
   }
 
   private static void updateKnownResources() {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 325bce4..11e53ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -125,6 +125,65 @@ public class Resources {
   throw new RuntimeException(name + " cannot be modified!");
 }
 
+/*
+ *  FixedValueResource cannot be updated when resource types are refreshed
+ *  via the approach introduced by YARN-7307, so operations like
+ *  Resources.compare(resource_x, Resources.none()) would throw exceptions.
+ *
+ *  That's why we reinitialize the resource maps in the following methods.
+ */
+
+@Override
+public ResourceInformation getResourceInformation(int index)
+throws ResourceNotFoundException {
+  ResourceInformation ri = null;
+  try {
+ri = super.getResourceInformation(index);
+  } catch (ResourceNotFoundException e) {
+// Retry once to reinitialize resource information.
+initResourceMap();
+try {
+  return super.getResourceInformation(index);
+} catch (ResourceNotFoundException ee) {
+  throwExceptionWhenArrayOutOfBound(index);
+}
+  }
+  return ri;
+}
+
+@Override
+public ResourceInformation getResourceInformation(String resource)
+throws ResourceNotFoundException {
+  ResourceInformation ri;
+  try {
+ri = super.getResourceInformation(resource);
+  } catch (ResourceNotFoundException e) {
+// Retry once to reinitialize resource information.
+initResourceMap();
+try {
+  return super.getResourceInformation(resource);
+} catch (ResourceNotFoundException ee) {
+  throw ee;
+}
+  }
+  return ri;
+}
+
+@Override
+public ResourceInformation[] getResources() {
+  if (resources.length != ResourceUtils.getNumberOfKnownResourceTypes()) {
+// Retry once to reinitialize resource information.
+initResourceMap();
+if (resources.length 

[hadoop] 02/02: YARN-7860. Fix UT failure TestRMWebServiceAppsNodelabel#testAppsRunning. Contributed by Sunil G.

2019-09-19 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c351230d0ea2d37f72a2c59162b23e110dead738
Author: Weiwei Yang 
AuthorDate: Tue Feb 6 19:33:04 2018 +0800

YARN-7860. Fix UT failure TestRMWebServiceAppsNodelabel#testAppsRunning. 
Contributed by Sunil G.

(cherry picked from commit bff858e910f69991e6c75151d9b89c3e78210f16)
---
 .../webapp/TestRMWebServiceAppsNodelabel.java   | 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
index ff48c7a..433c084 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java
@@ -217,13 +217,18 @@ public class TestRMWebServiceAppsNodelabel extends 
JerseyTestBase {
 
   private void verifyResource(JSONObject partition, String partitionName,
   String amused, String used, String reserved) throws JSONException {
+JSONObject amusedObject = (JSONObject) partition.get("amUsed");
+JSONObject usedObject = (JSONObject) partition.get("used");
+JSONObject reservedObject = (JSONObject) partition.get("reserved");
 assertEquals("Partition expected", partitionName,
 partition.get("partitionName"));
-assertEquals("partition amused", amused,
-partition.get("amUsed").toString());
-assertEquals("partition used", used, partition.get("used").toString());
+assertEquals("partition amused", amused, getResource(
+(int) amusedObject.get("memory"), (int) amusedObject.get("vCores")));
+assertEquals("partition used", used, getResource(
+(int) usedObject.get("memory"), (int) usedObject.get("vCores")));
 assertEquals("partition reserved", reserved,
-partition.get("reserved").toString());
+getResource((int) reservedObject.get("memory"),
+(int) reservedObject.get("vCores")));
   }
 
   @SuppressWarnings("unchecked")
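
Why the assertion changed: after YARN-7817 (the sibling commit in this push),
the amUsed/used/reserved fields are JSON objects that carry extra resource
information, so comparing their raw toString() output against the expected
"<memory:..., vCores:...>" string no longer works. The test now extracts the
memory and vCores fields and rebuilds the expected string, roughly as in this
sketch (the JSON payload is hand-written for illustration):

    import org.codehaus.jettison.json.JSONException;
    import org.codehaus.jettison.json.JSONObject;

    public class ResourceJsonSketch {
      public static void main(String[] args) throws JSONException {
        // Stand-in for the partition JSON returned by the RM REST API.
        JSONObject used = new JSONObject("{\"memory\":1024,\"vCores\":1}");
        String expected = "<memory:" + used.get("memory")
            + ", vCores:" + used.get("vCores") + ">";
        System.out.println(expected); // <memory:1024, vCores:1>
      }
    }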





[hadoop] branch branch-2 updated (ca93156 -> c351230)

2019-09-19 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a change to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ca93156  HADOOP-16582. LocalFileSystem's mkdirs() does not work as 
expected under viewfs. Contributed by Kihwal Lee
 new e56f2d4  YARN-7817. Add Resource reference to RM's NodeInfo object so 
REST API can get non memory/vcore resource usages. (Sunil G via wangda)
 new c351230  YARN-7860. Fix UT failure 
TestRMWebServiceAppsNodelabel#testAppsRunning. Contributed by Sunil G.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../webapp/JAXBContextResolver.java|  2 +-
 .../webapp/dao/ClusterMetricsInfo.java | 24 +
 .../resourcemanager/webapp/dao/NodeInfo.java   | 20 +
 .../resourcemanager/webapp/dao/ResourceInfo.java   | 18 +---
 ...outsInfo.java => ResourceInformationsInfo.java} | 25 +++---
 .../webapp/TestRMWebServiceAppsNodelabel.java  | 13 +++
 .../src/main/webapp/app/models/cluster-metric.js   |  4 ++--
 .../src/main/webapp/app/models/yarn-rm-node.js |  4 ++--
 .../main/webapp/app/serializers/yarn-rm-node.js|  4 ++--
 9 files changed, 88 insertions(+), 26 deletions(-)
 copy 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/{AppTimeoutsInfo.java
 => ResourceInformationsInfo.java} (66%)





[hadoop] 01/02: YARN-7817. Add Resource reference to RM's NodeInfo object so REST API can get non memory/vcore resource usages. (Sunil G via wangda)

2019-09-19 Thread epayne
This is an automated email from the ASF dual-hosted git repository.

epayne pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e56f2d4993c1693b25aacf4b9fa14eb21aa59bcd
Author: Wangda Tan 
AuthorDate: Fri Jan 26 15:43:27 2018 +0800

YARN-7817. Add Resource reference to RM's NodeInfo object so REST API can 
get non memory/vcore resource usages. (Sunil G via wangda)

Change-Id: Ia7ceeabd82046645ddeaf487c763288f36cfbdee
(cherry picked from commit e0cfb0a31a1c6ad0f9dcf0705b44229593a0915f)
---
 .../webapp/JAXBContextResolver.java|  2 +-
 .../webapp/dao/ClusterMetricsInfo.java | 24 +++
 .../resourcemanager/webapp/dao/NodeInfo.java   | 20 +
 .../resourcemanager/webapp/dao/ResourceInfo.java   | 18 ++--
 .../webapp/dao/ResourceInformationsInfo.java   | 48 ++
 .../src/main/webapp/app/models/cluster-metric.js   |  4 +-
 .../src/main/webapp/app/models/yarn-rm-node.js |  4 +-
 .../main/webapp/app/serializers/yarn-rm-node.js|  4 +-
 8 files changed, 114 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
index 2f50a24..2e4204e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
@@ -55,7 +55,7 @@ public class JAXBContextResolver implements 
ContextResolver<JAXBContext> {
 UsersInfo.class, UserInfo.class, ApplicationStatisticsInfo.class,
 StatisticsItemInfo.class, CapacitySchedulerHealthInfo.class,
 FairSchedulerQueueInfoList.class, AppTimeoutsInfo.class,
-AppTimeoutInfo.class };
+AppTimeoutInfo.class, ResourceInformationsInfo.class };
 // these dao classes need root unwrapping
 final Class[] rootUnwrappedTypes =
 { NewApplication.class, ApplicationSubmissionContextInfo.class,
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index 3214cb9..84f70d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -61,6 +61,12 @@ public class ClusterMetricsInfo {
   private int activeNodes;
   private int shutdownNodes;
 
+  // Total used resource of the cluster, including all partitions
+  private ResourceInfo totalUsedResourcesAcrossPartition;
+
+  // Total registered resources of the cluster, including all partitions
+  private ResourceInfo totalClusterResourcesAcrossPartition;
+
   public ClusterMetricsInfo() {
   } // JAXB needs this
 
@@ -92,9 +98,20 @@ public class ClusterMetricsInfo {
 this.containersReserved = metrics.getReservedContainers();
 
 if (rs instanceof CapacityScheduler) {
+  CapacityScheduler cs = (CapacityScheduler) rs;
   this.totalMB = availableMB + allocatedMB + reservedMB;
   this.totalVirtualCores =
   availableVirtualCores + allocatedVirtualCores + containersReserved;
+  // TODO, add support of other schedulers to get total used resources
+  // across partition.
+  if (cs.getRootQueue() != null
+  && cs.getRootQueue().getQueueResourceUsage() != null
+  && cs.getRootQueue().getQueueResourceUsage().getAllUsed() != null) {
+totalUsedResourcesAcrossPartition = new ResourceInfo(
+cs.getRootQueue().getQueueResourceUsage().getAllUsed());
+totalClusterResourcesAcrossPartition = new ResourceInfo(
+cs.getClusterResource());
+  }
 } else {
   this.totalMB = availableMB + allocatedMB;
   this.totalVirtualCores = availableVirtualCores + allocatedVirtualCores;
@@ -310,4 +327,11 @@ public class ClusterMetricsInfo {
 this.shutdownNodes = shutdownNodes;
   }
 
+  public ResourceInfo getTotalUsedResourcesAcrossPartition() {
+return totalUsedResourcesAcrossPartition;
+  }
+
+  

[hadoop] branch trunk updated: HDDS-2154. Fix Checkstyle issues (#1475)

2019-09-19 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 126ef77  HDDS-2154. Fix Checkstyle issues (#1475)
126ef77 is described below

commit 126ef77a810113d263042adfec0a613bf900964d
Author: Elek, Márton 
AuthorDate: Thu Sep 19 20:30:33 2019 +0200

HDDS-2154. Fix Checkstyle issues (#1475)
---
 .../hadoop/hdds/scm/client/HddsClientUtils.java|  9 --
 .../hadoop/hdds/utils/LevelDBStoreIterator.java|  3 +-
 .../hadoop/hdds/utils/RocksDBStoreIterator.java|  8 +++--
 .../hadoop/hdds/utils/db/cache/TableCache.java |  3 +-
 .../hadoop/hdds/utils/TestMetadataStore.java   | 10 ---
 .../server/ratis/ContainerStateMachine.java|  3 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  |  3 +-
 .../org/apache/hadoop/fs/ozone/OzoneFsShell.java   | 34 --
 .../ozone/freon/OzoneClientKeyValidator.java   |  1 -
 .../apache/hadoop/ozone/freon/S3KeyGenerator.java  |  1 -
 .../apache/hadoop/ozone/freon/SameKeyReader.java   |  1 -
 11 files changed, 37 insertions(+), 39 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 4a3926d..d3bb31a 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -133,7 +133,8 @@ public final class HddsClientUtils {
*
* @throws IllegalArgumentException
*/
-  public static void verifyResourceName(String resName) throws 
IllegalArgumentException {
+  public static void verifyResourceName(String resName)
+  throws IllegalArgumentException {
 if (resName == null) {
   throw new IllegalArgumentException("Bucket or Volume name is null");
 }
@@ -141,7 +142,8 @@ public final class HddsClientUtils {
 if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH ||
 resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) {
   throw new IllegalArgumentException(
-  "Bucket or Volume length is illegal, valid length is 3-63 
characters");
+  "Bucket or Volume length is illegal, "
+  + "valid length is 3-63 characters");
 }
 
 if (resName.charAt(0) == '.' || resName.charAt(0) == '-') {
@@ -151,7 +153,8 @@ public final class HddsClientUtils {
 
 if (resName.charAt(resName.length() - 1) == '.' ||
 resName.charAt(resName.length() - 1) == '-') {
-  throw new IllegalArgumentException("Bucket or Volume name cannot end 
with a period or dash");
+  throw new IllegalArgumentException("Bucket or Volume name "
+  + "cannot end with a period or dash");
 }
 
 boolean isIPv4 = true;
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
index 2b9dbc0..f5b6769 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java
@@ -25,7 +25,8 @@ import java.util.NoSuchElementException;
 /**
  * LevelDB store iterator.
  */
-public class LevelDBStoreIterator implements MetaStoreIterator< 
MetadataStore.KeyValue > {
+public class LevelDBStoreIterator
+implements MetaStoreIterator<MetadataStore.KeyValue> {
 
 
   private DBIterator levelDBIterator;
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
index 01e7244..e39ec57 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java
@@ -26,7 +26,8 @@ import java.util.NoSuchElementException;
 /**
  * RocksDB store iterator.
  */
-public class RocksDBStoreIterator implements MetaStoreIterator< 
MetadataStore.KeyValue > {
+public class RocksDBStoreIterator
+implements MetaStoreIterator<MetadataStore.KeyValue> {
 
   private RocksIterator rocksDBIterator;
 
@@ -43,8 +44,9 @@ public class RocksDBStoreIterator implements 
MetaStoreIterator< MetadataStore.Ke
   @Override
   public MetadataStore.KeyValue next() {
 if (rocksDBIterator.isValid()) {
-  MetadataStore.KeyValue value = 
MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
-  .value());
+  MetadataStore.KeyValue value =
+  MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator
+  .value());
   rocksDBIterator.next();
   return value;
 }
diff --git 

[hadoop] branch trunk updated (a79f286 -> da1c67e)

2019-09-19 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from a79f286  HDFS-14609. RBF: Security should use common 
AuthenticationFilter. Contributed by Chen Zhang.
 add da1c67e  HDDS-1054. List Multipart uploads in a bucket (#1277)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hdds/client/ReplicationFactor.java  |  18 ++
 .../apache/hadoop/hdds/client/ReplicationType.java |  22 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java|  10 +
 .../hadoop/ozone/client/OzoneMultipartUpload.java  |  89 +++
 .../ozone/client/OzoneMultipartUploadList.java |  34 +--
 .../client/OzoneMultipartUploadPartListParts.java  |  10 +
 .../ozone/client/protocol/ClientProtocol.java  |   5 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  32 ++-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   1 +
 .../org/apache/hadoop/ozone/audit/OMAction.java|   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   8 +
 .../ozone/om/helpers/OmMultipartKeyInfo.java   |   5 +-
 .../hadoop/ozone/om/helpers/OmMultipartUpload.java | 149 
 ...ist.java => OmMultipartUploadCompleteList.java} |   8 +-
 .../ozone/om/helpers/OmMultipartUploadList.java|  44 ++--
 .../om/helpers/OmMultipartUploadListParts.java |  14 ++
 .../ozone/om/protocol/OzoneManagerProtocol.java|   8 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  46 +++-
 .../src/main/proto/OzoneManagerProtocol.proto  |  38 ++-
 ...mBucketInfo.java => TestOmMultipartUpload.java} |  27 +--
 .../src/main/smoketest/s3/MultipartUpload.robot|  20 ++
 .../org/apache/hadoop/ozone/om/KeyManager.java |   6 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 114 ++---
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  13 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |  53 ++--
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  32 ++-
 .../S3MultipartUploadCompleteRequest.java  |  33 +--
 .../protocolPB/OzoneManagerRequestHandler.java |  45 +++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  71 +-
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |  30 ++-
 .../s3/endpoint/ListMultipartUploadsResult.java| 268 +
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  10 +-
 .../apache/hadoop/ozone/s3/util/S3StorageType.java |  11 +-
 .../hadoop/ozone/client/OzoneBucketStub.java   |   7 +-
 .../hadoop/ozone/s3/endpoint/TestBucketGet.java|  39 ++-
 35 files changed, 1137 insertions(+), 184 deletions(-)
 create mode 100644 
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java
 copy 
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java
 => 
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
 (61%)
 create mode 100644 
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java
 copy 
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/{OmMultipartUploadList.java
 => OmMultipartUploadCompleteList.java} (88%)
 copy 
hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/{TestOmBucketInfo.java
 => TestOmMultipartUpload.java} (63%)
 create mode 100644 
hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java
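
Based on the classes listed above (OzoneBucket, OzoneMultipartUpload,
OzoneMultipartUploadList), client code can enumerate in-flight multipart
uploads roughly as follows. The method names are an assumption drawn from the
diffstat, since this mail does not include the diff bodies:

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;
    import org.apache.hadoop.ozone.client.OzoneMultipartUpload;
    import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;

    public class ListMpuSketch {
      public static void main(String[] args) throws IOException {
        OzoneClient client =
            OzoneClientFactory.getRpcClient(new OzoneConfiguration());
        OzoneBucket bucket = client.getObjectStore()
            .getVolume("vol1").getBucket("bucket1"); // hypothetical names
        // Assumed API shape: list uploads under a key prefix.
        OzoneMultipartUploadList uploads = bucket.listMultipartUploads("");
        for (OzoneMultipartUpload u : uploads.getUploads()) {
          System.out.println(u.getKeyName() + " " + u.getUploadId());
        }
        client.close();
      }
    }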





[hadoop] branch trunk updated: HDFS-14609. RBF: Security should use common AuthenticationFilter. Contributed by Chen Zhang.

2019-09-19 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a79f286  HDFS-14609. RBF: Security should use common 
AuthenticationFilter. Contributed by Chen Zhang.
a79f286 is described below

commit a79f286c6ff9d7e39cfb1e88839a4aaba0cf7867
Author: Inigo Goiri 
AuthorDate: Thu Sep 19 10:59:48 2019 -0700

HDFS-14609. RBF: Security should use common AuthenticationFilter. 
Contributed by Chen Zhang.
---
 .../fs/contract/router/SecurityConfUtil.java   |  15 ++-
 .../router/TestRouterWithSecureStartup.java|  14 ++-
 .../security/TestRouterHttpDelegationToken.java| 105 +
 3 files changed, 110 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
index d6ee3c7..6154eee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
@@ -24,8 +24,6 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KE
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY;
@@ -65,6 +63,7 @@ public final class SecurityConfUtil {
   // State string for mini dfs
   private static final String SPNEGO_USER_NAME = "HTTP";
   private static final String ROUTER_USER_NAME = "router";
+  private static final String PREFIX = "hadoop.http.authentication.";
 
   private static String spnegoPrincipal;
   private static String routerPrincipal;
@@ -73,6 +72,10 @@ public final class SecurityConfUtil {
 // Utility Class
   }
 
+  public static String getRouterUserName() {
+return ROUTER_USER_NAME;
+  }
+
   public static Configuration initSecurity() throws Exception {
 // delete old test dir
 File baseDir = GenericTestUtils.getTestDir(
@@ -114,8 +117,9 @@ public final class SecurityConfUtil {
 conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
 conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, routerPrincipal);
 conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
-conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
-conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, keytab);
+conf.set(PREFIX + "type", "kerberos");
+conf.set(PREFIX + "kerberos.principal", spnegoPrincipal);
+conf.set(PREFIX + "kerberos.keytab", keytab);
 
 conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
 conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -138,7 +142,8 @@ public final class SecurityConfUtil {
 // Setup principals and keytabs for router
 conf.set(DFS_ROUTER_KEYTAB_FILE_KEY, keytab);
 conf.set(DFS_ROUTER_KERBEROS_PRINCIPAL_KEY, routerPrincipal);
-conf.set(DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, "*");
+conf.set(DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
+spnegoPrincipal);
 
 // Setup basic state store
 conf.setClass(RBFConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java
index 7cc2c87..b660b4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java
@@ -27,7 +27,6 @@ import org.junit.rules.ExpectedException;
 import java.io.IOException;
 
 import static 
org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static 
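
The key shift in SecurityConfUtil above: the router's HTTP endpoint now takes
its SPNEGO settings from the common hadoop.http.authentication.* keys consumed
by the shared AuthenticationFilter, rather than the HDFS-specific
dfs.web.authentication.* keys. In configuration terms (the principal and
keytab values below are placeholders):

    import org.apache.hadoop.conf.Configuration;

    public class RouterHttpAuthSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        final String prefix = "hadoop.http.authentication.";
        conf.set(prefix + "type", "kerberos");
        conf.set(prefix + "kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
        conf.set(prefix + "kerberos.keytab", "/etc/security/keytabs/spnego.keytab");
        System.out.println(conf.get(prefix + "type")); // kerberos
      }
    }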

[hadoop] branch trunk updated (f260b5a -> 1ada99b)

2019-09-19 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from f260b5a  HDDS-2127. Detailed Tools doc not reachable
 add 1ada99b  HDDS-2151. Ozone client logs the entire request payload at 
DEBUG level (#1477)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java | 25 +-
 1 file changed, 24 insertions(+), 1 deletion(-)
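
The mail omits the diff body, but the issue title tells the story:
XceiverClientRatis rendered the entire request payload into a DEBUG message.
The usual cure, sketched below (not the literal patch), is to guard the call
and log only metadata:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DebugLogSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(DebugLogSketch.class);

      void send(byte[] payload, String cmdType) {
        if (LOG.isDebugEnabled()) {
          // Log the command type and size, not the payload bytes themselves.
          LOG.debug("Sending {} command, {} bytes", cmdType, payload.length);
        }
      }
    }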





[hadoop] branch trunk updated: HDDS-2127. Detailed Tools doc not reachable

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f260b5a  HDDS-2127. Detailed Tools doc not reachable
f260b5a is described below

commit f260b5aa5b26d85504e95f877b53300fb0cd70af
Author: Márton Elek 
AuthorDate: Thu Sep 19 14:42:33 2019 +0200

HDDS-2127. Detailed Tools doc not reachable

Signed-off-by: Anu Engineer 
---
 hadoop-hdds/docs/content/recipe/_index.md  |  2 +-
 hadoop-hdds/docs/content/tools/AuditParser.md  |  2 +-
 hadoop-hdds/docs/content/tools/Freon.md| 62 --
 hadoop-hdds/docs/content/tools/Genconf.md  |  2 +-
 hadoop-hdds/docs/content/tools/SCMCLI.md   |  2 +-
 hadoop-hdds/docs/content/tools/TestTools.md|  2 +-
 hadoop-hdds/docs/content/tools/Tools.md| 19 ---
 .../content/{beyond/Tools.md => tools/_index.md}   | 40 +-
 8 files changed, 33 insertions(+), 98 deletions(-)

diff --git a/hadoop-hdds/docs/content/recipe/_index.md 
b/hadoop-hdds/docs/content/recipe/_index.md
index beaab69..47053ab 100644
--- a/hadoop-hdds/docs/content/recipe/_index.md
+++ b/hadoop-hdds/docs/content/recipe/_index.md
@@ -2,7 +2,7 @@
 title: Recipes
 date: "2017-10-10"
 menu: main
-weight: 8
+weight: 9
 
 ---
 
-
-Overview
-
-
-Freon is a load-generator for Ozone. This tool is used for testing the 
functionality of ozone.
-
-### Random keys
-
-In randomkeys mode, the data written into ozone cluster is randomly generated.
-Each key will be of size 10 KB.
-
-The number of volumes/buckets/keys can be configured. The replication type and
-factor (eg. replicate with ratis to 3 nodes) Also can be configured.
-
-For more information use
-
-`bin/ozone freon --help`
-
-### Example
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
-{{< /highlight >}}
-
-{{< highlight bash >}}
-***
-Status: Success
-Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3
-Number of Volumes created: 10
-Number of Buckets created: 100
-Number of Keys added: 1000
-Ratis replication factor: THREE
-Ratis replication type: RATIS
-Average Time spent in volume creation: 00:00:00,035
-Average Time spent in bucket creation: 00:00:00,319
-Average Time spent in key creation: 00:00:03,659
-Average Time spent in key write: 00:00:10,894
-Total bytes written: 1024
-Total Execution time: 00:00:16,898
-***
-{{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/tools/Genconf.md 
b/hadoop-hdds/docs/content/tools/Genconf.md
index 146dfdc..35d5e3d 100644
--- a/hadoop-hdds/docs/content/tools/Genconf.md
+++ b/hadoop-hdds/docs/content/tools/Genconf.md
@@ -1,7 +1,7 @@
 ---
 title: "Generate Configurations"
 date: 2018-12-18
-
+summary: Tool to generate default configuration
 ---
 
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/beyond/Tools.md 
b/hadoop-hdds/docs/content/tools/_index.md
similarity index 85%
rename from hadoop-hdds/docs/content/beyond/Tools.md
rename to hadoop-hdds/docs/content/tools/_index.md
index 7316089..d7c9270 100644
--- a/hadoop-hdds/docs/content/beyond/Tools.md
+++ b/hadoop-hdds/docs/content/tools/_index.md
@@ -2,8 +2,11 @@
 title: "Tools"
 date: "2017-10-10"
 summary: Ozone supports a set of tools that are handy for developers. Here is a 
quick list of command line tools.
-weight: 3
+menu:
+   main:
+  weight: 8
 ---
+
 

[hadoop] branch trunk updated: HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

2019-09-19 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f6d884c  HDDS-2110. Arbitrary file can be downloaded with the help of 
ProfilerServlet
f6d884c is described below

commit f6d884cd118fdb6987eb3c369fc9a4c9317acf68
Author: Márton Elek 
AuthorDate: Sat Sep 14 06:18:33 2019 +0200

HDDS-2110. Arbitrary file can be downloaded with the help of ProfilerServlet

Signed-off-by: Anu Engineer 
---
 .../apache/hadoop/hdds/server/ProfileServlet.java  | 60 -
 .../hadoop/hdds/server/TestProfileServlet.java | 63 ++
 2 files changed, 109 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
index e09e9b5..42944e1 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
@@ -32,7 +32,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import org.apache.commons.io.IOUtils;
 import org.slf4j.Logger;
@@ -111,6 +113,10 @@ public class ProfileServlet extends HttpServlet {
   private static final AtomicInteger ID_GEN = new AtomicInteger(0);
   static final Path OUTPUT_DIR =
   Paths.get(System.getProperty("java.io.tmpdir"), "prof-output");
+  public static final String FILE_PREFIX = "async-prof-pid-";
+
+  public static final Pattern FILE_NAME_PATTERN =
+  Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
 
   private Lock profilerLock = new ReentrantLock();
   private Integer pid;
@@ -165,6 +171,26 @@ public class ProfileServlet extends HttpServlet {
 }
   }
 
+  @VisibleForTesting
+  protected static String generateFileName(Integer pid, Output output,
+  Event event) {
+return FILE_PREFIX + pid + "-" +
+event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
++ "." +
+output.name().toLowerCase();
+  }
+
+  @VisibleForTesting
+  protected static String validateFileName(String filename) {
+if (!FILE_NAME_PATTERN.matcher(filename).matches()) {
+  throw new IllegalArgumentException(
+  "Invalid file name parameter " + filename + " doesn't match pattern "
+  + FILE_NAME_PATTERN);
+
+}
+return filename;
+  }
+
   @Override
   protected void doGet(final HttpServletRequest req,
   final HttpServletResponse resp) throws IOException {
@@ -195,7 +221,8 @@ public class ProfileServlet extends HttpServlet {
   return;
 }
 
-final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+final int duration =
+getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
 final Output output = getOutput(req);
 final Event event = getEvent(req);
 final Long interval = getLong(req, "interval");
@@ -213,11 +240,11 @@ public class ProfileServlet extends HttpServlet {
 int lockTimeoutSecs = 3;
 if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
   try {
+//Should be in sync with FILE_NAME_PATTERN
 File outputFile =
-OUTPUT_DIR.resolve("async-prof-pid-" + pid + "-" +
-event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet()
-+ "." +
-output.name().toLowerCase()).toFile();
+OUTPUT_DIR.resolve(
+ProfileServlet.generateFileName(pid, output, event))
+.toFile();
 List cmd = new ArrayList<>();
 cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
 cmd.add("-e");
@@ -270,7 +297,8 @@ public class ProfileServlet extends HttpServlet {
 String relativeUrl = "/prof?file=" + outputFile.getName();
 resp.getWriter().write(
 "Started [" + event.getInternalName()
-+ "] profiling. This page will automatically redirect to " 
+
++ "] profiling. This page will automatically redirect to "
++
 relativeUrl + " after " + duration
 + " seconds.\n\ncommand:\n" + Joiner.on(" ").join(cmd));
 resp.getWriter().write(
@@ -320,9 +348,12 @@ public class ProfileServlet extends HttpServlet {
   final HttpServletResponse resp)
   throws IOException {
 
+;
+String safeFileName = validateFileName(fileName);
 File requestedFile =
-
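
For illustration, a minimal, self-contained sketch (hypothetical code mirroring the ProfileServlet diff above, not the committed class) of the property the new validation relies on: every name produced by a generateFileName-style method matches FILE_NAME_PATTERN, while arbitrary request input such as a path-traversal string does not.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;

public class FileNameCheck {
  static final String FILE_PREFIX = "async-prof-pid-";
  static final Pattern FILE_NAME_PATTERN = Pattern.compile(
      FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
  static final AtomicInteger ID_GEN = new AtomicInteger(0);

  // Mirrors generateFileName(pid, output, event): prefix + pid-event-id.output
  static String generateFileName(int pid, String event, String output) {
    return FILE_PREFIX + pid + "-" + event.toLowerCase() + "-"
        + ID_GEN.incrementAndGet() + "." + output.toLowerCase();
  }

  public static void main(String[] args) {
    String ok = generateFileName(4242, "CPU", "SVG");  // async-prof-pid-4242-cpu-1.svg
    String evil = "../../etc/passwd";                  // traversal attempt via ?file=
    System.out.println(FILE_NAME_PATTERN.matcher(ok).matches());   // true
    System.out.println(FILE_NAME_PATTERN.matcher(evil).matches()); // false
  }
}

Since validateFileName rejects anything outside the pattern before OUTPUT_DIR.resolve() runs, a download request can no longer steer the path outside the profiler output directory.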

[hadoop] branch trunk updated: HADOOP-16556. Fix some alerts raised by LGTM.

2019-09-19 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 56248f9  HADOOP-16556. Fix some alerts raised by LGTM.
56248f9 is described below

commit 56248f9d87fdf65df6103f52f47dc6e8b9969abc
Author: Malcolm Taylor 
AuthorDate: Thu Sep 19 15:57:54 2019 +0100

HADOOP-16556. Fix some alerts raised by LGTM.

Contributed by Malcolm Taylor.

Change-Id: Ic60c3f4681dd9d48b3afcba7520bd1e4d3cc4231
---
 .../org/apache/hadoop/security/authentication/util/KerberosName.java| 2 +-
 .../org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java | 2 +-
 .../java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java  | 2 +-
 .../java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java | 2 --
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 684d2c8..67c2c10 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -281,7 +281,7 @@ public class KerberosName {
 if (paramNum != null) {
   try {
 int num = Integer.parseInt(paramNum);
-if (num < 0 || num > params.length) {
+if (num < 0 || num >= params.length) {
   throw new BadFormatString("index " + num + " from " + format +
 " is outside of the valid range 0 to " 
+
 (params.length - 1));
diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index dac1135..175f6bb 100644
--- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -926,7 +926,7 @@ public class RegistrySecurity extends AbstractService {
   UserGroupInformation realUser = currentUser.getRealUser();
   LOG.info("Real User = {}" , realUser);
 } catch (IOException e) {
-  LOG.warn("Failed to get current user {}, {}", e);
+  LOG.warn("Failed to get current user, {}", e);
 }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
index aed6343..8d30182 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
@@ -92,7 +92,7 @@ public class HttpFSExceptionProvider extends ExceptionProvider {
 String path = MDC.get("path");
 String message = getOneLineMessage(throwable);
AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message});
-LOG.warn("[{}:{}] response [{}] {}", new Object[]{method, path, status, message}, throwable);
+LOG.warn("[{}:{}] response [{}] {}", method, path, status, message, throwable);
   }
 
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
index 0ada51a..2bad02c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
@@ -87,8 +87,6 @@ public class GenericExceptionHandler implements ExceptionMapper<Exception> {
   s = Response.Status.BAD_REQUEST;
 } else if (e instanceof IllegalArgumentException) {
   s = Response.Status.BAD_REQUEST;
-} else if (e instanceof NumberFormatException) {
-  s = Response.Status.BAD_REQUEST;
 } else if (e instanceof BadRequestException) {
   s = Response.Status.BAD_REQUEST;
 } else if (e instanceof WebApplicationException


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
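
To see the KerberosName fix above in isolation: with n parameters the valid substitution indices are 0..n-1, so the guard must reject num == params.length as well. A minimal, hypothetical reproduction (not Hadoop code):

public class BoundsCheck {
  public static void main(String[] args) {
    String[] params = {"alice", "EXAMPLE.COM"};  // $0, $1
    int num = 2;  // one past the end
    // The old guard (num > params.length) accepted num == 2 and then
    // failed later with ArrayIndexOutOfBoundsException on params[num].
    if (num < 0 || num >= params.length) {
      System.out.println("index " + num
          + " is outside of the valid range 0 to " + (params.length - 1));
    } else {
      System.out.println(params[num]);
    }
  }
}

The RegistrySecurity and HttpFSExceptionProvider hunks are the same class of LGTM alert: logging calls whose argument lists did not line up with the format string's placeholders.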

[hadoop] branch branch-2.8 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new dce2678  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
dce2678 is described below

commit dce2678ee5e2a590925c805e0f249a9d77c33dbb
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:28:48 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 41429ac..a338b53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 943846d..bffda59 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 813642f..f1d37ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 3cf0f0e..88d290b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite);

[hadoop] branch branch-2.9 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new c5cbef8  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
c5cbef8 is described below

commit c5cbef88e478cd2bb0c42e87327a80eed6cc73af
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:28:09 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 954a041..1c38df8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 5eeff15..3d8f03a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 27d8c0b..4c73eae 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index c72f579..7c0115e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite);

[hadoop] branch branch-2 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new ca93156  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
ca93156 is described below

commit ca93156cc5eb5a06ad025dc5ce87c3c49599bd79
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:27:23 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 954a041..1c38df8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -320,6 +320,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 5eeff15..3d8f03a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -256,6 +256,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 27d8c0b..4c73eae 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -448,11 +448,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -944,6 +951,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index c72f579..7c0115e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataOutputStream create(Path f);
 public FSDataOutputStream create(Path f, boolean overwrite);



[hadoop] branch branch-3.1 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 56562b9  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
56562b9 is described below

commit 56562b911785806bbba5d5e7b897fc0149bfe271
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:26:36 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f9bbfb1..9cab0f2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -332,6 +332,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 8b90f53..2341fe4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -268,6 +268,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 9523070..ddc1190 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -462,11 +462,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1077,6 +1084,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 9e01aef..7c4dfe5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -78,7 +78,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f);



[hadoop] branch branch-3.2 updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7477f8d  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
7477f8d is described below

commit 7477f8d2e9259bd4bb3e9bffe7d805c898ffe332
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:24:39 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

(cherry picked from commit d4205dce176287e863f567b333e0d408bf51ae6d)
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f9bbfb1..9cab0f2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -332,6 +332,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 8b90f53..2341fe4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -268,6 +268,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 9523070..ddc1190 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -462,11 +462,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1077,6 +1084,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 9e01aef..7c4dfe5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -78,7 +78,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f);



[hadoop] branch trunk updated: HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee

2019-09-19 Thread kihwal
This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d4205dc  HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
d4205dc is described below

commit d4205dce176287e863f567b333e0d408bf51ae6d
Author: Kihwal Lee 
AuthorDate: Thu Sep 19 08:22:19 2019 -0500

HADOOP-16582. LocalFileSystem's mkdirs() does not work as expected under viewfs. Contributed by Kihwal Lee
---
 .../java/org/apache/hadoop/fs/FilterFileSystem.java |  4 
 .../org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java | 17 +++--
 .../java/org/apache/hadoop/fs/TestFilterFileSystem.java |  1 -
 4 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 99c18b6..e05c574 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -334,6 +334,10 @@ public class FilterFileSystem extends FileSystem {
 return fs.mkdirs(f, permission);
   }
 
+  @Override
+  public boolean mkdirs(Path f) throws IOException {
+return fs.mkdirs(f);
+  }
 
   /**
* The src file is on the local disk.  Add it to FS at
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index cec1891..c93225f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -280,6 +280,11 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public boolean mkdirs(final Path f) throws IOException {
+return super.mkdirs(fullPath(f));
+  }
+
+  @Override
   public FSDataInputStream open(final Path f, final int bufferSize) 
 throws IOException {
 return super.open(fullPath(f), bufferSize);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index f127d8d..6bc469c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -550,11 +550,18 @@ public class ViewFileSystem extends FileSystem {
   }
 
   @Override
+  public boolean mkdirs(Path dir) throws IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath);
+  }
+
+  @Override
   public boolean mkdirs(final Path dir, final FsPermission permission)
   throws IOException {
InodeTree.ResolveResult<FileSystem> res =
-  fsState.resolve(getUriPath(dir), false);
-   return  res.targetFileSystem.mkdirs(res.remainingPath, permission);
+fsState.resolve(getUriPath(dir), false);
+return res.targetFileSystem.mkdirs(res.remainingPath, permission);
   }
 
   @Override
@@ -1172,6 +1179,12 @@ public class ViewFileSystem extends FileSystem {
 }
 
 @Override
+public boolean mkdirs(Path dir)
+throws AccessControlException, FileAlreadyExistsException {
+  return mkdirs(dir, null);
+}
+
+@Override
 public FSDataInputStream open(Path f, int bufferSize)
 throws AccessControlException, FileNotFoundException, IOException {
   checkPathIsSlash(f);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 6de4f07..f0057a6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -77,7 +77,6 @@ public class TestFilterFileSystem {
 boolean overwrite, int bufferSize, short replication, long blockSize,
 Progressable progress) throws IOException;
 
-public boolean mkdirs(Path f);
 public FSDataInputStream open(Path f);
 public FSDataInputStream open(PathHandle f);
 public FSDataOutputStream create(Path f);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
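
The shape of the bug these six commits (trunk plus five backports) address, as a small hypothetical model rather than Hadoop's actual classes: when a delegating wrapper overrides only the two-argument mkdirs, the one-argument convenience overload runs in the wrapper's base class and imposes the wrapper's default permission instead of letting the target file system (e.g. LocalFileSystem) apply its own.

// Hypothetical mini-model of the FilterFileSystem/ViewFileSystem fix.
abstract class Fs {
  // Convenience overload: without an override in the wrapper, this
  // resolves here and forwards only the two-arg form downstream.
  boolean mkdirs(String path) { return mkdirs(path, "wrapper-default-perms"); }
  abstract boolean mkdirs(String path, String perms);
}

class LocalFs extends Fs {
  @Override boolean mkdirs(String path) { return mkdirs(path, "local-umask-perms"); }
  @Override boolean mkdirs(String path, String perms) {
    System.out.println("mkdir " + path + " with " + perms);
    return true;
  }
}

class FilterFs extends Fs {
  private final Fs fs;
  FilterFs(Fs fs) { this.fs = fs; }
  // The fix: forward the one-arg call too, so the target picks its default.
  @Override boolean mkdirs(String path) { return fs.mkdirs(path); }
  @Override boolean mkdirs(String path, String perms) { return fs.mkdirs(path, perms); }
}

public class MkdirsDemo {
  public static void main(String[] args) {
    // Prints "mkdir /tmp/d with local-umask-perms"; without the one-arg
    // override in FilterFs it would print "wrapper-default-perms".
    new FilterFs(new LocalFs()).mkdirs("/tmp/d");
  }
}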

[hadoop] branch trunk updated (28913f7 -> c9900a0)

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 28913f7  HDDS-2148. Remove redundant code in CreateBucketHandler.java
 add c9900a0  HDDS-2141. Missing total number of operations

No new revisions were added by this update.

Summary of changes:
 .../src/main/resources/webapps/ozoneManager/om-metrics.html   | 2 +-
 .../src/main/resources/webapps/ozoneManager/ozoneManager.js   | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2148. Remove redundant code in CreateBucketHandler.java

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 28913f7  HDDS-2148. Remove redundant code in CreateBucketHandler.java
28913f7 is described below

commit 28913f733e53c75e97397953a71f06191308c9b8
Author: dchitlangia 
AuthorDate: Thu Sep 19 12:26:53 2019 +0200

HDDS-2148. Remove redundant code in CreateBucketHandler.java

Closes #1471
---
 .../org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java  | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
index 237a7b2..b4951e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
@@ -88,7 +88,6 @@ public class CreateBucketHandler extends Handler {
   System.out.printf("Volume Name : %s%n", volumeName);
   System.out.printf("Bucket Name : %s%n", bucketName);
   if (bekName != null) {
-bb.setBucketEncryptionKey(bekName);
 System.out.printf("Bucket Encryption enabled with Key Name: %s%n",
 bekName);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-2119. Use checkstyle.xml and suppressions.xml in hdds/ozone projects for checkstyle validation

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e78848f  HDDS-2119. Use checkstyle.xml and suppressions.xml in hdds/ozone projects for checkstyle validation
e78848f is described below

commit e78848fc3cb113733ea640f0aa3abbb271b16005
Author: Nanda kumar 
AuthorDate: Thu Sep 19 11:14:59 2019 +0200

HDDS-2119. Use checkstyle.xml and suppressions.xml in hdds/ozone projects for checkstyle validation

Closes #1435
---
 .../checkstyle/checkstyle-noframes-sorted.xsl  | 189 
 hadoop-hdds/dev-support/checkstyle/checkstyle.xml  | 196 +
 .../dev-support/checkstyle/suppressions.xml|  21 +++
 hadoop-hdds/pom.xml|   1 -
 pom.ozone.xml  |  26 ++-
 5 files changed, 429 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl b/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl
new file mode 100644
index 000..7f2aedf
--- /dev/null
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl
@@ -0,0 +1,189 @@
[189 added lines elided: the mail archive stripped the XSLT/HTML markup. The stylesheet renders CheckStyle XML results as a sorted, frameless HTML report ("CheckStyle Audit", "Designed for use with CheckStyle and Ant") with per-file error tables and a summary table.]
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
new file mode 100644
index 000..1c43741
--- /dev/null
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -0,0 +1,196 @@
[196 added lines elided: the checkstyle.xml module configuration was stripped by the mail archive; only the checkstyle.org configuration_1_2.dtd reference survived.]
diff --git a/hadoop-hdds/dev-support/checkstyle/suppressions.xml b/hadoop-hdds/dev-support/checkstyle/suppressions.xml
new file mode 100644
index 000..7bc9479
--- /dev/null
+++ b/hadoop-hdds/dev-support/checkstyle/suppressions.xml
@@ -0,0 +1,21 @@
[21 added lines elided: the suppressions.xml content was stripped by the mail archive.]

[hadoop] branch trunk updated: HDDS-2016. Add option to enforce GDPR in Bucket Create command

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5c963a7  HDDS-2016. Add option to enforce GDPR in Bucket Create command
5c963a7 is described below

commit 5c963a75d648cb36e7e36884f61616831229b25a
Author: dchitlangia 
AuthorDate: Thu Sep 19 10:58:01 2019 +0200

HDDS-2016. Add option to enforce GDPR in Bucket Create command

Closes #1458
---
 hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md | 42 ++
 hadoop-hdds/docs/content/gdpr/_index.md| 38 
 hadoop-hdds/docs/content/shell/BucketCommands.md   |  2 ++
 .../hadoop/ozone/om/helpers/OmBucketArgs.java  |  2 ++
 .../hadoop/ozone/om/helpers/OmBucketInfo.java  |  2 ++
 .../web/ozShell/bucket/CreateBucketHandler.java| 14 
 .../ozone/web/ozShell/keys/InfoKeyHandler.java |  6 
 7 files changed, 106 insertions(+)

diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md b/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
new file mode 100644
index 000..dd23e04
--- /dev/null
+++ b/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
@@ -0,0 +1,42 @@
+---
+title: "GDPR in Ozone"
+date: "2019-September-17"
+weight: 5
+summary: GDPR in Ozone
+icon: user
+---
+
+
+
+Enabling GDPR compliance in Ozone is very straightforward. During bucket
+creation, you can specify `--enforcegdpr=true` or `-g=true` and this will
+ensure the bucket is GDPR compliant. Thus, any key created under this bucket
+will automatically be GDPR compliant.
+
+GDPR can only be enabled on a new bucket. For existing buckets, you would
+have to create a new GDPR compliant bucket and copy data from the old bucket
+into the new bucket to take advantage of GDPR.
+
+Example to create a GDPR compliant bucket:
+
+`ozone sh bucket create --enforcegdpr=true /hive/jan`
+
+`ozone sh bucket create -g=true /hive/jan`
+
+If you want to create an ordinary bucket then you can skip `--enforcegdpr`
+and `-g` flags.
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/gdpr/_index.md
new file mode 100644
index 000..9888369
--- /dev/null
+++ b/hadoop-hdds/docs/content/gdpr/_index.md
@@ -0,0 +1,38 @@
+---
+title: GDPR
+name: GDPR
+identifier: gdpr
+menu: main
+weight: 5
+---
+
+
+{{}}
+  The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. This is a European Union law, but due to the nature of software it oftentimes spills into other geographies.
+  Ozone supports GDPR's Right to Erasure (Right to be Forgotten).
+{{}}
+
+
+If you would like to understand Ozone's GDPR framework at a greater
+depth, please take a look at <a href="https://issues.apache.org/jira/secure/attachment/12978992/Ozone%20GDPR%20Framework.pdf">Ozone GDPR Framework</a>.
+
+
+Once you create a GDPR compliant bucket, any key created in that bucket will 
+automatically be GDPR compliant.
+
+
diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md
index f59f1ad..e817349 100644
--- a/hadoop-hdds/docs/content/shell/BucketCommands.md
+++ b/hadoop-hdds/docs/content/shell/BucketCommands.md
@@ -35,8 +35,10 @@ The `bucket create` command allows users to create a bucket.
 
 | Arguments  |  Comment|
 ||-|
+| -g, \-\-enforcegdpr| Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket.
|  Uri   | The name of the bucket in **/volume/bucket** format.
 
+
 {{< highlight bash >}}
 ozone sh bucket create /hive/jan
 {{< /highlight >}}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 8a938a9..aa6e8f5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -112,6 +112,8 @@ public final class OmBucketArgs extends WithMetadata 
implements Auditable {
 Map auditMap = new LinkedHashMap<>();
 auditMap.put(OzoneConsts.VOLUME, this.volumeName);
 auditMap.put(OzoneConsts.BUCKET, this.bucketName);
+auditMap.put(OzoneConsts.GDPR_FLAG,
+this.metadata.get(OzoneConsts.GDPR_FLAG));
 auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
 String.valueOf(this.isVersionEnabled));
 if(this.storageType != null){
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
 

[hadoop] branch trunk updated (ef478fe -> 1029060)

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ef478fe  HDDS-730. ozone fs cli prints hadoop fs in usage
 add 1029060  HDDS-2147. Include dumpstream in test report

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/dev-support/checks/_mvn_unit_report.sh | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-730. ozone fs cli prints hadoop fs in usage

2019-09-19 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ef478fe  HDDS-730. ozone fs cli prints hadoop fs in usage
ef478fe is described below

commit ef478fe73e72692b660de818d8c8faa9a155a10b
Author: cxorm 
AuthorDate: Thu Sep 19 09:16:12 2019 +0200

HDDS-730. ozone fs cli prints hadoop fs in usage

Closes #1464
---
 hadoop-ozone/common/src/main/bin/ozone |   2 +-
 .../org/apache/hadoop/fs/ozone/OzoneFsShell.java   | 100 +
 2 files changed, 101 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index e257519..cd8f202 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -178,7 +178,7 @@ function ozonecmd_case
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-recon"
 ;;
 fs)
-  HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
+  HADOOP_CLASSNAME=org.apache.hadoop.fs.ozone.OzoneFsShell
   OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools"
 ;;
 scmcli)
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
new file mode 100644
index 000..873c843
--- /dev/null
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.shell.Command;
+import org.apache.hadoop.fs.shell.CommandFactory;
+import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.tools.TableListing;
+import org.apache.hadoop.tracing.TraceUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Provide command line access to a Ozone FileSystem. */
+@InterfaceAudience.Private
+public class OzoneFsShell extends FsShell {
+
+  private final String ozoneUsagePrefix = "Usage: ozone fs [generic options]";
+
+  /**
+   * Default ctor with no configuration.  Be sure to invoke
+   * {@link #setConf(Configuration)} with a valid configuration prior
+   * to running commands.
+   */
+  public OzoneFsShell() { this(null); }
+
+  /**
+   * Construct a OzoneFsShell with the given configuration.  Commands can be
+   * executed via {@link #run(String[])}
+   * @param conf the hadoop configuration
+   */
+  public OzoneFsShell(Configuration conf) { super(conf); }
+
+  protected void registerCommands(CommandFactory factory) {
+// TODO: DFSAdmin subclasses FsShell so need to protect the command
+// registration.  This class should morph into a base class for
+// commands, and then this method can be abstract
+if (this.getClass().equals(OzoneFsShell.class)) {
+  factory.registerCommands(FsCommand.class);
+}
+  }
+
+  @Override
+  protected String getUsagePrefix() {
+return ozoneUsagePrefix;
+  }
+
+  /**
+   * main() has some simple utility methods
+   * @param argv the command and its arguments
+   * @throws Exception upon error
+   */
+  public static void main(String argv[]) throws Exception {
+OzoneFsShell shell = newShellInstance();
+Configuration conf = new Configuration();
+conf.setQuietMode(false);
+shell.setConf(conf);
+int res;
+try {
+  res = ToolRunner.run(shell, argv);
+} finally {
+  shell.close();
+}
+System.exit(res);
+  }
+
+  // TODO: this should be abstract in a base