[hadoop-ozone] 01/01: move out unsafeByteBufferConversion from the new interface

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-4298
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 08dda9ade16b0befb2e643d64906337cd2fa7e34
Author: Elek Márton 
AuthorDate: Mon Oct 5 12:30:11 2020 +0200

move out unsafeByteBufferConversion from the new interface
---
 .../apache/hadoop/hdds/scm/XceiverClientFactory.java   |  6 --
 .../apache/hadoop/hdds/scm/XceiverClientManager.java   |  7 ---
 .../org/apache/hadoop/hdds/scm/storage/BufferPool.java |  2 +-
 .../apache/hadoop/hdds/scm/ByteStringConversion.java   | 18 +++---
 .../ozone/container/keyvalue/KeyValueHandler.java  | 14 +++---
 .../ozone/client/io/BlockOutputStreamEntryPool.java| 11 ---
 .../apache/hadoop/ozone/client/io/KeyOutputStream.java | 14 +++---
 .../org/apache/hadoop/ozone/client/rpc/RpcClient.java  |  6 ++
 8 files changed, 44 insertions(+), 34 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
index 184645d..dc35cd5 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
@@ -18,20 +18,14 @@
 package org.apache.hadoop.hdds.scm;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.function.Function;
 
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-
 /**
  * Interface to provide XceiverClient when needed.
  */
 public interface XceiverClientFactory {
 
-  Function byteBufferToByteStringConversion();
-
   XceiverClientSpi acquireClient(Pipeline pipeline) throws IOException;
 
   void releaseClient(XceiverClientSpi xceiverClient, boolean invalidateClient);
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index e07a5d2..eaf0503 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.hdds.scm;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.security.cert.CertificateException;
 import java.security.cert.X509Certificate;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
 
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -49,7 +47,6 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
 import static 
org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_REPLICA_FOUND;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -277,10 +274,6 @@ public class XceiverClientManager implements Closeable, 
XceiverClientFactory {
 }
   }
 
-  public Function byteBufferToByteStringConversion(){
-return ByteStringConversion.createByteBufferConversion(conf);
-  }
-
   /**
* Get xceiver client metric.
*/
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index dc27d4b..94fa87a 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -42,7 +42,7 @@ public class BufferPool {
 
   public BufferPool(int bufferSize, int capacity) {
 this(bufferSize, capacity,
-ByteStringConversion.createByteBufferConversion(null));
+ByteStringConversion.createByteBufferConversion(false));
   }
 
   public BufferPool(int bufferSize, int capacity,
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
index dc44392..b5f6e48 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdds.scm;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import java.nio.ByteBuffer;
+import java.util.function.Function;
+
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.a

[hadoop-ozone] branch HDDS-4298 created (now 08dda9a)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch HDDS-4298
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


  at 08dda9a  move out unsafeByteBufferConversion from the new interface

This branch includes the following new commits:

 new 08dda9a  move out unsafeByteBufferConversion from the new interface

The 1 revision listed above as "new" is entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (5719615 -> 8cd86a6)

2020-10-05 Thread adoroszlai
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 5719615  HDDS-4304. Close Container event can fail if pipeline is 
removed first. (#1471)
 add 8cd86a6  HDDS-4299. Display Ratis version with ozone version (#1464)

No new revisions were added by this update.

Summary of changes:
 .../{VersionInfo.java => RatisVersionInfo.java}| 66 +-
 .../org/apache/hadoop/hdds/utils/VersionInfo.java  |  9 +--
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java | 15 +++--
 3 files changed, 31 insertions(+), 59 deletions(-)
 copy 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/{VersionInfo.java 
=> RatisVersionInfo.java} (50%)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (5719615 -> 8cd86a6)

2020-10-05 Thread adoroszlai
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 5719615  HDDS-4304. Close Container event can fail if pipeline is 
removed first. (#1471)
 add 8cd86a6  HDDS-4299. Display Ratis version with ozone version (#1464)

No new revisions were added by this update.

Summary of changes:
 .../{VersionInfo.java => RatisVersionInfo.java}| 66 +-
 .../org/apache/hadoop/hdds/utils/VersionInfo.java  |  9 +--
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java | 15 +++--
 3 files changed, 31 insertions(+), 59 deletions(-)
 copy 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/{VersionInfo.java 
=> RatisVersionInfo.java} (50%)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (8cd86a6 -> cfff097)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 8cd86a6  HDDS-4299. Display Ratis version with ozone version (#1464)
 add cfff097  HDDS-4271. Avoid logging chunk content in Ozone Insight 
(#1466)

No new revisions were added by this update.

Summary of changes:
 .../container/common/helpers/ContainerUtils.java   | 66 +++---
 .../container/common/impl/HddsDispatcher.java  |  4 +-
 .../server/OzoneProtocolMessageDispatcher.java | 32 ---
 3 files changed, 86 insertions(+), 16 deletions(-)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated (8cd86a6 -> cfff097)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 8cd86a6  HDDS-4299. Display Ratis version with ozone version (#1464)
 add cfff097  HDDS-4271. Avoid logging chunk content in Ozone Insight 
(#1466)

No new revisions were added by this update.

Summary of changes:
 .../container/common/helpers/ContainerUtils.java   | 66 +++---
 .../container/common/impl/HddsDispatcher.java  |  4 +-
 .../server/OzoneProtocolMessageDispatcher.java | 32 ---
 3 files changed, 86 insertions(+), 16 deletions(-)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated: HDDS-4264. Uniform naming conventions of Ozone Shell Options. (#1447)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new 4ad0318  HDDS-4264. Uniform naming conventions of Ozone Shell Options. 
(#1447)
4ad0318 is described below

commit 4ad03188ed4fbbe8d6dce1e8e0c8d91518904fc1
Author: micah zhao 
AuthorDate: Mon Oct 5 19:45:34 2020 +0800

HDDS-4264. Uniform naming conventions of Ozone Shell Options. (#1447)
---
 hadoop-hdds/docs/content/tools/TestTools.md|  2 +-
 hadoop-hdds/docs/content/tools/TestTools.zh.md |  2 +-
 .../scm/cli/pipeline/CreatePipelineSubcommand.java | 10 +++---
 .../main/k8s/definitions/ozone/freon/freon.yaml|  2 +-
 .../getting-started/freon/freon-deployment.yaml|  2 +-
 .../examples/minikube/freon/freon-deployment.yaml  |  2 +-
 .../examples/ozone-dev/freon/freon-deployment.yaml |  2 +-
 .../k8s/examples/ozone/freon/freon-deployment.yaml |  2 +-
 .../main/smoketest/auditparser/auditparser.robot   |  2 +-
 .../dist/src/main/smoketest/basic/basic.robot  |  2 +-
 .../src/main/smoketest/basic/ozone-shell-lib.robot |  2 +-
 .../dist/src/main/smoketest/freon/freon.robot  |  2 +-
 .../dist/src/main/smoketest/recon/recon-api.robot  |  2 +-
 .../dist/src/main/smoketest/spnego/web.robot   |  2 +-
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java| 40 +-
 .../src/test/blockade/ozone/client.py  | 10 +++---
 .../hadoop/ozone/freon/HadoopDirTreeGenerator.java | 15 
 .../ozone/freon/HadoopNestedDirGenerator.java  |  5 +--
 .../hadoop/ozone/freon/RandomKeyGenerator.java | 40 +-
 19 files changed, 84 insertions(+), 62 deletions(-)

diff --git a/hadoop-hdds/docs/content/tools/TestTools.md 
b/hadoop-hdds/docs/content/tools/TestTools.md
index 47e12eb..ac025f0 100644
--- a/hadoop-hdds/docs/content/tools/TestTools.md
+++ b/hadoop-hdds/docs/content/tools/TestTools.md
@@ -87,7 +87,7 @@ bin/ozone freon --help
 For example:
 
 ```
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
+ozone freon randomkeys --num-of-volumes=10 --num-of-buckets 10 --num-of-keys 
10  --replication-type=RATIS --factor=THREE
 ```
 
 ```
diff --git a/hadoop-hdds/docs/content/tools/TestTools.zh.md 
b/hadoop-hdds/docs/content/tools/TestTools.zh.md
index 1c79f27..c6dfd2c 100644
--- a/hadoop-hdds/docs/content/tools/TestTools.zh.md
+++ b/hadoop-hdds/docs/content/tools/TestTools.zh.md
@@ -88,7 +88,7 @@ bin/ozone freon --help
 例如:
 
 ```
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
+ozone freon randomkeys --num-of-volumes=10 --num-of-buckets 10 --num-of-keys 
10  --replication-type=RATIS --factor=THREE
 ```
 
 ```
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java
index c784be8..90858de 100644
--- 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java
@@ -38,15 +38,17 @@ import java.io.IOException;
 public class CreatePipelineSubcommand extends ScmSubcommand {
 
   @CommandLine.Option(
-  names = {"-t", "--replicationType"},
-  description = "Replication type (STAND_ALONE, RATIS)",
+  names = {"-t", "--replication-type", "--replicationType"},
+  description = "Replication type (STAND_ALONE, RATIS). Full name" +
+  " --replicationType will be removed in later versions.",
   defaultValue = "STAND_ALONE"
   )
   private HddsProtos.ReplicationType type;
 
   @CommandLine.Option(
-  names = {"-f", "--replicationFactor"},
-  description = "Replication factor (ONE, THREE)",
+  names = {"-f", "--replication-factor", "--replicationFactor"},
+  description = "Replication factor (ONE, THREE). Full name" +
+  " --replicationFactor will be removed in later versions.",
   defaultValue = "ONE"
   )
   private HddsProtos.ReplicationFactor factor;
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml 
b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml
index 40ebc98..90135f2 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml
@@ -34,7 +34,7 @@ spec:
   containers:
 - name: freon
   image: "@docker.image@"
-  args: ["ozone","freon", "rk", "--factor=THREE", 
"--replicationType=RATIS"]
+  args: ["ozone","freon", "rk", "--factor=THREE", 
"--replication-type=RATIS"]
   envFrom:
 - configMapRef:
 name: config
d

[hadoop-ozone] branch master updated: HDDS-4242. Copy PrefixInfo proto to new project hadoop-ozone/interface-storage (#1444)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new d6d27e4  HDDS-4242. Copy PrefixInfo proto to new project 
hadoop-ozone/interface-storage (#1444)
d6d27e4 is described below

commit d6d27e4edb23490cb9d1496078e5bcd0e5e8d60c
Author: Rui Wang 
AuthorDate: Mon Oct 5 04:49:22 2020 -0700

HDDS-4242. Copy PrefixInfo proto to new project 
hadoop-ozone/interface-storage (#1444)
---
 hadoop-ozone/interface-storage/pom.xml | 30 +++
 .../hadoop/ozone/om/codec/OmPrefixInfoCodec.java   |  5 +-
 .../hadoop/ozone/om/helpers/OmPrefixInfo.java  | 13 ++---
 .../hadoop/ozone/om/helpers/OzoneAclStorage.java   | 63 ++
 .../ozone/om/helpers/OzoneAclStorageUtil.java  | 62 +
 .../hadoop/ozone/om/helpers/package-info.java  | 24 +
 .../src/main/proto/OmStorageProtocol.proto | 60 +
 .../hadoop/ozone/om/helpers/TestOmPrefixInfo.java  |  0
 .../hadoop/ozone/om/helpers/package-info.java  | 24 +
 9 files changed, 273 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/interface-storage/pom.xml 
b/hadoop-ozone/interface-storage/pom.xml
index 43ba408..9f000bf 100644
--- a/hadoop-ozone/interface-storage/pom.xml
+++ b/hadoop-ozone/interface-storage/pom.xml
@@ -35,6 +35,11 @@
 
 
 
+  com.google.protobuf
+  protobuf-java
+
+
+
   org.apache.hadoop
   hadoop-ozone-interface-client
 
@@ -63,4 +68,29 @@
 
 
   
+  
+
+  
+org.xolstice.maven.plugins
+protobuf-maven-plugin
+${protobuf-maven-plugin.version}
+true
+
+  
+compile-protoc
+
+  compile
+  test-compile
+
+
+  ${basedir}/src/main/proto/
+  
+
com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+  
+
+  
+
+  
+
+  
 
\ No newline at end of file
diff --git 
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
index 44a0741..919d972 100644
--- 
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
+++ 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.om.codec;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo;
+import 
org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedPrefixInfo;
 
 import org.apache.hadoop.hdds.utils.db.Codec;
 
@@ -44,7 +44,8 @@ public class OmPrefixInfoCodec implements Codec 
{
 .checkNotNull(rawData,
 "Null byte array can't converted to real object.");
 try {
-  return OmPrefixInfo.getFromProtobuf(PrefixInfo.parseFrom(rawData));
+  return OmPrefixInfo.getFromProtobuf(
+  PersistedPrefixInfo.parseFrom(rawData));
 } catch (InvalidProtocolBufferException e) {
   throw new IllegalArgumentException(
   "Can't encode the the raw data from the byte array", e);
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
similarity index 92%
rename from 
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
rename to 
hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
index 80ca54d..a1ad55a 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
+++ 
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.om.helpers;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneAcl;
-import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo;
+import 
org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedPrefixInfo;
 
 import java.util.BitSet;
 import java.util.HashMap;
@@ -150,11 +150,12 @@ public final class OmPrefixInfo extends WithObjectID {
   /**
* Creates PrefixInfo protobuf from OmPrefixInfo.
*/
-  public PrefixInfo getProtobuf() {
-PrefixInfo.Builder pib =  PrefixInfo.newBuilder().setName(name)
+  public PersistedPrefixInfo getProtobuf() {
+PersistedPrefi

[hadoop-ozone] branch master updated: HDDS-4156. add hierarchical layout to Chinese doc (#1368)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new 19cb481  HDDS-4156. add hierarchical layout to Chinese doc (#1368)
19cb481 is described below

commit 19cb481896f091ec5c02fe4010a5ab428302f38f
Author: Huang-Mu Zheng 
AuthorDate: Mon Oct 5 21:30:24 2020 +0800

HDDS-4156. add hierarchical layout to Chinese doc (#1368)
---
 hadoop-hdds/docs/content/concept/Datanodes.zh.md   | 3 +++
 hadoop-hdds/docs/content/concept/Overview.zh.md| 5 +
 hadoop-hdds/docs/content/concept/OzoneManager.zh.md| 3 +++
 hadoop-hdds/docs/content/concept/StorageContainerManager.zh.md | 3 +++
 hadoop-hdds/docs/content/concept/_index.zh.md  | 2 +-
 hadoop-hdds/docs/content/interface/CSI.zh.md   | 3 +++
 hadoop-hdds/docs/content/interface/JavaApi.zh.md   | 3 +++
 hadoop-hdds/docs/content/interface/O3fs.zh.md  | 3 +++
 hadoop-hdds/docs/content/interface/S3.zh.md| 3 +++
 hadoop-hdds/docs/content/security/SecureOzone.zh.md| 3 +++
 hadoop-hdds/docs/content/security/SecuringS3.zh.md | 3 +++
 hadoop-hdds/docs/content/security/SecuringTDE.zh.md| 3 +++
 hadoop-hdds/docs/content/security/SecurityAcls.zh.md   | 3 +++
 hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md | 3 +++
 14 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/docs/content/concept/Datanodes.zh.md 
b/hadoop-hdds/docs/content/concept/Datanodes.zh.md
index fa992dc..8f129df 100644
--- a/hadoop-hdds/docs/content/concept/Datanodes.zh.md
+++ b/hadoop-hdds/docs/content/concept/Datanodes.zh.md
@@ -2,6 +2,9 @@
 title: "数据节点"
 date: "2017-09-14"
 weight: 4
+menu: 
+  main:
+ parent: 概念
 summary: Ozone 支持 Amazon S3 协议,你可以原封不动地在 Ozone 上使用基于 S3 客户端和 S3 SDK 的应用。
 ---
 

[hadoop-ozone] branch HDDS-1880-Decom updated: HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails (#1465)

2020-10-05 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/HDDS-1880-Decom by this push:
 new f43a370  HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails 
(#1465)
f43a370 is described below

commit f43a370169f2d8cc2b8635ac9e026278157b16db
Author: Stephen O'Donnell 
AuthorDate: Mon Oct 5 14:42:41 2020 +0100

HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails (#1465)
---
 .../hdds/scm/node/DatanodeAdminMonitorImpl.java| 105 
 .../hdds/scm/node/DatanodeAdminNodeDetails.java| 137 -
 .../hdds/scm/node/TestDatanodeAdminMonitor.java|  43 +++
 .../scm/node/TestDatanodeAdminNodeDetails.java |  81 
 4 files changed, 67 insertions(+), 299 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index f9d1a32..0bbd13d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -64,9 +64,9 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
   private EventPublisher eventQueue;
   private NodeManager nodeManager;
   private ReplicationManager replicationManager;
-  private Queue pendingNodes = new ArrayDeque();
-  private Queue cancelledNodes = new ArrayDeque();
-  private Set trackedNodes = new HashSet<>();
+  private Queue pendingNodes = new ArrayDeque();
+  private Queue cancelledNodes = new ArrayDeque();
+  private Set trackedNodes = new HashSet<>();
 
   private static final Logger LOG =
   LoggerFactory.getLogger(DatanodeAdminMonitorImpl.class);
@@ -93,10 +93,8 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
*/
   @Override
   public synchronized void startMonitoring(DatanodeDetails dn, int endInHours) 
{
-DatanodeAdminNodeDetails nodeDetails =
-new DatanodeAdminNodeDetails(dn, endInHours);
-cancelledNodes.remove(nodeDetails);
-pendingNodes.add(nodeDetails);
+cancelledNodes.remove(dn);
+pendingNodes.add(dn);
   }
 
   /**
@@ -108,9 +106,8 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
*/
   @Override
   public synchronized void stopMonitoring(DatanodeDetails dn) {
-DatanodeAdminNodeDetails nodeDetails = new DatanodeAdminNodeDetails(dn, 0);
-pendingNodes.remove(nodeDetails);
-cancelledNodes.add(nodeDetails);
+pendingNodes.remove(dn);
+cancelledNodes.add(dn);
   }
 
   /**
@@ -155,20 +152,19 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
   }
 
   @VisibleForTesting
-  public Set getTrackedNodes() {
+  public Set getTrackedNodes() {
 return trackedNodes;
   }
 
   private void processCancelledNodes() {
 while (!cancelledNodes.isEmpty()) {
-  DatanodeAdminNodeDetails dn = cancelledNodes.poll();
+  DatanodeDetails dn = cancelledNodes.poll();
   try {
 stopTrackingNode(dn);
 putNodeBackInService(dn);
-LOG.info("Recommissioned node {}", dn.getDatanodeDetails());
+LOG.info("Recommissioned node {}", dn);
   } catch (NodeNotFoundException e) {
-LOG.warn("Failed processing the cancel admin request for {}",
-dn.getDatanodeDetails(), e);
+LOG.warn("Failed processing the cancel admin request for {}", dn, e);
   }
 }
   }
@@ -180,11 +176,11 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
   }
 
   private void processTransitioningNodes() {
-Iterator iterator = trackedNodes.iterator();
+Iterator iterator = trackedNodes.iterator();
 while (iterator.hasNext()) {
-  DatanodeAdminNodeDetails dn = iterator.next();
+  DatanodeDetails dn = iterator.next();
   try {
-NodeStatus status = getNodeStatus(dn.getDatanodeDetails());
+NodeStatus status = getNodeStatus(dn);
 
 if (!shouldContinueWorkflow(dn, status)) {
   abortWorkflow(dn);
@@ -193,7 +189,7 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
 }
 
 if (status.isMaintenance()) {
-  if (dn.shouldMaintenanceEnd()) {
+  if (status.operationalStateExpired()) {
 completeMaintenance(dn);
 iterator.remove();
 continue;
@@ -205,12 +201,12 @@ public class DatanodeAdminMonitorImpl implements 
DatanodeAdminMonitor {
   // Ensure the DN has received and persisted the current maint
   // state.
   && status.getOperationalState()
-  == dn.getDatanodeDetails().getPersistedOpState()
+  == dn.getPersistedOpSta

[hadoop-ozone] branch master updated (19cb481 -> b6efb95)

2020-10-05 Thread avijayan
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


from 19cb481  HDDS-4156. add hierarchical layout to Chinese doc (#1368)
 add b6efb95  HDDS-4280. Document notable configurations for Recon. (#1448)

No new revisions were added by this update.

Summary of changes:
 hadoop-hdds/docs/content/feature/Recon.md | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)


-
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org



[hadoop-ozone] branch master updated: HDDS-4298. Use an interface in Ozone client instead of XceiverClientManager (#1460)

2020-10-05 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
 new 0d7d1e2  HDDS-4298. Use an interface in Ozone client instead of 
XceiverClientManager (#1460)
0d7d1e2 is described below

commit 0d7d1e276f865b7066f5805a52b12ffe43161a16
Author: Elek, Márton 
AuthorDate: Mon Oct 5 19:16:19 2020 +0200

HDDS-4298. Use an interface in Ozone client instead of XceiverClientManager 
(#1460)
---
 .../hadoop/hdds/scm/XceiverClientFactory.java  | 38 +++
 .../hadoop/hdds/scm/XceiverClientManager.java  | 40 +++-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  | 54 +++--
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 22 -
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |  2 +-
 .../hadoop/hdds/scm/ByteStringConversion.java  | 18 +++
 .../ozone/container/keyvalue/KeyValueHandler.java  | 14 --
 .../ozone/client/io/BlockOutputStreamEntry.java| 22 -
 .../client/io/BlockOutputStreamEntryPool.java  | 29 ++-
 .../hadoop/ozone/client/io/KeyInputStream.java | 36 +++---
 .../hadoop/ozone/client/io/KeyOutputStream.java| 56 ++
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  6 +++
 .../hadoop/ozone/client/rpc/TestReadRetries.java   | 40 
 13 files changed, 218 insertions(+), 159 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
new file mode 100644
index 000..dc35cd5
--- /dev/null
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+
+/**
+ * Interface to provide XceiverClient when needed.
+ */
+public interface XceiverClientFactory {
+
+  XceiverClientSpi acquireClient(Pipeline pipeline) throws IOException;
+
+  void releaseClient(XceiverClientSpi xceiverClient, boolean invalidateClient);
+
+  XceiverClientSpi acquireClientForReadData(Pipeline pipeline)
+  throws IOException;
+
+  void releaseClientForReadData(XceiverClientSpi xceiverClient, boolean b);
+
+}
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 430e6e2..eaf0503 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -18,41 +18,37 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
+import java.io.Closeable;
+import java.io.IOException;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.thirdparty.c