[hadoop] 01/01: HDDS-1550. MiniOzoneChaosCluster is not shutting down all the threads during shutdown. Contributed by Mukul Kumar Singh.

2019-05-16 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch HDDS-1550
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a8a1727fbdf1c94fdbf4c57b098b0ce013c88fee
Author: Mukul Kumar Singh 
AuthorDate: Fri May 17 09:17:27 2019 +0530

HDDS-1550. MiniOzoneChaosCluster is not shutting down all the threads 
during shutdown. Contributed by Mukul Kumar Singh.
---
 .../ozone/container/common/interfaces/Handler.java|  5 +
 .../ozone/container/common/report/ReportManager.java  | 10 ++
 .../common/transport/server/XceiverServerGrpc.java|  6 ++
 .../ozone/container/keyvalue/KeyValueHandler.java |  5 +
 .../ozone/container/ozoneimpl/OzoneContainer.java |  1 +
 .../apache/hadoop/hdds/scm/node/SCMNodeManager.java   |  1 +
 .../hdds/scm/server/StorageContainerManager.java  |  8 +++-
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java|  2 +-
 .../org/apache/hadoop/ozone/MiniOzoneClusterImpl.java | 19 +++
 9 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index a3bb34b..621dffb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -121,6 +121,11 @@ public abstract class Handler {
   throws IOException;
 
   /**
+   * Stop the Handler.
+   */
+  public abstract void stop();
+
+  /**
* Marks the container for closing. Moves the container to CLOSING state.
*
* @param container container to update
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
index 8097cd6..ef592a6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
@@ -23,10 +23,13 @@ import com.google.protobuf.GeneratedMessage;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 /**
  * ReportManager is responsible for managing all the {@link ReportPublisher}
@@ -34,6 +37,8 @@ import java.util.concurrent.ScheduledExecutorService;
  * which should be used for scheduling the reports.
  */
 public final class ReportManager {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ReportManager.class);
 
   private final StateContext context;
   private final List publishers;
@@ -71,6 +76,11 @@ public final class ReportManager {
*/
   public void shutdown() {
 executorService.shutdown();
+try {
+  executorService.awaitTermination(1, TimeUnit.DAYS);
+} catch (Exception e) {
+  LOG.error("failed to shutdown Report Manager", e);
+}
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 6fe8fd4..3987cee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -57,6 +57,7 @@ import java.net.SocketAddress;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Creates a Grpc server endpoint that acts as the communication layer for
@@ -172,6 +173,11 @@ public final class XceiverServerGrpc extends XceiverServer 
{
   public void stop() {
 if (isStarted) {
   server.shutdown();
+  try {
+server.awaitTermination(1, TimeUnit.DAYS);
+  } catch (Exception e) {
+LOG.error("failed to shutdown XceiverServerGrpc", e);
+  }
   isStarted = false;
 }
   }
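The two hunks above apply the same shutdown discipline: stop accepting new work, then block until in-flight work drains before marking the component stopped. A minimal sketch of that pattern for an executor-backed service, assuming an illustrative reportExecutor field (the shutdownNow() fallback is an extra defensive step in this sketch; the patch itself only awaits termination):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {
  private final ScheduledExecutorService reportExecutor =
      Executors.newSingleThreadScheduledExecutor();

  public void shutdown() {
    // Stop scheduling new tasks.
    reportExecutor.shutdown();
    try {
      // Block until already-submitted tasks finish, mirroring the
      // awaitTermination(1, TimeUnit.DAYS) calls added above.
      if (!reportExecutor.awaitTermination(1, TimeUnit.DAYS)) {
        reportExecutor.shutdownNow();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      reportExecutor.shutdownNow();
    }
  }
}

Leaving such pool threads alive after shutdown() returns is what keeps the MiniOzoneChaosCluster JVM from winding down cleanly, which is the leak this change targets.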
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 531fb02..0355973e 100644
--- 

[hadoop] branch HDDS-1550 created (now a8a1727)

2019-05-16 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a change to branch HDDS-1550
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at a8a1727  HDDS-1550. MiniOzoneChaosCluster is not shutting down all the 
threads during shutdown. Contributed by Mukul Kumar Singh.

This branch includes the following new commits:

 new a8a1727  HDDS-1550. MiniOzoneChaosCluster is not shutting down all the 
threads during shutdown. Contributed by Mukul Kumar Singh.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.






[hadoop] branch trunk updated: HDDS-1527. HDDS Datanode start fails due to datanode.id file read error. Contributed by Siddharth Wagle.

2019-05-16 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c183bd8  HDDS-1527. HDDS Datanode start fails due to datanode.id file 
read error. Contributed by Siddharth Wagle.
c183bd8 is described below

commit c183bd8e2009c41ca9bdde964ec7e428dacc0c03
Author: Siddharth 
AuthorDate: Thu May 16 15:13:10 2019 -0700

HDDS-1527. HDDS Datanode start fails due to datanode.id file read error. 
Contributed by Siddharth Wagle.

This closes #822.
---
 .../container/common/helpers/ContainerUtils.java   | 19 ++--
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  | 54 +-
 2 files changed, 49 insertions(+), 24 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 770435e..ff6dec8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -24,6 +24,7 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 import static 
org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING;
 
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.security.MessageDigest;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -51,6 +53,9 @@ import com.google.common.base.Preconditions;
  */
 public final class ContainerUtils {
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContainerUtils.class);
+
   private ContainerUtils() {
 //never constructed.
   }
@@ -198,7 +203,7 @@ public final class ContainerUtils {
 throw new IOException("Unable to overwrite the datanode ID file.");
   }
 } else {
-  if(!path.getParentFile().exists() &&
+  if (!path.getParentFile().exists() &&
   !path.getParentFile().mkdirs()) {
 throw new IOException("Unable to create datanode ID directories.");
   }
@@ -221,8 +226,16 @@ public final class ContainerUtils {
 try {
   return DatanodeIdYaml.readDatanodeIdFile(path);
 } catch (IOException e) {
-  throw new IOException("Failed to parse DatanodeDetails from "
-  + path.getAbsolutePath(), e);
+  LOG.warn("Error loading DatanodeDetails yaml from " +
+  path.getAbsolutePath(), e);
+  // Try to load as protobuf before giving up
+  try (FileInputStream in = new FileInputStream(path)) {
+return DatanodeDetails.getFromProtoBuf(
+HddsProtos.DatanodeDetailsProto.parseFrom(in));
+  } catch (IOException io) {
+throw new IOException("Failed to parse DatanodeDetails from "
++ path.getAbsolutePath(), io);
+  }
 }
   }
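The hunk above makes the datanode ID loader tolerant of the older on-disk format: it first tries the YAML layout and, if that fails, re-reads the same file as a protobuf-encoded DatanodeDetailsProto before giving up. A generic sketch of that try-preferred-then-legacy idea, with a hypothetical Parser interface standing in for the real YAML and protobuf readers:

import java.io.File;
import java.io.IOException;

public final class FormatFallbackSketch {

  interface Parser<T> {
    T parse(File path) throws IOException;
  }

  static <T> T readWithFallback(File path, Parser<T> preferred, Parser<T> legacy)
      throws IOException {
    try {
      return preferred.parse(path);            // e.g. the YAML reader
    } catch (IOException primaryFailure) {
      try {
        return legacy.parse(path);             // e.g. the protobuf reader
      } catch (IOException legacyFailure) {
        legacyFailure.addSuppressed(primaryFailure);
        throw legacyFailure;
      }
    }
  }
}

The fallback matters on upgrade: a datanode.id written in the protobuf layout (for example by an older release) no longer aborts datanode startup.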
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index e8db976..f3a5d2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -18,24 +18,38 @@
 
 package org.apache.hadoop.ozone;
 
+import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import 

[hadoop] branch trunk updated: YARN-9554. Fixed TimelineEntity DAO serialization handling. Contributed by Prabhu Joseph

2019-05-16 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fab5b80  YARN-9554.  Fixed TimelineEntity DAO serialization handling.  
   Contributed by Prabhu Joseph
fab5b80 is described below

commit fab5b80a36bad90e03f7e5e37ded47d67d6e2e81
Author: Eric Yang 
AuthorDate: Thu May 16 16:35:54 2019 -0400

YARN-9554.  Fixed TimelineEntity DAO serialization handling.
Contributed by Prabhu Joseph
---
 .../webapp/ContextFactory.java | 91 +++---
 .../webapp/TestAHSWebServices.java | 11 ---
 .../timeline/webapp/TestTimelineWebServices.java   | 21 +
 3 files changed, 102 insertions(+), 21 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
index 67668a9..ff52324 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContextFactory.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -18,34 +19,104 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Arrays;
 import java.util.Map;
 import java.lang.reflect.Method;
 import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
 
 /**
  * ContextFactory to reuse JAXBContextImpl for DAO Classes.
  */
 public final class ContextFactory {
 
-  private static JAXBContext jaxbContext;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContextFactory.class);
+
+  private static JAXBContext cacheContext;
+
+  // All the dao classes from TimelineWebService and AHSWebService
+  // added except TimelineEntity and TimelineEntities
+  private static final Class[] CTYPES = {AppInfo.class, AppsInfo.class,
+  AppAttemptInfo.class, AppAttemptsInfo.class, ContainerInfo.class,
+  ContainersInfo.class, RemoteExceptionData.class, TimelineDomain.class,
+  TimelineDomains.class, TimelineEvents.class, TimelinePutResponse.class};
+  private static final Set CLASS_SET =
+  new HashSet<>(Arrays.asList(CTYPES));
+
+  // TimelineEntity has java.util.Set interface which JAXB
+  // can't handle and throws IllegalAnnotationExceptions
+  private static final Class[] IGNORE_TYPES = {TimelineEntity.class,
+  TimelineEntities.class};
+  private static final Set IGNORE_SET =
+  new HashSet<>(Arrays.asList(IGNORE_TYPES));
+
+  private static JAXBException je =
+  new JAXBException("TimelineEntity and TimelineEntities has " +
+  "IllegalAnnotation");
+
+  private static StackTraceElement[] stackTrace = new StackTraceElement[]{
+  new StackTraceElement(ContextFactory.class.getName(),
+  "createContext", "ContextFactory.java", -1)};
 
   private ContextFactory() {
   }
 
+  public static JAXBContext newContext(Class[] classes,
+  Map properties) throws Exception {
+Class spFactory = Class.forName(
+"com.sun.xml.internal.bind.v2.ContextFactory");
+Method m = spFactory.getMethod("createContext", Class[].class, Map.class);
+return (JAXBContext) m.invoke((Object) null, classes, properties);
+  }
+
   // Called from WebComponent.service
   public static JAXBContext 
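The rewritten ContextFactory keeps a single cached JAXBContext over the DAO classes JAXB can bind (CLASS_SET) and treats TimelineEntity/TimelineEntities specially, since their java.util.Set fields trigger IllegalAnnotationExceptions. The createContext body is cut off above; the following is one plausible reading of the idea as a simplified sketch, using plain JAXBContext.newInstance rather than the reflective com.sun.xml.internal.bind.v2.ContextFactory call shown above:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

final class CachedContextSketch {
  // Stand-ins for the DAO classes listed in CTYPES above.
  private static final Class<?>[] BINDABLE_TYPES = {};
  private static final Set<Class<?>> BINDABLE =
      new HashSet<>(Arrays.asList(BINDABLE_TYPES));
  private static JAXBContext cached;

  static synchronized JAXBContext contextFor(Class<?>[] requested)
      throws JAXBException {
    for (Class<?> c : requested) {
      if (!BINDABLE.contains(c)) {
        // Reject types JAXB cannot annotate up front rather than letting
        // JAXBContext creation fail later with an IllegalAnnotationException.
        throw new JAXBException(c.getName() + " is not handled by this context");
      }
    }
    if (cached == null) {
      cached = JAXBContext.newInstance(BINDABLE_TYPES);
    }
    return cached;
  }
}

The point of the cache is that building a JAXBContext is expensive, so the webapp reuses one context for all DAO serialization instead of constructing one per request.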

[hadoop] branch HDFS-13891 updated: HDFS-14447. RBF: Router should support RefreshUserMappingsProtocol. Contributed by Shen Yinjie.

2019-05-16 Thread gifuma
This is an automated email from the ASF dual-hosted git repository.

gifuma pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new 09f39bf  HDFS-14447. RBF: Router should support 
RefreshUserMappingsProtocol. Contributed by Shen Yinjie.
09f39bf is described below

commit 09f39bf62c83974f487d19c8d0bd57bb39b60c4c
Author: Giovanni Matteo Fumarola 
AuthorDate: Thu May 16 11:05:29 2019 -0700

HDFS-14447. RBF: Router should support RefreshUserMappingsProtocol. 
Contributed by Shen Yinjie.
---
 .../server/federation/router/RouterRpcServer.java  |  28 +-
 .../router/TestRefreshUserMappingsWithRouters.java | 386 +
 2 files changed, 413 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d35d1f0..559270f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -139,7 +139,13 @@ import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos;
+import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
+import 
org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -158,7 +164,7 @@ import com.google.protobuf.BlockingService;
  * {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode}.
  */
 public class RouterRpcServer extends AbstractService
-implements ClientProtocol, NamenodeProtocol {
+implements ClientProtocol, NamenodeProtocol, RefreshUserMappingsProtocol {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(RouterRpcServer.class);
@@ -257,6 +263,12 @@ public class RouterRpcServer extends AbstractService
 BlockingService nnPbService = NamenodeProtocolService
 .newReflectiveBlockingService(namenodeProtocolXlator);
 
+RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator 
=
+new RefreshUserMappingsProtocolServerSideTranslatorPB(this);
+BlockingService refreshUserMappingService =
+RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService.
+newReflectiveBlockingService(refreshUserMappingXlator);
+
 InetSocketAddress confRpcAddress = conf.getSocketAddr(
 RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY,
 RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
@@ -283,6 +295,8 @@ public class RouterRpcServer extends AbstractService
 // Add all the RPC protocols that the Router implements
 DFSUtil.addPBProtocol(
 conf, NamenodeProtocolPB.class, nnPbService, this.rpcServer);
+DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class,
+refreshUserMappingService, this.rpcServer);
 
 // Set service-level authorization security policy
 this.serviceAuthEnabled = conf.getBoolean(
@@ -1661,4 +1675,16 @@ public class RouterRpcServer extends AbstractService
 }
 return false;
   }
+
+  @Override
+  public void refreshUserToGroupsMappings() throws IOException {
+LOG.info("Refresh user groups mapping in Router.");
+Groups.getUserToGroupsMappingService().refresh();
+  }
+
+  @Override
+  public void refreshSuperUserGroupsConfiguration() throws IOException {
+LOG.info("Refresh superuser groups configuration in Router.");
+ProxyUsers.refreshSuperUserGroupsConfiguration();
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java
new file mode 100644
index 000..597b8c2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRefreshUserMappingsWithRouters.java
@@ -0,0 +1,386 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed 

[hadoop] 02/02: HDDS-1297. Fix IllegalArgumentException thrown with MiniOzoneCluster Initialization. Contributed by Yiqun Lin.

2019-05-16 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 03ea8ea92e641a3fa2ce0cc1ef38022e0f6d8f20
Author: Márton Elek 
AuthorDate: Wed Apr 17 11:53:36 2019 +0200

HDDS-1297. Fix IllegalArgumentException thrown with MiniOzoneCluster 
Initialization. Contributed by Yiqun Lin.
---
 .../org/apache/hadoop/hdds/scm/HddsServerUtil.java | 36 +++
 .../org/apache/hadoop/hdds/server/ServerUtils.java | 29 ++---
 .../hadoop/hdds/scm/TestHddsServerUtils.java   | 72 ++
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   | 32 --
 4 files changed, 102 insertions(+), 67 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index cddce03..c1997d6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -253,25 +253,15 @@ public final class HddsServerUtil {
 //
 // Here we check that staleNodeInterval is at least five times more than 
the
 // frequency at which the accounting thread is going to run.
-try {
-  sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
-  5, 1000);
-} catch (IllegalArgumentException ex) {
-  LOG.error("Stale Node Interval is cannot be honored due to " +
-  "mis-configured {}. ex:  {}",
-  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
-  throw ex;
-}
+staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
+staleNodeIntervalMs, OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
+heartbeatThreadFrequencyMs, 5, 1000);
 
 // Make sure that stale node value is greater than configured value that
 // datanodes are going to send HBs.
-try {
-  sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
-} catch (IllegalArgumentException ex) {
-  LOG.error("Stale Node Interval MS is cannot be honored due to " +
-  "mis-configured {}. ex:  {}", HDDS_HEARTBEAT_INTERVAL, ex);
-  throw ex;
-}
+staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL,
+staleNodeIntervalMs, HDDS_HEARTBEAT_INTERVAL, heartbeatIntervalMs, 3,
+1000);
 return staleNodeIntervalMs;
   }
 
@@ -290,16 +280,10 @@ public final class HddsServerUtil {
 OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
 TimeUnit.MILLISECONDS);
 
-try {
-  // Make sure that dead nodes Ms is at least twice the time for staleNodes
-  // with a max of 1000 times the staleNodes.
-  sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
-} catch (IllegalArgumentException ex) {
-  LOG.error("Dead Node Interval MS is cannot be honored due to " +
-  "mis-configured {}. ex:  {}", OZONE_SCM_STALENODE_INTERVAL, ex);
-  throw ex;
-}
-return deadNodeIntervalMs;
+// Make sure that dead nodes Ms is at least twice the time for staleNodes
+// with a max of 1000 times the staleNodes.
+return sanitizeUserArgs(OZONE_SCM_DEADNODE_INTERVAL, deadNodeIntervalMs,
+OZONE_SCM_STALENODE_INTERVAL, staleNodeIntervalMs, 2, 1000);
   }
 
   /**
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index a5aacd7..f775ca1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -47,23 +47,34 @@ public final class ServerUtils {
* For example, sanitizeUserArgs(17, 3, 5, 10)
* ensures that 17 is greater/equal than 3 * 5 and less/equal to 3 * 10.
*
+   * @param key   - config key of the value
* @param valueTocheck  - value to check
+   * @param baseKey   - config key of the baseValue
* @param baseValue - the base value that is being used.
* @param minFactor - range min - a 2 here makes us ensure that value
*valueTocheck is at least twice the baseValue.
* @param maxFactor - range max
* @return long
*/
-  public static long sanitizeUserArgs(long valueTocheck, long baseValue,
-  long minFactor, long maxFactor)
-  throws IllegalArgumentException {
-if ((valueTocheck >= (baseValue * minFactor)) &&
-(valueTocheck <= (baseValue * maxFactor))) {
-  return valueTocheck;
+  public static long sanitizeUserArgs(String key, long valueTocheck,
+  String baseKey, long baseValue, long minFactor, long maxFactor) {
+long minLimit = baseValue * minFactor;
+long maxLimit = baseValue * maxFactor;
+if (valueTocheck < 
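The new signature threads both config keys through so the log message can name the mis-configured setting, and, per the commit title, SCM no longer dies with an IllegalArgumentException when MiniOzoneCluster wires up unusual intervals. The method body is cut off above; purely as a hedged illustration (not the committed code), a range check of this shape can warn and clamp instead of throwing:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class SanitizeArgsSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(SanitizeArgsSketch.class);

  static long sanitizeUserArgs(String key, long valueToCheck, String baseKey,
      long baseValue, long minFactor, long maxFactor) {
    long minLimit = baseValue * minFactor;
    long maxLimit = baseValue * maxFactor;
    if (valueToCheck < minLimit) {
      LOG.warn("{} ({}) is below {} * {} = {}; using {} instead.",
          key, valueToCheck, baseKey, minFactor, minLimit, minLimit);
      return minLimit;
    }
    if (valueToCheck > maxLimit) {
      LOG.warn("{} ({}) is above {} * {} = {}; using {} instead.",
          key, valueToCheck, baseKey, maxFactor, maxLimit, maxLimit);
      return maxLimit;
    }
    return valueToCheck;
  }
}

With the factors from the javadoc example above (base 3, factors 5 and 10), a value of 17 passes through unchanged, while 12 would be raised to 15 and 40 lowered to 30.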

[hadoop] 01/02: HDDS-1284. Adjust default values of pipline recovery for more resilient service restart.

2019-05-16 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9248b7d58b4cf1c8415ffd61a57274a472a9d445
Author: Márton Elek 
AuthorDate: Fri Apr 12 14:09:35 2019 +0200

HDDS-1284. Adjust default values of pipline recovery for more resilient 
service restart.
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 4 ++--
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 5cacc87..4a42358 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -260,7 +260,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_STALENODE_INTERVAL =
   "ozone.scm.stale.node.interval";
   public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-  "90s";
+  "5m";
 
   public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
   "ozone.scm.heartbeat.rpc-timeout";
@@ -330,7 +330,7 @@ public final class ScmConfigKeys {
   "ozone.scm.pipeline.destroy.timeout";
 
   public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
-  "300s";
+  "66s";
 
   public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
   "ozone.scm.pipeline.creation.interval";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index c8a566a..a46ddb1 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1052,7 +1052,7 @@
   
   
 ozone.scm.stale.node.interval
-90s
+5m
 OZONE, MANAGEMENT
 
   The interval for stale node flagging. Please
@@ -1291,7 +1291,7 @@
   
   
 ozone.scm.pipeline.destroy.timeout
-300s
+66s
 OZONE, SCM, PIPELINE
 
   Once a pipeline is closed, SCM should wait for the above configured time
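Only the shipped defaults move here; the values are still read back through the time-duration config API, so operators can shorten or lengthen them per cluster. A hedged snippet showing the usual read pattern, mirroring the getTimeDuration calls visible in the HDDS-1297 diff above (the constants are the ones whose defaults change in this commit):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class StaleNodeIntervalExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Resolves to 5m after HDDS-1284 unless overridden in ozone-site.xml.
    long staleNodeIntervalMs = conf.getTimeDuration(
        ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL,
        ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);
    System.out.println("stale node interval (ms): " + staleNodeIntervalMs);
  }
}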





[hadoop] branch trunk updated (b067f8a -> 03ea8ea)

2019-05-16 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b067f8a  HADOOP-16050: s3a SSL connections should use OpenSSL
 new 9248b7d  HDDS-1284. Adjust default values of pipline recovery for more 
resilient service restart.
 new 03ea8ea  HDDS-1297. Fix IllegalArgumentException thrown with 
MiniOzoneCluster Initialization. Contributed by Yiqun Lin.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  4 +-
 .../common/src/main/resources/ozone-default.xml|  4 +-
 .../org/apache/hadoop/hdds/scm/HddsServerUtil.java | 36 +++
 .../org/apache/hadoop/hdds/server/ServerUtils.java | 29 ++---
 .../hadoop/hdds/scm/TestHddsServerUtils.java   | 72 ++
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   | 32 --
 6 files changed, 106 insertions(+), 71 deletions(-)





[hadoop] branch trunk updated: HADOOP-16050: s3a SSL connections should use OpenSSL

2019-05-16 Thread mackrorysd
This is an automated email from the ASF dual-hosted git repository.

mackrorysd pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b067f8a  HADOOP-16050: s3a SSL connections should use OpenSSL
b067f8a is described below

commit b067f8acaa79b1230336900a5c62ba465b2adb28
Author: Sahil Takiar 
AuthorDate: Wed Apr 10 17:27:20 2019 -0700

HADOOP-16050: s3a SSL connections should use OpenSSL

(cherry picked from commit aebf229c175dfa19fff3b31e9e67596f6c6124fa)
---
 hadoop-common-project/hadoop-common/pom.xml| 10 +++
 .../hadoop/security/ssl/OpenSSLSocketFactory.java  | 62 ++-
 .../security/ssl/TestOpenSSLSocketFactory.java | 53 
 hadoop-tools/hadoop-aws/pom.xml|  5 ++
 .../java/org/apache/hadoop/fs/s3a/Constants.java   |  6 ++
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java| 38 ++--
 .../java/org/apache/hadoop/fs/s3a/ITestS3ASSL.java | 72 ++
 hadoop-tools/hadoop-azure/pom.xml  |  2 +-
 .../hadoop/fs/azurebfs/AbfsConfiguration.java  |  4 +-
 .../constants/FileSystemConfigurations.java|  6 +-
 .../hadoop/fs/azurebfs/services/AbfsClient.java|  8 +--
 .../fs/azurebfs/services/AbfsHttpOperation.java|  4 +-
 .../TestAbfsConfigurationFieldsValidation.java | 16 ++---
 .../fs/azurebfs/services/TestAbfsClient.java   |  6 +-
 14 files changed, 235 insertions(+), 57 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 19044a5..10417eb 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -334,6 +334,16 @@
   dnsjava
   compile
 
+
+  org.wildfly.openssl
+  wildfly-openssl
+  provided
+
+
+  org.assertj
+  assertj-core
+  test
+
   
 
   
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/OpenSSLSocketFactory.java
similarity index 82%
rename from hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/OpenSSLSocketFactory.java
index 01dca4c..99fc195 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/SSLSocketFactoryEx.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/OpenSSLSocketFactory.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.fs.azurebfs.utils;
+package org.apache.hadoop.security.ssl;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -42,11 +42,11 @@ import org.wildfly.openssl.SSL;
  * performance.
  *
  */
-public final class SSLSocketFactoryEx extends SSLSocketFactory {
+public final class OpenSSLSocketFactory extends SSLSocketFactory {
 
   /**
* Default indicates Ordered, preferred OpenSSL, if failed to load then fall
-   * back to Default_JSSE
+   * back to Default_JSSE.
*/
   public enum SSLChannelMode {
 OpenSSL,
@@ -54,9 +54,9 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
 Default_JSSE
   }
 
-  private static SSLSocketFactoryEx instance = null;
+  private static OpenSSLSocketFactory instance = null;
   private static final Logger LOG = LoggerFactory.getLogger(
-  SSLSocketFactoryEx.class);
+  OpenSSLSocketFactory.class);
   private String providerName;
   private SSLContext ctx;
   private String[] ciphers;
@@ -71,7 +71,7 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
   public static synchronized void initializeDefaultFactory(
   SSLChannelMode preferredMode) throws IOException {
 if (instance == null) {
-  instance = new SSLSocketFactoryEx(preferredMode);
+  instance = new OpenSSLSocketFactory(preferredMode);
 }
   }
 
@@ -84,7 +84,7 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
* @return instance of the SSLSocketFactory, instance must be initialized by
* initializeDefaultFactory.
*/
-  public static SSLSocketFactoryEx getDefaultFactory() {
+  public static OpenSSLSocketFactory getDefaultFactory() {
 return instance;
   }
 
@@ -92,7 +92,7 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
 OpenSSLProvider.register();
   }
 
-  private SSLSocketFactoryEx(SSLChannelMode preferredChannelMode)
+  private OpenSSLSocketFactory(SSLChannelMode preferredChannelMode)
   throws IOException {
 try {
   initializeSSLContext(preferredChannelMode);
@@ -118,33 +118,35 @@ public final class SSLSocketFactoryEx extends 
SSLSocketFactory {
   private void initializeSSLContext(SSLChannelMode 
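Judging only by the members visible in this rename diff, the factory is a process-wide singleton: callers initialize it once with a preferred SSLChannelMode and then fetch the shared instance. A hedged usage sketch (Default is the mode named in the class javadoc above, which prefers OpenSSL and falls back to JSSE if the native provider cannot load):

import java.io.IOException;
import org.apache.hadoop.security.ssl.OpenSSLSocketFactory;
import org.apache.hadoop.security.ssl.OpenSSLSocketFactory.SSLChannelMode;

public class OpenSslFactoryUsageSketch {
  public static void main(String[] args) throws IOException {
    // One-time initialization, typically during client/filesystem setup.
    OpenSSLSocketFactory.initializeDefaultFactory(SSLChannelMode.Default);

    // The shared factory can then be handed to any client that accepts a
    // javax.net.ssl.SSLSocketFactory.
    OpenSSLSocketFactory factory = OpenSSLSocketFactory.getDefaultFactory();
    System.out.println("Using SSL factory: " + factory);
  }
}

Moving the class out of hadoop-azure into hadoop-common is what lets the S3A connector reuse the same wildfly-openssl wiring that ABFS already had.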

[hadoop] branch docker-hadoop-runner-jdk8 updated: HDDS-1518. Use /etc/ozone for configuration inside docker-compose. Contributed by Elek, Marton.

2019-05-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch docker-hadoop-runner-jdk8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/docker-hadoop-runner-jdk8 by 
this push:
 new cc0bd3c  HDDS-1518. Use /etc/ozone for configuration inside 
docker-compose. Contributed by Elek, Marton.
cc0bd3c is described below

commit cc0bd3ccbb780ef2d9de2212a81e8a755d5767f1
Author: Anu Engineer 
AuthorDate: Thu May 16 15:02:43 2019 +0200

HDDS-1518. Use /etc/ozone for configuration inside docker-compose.
Contributed by Elek, Marton.

(cherry picked from commit ac814f15b15a0f70cab96fd7262a31477af6e2a8)
---
 Dockerfile   | 4 +++-
 scripts/envtoconf.py | 4 +++-
 scripts/starter.sh   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index d06cb82..21299df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -36,7 +36,9 @@ RUN chown hadoop /opt
 ADD scripts /opt/
 ADD scripts/krb5.conf /etc/
 RUN yum install -y krb5-workstation
-
+RUN mkdir -p /etc/hadoop && mkdir -p /var/log/hadoop && chmod 1777 /etc/hadoop 
&& chmod 1777 /var/log/hadoop
+ENV HADOOP_LOG_DIR=/var/log/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop
 WORKDIR /opt/hadoop
 
 VOLUME /data
diff --git a/scripts/envtoconf.py b/scripts/envtoconf.py
index ad2e176..0e2c368 100755
--- a/scripts/envtoconf.py
+++ b/scripts/envtoconf.py
@@ -36,7 +36,7 @@ class Simple(object):
 
 self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", 
"cfg", 'conf']
 self.output_dir = self.args.destination
-
+self.excluded_envs = ['HADOOP_CONF_DIR']
 self.configurables = {}
 
   def destination_file_path(self, name, extension):
@@ -51,6 +51,8 @@ class Simple(object):
   def process_envs(self):
 """Process environment variables"""
 for key in os.environ.keys():
+  if key in self.excluded_envs:
+  continue
   pattern = re.compile("[_\\.]")
   parts = pattern.split(key)
   extension = None
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 7688ce7..1328607 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -99,7 +99,7 @@ fi
 #To avoid docker volume permission problems
 sudo chmod o+rwx /data
 
-"$DIR"/envtoconf.py --destination /opt/hadoop/etc/hadoop
+"$DIR"/envtoconf.py --destination "${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
 
 if [ -n "$ENSURE_NAMENODE_DIR" ]; then
   CLUSTERID_OPTS=""





[hadoop] branch docker-hadoop-runner-jdk11 updated: HDDS-1518. Use /etc/ozone for configuration inside docker-compose. Contributed by Elek, Marton.

2019-05-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch docker-hadoop-runner-jdk11
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/docker-hadoop-runner-jdk11 by 
this push:
 new 566e71d  HDDS-1518. Use /etc/ozone for configuration inside 
docker-compose. Contributed by Elek, Marton.
566e71d is described below

commit 566e71d1dbda2ff3929e5b1cf92dfcb32288b66e
Author: Anu Engineer 
AuthorDate: Thu May 16 15:02:43 2019 +0200

HDDS-1518. Use /etc/ozone for configuration inside docker-compose.
Contributed by Elek, Marton.

(cherry picked from commit ac814f15b15a0f70cab96fd7262a31477af6e2a8)
---
 Dockerfile   | 4 +++-
 scripts/envtoconf.py | 4 +++-
 scripts/starter.sh   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index f50bc7b..20f5d31 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -36,7 +36,9 @@ RUN chown hadoop /opt
 ADD scripts /opt/
 ADD scripts/krb5.conf /etc/
 RUN yum install -y krb5-workstation
-
+RUN mkdir -p /etc/hadoop && mkdir -p /var/log/hadoop && chmod 1777 /etc/hadoop 
&& chmod 1777 /var/log/hadoop
+ENV HADOOP_LOG_DIR=/var/log/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop
 WORKDIR /opt/hadoop
 
 VOLUME /data
diff --git a/scripts/envtoconf.py b/scripts/envtoconf.py
index ad2e176..0e2c368 100755
--- a/scripts/envtoconf.py
+++ b/scripts/envtoconf.py
@@ -36,7 +36,7 @@ class Simple(object):
 
 self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", 
"cfg", 'conf']
 self.output_dir = self.args.destination
-
+self.excluded_envs = ['HADOOP_CONF_DIR']
 self.configurables = {}
 
   def destination_file_path(self, name, extension):
@@ -51,6 +51,8 @@ class Simple(object):
   def process_envs(self):
 """Process environment variables"""
 for key in os.environ.keys():
+  if key in self.excluded_envs:
+  continue
   pattern = re.compile("[_\\.]")
   parts = pattern.split(key)
   extension = None
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 7688ce7..1328607 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -99,7 +99,7 @@ fi
 #To avoid docker volume permission problems
 sudo chmod o+rwx /data
 
-"$DIR"/envtoconf.py --destination /opt/hadoop/etc/hadoop
+"$DIR"/envtoconf.py --destination "${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
 
 if [ -n "$ENSURE_NAMENODE_DIR" ]; then
   CLUSTERID_OPTS=""





[hadoop] branch docker-hadoop-runner-latest updated: HDDS-1518. Use /etc/ozone for configuration inside docker-compose. Contributed by Elek, Marton.

2019-05-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch docker-hadoop-runner-latest
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/docker-hadoop-runner-latest by 
this push:
 new 527b43b  HDDS-1518. Use /etc/ozone for configuration inside 
docker-compose. Contributed by Elek, Marton.
527b43b is described below

commit 527b43b6041ea2d538054dab4bafd33928f05956
Author: Anu Engineer 
AuthorDate: Thu May 16 15:02:43 2019 +0200

HDDS-1518. Use /etc/ozone for configuration inside docker-compose.
Contributed by Elek, Marton.

(cherry picked from commit ac814f15b15a0f70cab96fd7262a31477af6e2a8)
---
 Dockerfile   | 4 +++-
 scripts/envtoconf.py | 4 +++-
 scripts/starter.sh   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index d06cb82..21299df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -36,7 +36,9 @@ RUN chown hadoop /opt
 ADD scripts /opt/
 ADD scripts/krb5.conf /etc/
 RUN yum install -y krb5-workstation
-
+RUN mkdir -p /etc/hadoop && mkdir -p /var/log/hadoop && chmod 1777 /etc/hadoop 
&& chmod 1777 /var/log/hadoop
+ENV HADOOP_LOG_DIR=/var/log/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop
 WORKDIR /opt/hadoop
 
 VOLUME /data
diff --git a/scripts/envtoconf.py b/scripts/envtoconf.py
index ad2e176..0e2c368 100755
--- a/scripts/envtoconf.py
+++ b/scripts/envtoconf.py
@@ -36,7 +36,7 @@ class Simple(object):
 
 self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", 
"cfg", 'conf']
 self.output_dir = self.args.destination
-
+self.excluded_envs = ['HADOOP_CONF_DIR']
 self.configurables = {}
 
   def destination_file_path(self, name, extension):
@@ -51,6 +51,8 @@ class Simple(object):
   def process_envs(self):
 """Process environment variables"""
 for key in os.environ.keys():
+  if key in self.excluded_envs:
+  continue
   pattern = re.compile("[_\\.]")
   parts = pattern.split(key)
   extension = None
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 7688ce7..1328607 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -99,7 +99,7 @@ fi
 #To avoid docker volume permission problems
 sudo chmod o+rwx /data
 
-"$DIR"/envtoconf.py --destination /opt/hadoop/etc/hadoop
+"$DIR"/envtoconf.py --destination "${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
 
 if [ -n "$ENSURE_NAMENODE_DIR" ]; then
   CLUSTERID_OPTS=""





[hadoop] branch trunk updated: HDDS-1522. Provide intellij runConfiguration for Ozone components. Contributed by Elek, Marton.

2019-05-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7de735  HDDS-1522. Provide intellij runConfiguration for Ozone 
components.  Contributed by Elek, Marton.
b7de735 is described below

commit b7de7351590c555cf4f810be490f183a2d36f9e3
Author: Elek, Márton 
AuthorDate: Thu May 16 16:38:50 2019 +0200

HDDS-1522. Provide intellij runConfiguration for Ozone components.  
Contributed by Elek, Marton.
---
 .../dev-support/intellij/install-runconfigs.sh | 21 +++
 hadoop-ozone/dev-support/intellij/log4j.properties | 18 ++
 hadoop-ozone/dev-support/intellij/ozone-site.xml   | 66 ++
 .../intellij/runConfigurations/Datanode.xml| 33 +++
 .../intellij/runConfigurations/FreonStandalone.xml | 33 +++
 .../intellij/runConfigurations/OzoneManager.xml| 33 +++
 .../runConfigurations/OzoneManagerInit.xml | 33 +++
 .../intellij/runConfigurations/OzoneShell.xml  | 33 +++
 .../intellij/runConfigurations/Recon.xml   | 33 +++
 .../intellij/runConfigurations/S3Gateway.xml   | 33 +++
 .../runConfigurations/StorageContainerManager.xml  | 33 +++
 .../StorageContainerManagerInit.xml| 33 +++
 12 files changed, 402 insertions(+)

diff --git a/hadoop-ozone/dev-support/intellij/install-runconfigs.sh b/hadoop-ozone/dev-support/intellij/install-runconfigs.sh
new file mode 100755
index 000..fc877bd
--- /dev/null
+++ b/hadoop-ozone/dev-support/intellij/install-runconfigs.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+SRC_DIR="$SCRIPT_DIR/runConfigurations"
+DEST_DIR="$SCRIPT_DIR/../../../.idea/runConfigurations/"
+mkdir -p "$DEST_DIR"
+#shellcheck disable=SC2010
+ls -1 "$SRC_DIR" | grep -v ozone-site.xml | xargs -n1 -I FILE cp 
"$SRC_DIR/FILE" "$DEST_DIR"
diff --git a/hadoop-ozone/dev-support/intellij/log4j.properties b/hadoop-ozone/dev-support/intellij/log4j.properties
new file mode 100644
index 000..bc62e32
--- /dev/null
+++ b/hadoop-ozone/dev-support/intellij/log4j.properties
@@ -0,0 +1,18 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n
+log4j.logger.io.jagertraecing=DEBUG
diff --git a/hadoop-ozone/dev-support/intellij/ozone-site.xml b/hadoop-ozone/dev-support/intellij/ozone-site.xml
new file mode 100644
index 000..d3209c1
--- /dev/null
+++ b/hadoop-ozone/dev-support/intellij/ozone-site.xml
@@ -0,0 +1,66 @@
+
+
+  
+hdds.profiler.endpoint.enabled
+true
+  
+  
+ozone.scm.block.client.address
+localhost
+  
+  
+ozone.enabled
+True
+  
+  
+ozone.scm.datanode.id
+/tmp/datanode.id
+  
+  
+ozone.scm.client.address
+localhost
+  
+  
+ozone.metadata.dirs
+/tmp/metadata
+  
+  
+ozone.scm.names
+localhost
+  
+  
+ozone.om.address
+localhost
+  
+  
+ozone.enabled
+true
+  
+  
+ozone.scm.container.size
+10MB
+  
+  
+ozone.scm.block.size
+1MB
+  
+  
+hdds.datanode.storage.utilization.critical.threshold
+0.99
+  
+
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml 

[hadoop] branch trunk updated: HADOOP-16294: Enable access to input options by DistCp subclasses.

2019-05-16 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c15b3bc  HADOOP-16294: Enable access to input options by DistCp 
subclasses.
c15b3bc is described below

commit c15b3bca86a0f973cc020f3ff2d5767ff1bd
Author: Andrew Olson 
AuthorDate: Thu May 16 16:11:12 2019 +0200

HADOOP-16294: Enable access to input options by DistCp subclasses.

Adding a protected-scope getter for the DistCpOptions, so that a subclass 
does
not need to save its own copy of the inputOptions supplied to its 
constructor,
if it wishes to override the createInputFileListing method with logic 
similar
to the original implementation, i.e. calling CopyListing#buildListing with 
a path and input options.

Author:Andrew Olson
---
 .../src/main/java/org/apache/hadoop/tools/DistCp.java| 9 +
 1 file changed, 9 insertions(+)

diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index 4f79975..a1a2075 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -417,6 +417,15 @@ public class DistCp extends Configured implements Tool {
   }
 
   /**
+   * Returns the context.
+   *
+   * @return the context
+   */
+  protected DistCpContext getContext() {
+return context;
+  }
+
+  /**
* Main function of the DistCp program. Parses the input arguments (via 
OptionsParser),
* and invokes the DistCp::run() method, via the ToolRunner.
* @param argv Command-line arguments sent to DistCp.
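The protected getContext() accessor gives subclasses access to the DistCpContext (and through it the parsed options) without caching their own copy of the constructor arguments. A hedged sketch of the subclass scenario from the commit message; the DistCp constructor and createInputFileListing signatures used here are assumptions about DistCp internals, not taken from this diff:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AuditedDistCp extends DistCp {
  private static final Logger LOG = LoggerFactory.getLogger(AuditedDistCp.class);

  // Constructor signature assumed for illustration.
  public AuditedDistCp(Configuration conf, DistCpOptions options)
      throws Exception {
    super(conf, options);
  }

  @Override
  protected Path createInputFileListing(Job job) throws IOException {
    // Inspect the shared context instead of a privately stashed options copy,
    // then delegate to the stock listing logic.
    LOG.info("Building copy listing for {} source path(s)",
        getContext().getSourcePaths().size());
    return super.createInputFileListing(job);
  }
}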





[hadoop] branch branch-3.2 updated: HADOOP-16294: Enable access to input options by DistCp subclasses.

2019-05-16 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 5560352  HADOOP-16294: Enable access to input options by DistCp 
subclasses.
5560352 is described below

commit 55603529d01dd9180dbbf2e3ad564a1ae3d11f6e
Author: Andrew Olson 
AuthorDate: Thu May 16 16:11:12 2019 +0200

HADOOP-16294: Enable access to input options by DistCp subclasses.

Adding a protected-scope getter for the DistCpOptions, so that a subclass 
does
not need to save its own copy of the inputOptions supplied to its 
constructor,
if it wishes to override the createInputFileListing method with logic 
similar
to the original implementation, i.e. calling CopyListing#buildListing with 
a path and input options.

Author:Andrew Olson
(cherry picked from commit c15b3bca86a0f973cc020f3ff2d5767ff1bd)
---
 .../src/main/java/org/apache/hadoop/tools/DistCp.java| 9 +
 1 file changed, 9 insertions(+)

diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index 4f79975..a1a2075 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -417,6 +417,15 @@ public class DistCp extends Configured implements Tool {
   }
 
   /**
+   * Returns the context.
+   *
+   * @return the context
+   */
+  protected DistCpContext getContext() {
+return context;
+  }
+
+  /**
* Main function of the DistCp program. Parses the input arguments (via 
OptionsParser),
* and invokes the DistCp::run() method, via the ToolRunner.
* @param argv Command-line arguments sent to DistCp.





[hadoop] branch branch-3.1 updated: HADOOP-16307. Intern User Name and Group Name in FileStatus.

2019-05-16 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new cf0d5a0  HADOOP-16307. Intern User Name and Group Name in FileStatus.
cf0d5a0 is described below

commit cf0d5a0e6ee174387669ece51ae206a467d3395c
Author: David Mollitor 
AuthorDate: Thu May 16 16:03:34 2019 +0200

HADOOP-16307. Intern User Name and Group Name in FileStatus.

Author:David Mollitor

(cherry picked from commit 2713dcf6e9ef308ffe6102532c90b27c52d27f7c)
---
 .../src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
index 3b6724a..6841c6e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.protocolPB;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.StringInterner;
 
 import java.io.IOException;
 
@@ -91,8 +92,8 @@ public final class PBHelper {
 mtime = proto.getModificationTime();
 atime = proto.getAccessTime();
 permission = convert(proto.getPermission());
-owner = proto.getOwner();
-group = proto.getGroup();
+owner = StringInterner.weakIntern(proto.getOwner());
+group = StringInterner.weakIntern(proto.getGroup());
 int flags = proto.getFlags();
 FileStatus fileStatus = new FileStatus(length, isdir, blockReplication,
 blocksize, mtime, atime, permission, owner, group, symlink, path,





[hadoop] branch branch-3.2 updated: HADOOP-16307. Intern User Name and Group Name in FileStatus.

2019-05-16 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6eb4854  HADOOP-16307. Intern User Name and Group Name in FileStatus.
6eb4854 is described below

commit 6eb48542f120cc5b06ad9f4a697dbadd38c44451
Author: David Mollitor 
AuthorDate: Thu May 16 16:03:06 2019 +0200

HADOOP-16307. Intern User Name and Group Name in FileStatus.

Author:David Mollitor

(cherry picked from commit 2713dcf6e9ef308ffe6102532c90b27c52d27f7c)
---
 .../src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
index 3b6724a..6841c6e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.protocolPB;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.StringInterner;
 
 import java.io.IOException;
 
@@ -91,8 +92,8 @@ public final class PBHelper {
 mtime = proto.getModificationTime();
 atime = proto.getAccessTime();
 permission = convert(proto.getPermission());
-owner = proto.getOwner();
-group = proto.getGroup();
+owner = StringInterner.weakIntern(proto.getOwner());
+group = StringInterner.weakIntern(proto.getGroup());
 int flags = proto.getFlags();
 FileStatus fileStatus = new FileStatus(length, isdir, blockReplication,
 blocksize, mtime, atime, permission, owner, group, symlink, path,





[hadoop] branch trunk updated: HADOOP-16307. Intern User Name and Group Name in FileStatus.

2019-05-16 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2713dcf  HADOOP-16307. Intern User Name and Group Name in FileStatus.
2713dcf is described below

commit 2713dcf6e9ef308ffe6102532c90b27c52d27f7c
Author: David Mollitor 
AuthorDate: Thu May 16 16:02:07 2019 +0200

HADOOP-16307. Intern User Name and Group Name in FileStatus.

Author:David Mollitor
---
 .../src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
index 3b6724a..6841c6e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.protocolPB;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.StringInterner;
 
 import java.io.IOException;
 
@@ -91,8 +92,8 @@ public final class PBHelper {
 mtime = proto.getModificationTime();
 atime = proto.getAccessTime();
 permission = convert(proto.getPermission());
-owner = proto.getOwner();
-group = proto.getGroup();
+owner = StringInterner.weakIntern(proto.getOwner());
+group = StringInterner.weakIntern(proto.getGroup());
 int flags = proto.getFlags();
 FileStatus fileStatus = new FileStatus(length, isdir, blockReplication,
 blocksize, mtime, atime, permission, owner, group, symlink, path,





[hadoop] branch trunk updated: HDDS-1531. Disable the sync flag by default during chunk writes in Datanode (#820). Contributed by Shashikant Banerjee.

2019-05-16 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e66ecc5  HDDS-1531. Disable the sync flag by default during chunk writes in Datanode (#820). Contributed by Shashikant Banerjee.
e66ecc5 is described below

commit e66ecc564a747b03b77442516045c2ae0d5a60b3
Author: Shashikant Banerjee 
AuthorDate: Thu May 16 18:38:52 2019 +0530

HDDS-1531. Disable the sync flag by default during chunk writes in Datanode (#820). Contributed by Shashikant Banerjee.
---
 .../common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java   | 2 +-
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index adcd090..1463c43 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -56,7 +56,7 @@ public final class OzoneConfigKeys {
 
   public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
   "dfs.container.chunk.write.sync";
-  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = true;
+  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
   /**
* Ratis Port where containers listen to.
*/
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d43020e..c8a566a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -54,7 +54,7 @@
   </property>
   <property>
     <name>dfs.container.chunk.write.sync</name>
-    <value>true</value>
+    <value>false</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Determines whether the chunk writes in the container happen as
       sync I/0 or buffered I/O operation.
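
For completeness, a small sketch of how a datanode-side component would pick the flag up through Hadoop's Configuration API. The constant values mirror OzoneConfigKeys above; the surrounding class is hypothetical and not part of this commit:

import org.apache.hadoop.conf.Configuration;

public class ChunkWriteSyncSketch {
  // Mirror of the key and its new default from OzoneConfigKeys.
  private static final String SYNC_KEY = "dfs.container.chunk.write.sync";
  private static final boolean SYNC_DEFAULT = false;

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // With nothing set in ozone-site.xml, chunk writes are now buffered I/O.
    boolean sync = conf.getBoolean(SYNC_KEY, SYNC_DEFAULT);
    System.out.println("sync chunk writes: " + sync);

    // Operators who still want per-chunk durability can opt back in explicitly.
    conf.setBoolean(SYNC_KEY, true);
    System.out.println("sync chunk writes: " + conf.getBoolean(SYNC_KEY, SYNC_DEFAULT));
  }
}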





[hadoop] branch docker-hadoop-runner updated: HDDS-1518. Use /etc/ozone for configuration inside docker-compose. Contributed by Elek, Marton.

2019-05-16 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch docker-hadoop-runner
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/docker-hadoop-runner by this push:
 new ac814f1  HDDS-1518. Use /etc/ozone for configuration inside docker-compose. Contributed by Elek, Marton.
ac814f1 is described below

commit ac814f15b15a0f70cab96fd7262a31477af6e2a8
Author: Anu Engineer 
AuthorDate: Thu May 16 15:02:43 2019 +0200

HDDS-1518. Use /etc/ozone for configuration inside docker-compose.
Contributed by Elek, Marton.
---
 Dockerfile   | 4 +++-
 scripts/envtoconf.py | 4 +++-
 scripts/starter.sh   | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index d06cb82..21299df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -36,7 +36,9 @@ RUN chown hadoop /opt
 ADD scripts /opt/
 ADD scripts/krb5.conf /etc/
 RUN yum install -y krb5-workstation
-
+RUN mkdir -p /etc/hadoop && mkdir -p /var/log/hadoop && chmod 1777 /etc/hadoop && chmod 1777 /var/log/hadoop
+ENV HADOOP_LOG_DIR=/var/log/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop
 WORKDIR /opt/hadoop
 
 VOLUME /data
diff --git a/scripts/envtoconf.py b/scripts/envtoconf.py
index ad2e176..0e2c368 100755
--- a/scripts/envtoconf.py
+++ b/scripts/envtoconf.py
@@ -36,7 +36,7 @@ class Simple(object):
 
self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf']
 self.output_dir = self.args.destination
-
+self.excluded_envs = ['HADOOP_CONF_DIR']
 self.configurables = {}
 
   def destination_file_path(self, name, extension):
@@ -51,6 +51,8 @@ class Simple(object):
   def process_envs(self):
 """Process environment variables"""
 for key in os.environ.keys():
+  if key in self.excluded_envs:
+  continue
   pattern = re.compile("[_\\.]")
   parts = pattern.split(key)
   extension = None
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 7688ce7..1328607 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -99,7 +99,7 @@ fi
 #To avoid docker volume permission problems
 sudo chmod o+rwx /data
 
-"$DIR"/envtoconf.py --destination /opt/hadoop/etc/hadoop
+"$DIR"/envtoconf.py --destination "${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
 
 if [ -n "$ENSURE_NAMENODE_DIR" ]; then
   CLUSTERID_OPTS=""





[hadoop] 01/01: HDDS-1449. JVM Exit in datanode while committing a key

2019-05-16 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a commit to branch HDDS-1449
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3bc3621c4231bdeef60aef4521acec822d8db5d2
Author: Mukul Kumar Singh 
AuthorDate: Thu May 16 18:03:45 2019 +0530

HDDS-1449. JVM Exit in datanode while committing a key
---
 .../commandhandler/DeleteBlocksCommandHandler.java |  89 -
 .../container/common/utils/ContainerCache.java |  85 +++--
 .../container/keyvalue/KeyValueBlockIterator.java  |  15 +-
 .../container/keyvalue/KeyValueContainer.java  |  12 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  59 +++---
 .../container/keyvalue/helpers/BlockUtils.java |   4 +-
 .../keyvalue/helpers/KeyValueContainerUtil.java|  35 ++--
 .../container/keyvalue/impl/BlockManagerImpl.java  | 204 ++--
 .../background/BlockDeletingService.java   | 115 +--
 .../ozone/container/ozoneimpl/ContainerReader.java |  47 ++---
 .../keyvalue/TestKeyValueBlockIterator.java| 212 +++--
 .../container/keyvalue/TestKeyValueContainer.java  |  44 +++--
 .../keyvalue/TestKeyValueContainerCheck.java   |  87 +
 .../ozone/TestStorageContainerManagerHelper.java   |  13 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java |  70 +++
 .../container/common/TestBlockDeletingService.java | 164 
 .../common/impl/TestContainerPersistence.java  |   4 +-
 .../commandhandler/TestBlockDeletion.java  |  26 +--
 .../TestCloseContainerByPipeline.java  |  10 +-
 19 files changed, 703 insertions(+), 592 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index aa63fb4..3a955c6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache.ReferenceCountedDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -198,52 +198,53 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
 }
 
 int newDeletionBlocks = 0;
-MetadataStore containerDB = BlockUtils.getDB(containerData, conf);
-for (Long blk : delTX.getLocalIDList()) {
-  BatchOperation batch = new BatchOperation();
-  byte[] blkBytes = Longs.toByteArray(blk);
-  byte[] blkInfo = containerDB.get(blkBytes);
-  if (blkInfo != null) {
-byte[] deletingKeyBytes =
-DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
-byte[] deletedKeyBytes =
-DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
-if (containerDB.get(deletingKeyBytes) != null
-|| containerDB.get(deletedKeyBytes) != null) {
-  LOG.debug(String.format(
-  "Ignoring delete for block %d in container %d."
-  + " Entry already added.", blk, containerId));
-  continue;
-}
-// Found the block in container db,
-// use an atomic update to change its state to deleting.
-batch.put(deletingKeyBytes, blkInfo);
-batch.delete(blkBytes);
-try {
-  containerDB.writeBatch(batch);
-  newDeletionBlocks++;
-  LOG.debug("Transited Block {} to DELETING state in container {}",
-  blk, containerId);
-} catch (IOException e) {
-  // if some blocks failed to delete, we fail this TX,
-  // without sending this ACK to SCM, SCM will resend the TX
-  // with a certain number of retries.
-  throw new IOException(
-  "Failed to delete blocks for TXID = " + delTX.getTxID(), e);
+try(ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
+  for (Long blk : delTX.getLocalIDList()) {
+BatchOperation batch = new BatchOperation();
+byte[] blkBytes = Longs.toByteArray(blk);
+byte[] blkInfo = containerDB.getStore().get(blkBytes);
+if (blkInfo != null) {
+  byte[] deletingKeyBytes =
+  DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
+  byte[] deletedKeyBytes =
+  DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
+  if 
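
The hunk above is truncated in this mail, but the visible portion already shows the crux of HDDS-1449: the raw MetadataStore handle is replaced by a ReferenceCountedDB obtained inside try-with-resources, so the container cache can no longer close a RocksDB instance while a delete transaction is still using it. Below is a minimal, hypothetical sketch of that reference-counting pattern; names, locking and error handling are simplified, and this is not the actual ContainerCache.ReferenceCountedDB code:

import java.util.concurrent.atomic.AtomicInteger;

// Illustration only: a reference-counted, auto-closeable handle around a store.
class RefCountedStoreHandle implements AutoCloseable {
  private final AtomicInteger refCount = new AtomicInteger();
  private volatile boolean evicted = false;

  // The cache increments the count before handing the handle to a caller.
  RefCountedStoreHandle acquire() {
    refCount.incrementAndGet();
    return this;
  }

  // try-with-resources releases the reference instead of closing the store.
  @Override
  public synchronized void close() {
    if (refCount.decrementAndGet() == 0 && evicted) {
      closeUnderlyingStore();
    }
  }

  // Called by the cache on eviction; closing is deferred while users remain.
  synchronized void markEvicted() {
    evicted = true;
    if (refCount.get() == 0) {
      closeUnderlyingStore();
    }
  }

  private void closeUnderlyingStore() {
    // Close the real key-value store here; production code must also guard
    // against double close and synchronize with concurrent acquire() calls.
  }
}

Callers then follow the shape visible in the new handler code: try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) { ... } releases the reference at the end of the block, and the underlying store is only really closed once it has been evicted and the last reference is gone.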

[hadoop] branch HDDS-1449 created (now 3bc3621)

2019-05-16 Thread msingh
This is an automated email from the ASF dual-hosted git repository.

msingh pushed a change to branch HDDS-1449
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 3bc3621  HDDS-1449. JVM Exit in datanode while committing a key

This branch includes the following new commits:

 new 3bc3621  HDDS-1449. JVM Exit in datanode while committing a key

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


