[hadoop] 01/04: HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to back up blocks. Original patch contributed by Ewan Higgs. Follow-up work and fixes contributed by Virajith Jalaparti.

2019-12-02 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4f7bd416853143d9becaa51ed0d2158b4613a988
Author: Ewan Higgs 
AuthorDate: Mon Jul 23 13:14:04 2018 +0200

HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to back up blocks.
Original patch contributed by Ewan Higgs. Follow-up work and fixes contributed
by Virajith Jalaparti.
---
 .../protocol/BlockSyncTaskExecutionFeedback.java   |  67 +++
 .../server/protocol/SyncTaskExecutionOutcome.java  |  25 +++
 .../server/protocol/SyncTaskExecutionResult.java   |  46 +
 .../DatanodeProtocolClientSideTranslatorPB.java|   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java|   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java| 208 -
 .../server/blockmanagement/DatanodeManager.java|   4 +-
 .../hdfs/server/datanode/BPServiceActor.java   |   9 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java|   8 +-
 .../hadoop/hdfs/server/protocol/BlockSyncTask.java |  83 
 .../protocol/BulkSyncTaskExecutionFeedback.java|  36 
 .../hdfs/server/protocol/DatanodeProtocol.java |  20 +-
 .../hadoop/hdfs/server/protocol/SyncCommand.java   |  39 
 .../src/main/proto/DatanodeProtocol.proto  |  88 -
 .../blockmanagement/TestDatanodeManager.java   |   2 +-
 .../TestNameNodePrunesMissingStorages.java |   2 +-
 .../server/datanode/InternalDataNodeTestUtils.java |   1 +
 .../hdfs/server/datanode/TestBPOfferService.java   |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java|   1 +
 .../hdfs/server/datanode/TestDataNodeLifeline.java |   9 +-
 .../datanode/TestDatanodeProtocolRetryPolicy.java  |   1 +
 .../hdfs/server/datanode/TestStorageReport.java|   4 +-
 .../fsdataset/impl/TestFsDatasetCache.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java  |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java |   4 +-
 27 files changed, 653 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 000..2e5393e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+  SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+  String syncMountId) {
+this.syncTaskId = syncTaskId;
+this.outcome = outcome;
+this.result = result;
+this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+  UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback failedWithException(
+  UUID syncTaskId, String syncMountId, Exception e) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.EXCEPTION, null, syncMountId);
+  }
+
+  public UUID getSyncTaskId() {
+return syncTaskId;
+  }
+
+  public SyncTaskExecutionOutcome getOutcome() {
+return outcome;
+  }
+
+  public SyncTaskExecutionResult getResult() {
+return result;
+  }
+
+  public String getSyncMountId() {
+return syncMountId;
+  }
+}
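
As an aside (not part of the patch), the two factory methods above are how a datanode-side worker would be expected to report per-task results. A minimal sketch, with the worker class, runBackup() helper and sync-mount id purely hypothetical:

    import java.util.UUID;

    import org.apache.hadoop.hdfs.server.protocol.BlockSyncTaskExecutionFeedback;
    import org.apache.hadoop.hdfs.server.protocol.SyncTaskExecutionResult;

    // Illustrative only: report the outcome of a single BlockSyncTask.
    class FeedbackReportingSketch {
      BlockSyncTaskExecutionFeedback report(UUID syncTaskId, String syncMountId) {
        try {
          SyncTaskExecutionResult result = runBackup(syncTaskId); // hypothetical helper
          return BlockSyncTaskExecutionFeedback.finishedSuccessfully(
              syncTaskId, syncMountId, result);
        } catch (Exception e) {
          return BlockSyncTaskExecutionFeedback.failedWithException(
              syncTaskId, syncMountId, e);
        }
      }

      private SyncTaskExecutionResult runBackup(UUID syncTaskId) throws Exception {
        throw new UnsupportedOperationException("placeholder for the actual backup work");
      }
    }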

[hadoop] 02/04: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2019-12-02 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1abb8f3dab1d5258c233c0bdbfef7980f9ceaa4d
Author: Virajith Jalaparti 
AuthorDate: Wed Aug 1 12:13:31 2018 -0700

HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.
---
 .../org/apache/hadoop/hdfs/BlockInputStream.java   |  52 +
 .../hdfs/server/datanode/BPOfferService.java   |   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java  |  18 +++
 .../SyncServiceSatisfierDatanodeWorker.java|  97 
 .../SyncTaskExecutionFeedbackCollector.java|  54 +
 .../executor/BlockSyncOperationExecutor.java   | 122 +
 .../executor/BlockSyncReaderFactory.java   |  92 
 .../syncservice/executor/BlockSyncTaskRunner.java  |  69 
 .../apache/hadoop/hdfs/TestBlockInputStream.java   |  84 ++
 .../executor/TestBlockSyncOperationExecutor.java   |  94 
 10 files changed, 688 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}
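
As a usage illustration (not taken from the patch), the wrapper lets existing stream utilities consume block data through the standard InputStream contract; obtaining and positioning the BlockReader is elided because its construction is involved, and the copyBlock() helper below is hypothetical:

    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.hdfs.BlockInputStream;
    import org.apache.hadoop.hdfs.BlockReader;
    import org.apache.hadoop.io.IOUtils;

    class BlockCopySketch {
      // blockReader is assumed to already be open on the block to back up.
      static void copyBlock(BlockReader blockReader, OutputStream out)
          throws IOException {
        BlockInputStream in = new BlockInputStream(blockReader);
        // Bulk copy via the long-standing Hadoop helper: 64 KB buffer,
        // and "true" closes both streams when the copy finishes.
        IOUtils.copyBytes(in, out, 64 * 1024, true);
      }
    }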
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 3233e2c..1d63fa9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -797,6 +797,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection<BlockSyncTask> backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierDatanodeWorker().processSyncTasks(backupTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }
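
The SyncCommand type used in the cast above is added elsewhere in this patch (SyncCommand.java in the file listing) but is not shown in this excerpt. A plausible minimal shape, reconstructed from how BPOfferService uses it and therefore only an approximation of the real class:

    import java.util.Collection;

    // Hypothetical reconstruction for illustration; the actual fields and
    // constructor in the patch may differ.
    public class SyncCommand extends DatanodeCommand {
      private final Collection<BlockSyncTask> syncTasks;

      public SyncCommand(int action, Collection<BlockSyncTask> syncTasks) {
        super(action); // expected to be DatanodeProtocol.DNA_BACKUP
        this.syncTasks = syncTasks;
      }

      public Collection<BlockSyncTask> getSyncTasks() {
        return syncTasks;
      }
    }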
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f322119..c2085a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -394,6 +394,7 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
   private BlockRecoveryWorker blockRecoveryWorker;
   private ErasureCodingWorker ecWorker;
+  private SyncServiceSatisfierDatanodeWorker syncServiceSatisfierDatanodeWorker;

[hadoop] 04/04: HDFS-12090. Fixup TestBlockReportLease and TestBPOfferService in branch rebase.

2019-12-02 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit afdb20b1e16fb12bd22042ec1c69a34646f7ab26
Author: Ewan Higgs 
AuthorDate: Mon Dec 2 17:53:37 2019 +0100

HDFS-12090. Fixup TestBlockReportLease and TestBPOfferService in branch 
rebase.
---
 .../hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
index 40408b1..6fbae09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
@@ -92,7 +92,7 @@ public class TestBlockReportLease {
   // Send heartbeat and request full block report lease
   HeartbeatResponse hbResponse = rpcServer.sendHeartbeat(
   dnRegistration, storages, 0, 0, 0, 0, 0, null, true,
-  SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+  SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT, null);
 
   DelayAnswer delayer = new DelayAnswer(BlockManager.LOG);
   doAnswer(delayer).when(spyBlockManager).processReport(
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 26c07d5..822677a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -998,7 +998,8 @@ public class TestBPOfferService {
 Mockito.any(VolumeFailureSummary.class),
 Mockito.anyBoolean(),
 Mockito.any(SlowPeerReports.class),
-Mockito.any(SlowDiskReports.class)))
+Mockito.any(SlowDiskReports.class),
+Mockito.any(BulkSyncTaskExecutionFeedback.class)))
 //heartbeat to old NN instance
 .thenAnswer(new HeartbeatAnswer(0))
 //heartbeat to new NN instance with Register Command





[hadoop] 03/04: HDFS-13794. [PROVIDED Phase 2] Teach BlockAliasMap.Writer remove method. Contributed by Ewan Higgs

2019-12-02 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 30607ec809d5b422c9e26c7b207c63a6a9c8d579
Author: Virajith Jalaparti 
AuthorDate: Tue Feb 12 13:43:06 2019 -0800

HDFS-13794. [PROVIDED Phase 2] Teach BlockAliasMap.Writer remove  method. 
Contributed by Ewan Higgs
---
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  1 -
 .../AliasMapProtocolServerSideTranslatorPB.java  | 16 
 .../InMemoryAliasMapProtocolClientSideTranslatorPB.java  | 15 +++
 .../hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java|  8 ++--
 .../hdfs/server/aliasmap/InMemoryAliasMapProtocol.java   |  9 +
 .../server/aliasmap/InMemoryLevelDBAliasMapServer.java   |  5 +
 .../hdfs/server/common/blockaliasmap/BlockAliasMap.java  | 14 +-
 .../impl/InMemoryLevelDBAliasMapClient.java  | 10 ++
 .../blockaliasmap/impl/LevelDBFileRegionAliasMap.java|  6 ++
 .../blockaliasmap/impl/TextFileRegionAliasMap.java   |  6 ++
 .../hadoop-hdfs/src/main/proto/AliasMapProtocol.proto|  8 
 .../hdfs/server/aliasmap/ITestInMemoryAliasMap.java  | 10 +++---
 .../impl/TestInMemoryLevelDBAliasMapClient.java  | 13 -
 .../impl/TestLevelDbMockAliasMapClient.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/NullBlockAliasMap.java   |  7 ++-
 15 files changed, 117 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1c3a71f..e2f2f38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -98,7 +98,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
   public static final String 
DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT = "0.0.0.0:50200";
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST = 
"dfs.provided.aliasmap.inmemory.rpc.bind-host";
-
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR = 
"dfs.provided.aliasmap.inmemory.leveldb.dir";
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE = 
"dfs.provided.aliasmap.inmemory.batch-size";
   public static final int DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT = 
500;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
index 8d89c40..48da058 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
@@ -57,6 +57,9 @@ public class AliasMapProtocolServerSideTranslatorPB
   private static final WriteResponseProto VOID_WRITE_RESPONSE =
   WriteResponseProto.newBuilder().build();
 
+  private static final RemoveResponseProto VOID_REMOVE_RESPONSE =
+  RemoveResponseProto.newBuilder().build();
+
   @Override
   public WriteResponseProto write(RpcController controller,
   WriteRequestProto request) throws ServiceException {
@@ -72,6 +75,19 @@ public class AliasMapProtocolServerSideTranslatorPB
   }
 
   @Override
+  public RemoveResponseProto remove(RpcController controller,
+  RemoveRequestProto request) throws ServiceException {
+try {
+  Block toRemove =
+  PBHelperClient.convert(request.getKey());
+  aliasMap.remove(toRemove);
+  return VOID_REMOVE_RESPONSE;
+} catch (IOException e) {
+  throw new ServiceException(e);
+}
+  }
+
+  @Override
   public ReadResponseProto read(RpcController controller,
   ReadRequestProto request) throws ServiceException {
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
index d9e984b..dc5bd3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
@@ -214,6 +214,21 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
   }
 
   @Override
+ 
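
The client-side remove() above is cut off in this message; based on the server-side handler earlier in the diff and the usual Hadoop protobuf translator pattern, it plausibly reads roughly as follows. The generated RemoveRequestProto builder, the rpcProxy stub method, and the exception handling are assumptions, not verified against the patch:

    @Override
    public void remove(Block block) throws IOException {
      // Hypothetical reconstruction of the truncated method, mirroring the
      // server-side translator: Block -> proto key, then the generated RPC stub.
      RemoveRequestProto request = RemoveRequestProto.newBuilder()
          .setKey(PBHelperClient.convert(block))
          .build();
      try {
        rpcProxy.remove(null, request);
      } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
      }
    }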

[hadoop] branch trunk updated: Revert "HADOOP-16193. Add extra S3A MPU test to see what happens if a file is created during the MPU. Contributed by Steve Loughran"

2019-08-26 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 23e532d  Revert "HADOOP-16193. Add extra S3A MPU test to see what 
happens if a file is created during the MPU. Contributed by Steve Loughran"
23e532d is described below

commit 23e532d73983a17eae4f3baec56d402ec471f0c3
Author: Ewan Higgs 
AuthorDate: Mon Aug 26 12:37:26 2019 +0200

Revert "HADOOP-16193. Add extra S3A MPU test to see what happens if a file 
is created during the MPU. Contributed by Steve Loughran"

This reverts commit 69ddb36876c0b3819e5409d83b27d18d1da89b22.
---
 .../s3a/ITestS3AContractMultipartUploader.java | 54 --
 1 file changed, 54 deletions(-)

diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
index 0ffe85b..059312a 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
@@ -17,31 +17,20 @@
  */
 package org.apache.hadoop.fs.contract.s3a;
 
-import java.io.ByteArrayInputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.MultipartUploader;
-import org.apache.hadoop.fs.PartHandle;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UploadHandle;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.WriteOperationHelper;
 
-import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
 import static 
org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.DEFAULT_HUGE_PARTITION_SIZE;
-import static org.apache.hadoop.test.LambdaTestUtils.eventually;
 
 /**
  * Test MultipartUploader with S3A.
@@ -170,47 +159,4 @@ public class ITestS3AContractMultipartUploader extends
   public void testMultipartUploadReverseOrder() throws Exception {
 ContractTestUtils.skip("skipped for speed");
   }
-
-  /**
-   * This creates and then deletes a zero-byte file while an upload
-   * is in progress, and verifies that the uploaded file is ultimately
-   * visible.
-   */
-  @Test
-  public void testMultipartOverlapWithTransientFile() throws Throwable {
-// until there's a way to explicitly ask for a multipart uploader from a
-// specific FS, explicitly create one bonded to the raw FS.
-describe("testMultipartOverlapWithTransientFile");
-S3AFileSystem fs = getFileSystem();
-Path path = path("testMultipartOverlapWithTransientFile");
-fs.delete(path, true);
-MultipartUploader mpu = mpu(1);
-UploadHandle upload1 = mpu.initialize(path);
-byte[] dataset = dataset(1024, '0', 10);
-final Map<Integer, PartHandle> handles = new HashMap<>();
-LOG.info("Uploading multipart entry");
-PartHandle value = mpu.putPart(path, new ByteArrayInputStream(dataset), 1,
-upload1,
-dataset.length);
-// upload 1K
-handles.put(1, value);
-// confirm the path is absent
-ContractTestUtils.assertPathDoesNotExist(fs,
-"path being uploaded", path);
-// now create an empty file
-ContractTestUtils.touch(fs, path);
-final FileStatus touchStatus = fs.getFileStatus(path);
-LOG.info("0-byte file has been created: {}", touchStatus);
-fs.delete(path, false);
-// now complete the upload
-mpu.complete(path, handles, upload1);
-
-// wait for the data to arrive
-eventually(timeToBecomeConsistentMillis(), 500, () -> {
-  FileStatus mpuStatus = fs.getFileStatus(path);
-  assertTrue("File is empty in " + mpuStatus, mpuStatus.getLen() > 0);
-  return mpuStatus;
-});
-
-  }
 }





[hadoop] branch trunk updated: HADOOP-16193. Add extra S3A MPU test to see what happens if a file is created during the MPU. Contributed by Steve Loughran

2019-08-22 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 69ddb36  HADOOP-16193. Add extra S3A MPU test to see what happens if a 
file is created during the MPU. Contributed by Steve Loughran
69ddb36 is described below

commit 69ddb36876c0b3819e5409d83b27d18d1da89b22
Author: Ewan Higgs 
AuthorDate: Thu Aug 22 13:56:47 2019 +0200

HADOOP-16193. Add extra S3A MPU test to see what happens if a file is 
created during the MPU. Contributed by Steve Loughran
---
 .../s3a/ITestS3AContractMultipartUploader.java | 54 ++
 1 file changed, 54 insertions(+)

diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
index 059312a..0ffe85b 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java
@@ -17,20 +17,31 @@
  */
 package org.apache.hadoop.fs.contract.s3a;
 
+import java.io.ByteArrayInputStream;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MultipartUploader;
+import org.apache.hadoop.fs.PartHandle;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UploadHandle;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.WriteOperationHelper;
 
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
 import static 
org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.DEFAULT_HUGE_PARTITION_SIZE;
+import static org.apache.hadoop.test.LambdaTestUtils.eventually;
 
 /**
  * Test MultipartUploader with S3A.
@@ -159,4 +170,47 @@ public class ITestS3AContractMultipartUploader extends
   public void testMultipartUploadReverseOrder() throws Exception {
 ContractTestUtils.skip("skipped for speed");
   }
+
+  /**
+   * This creates and then deletes a zero-byte file while an upload
+   * is in progress, and verifies that the uploaded file is ultimately
+   * visible.
+   */
+  @Test
+  public void testMultipartOverlapWithTransientFile() throws Throwable {
+// until there's a way to explicitly ask for a multipart uploader from a
+// specific FS, explicitly create one bonded to the raw FS.
+describe("testMultipartOverlapWithTransientFile");
+S3AFileSystem fs = getFileSystem();
+Path path = path("testMultipartOverlapWithTransientFile");
+fs.delete(path, true);
+MultipartUploader mpu = mpu(1);
+UploadHandle upload1 = mpu.initialize(path);
+byte[] dataset = dataset(1024, '0', 10);
+final Map<Integer, PartHandle> handles = new HashMap<>();
+LOG.info("Uploading multipart entry");
+PartHandle value = mpu.putPart(path, new ByteArrayInputStream(dataset), 1,
+upload1,
+dataset.length);
+// upload 1K
+handles.put(1, value);
+// confirm the path is absent
+ContractTestUtils.assertPathDoesNotExist(fs,
+"path being uploaded", path);
+// now create an empty file
+ContractTestUtils.touch(fs, path);
+final FileStatus touchStatus = fs.getFileStatus(path);
+LOG.info("0-byte file has been created: {}", touchStatus);
+fs.delete(path, false);
+// now complete the upload
+mpu.complete(path, handles, upload1);
+
+// wait for the data to arrive
+eventually(timeToBecomeConsistentMillis(), 500, () -> {
+  FileStatus mpuStatus = fs.getFileStatus(path);
+  assertTrue("File is empty in " + mpuStatus, mpuStatus.getLen() > 0);
+  return mpuStatus;
+});
+
+  }
 }





[hadoop] 01/03: HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to back up blocks. Original patch contributed by Ewan Higgs. Follow-up work and fixes contributed by Virajith Jalaparti.

2019-02-15 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ed8d57b55653881ce6747d0e0cf0ca42c811
Author: Ewan Higgs 
AuthorDate: Mon Jul 23 13:14:04 2018 +0200

HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to back up blocks.
Original patch contributed by Ewan Higgs. Follow-up work and fixes contributed
by Virajith Jalaparti.
---
 .../protocol/BlockSyncTaskExecutionFeedback.java   |  67 +++
 .../server/protocol/SyncTaskExecutionOutcome.java  |  25 +++
 .../server/protocol/SyncTaskExecutionResult.java   |  46 +
 .../DatanodeProtocolClientSideTranslatorPB.java|   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java|   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java| 208 -
 .../server/blockmanagement/DatanodeManager.java|   4 +-
 .../hdfs/server/datanode/BPServiceActor.java   |   9 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java|   8 +-
 .../hadoop/hdfs/server/protocol/BlockSyncTask.java |  83 
 .../protocol/BulkSyncTaskExecutionFeedback.java|  36 
 .../hdfs/server/protocol/DatanodeProtocol.java |  20 +-
 .../hadoop/hdfs/server/protocol/SyncCommand.java   |  39 
 .../src/main/proto/DatanodeProtocol.proto  |  88 -
 .../blockmanagement/TestDatanodeManager.java   |   2 +-
 .../TestNameNodePrunesMissingStorages.java |   2 +-
 .../server/datanode/InternalDataNodeTestUtils.java |   1 +
 .../hdfs/server/datanode/TestBPOfferService.java   |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java|   1 +
 .../hdfs/server/datanode/TestDataNodeLifeline.java |   9 +-
 .../datanode/TestDatanodeProtocolRetryPolicy.java  |   1 +
 .../hdfs/server/datanode/TestFsDatasetCache.java   |   4 +-
 .../hdfs/server/datanode/TestStorageReport.java|   4 +-
 .../server/namenode/NNThroughputBenchmark.java |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java  |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java |   4 +-
 27 files changed, 653 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 000..2e5393e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+  SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+  String syncMountId) {
+this.syncTaskId = syncTaskId;
+this.outcome = outcome;
+this.result = result;
+this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+  UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback failedWithException(
+  UUID syncTaskId, String syncMountId, Exception e) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.EXCEPTION, null, syncMountId);
+  }
+
+  public UUID getSyncTaskId() {
+return syncTaskId;
+  }
+
+  public SyncTaskExecutionOutcome getOutcome() {
+return outcome;
+  }
+
+  public SyncTaskExecutionResult getResult() {
+return result;
+  }
+
+  public String getSyncMountId() {
+return syncMountId;
+  }
+}

[hadoop] 03/03: HDFS-13794. [PROVIDED Phase 2] Teach BlockAliasMap.Writer remove method. Contributed by Ewan Higgs

2019-02-15 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 64da7040db3dee4f31e99316b30144c3b76c5433
Author: Virajith Jalaparti 
AuthorDate: Tue Feb 12 13:43:06 2019 -0800

HDFS-13794. [PROVIDED Phase 2] Teach BlockAliasMap.Writer remove  method. 
Contributed by Ewan Higgs
---
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  1 -
 .../AliasMapProtocolServerSideTranslatorPB.java  | 16 
 .../InMemoryAliasMapProtocolClientSideTranslatorPB.java  | 15 +++
 .../hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java|  8 ++--
 .../hdfs/server/aliasmap/InMemoryAliasMapProtocol.java   |  9 +
 .../server/aliasmap/InMemoryLevelDBAliasMapServer.java   |  5 +
 .../hdfs/server/common/blockaliasmap/BlockAliasMap.java  | 14 +-
 .../impl/InMemoryLevelDBAliasMapClient.java  |  7 ++-
 .../blockaliasmap/impl/LevelDBFileRegionAliasMap.java|  6 ++
 .../blockaliasmap/impl/TextFileRegionAliasMap.java   |  6 ++
 .../hadoop-hdfs/src/main/proto/AliasMapProtocol.proto|  8 
 .../hdfs/server/aliasmap/ITestInMemoryAliasMap.java  | 10 +++---
 .../impl/TestInMemoryLevelDBAliasMapClient.java  | 13 -
 .../impl/TestLevelDbMockAliasMapClient.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/NullBlockAliasMap.java   |  7 ++-
 15 files changed, 113 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fedfc5a..1e3083d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -96,7 +96,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
   public static final String 
DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT = "0.0.0.0:50200";
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST = 
"dfs.provided.aliasmap.inmemory.rpc.bind-host";
-
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR = 
"dfs.provided.aliasmap.inmemory.leveldb.dir";
   public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE = 
"dfs.provided.aliasmap.inmemory.batch-size";
   public static final int DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT = 
500;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
index 8d89c40..48da058 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/AliasMapProtocolServerSideTranslatorPB.java
@@ -57,6 +57,9 @@ public class AliasMapProtocolServerSideTranslatorPB
   private static final WriteResponseProto VOID_WRITE_RESPONSE =
   WriteResponseProto.newBuilder().build();
 
+  private static final RemoveResponseProto VOID_REMOVE_RESPONSE =
+  RemoveResponseProto.newBuilder().build();
+
   @Override
   public WriteResponseProto write(RpcController controller,
   WriteRequestProto request) throws ServiceException {
@@ -72,6 +75,19 @@ public class AliasMapProtocolServerSideTranslatorPB
   }
 
   @Override
+  public RemoveResponseProto remove(RpcController controller,
+  RemoveRequestProto request) throws ServiceException {
+try {
+  Block toRemove =
+  PBHelperClient.convert(request.getKey());
+  aliasMap.remove(toRemove);
+  return VOID_REMOVE_RESPONSE;
+} catch (IOException e) {
+  throw new ServiceException(e);
+}
+  }
+
+  @Override
   public ReadResponseProto read(RpcController controller,
   ReadRequestProto request) throws ServiceException {
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
index d9e984b..dc5bd3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
@@ -214,6 +214,21 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
   }
 
   @Override
+ 

[hadoop] 02/03: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2019-02-15 Thread ehiggs
This is an automated email from the ASF dual-hosted git repository.

ehiggs pushed a commit to branch HDFS-12090
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f482c677d299db81e90e27ecd3685622627a190e
Author: Virajith Jalaparti 
AuthorDate: Wed Aug 1 12:13:31 2018 -0700

HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.
---
 .../org/apache/hadoop/hdfs/BlockInputStream.java   |  52 +
 .../hdfs/server/datanode/BPOfferService.java   |   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java  |  18 +++
 .../SyncServiceSatisfierDatanodeWorker.java|  97 
 .../SyncTaskExecutionFeedbackCollector.java|  54 +
 .../executor/BlockSyncOperationExecutor.java   | 122 +
 .../executor/BlockSyncReaderFactory.java   |  92 
 .../syncservice/executor/BlockSyncTaskRunner.java  |  69 
 .../apache/hadoop/hdfs/TestBlockInputStream.java   |  84 ++
 .../executor/TestBlockSyncOperationExecutor.java   |  94 
 10 files changed, 688 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 3233e2c..1d63fa9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -797,6 +797,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection<BlockSyncTask> backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierDatanodeWorker().processSyncTasks(backupTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e926b6a..11fb2f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -388,6 +388,7 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
   private BlockRecoveryWorker blockRecoveryWorker;
   private ErasureCodingWorker ecWorker;
+  private SyncServiceSatisfierDatanodeWorker syncServiceSatisfierDatanodeWorker;

hadoop git commit: HDFS-13936. Multipart upload to HDFS to support 0 byte upload. Contributed by Ewan Higgs.

2018-10-02 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/trunk a383ac47c -> 6fab6886f


HDFS-13936. Multipart upload to HDFS to support 0 byte upload. Contributed by 
Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fab6886
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fab6886
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fab6886

Branch: refs/heads/trunk
Commit: 6fab6886f652492573734b832ca0375459a82775
Parents: a383ac4
Author: Ewan Higgs 
Authored: Tue Oct 2 14:03:28 2018 +0200
Committer: Ewan Higgs 
Committed: Tue Oct 2 14:05:35 2018 +0200

--
 .../hadoop/fs/FileSystemMultipartUploader.java  | 25 +++-
 .../AbstractContractMultipartUploaderTest.java  | 22 +
 2 files changed, 41 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fab6886/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index 690194d..94c7861 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -100,6 +100,14 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
 return fs.getPathHandle(status);
   }
 
+  private long totalPartsLen(List<Path> partHandles) throws IOException {
+long totalLen = 0;
+for (Path p: partHandles) {
+  totalLen += fs.getFileStatus(p).getLen();
+}
+return totalLen;
+  }
+
   @Override
   @SuppressWarnings("deprecation") // rename w/ OVERWRITE
   public PathHandle complete(Path filePath,
@@ -127,12 +135,17 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
 .collect(Collectors.toList());
 
 Path collectorPath = createCollectorPath(filePath);
-Path filePathInsideCollector = mergePaths(collectorPath,
-new Path(Path.SEPARATOR + filePath.getName()));
-fs.create(filePathInsideCollector).close();
-fs.concat(filePathInsideCollector,
-partHandles.toArray(new Path[handles.size()]));
-fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+boolean emptyFile = totalPartsLen(partHandles) == 0;
+if (emptyFile) {
+  fs.create(filePath).close();
+} else {
+  Path filePathInsideCollector = mergePaths(collectorPath,
+  new Path(Path.SEPARATOR + filePath.getName()));
+  fs.create(filePathInsideCollector).close();
+  fs.concat(filePathInsideCollector,
+  partHandles.toArray(new Path[handles.size()]));
+  fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
+}
 fs.delete(collectorPath, true);
 return getPathHandle(filePath);
   }
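
Restating the new logic in complete() for clarity (a condensed paraphrase, not additional code in the patch): when the combined length of all uploaded parts is zero, the concat path cannot be used, the assumption here being that HDFS rejects empty files as concat sources, so the target is simply created as an empty file:

    // Condensed paraphrase of the patch's complete() flow:
    if (totalPartsLen(partHandles) == 0) {
      fs.create(filePath).close();                     // 0-byte target, no concat needed
    } else {
      fs.create(filePathInsideCollector).close();      // seed file inside the collector dir
      fs.concat(filePathInsideCollector, partHandles.toArray(new Path[0]));
      fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
    }
    fs.delete(collectorPath, true);                    // clean up the staging directory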

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fab6886/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index 85a6861..7cee5a6 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
@@ -165,6 +165,28 @@ public abstract class 
AbstractContractMultipartUploaderTest extends
   }
 
   /**
+   * Assert that a multipart upload is successful when a single empty part is
+   * uploaded.
+   * @throws Exception failure
+   */
+  @Test
+  public void testMultipartUploadEmptyPart() throws Exception {
+FileSystem fs = getFileSystem();
+Path file = path("testMultipartUpload");
+MultipartUploader mpu = MultipartUploaderFactory.get(fs, null);
+UploadHandle uploadHandle = mpu.initialize(file);
+List<Pair<Integer, PartHandle>> partHandles = new ArrayList<>();
+MessageDigest origDigest = DigestUtils.getMd5Digest();
+byte[] payload = new byte[0];
+origDigest.update(payload);
+InputStream is = new ByteArrayInputStream(payload);
+PartHandle partHandle = mpu.putPart(file, is, 0, uploadHandle,
+payload.length);
+  partHandles.add(Pair.of(0, partHandle));
+

hadoop git commit: HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira Ajisaka.

2018-09-20 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3da94a36e -> 429a07e08


HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the 
replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/429a07e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/429a07e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/429a07e0

Branch: refs/heads/trunk
Commit: 429a07e08c8c919b1679c0a80df73d147d95e8a6
Parents: 3da94a3
Author: Ewan Higgs 
Authored: Thu Sep 20 15:13:55 2018 +0200
Committer: Ewan Higgs 
Committed: Thu Sep 20 15:13:55 2018 +0200

--
 .../hadoop-client-minicluster/pom.xml| 17 -
 .../hadoop-client-runtime/pom.xml| 11 +++
 hadoop-common-project/hadoop-common/pom.xml  |  5 +
 .../org/apache/hadoop/security/SecurityUtil.java | 19 +--
 4 files changed, 33 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index ea8d680..70fca8a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -318,6 +318,10 @@
   commons-net
   commons-net
 
+
+  dnsjava
+  dnsjava
+
   
 
 
-
-  dnsjava:dnsjava
-  
-dig*
-jnamed*
-lookup*
-update*
-  
-
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 532fae9..bfa6c15 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -212,6 +212,17 @@
 ccache.txt
   
 
+
+
+  dnsjava:dnsjava
+  
+dig*
+jnamed*
+lookup*
+update*
+  
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 695dcde..1e6da92 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -324,6 +324,11 @@
   mockwebserver
   test
 
+
+  dnsjava
+  dnsjava
+  compile
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 0de334a..9fea535 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -27,6 +27,7 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -52,8 +53,9 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-//this will need to be replaced someday when there is a suitable replacement
-import sun.net.dns.ResolverConfiguration;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.ResolverConfig;
+
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.net.InetAddresses;
@@ -584,10 +586,15 @@ public final class SecurityUtil {
*   

hadoop git commit: HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.

2018-09-20 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/trunk 646874c32 -> 3da94a36e


HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the 
replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3da94a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3da94a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3da94a36

Branch: refs/heads/trunk
Commit: 3da94a36e21a315c09ec7edb7702820fe2b524f9
Parents: 646874c
Author: Ewan Higgs 
Authored: Thu Sep 20 14:53:21 2018 +0200
Committer: Ewan Higgs 
Committed: Thu Sep 20 14:53:21 2018 +0200

--
 .../org/apache/hadoop/security/SecurityUtil.java | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3da94a36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 5f8cb29..0de334a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -54,9 +54,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 //this will need to be replaced someday when there is a suitable replacement
 import sun.net.dns.ResolverConfiguration;
-import sun.net.util.IPAddressUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
 
 /**
  * Security Utils.
@@ -604,14 +604,11 @@ public final class SecurityUtil {
 public InetAddress getByName(String host) throws UnknownHostException {
   InetAddress addr = null;
 
-  if (IPAddressUtil.isIPv4LiteralAddress(host)) {
-// use ipv4 address as-is
-byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
-addr = InetAddress.getByAddress(host, ip);
-  } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
-// use ipv6 address as-is
-byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
-addr = InetAddress.getByAddress(host, ip);
+  if (InetAddresses.isInetAddress(host)) {
+// valid ip address. use it as-is
+addr = InetAddresses.forString(host);
+// set hostname
+addr = InetAddress.getByAddress(host, addr.getAddress());
   } else if (host.endsWith(".")) {
 // a rooted host ends with a dot, ex. "host."
 // rooted hosts never use the search path, so only try an exact lookup
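
For reference, the replacement idiom in isolation: a self-contained sketch using Guava's com.google.common.net.InetAddresses (already a Hadoop dependency), where the demo class name and sample literal are illustrative only:

    import java.net.InetAddress;

    import com.google.common.net.InetAddresses;

    public class LiteralLookupDemo {
      public static void main(String[] args) throws Exception {
        String host = "192.168.1.10";   // an IPv6 literal such as "::1" works the same way
        if (InetAddresses.isInetAddress(host)) {
          // forString() parses the literal without any DNS lookup;
          // getByAddress() then attaches the original string as the hostname.
          InetAddress addr =
              InetAddress.getByAddress(host, InetAddresses.forString(host).getAddress());
          System.out.println(addr);     // 192.168.1.10/192.168.1.10
        }
      }
    }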





[45/50] [abbrv] hadoop git commit: HDDS-263. Add retries in Ozone Client to handle BlockNotCommitted Exception. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-263. Add retries in Ozone Client to handle BlockNotCommitted Exception. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/873ef8ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/873ef8ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/873ef8ae

Branch: refs/heads/HDFS-12090
Commit: 873ef8ae81321325889c9d3a6939163e98fbf5bb
Parents: ff036e4
Author: Mukul Kumar Singh 
Authored: Mon Sep 3 12:26:34 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Mon Sep 3 12:26:34 2018 +0530

--
 .../helpers/BlockNotCommittedException.java | 36 
 .../scm/storage/ContainerProtocolCalls.java |  5 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  8 ++
 .../common/src/main/resources/ozone-default.xml | 16 
 .../hadoop/ozone/client/OzoneClientUtils.java   | 28 ++
 .../ozone/client/io/ChunkGroupOutputStream.java | 89 +++
 .../hadoop/ozone/client/rpc/RpcClient.java  |  5 ++
 .../rpc/TestCloseContainerHandlingByClient.java | 91 +---
 .../web/storage/DistributedStorageHandler.java  |  5 ++
 9 files changed, 254 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/873ef8ae/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
new file mode 100644
index 000..86f5a66
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+
+/**
+ * Exceptions thrown when a block is yet to be committed on the datanode.
+ */
+public class BlockNotCommittedException extends StorageContainerException {
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   */
+  public BlockNotCommittedException(String message) {
+super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED);
+  }
+}
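
The retry wiring itself (OzoneClientUtils and ChunkGroupOutputStream) is not shown in this excerpt. Purely as an illustration of how a caller reacts to this exception, a hand-rolled loop might look like the sketch below; the actual patch plugs a RetryPolicy into the Ozone client rather than looping like this, and the attempt count and sleep are arbitrary:

    import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException;

    class BlockCommitRetrySketch {
      interface Call<T> {
        T run() throws Exception;
      }

      // Hypothetical: retry while the datanode reports BLOCK_NOT_COMMITTED.
      static <T> T withRetries(int maxAttempts, long sleepMillis, Call<T> call)
          throws Exception {
        for (int attempt = 1; ; attempt++) {
          try {
            return call.run();
          } catch (BlockNotCommittedException e) {
            if (attempt >= maxAttempts) {
              throw e;                    // give up after the final attempt
            }
            Thread.sleep(sleepMillis);    // the block may still be committing
          }
        }
      }
    }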

http://git-wip-us.apache.org/repos/asf/hadoop/blob/873ef8ae/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 1f2fafb..1d6a89d 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.scm.container.common.helpers
+.BlockNotCommittedException;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -420,6 +422,9 @@ public final class ContainerProtocolCalls  {
   ) throws StorageContainerException {
 if (response.getResult() == ContainerProtos.Result.SUCCESS) {
   return;
+} else if (response.getResult()
+== ContainerProtos.Result.BLOCK_NOT_COMMITTED) {
+  throw new BlockNotCommittedException(response.getMessage());
 }
 throw new StorageContainerException(
 response.getMessage(), 

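For readers following along, a minimal sketch of how a caller might react to the new exception type, using only the API introduced above (BlockNotCommittedException is raised when the datanode reports BLOCK_NOT_COMMITTED). The retry bound and sleep interval are illustrative assumptions, not the retry policy this patch actually wires up through OzoneClientUtils and ChunkGroupOutputStream:

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException;

public final class BlockCommitRetryExample {

  /**
   * Retries a container call while the datanode keeps answering
   * BLOCK_NOT_COMMITTED, giving the block a chance to commit in between.
   */
  static <T> T callWithRetry(Callable<T> call, int maxAttempts, long sleepMs)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        return call.call();
      } catch (BlockNotCommittedException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up and surface the last failure to the caller
        }
        TimeUnit.MILLISECONDS.sleep(sleepMs); // the block may commit shortly
      }
    }
  }
}
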
[42/50] [abbrv] hadoop git commit: YARN-8535. Fix DistributedShell unit tests. Contributed by Abhishek Modi.

2018-09-04 Thread ehiggs
YARN-8535. Fix DistributedShell unit tests. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eed8415d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eed8415d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eed8415d

Branch: refs/heads/HDFS-12090
Commit: eed8415dc18fa7415ebd105350bd0532b3b1b6bb
Parents: 6edf3d2
Author: bibinchundatt 
Authored: Sun Sep 2 13:35:52 2018 +0530
Committer: bibinchundatt 
Committed: Sun Sep 2 13:35:52 2018 +0530

--
 .../yarn/applications/distributedshell/ApplicationMaster.java| 2 +-
 .../yarn/applications/distributedshell/TestDistributedShell.java | 1 +
 .../test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java | 4 +++-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 76fa38f..ecf07b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -944,7 +944,7 @@ public class ApplicationMaster {
 
 // When the application completes, it should send a finish application
 // signal to the RM
-LOG.info("Application completed. Signalling finish to RM");
+LOG.info("Application completed. Signalling finished to RM");
 
 FinalApplicationStatus appStatus;
 boolean success = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 3a98a22..c7e1cf1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -624,6 +624,7 @@ public class TestDistributedShell {
   String entityfileName) {
 String outputDirPathForEntity =
 basePath + File.separator + entityType + File.separator;
+LOG.info(outputDirPathForEntity);
 File outputDirForEntity = new File(outputDirPathForEntity);
 Assert.assertTrue(outputDirForEntity.isDirectory());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 0395138..fa69f18 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -577,7 +577,9 @@ public class MiniYARNCluster extends CompositeService {
   

[43/50] [abbrv] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 21d2411..151fddf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -19,77 +19,178 @@ package org.apache.hadoop.ozone.om;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
-
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.apache.hadoop.utils.db.DBStore;
+import org.apache.hadoop.utils.db.DBStoreBuilder;
+import org.apache.hadoop.utils.db.Table;
+import org.apache.hadoop.utils.db.TableIterator;
+import org.eclipse.jetty.util.StringUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.ArrayList;
 import java.util.Map;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 
 /**
  * Ozone metadata manager interface.
  */
 public class OmMetadataManagerImpl implements OMMetadataManager {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(OmMetadataManagerImpl.class);
+
+  /**
+   * OM RocksDB Structure .
+   * 
+   * OM DB stores metadata as KV pairs in different column families.
+   * 
+   * OM DB Schema:
+   * |---|
+   * |  Column Family |VALUE |
+   * |---|
+   * | userTable  | user->VolumeList |
+   * |---|
+   * | volumeTable| 

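The schema comment above is cut off by the archive, but the general idea is that every OM entity collapses into a flat key inside one RocksDB column family. A small hedged illustration of that key layout, assuming OM_KEY_PREFIX is the usual "/" separator; the helper below is hypothetical and not part of the patch:

import java.nio.charset.StandardCharsets;

public final class OmKeyLayoutSketch {

  // Assumed value of OzoneConsts.OM_KEY_PREFIX; the real constant lives in
  // org.apache.hadoop.ozone.OzoneConsts.
  private static final String OM_KEY_PREFIX = "/";

  /** Builds the flat DB key "/volume/bucket/key" used by the key table. */
  static byte[] getOzoneKeyBytes(String volume, String bucket, String key) {
    String dbKey = OM_KEY_PREFIX + volume
        + OM_KEY_PREFIX + bucket
        + OM_KEY_PREFIX + key;
    return dbKey.getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    System.out.println(new String(
        getOzoneKeyBytes("vol1", "bucket1", "key1"), StandardCharsets.UTF_8));
    // prints: /vol1/bucket1/key1
  }
}
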
[46/50] [abbrv] hadoop git commit: HDFS-13867. RBF: Add validation for max arguments for Router admin ls, clrQuota, setQuota, rm and nameservice commands. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13867. RBF: Add validation for max arguments for Router admin ls, 
clrQuota, setQuota, rm and nameservice commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/780df903
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/780df903
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/780df903

Branch: refs/heads/HDFS-12090
Commit: 780df9034f265a8e602856b34cc21d9be02f5c48
Parents: 873ef8a
Author: Vinayakumar B 
Authored: Mon Sep 3 14:28:31 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 3 14:28:31 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 45 ++--
 .../federation/router/TestRouterAdminCLI.java   | 35 +++
 2 files changed, 76 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/780df903/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 46be373..ef8d7c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -146,6 +146,43 @@ public class RouterAdmin extends Configured implements 
Tool {
 return getUsage(null);
   }
 
+  /**
+   * Usage: validates the maximum number of arguments for a command.
+   * @param arg List of command line parameters.
+   */
+  private void validateMax(String[] arg) {
+if (arg[0].equals("-rm")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-ls")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-clrQuota")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-safemode")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed only");
+  }
+} else if (arg[0].equals("-nameservice")) {
+  if (arg.length > 3) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=2 arguments allowed");
+  }
+} else if (arg[0].equals("-getDisabledNameservices")) {
+  if (arg.length > 1) {
+throw new IllegalArgumentException("No arguments allowed");
+  }
+}
+  }
+
   @Override
   public int run(String[] argv) throws Exception {
 if (argv.length < 1) {
@@ -222,6 +259,7 @@ public class RouterAdmin extends Configured implements Tool 
{
 Exception debugException = null;
 exitCode = 0;
 try {
+  validateMax(argv);
   if ("-add".equals(cmd)) {
 if (addMount(argv, i)) {
   System.out.println("Successfully added mount point " + argv[i]);
@@ -251,10 +289,6 @@ public class RouterAdmin extends Configured implements 
Tool {
   "Successfully clear quota for mount point " + argv[i]);
 }
   } else if ("-safemode".equals(cmd)) {
-if (argv.length > 2) {
-  throw new IllegalArgumentException(
-  "Too many arguments, Max=1 argument allowed only");
-}
 manageSafeMode(argv[i]);
   } else if ("-nameservice".equals(cmd)) {
 String subcmd = argv[i];
@@ -641,6 +675,9 @@ public class RouterAdmin extends Configured implements Tool 
{
   throw new IllegalArgumentException(
   "Cannot parse ssQuota: " + parameters[i]);
 }
+  } else {
+throw new IllegalArgumentException(
+"Invalid argument : " + parameters[i]);
   }
 
   i++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/780df903/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 0c7321f..fa29cd9 100644
--- 

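As a side note on validateMax(): the per-command limits could equally be kept in a lookup table, which avoids the repeated if/else blocks. A hedged sketch only; the map contents mirror the limits in the patch, while the class and method names are made up:

import java.util.HashMap;
import java.util.Map;

public final class RouterAdminArgLimits {

  // command -> maximum number of arguments allowed after the command itself
  private static final Map<String, Integer> MAX_ARGS = new HashMap<>();
  static {
    MAX_ARGS.put("-rm", 1);
    MAX_ARGS.put("-ls", 1);
    MAX_ARGS.put("-clrQuota", 1);
    MAX_ARGS.put("-safemode", 1);
    MAX_ARGS.put("-nameservice", 2);
    MAX_ARGS.put("-getDisabledNameservices", 0);
  }

  /** Throws if argv carries more arguments than the command allows. */
  static void validateMax(String[] argv) {
    Integer max = MAX_ARGS.get(argv[0]);
    if (max != null && argv.length - 1 > max) {
      throw new IllegalArgumentException(
          "Too many arguments, Max=" + max + " argument(s) allowed");
    }
  }
}
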
[48/50] [abbrv] hadoop git commit: HDDS-336. Print out container location information for a specific ozone key . Contributed by LiXin Ge.

2018-09-04 Thread ehiggs
HDDS-336. Print out container location information for a specific ozone key . 
Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/211034a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/211034a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/211034a6

Branch: refs/heads/HDFS-12090
Commit: 211034a6c22dd4ebe697481ea4d57b5eb932fa08
Parents: 3801436
Author: Márton Elek 
Authored: Mon Sep 3 13:32:55 2018 +0200
Committer: Márton Elek 
Committed: Mon Sep 3 13:32:55 2018 +0200

--
 .../apache/hadoop/ozone/client/OzoneBucket.java |   4 +-
 .../hadoop/ozone/client/OzoneClientUtils.java   |  29 -
 .../hadoop/ozone/client/OzoneKeyDetails.java|  58 ++
 .../hadoop/ozone/client/OzoneKeyLocation.java   |  82 ++
 .../ozone/client/protocol/ClientProtocol.java   |  10 +-
 .../hadoop/ozone/client/rest/RestClient.java|  27 ++---
 .../hadoop/ozone/client/rpc/RpcClient.java  |  22 ++--
 .../ozone/client/rest/headers/Header.java   |   1 +
 .../client/rest/response/KeyInfoDetails.java| 107 +++
 .../ozone/client/rest/response/KeyLocation.java |  89 +++
 .../ozone/web/response/KeyInfoDetails.java  |  80 ++
 .../hadoop/ozone/web/response/KeyLocation.java  |  82 ++
 .../ozone/client/rest/TestOzoneRestClient.java  |  86 +--
 .../ozone/client/rpc/TestOzoneRpcClient.java| 101 +++--
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   6 +-
 .../hadoop/ozone/web/handlers/KeyHandler.java   |  12 +++
 .../ozone/web/interfaces/StorageHandler.java|  12 +++
 .../web/storage/DistributedStorageHandler.java  |  33 --
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  10 +-
 19 files changed, 779 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/211034a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 2f3cff6..97bd682 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -258,10 +258,10 @@ public class OzoneBucket {
   /**
* Returns information about the key.
* @param key Name of the key.
-   * @return OzoneKey Information about the key.
+   * @return OzoneKeyDetails Information about the key.
* @throws IOException
*/
-  public OzoneKey getKey(String key) throws IOException {
+  public OzoneKeyDetails getKey(String key) throws IOException {
 return proxy.getKeyDetails(volumeName, name, key);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/211034a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 5d57753..40e4d83 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
+import org.apache.hadoop.ozone.client.rest.response.*;
+
+import java.util.ArrayList;
+import java.util.List;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -112,4 +112,25 @@ public final class OzoneClientUtils {
 exceptionToPolicyMap);
 return retryPolicy;
   }
+  /**
+   * Returns a KeyInfoDetails object constructed using fields of the input
+   * OzoneKeyDetails object.
+   *
+   * @param key OzoneKeyDetails instance from which KeyInfo object needs to
+   *be created.
+   * @return KeyInfoDetails instance
+   */
+  public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) {
+KeyInfoDetails keyInfo = new KeyInfoDetails();
+keyInfo.setKeyName(key.getName());
+
keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
+

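To show how the reshaped client API is meant to be used, a short sketch based on what the diff exposes: OzoneBucket#getKey(String) now returns OzoneKeyDetails instead of OzoneKey. The getOzoneKeyLocations() accessor and its getContainerID()/getLocalID() getters are assumptions about the new OzoneKeyLocation type, which the truncated hunks above do not show in full:

import java.io.IOException;

import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneKeyLocation;

public final class KeyLocationExample {

  /** Prints the container locations recorded for a key in the given bucket. */
  static void printKeyLocations(OzoneBucket bucket, String keyName)
      throws IOException {
    OzoneKeyDetails details = bucket.getKey(keyName);   // previously OzoneKey
    System.out.println("key: " + details.getName());
    // getOzoneKeyLocations() is assumed here; the accessor name is not
    // visible in the truncated diff above.
    for (OzoneKeyLocation location : details.getOzoneKeyLocations()) {
      System.out.println("  containerID=" + location.getContainerID()
          + " localID=" + location.getLocalID());
    }
  }
}
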
[34/50] [abbrv] hadoop git commit: HDFS-13027. Handle possible NPEs due to deleted blocks in race condition. Contributed by Vinayakumar B.

2018-09-04 Thread ehiggs
HDFS-13027. Handle possible NPEs due to deleted blocks in race condition. 
Contributed by Vinayakumar B.

(cherry picked from commit 65977e5d8124be2bc208af25beed934933f170b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36d69a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36d69a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36d69a7

Branch: refs/heads/HDFS-12090
Commit: c36d69a7b30927eaea16335e06cfcc247accde35
Parents: f2c2a68
Author: Vinayakumar B 
Authored: Wed Aug 29 22:40:13 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Aug 30 22:15:51 2018 +0530

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java| 5 -
 4 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 43f4f47..d160f61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -52,7 +52,7 @@ public abstract class BlockInfo extends Block
   /**
* Block collection ID.
*/
-  private long bcId;
+  private volatile long bcId;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface. */
   private LightWeightGSet.LinkedElement nextLinkedElement;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 17f6f6e..675221a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4171,6 +4171,10 @@ public class BlockManager implements BlockStatsMXBean {
 int numExtraRedundancy = 0;
 while(it.hasNext()) {
   final BlockInfo block = it.next();
+  if (block.isDeleted()) {
+//Orphan block, will be handled eventually, skip
+continue;
+  }
   int expectedReplication = this.getExpectedRedundancyNum(block);
   NumberReplicas num = countNodes(block);
   if (shouldProcessExtraRedundancy(num, expectedReplication)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6ba0e0b..74c9f10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4128,7 +4128,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 while (it.hasNext()) {
   Block b = it.next();
   BlockInfo blockInfo = blockManager.getStoredBlock(b);
-  if (blockInfo == null) {
+  if (blockInfo == null || blockInfo.isDeleted()) {
 LOG.info("Cannot find block info for block " + b);
   } else {
 BlockCollection bc = getBlockCollection(blockInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 

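The patch combines two ideas: make BlockInfo.bcId volatile so a concurrent delete becomes visible to other threads, and have iterating code skip blocks whose isDeleted() is true instead of dereferencing a stale block collection. A standalone sketch of that pattern (illustrative only, not HDFS code):

/**
 * A minimal illustration of the two ideas in the patch: a field read by
 * several threads is declared volatile so a concurrent "delete" (setting it
 * to a sentinel) becomes visible, and every reader re-checks the deleted
 * state before acting on the block.
 */
public final class DeletedBlockGuardSketch {

  static final long INVALID_ID = -1L;

  static final class Block {
    private volatile long collectionId = 42L;   // mirrors BlockInfo.bcId

    boolean isDeleted() {
      return collectionId == INVALID_ID;
    }

    void delete() {
      collectionId = INVALID_ID;                // performed by another thread
    }
  }

  static void process(Iterable<Block> blocks) {
    for (Block block : blocks) {
      if (block == null || block.isDeleted()) {
        continue;                               // orphan block, skip it
      }
      // safe to compute redundancy / look up the owning collection here
    }
  }
}
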
[47/50] [abbrv] hadoop git commit: HDFS-13774. EC: 'hdfs ec -getPolicy' is not retrieving policy details when the special REPLICATION policy set on the directory. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13774. EC: 'hdfs ec -getPolicy' is not retrieving policy details when the 
special REPLICATION policy set on the directory. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3801436e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3801436e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3801436e

Branch: refs/heads/HDFS-12090
Commit: 3801436e49822c724c8f4e393e6e5abbd0d2573a
Parents: 780df90
Author: Vinayakumar B 
Authored: Mon Sep 3 14:37:57 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 3 14:37:57 2018 +0530

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3801436e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 2e8cbbd..67e6b75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -65,11 +65,11 @@ Architecture
 
   2. _The size of a striping cell._ This determines the granularity of 
striped reads and writes, including buffer sizes and encoding work.
 
-Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell 
size*. Currently, six built-in policies are supported: `RS-3-2-1024k`, 
`RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k` and 
`REPLICATION`.
+Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell 
size*. Currently, five built-in policies are supported: `RS-3-2-1024k`, 
`RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k`.
 
-`REPLICATION` is a special policy. It can only be set on directory, to 
force the directory to adopt 3x replication scheme, instead of inheriting its 
ancestor's erasure coding policy. This policy makes it possible to interleave 
3x replication scheme directory with erasure coding directory.
+The default `REPLICATION` scheme is also supported. It can only be set on 
directory, to force the directory to adopt 3x replication scheme, instead of 
inheriting its ancestor's erasure coding policy. This policy makes it possible 
to interleave 3x replication scheme directory with erasure coding directory.
 
-`REPLICATION` policy is always enabled. For other built-in policies, they 
are disabled by default.
+`REPLICATION` is always enabled. Out of all the EC policies, RS(6,3) is 
enabled by default.
 
 Similar to HDFS storage policies, erasure coding policies are set on a 
directory. When a file is created, it inherits the EC policy of its nearest 
ancestor directory.
 
@@ -184,7 +184,7 @@ Below are the details about each command.
   This parameter can be omitted if a 
'dfs.namenode.ec.system.default.policy' configuration is set.
   The EC policy of the path will be set with the default value in 
configuration.
 
-  `-replicate` apply the special `REPLICATION` policy on the directory, 
force the directory to adopt 3x replication scheme.
+  `-replicate` apply the default `REPLICATION` scheme on the directory, 
force the directory to adopt 3x replication scheme.
 
   `-replicate` and `-policy ` are optional arguments. They 
cannot be specified at the same time.
 

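For readers who prefer the programmatic view of the documented behaviour, a short sketch using the DistributedFileSystem erasure-coding calls; the path and policy name are examples only, and a directory without an explicit EC policy may come back as null here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public final class EcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/ec-data");
      dfs.mkdirs(dir);

      // roughly `hdfs ec -setPolicy -path /ec-data -policy RS-6-3-1024k`
      dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");

      // roughly `hdfs ec -getPolicy -path /ec-data`
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println(policy == null
          ? "no EC policy (3x replication)" : policy.getName());
    }
  }
}
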




[20/50] [abbrv] hadoop git commit: HDDS-365. Implement flushStateMachineData for containerStateMachine. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-365. Implement flushStateMachineData for containerStateMachine. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2651e2c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2651e2c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2651e2c4

Branch: refs/heads/HDFS-12090
Commit: 2651e2c43d0825912669a87afc256bad9f1ea6ed
Parents: 7ed458b
Author: Mukul Kumar Singh 
Authored: Wed Aug 29 07:57:57 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed Aug 29 07:58:30 2018 +0530

--
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java |  2 +-
 .../transport/server/XceiverServerGrpc.java   |  2 +-
 .../server/ratis/ContainerStateMachine.java   | 18 ++
 hadoop-project/pom.xml|  2 +-
 4 files changed, 21 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index e2416c2..1622ddb 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -87,7 +87,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 }
 LOG.debug("Connecting to server Port : " + leader.getIpAddress());
 channel = NettyChannelBuilder.forAddress(leader.getIpAddress(), port)
-.usePlaintext(true)
+.usePlaintext()
 .maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
 .build();
 asyncStub = XceiverClientProtocolServiceGrpc.newStub(channel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index f4f3f6f..4dc232d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -77,7 +77,7 @@ public final class XceiverServerGrpc implements 
XceiverServerSpi {
 datanodeDetails.setPort(
 DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
 server = ((NettyServerBuilder) ServerBuilder.forPort(port))
-.maxMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
+.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
 .addService(new GrpcXceiverService(dispatcher))
 .build();
 storageContainer = dispatcher;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index ede87f4..68d6d5b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -59,6 +59,7 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.stream.Collectors;
 
 /** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
  *
@@ -316,6 +317,23 @@ public class ContainerStateMachine extends 
BaseStateMachine {
 return LogEntryProto.newBuilder().setSmLogEntry(log).build();
   }
 
+  /**
+   * Returns the combined future of all the writeChunks till the given log
+   * index. The Raft log worker will wait for the stateMachineData to complete
+   * flush as well.
+   *
+   * @param index log 

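The javadoc above is truncated, but the intent of flushStateMachineData(long index) is to return one future that completes once every pending writeChunk whose log index is at or below the given index has been flushed. A hedged sketch of that plumbing with plain CompletableFuture; the map name and value types are assumptions, not the actual ContainerStateMachine fields:

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public final class FlushUpToIndexSketch {

  // assumed bookkeeping: log index -> future of the async chunk write
  private final Map<Long, CompletableFuture<Void>> writeChunkFutureMap =
      new ConcurrentHashMap<>();

  /** Combined future of every pending write whose log index is <= index. */
  CompletableFuture<Void> flushStateMachineData(long index) {
    List<CompletableFuture<Void>> pending = writeChunkFutureMap.entrySet()
        .stream()
        .filter(e -> e.getKey() <= index)
        .map(Map.Entry::getValue)
        .collect(Collectors.toList());
    return CompletableFuture.allOf(
        pending.toArray(new CompletableFuture[0]));
  }
}
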
[35/50] [abbrv] hadoop git commit: Revert "HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot enabled" status. Contributed by Siyao Meng."

2018-09-04 Thread ehiggs
Revert "HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct 
"snapshot enabled" status. Contributed by Siyao Meng."

This reverts commit 26c2a97c566969f50eb8e8432009724c51152a98.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa6c4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa6c4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa6c4f0

Branch: refs/heads/HDFS-12090
Commit: 8aa6c4f079fd38a3230bc070c2ce837fefbc5301
Parents: c36d69a
Author: Wei-Chiu Chuang 
Authored: Thu Aug 30 11:44:20 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 30 11:44:20 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/web/JsonUtilClient.java |  4 
 .../java/org/apache/hadoop/hdfs/web/TestWebHDFS.java| 12 
 2 files changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa6c4f0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index a685573..9bb1846 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -133,7 +133,6 @@ class JsonUtilClient {
 Boolean aclBit = (Boolean) m.get("aclBit");
 Boolean encBit = (Boolean) m.get("encBit");
 Boolean erasureBit  = (Boolean) m.get("ecBit");
-Boolean snapshotEnabledBit  = (Boolean) m.get("snapshotEnabled");
 EnumSet f =
 EnumSet.noneOf(HdfsFileStatus.Flags.class);
 if (aclBit != null && aclBit) {
@@ -145,9 +144,6 @@ class JsonUtilClient {
 if (erasureBit != null && erasureBit) {
   f.add(HdfsFileStatus.Flags.HAS_EC);
 }
-if (snapshotEnabledBit != null && snapshotEnabledBit) {
-  f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
-}
 
 Map ecPolicyObj = (Map) m.get("ecPolicyObj");
 ErasureCodingPolicy ecPolicy = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa6c4f0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 9152636..cbc428a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -482,9 +482,6 @@ public class TestWebHDFS {
 
   // allow snapshots on /bar using webhdfs
   webHdfs.allowSnapshot(bar);
-  // check if snapshot status is enabled
-  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   webHdfs.createSnapshot(bar, "s1");
   final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
   Assert.assertTrue(webHdfs.exists(s1path));
@@ -494,24 +491,15 @@ public class TestWebHDFS {
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   dfs.deleteSnapshot(bar, "s1");
   dfs.disallowSnapshot(bar);
-  // check if snapshot status is disabled
-  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
 
   // disallow snapshots on /bar using webhdfs
   dfs.allowSnapshot(bar);
-  // check if snapshot status is enabled, again
-  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertEquals(1, snapshottableDirs.length);
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   webHdfs.disallowSnapshot(bar);
-  // check if snapshot status is disabled, again
-  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
   try {




[15/50] [abbrv] hadoop git commit: HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. Contributed by Shweta.

2018-09-04 Thread ehiggs
HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. 
Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33f42efc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33f42efc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33f42efc

Branch: refs/heads/HDFS-12090
Commit: 33f42efc947445b7755da6aad34b5e26b96ad663
Parents: ac515d2
Author: Shweta 
Authored: Tue Aug 28 13:51:04 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 13:56:32 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42efc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 46323dd..cae0fbf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -100,12 +100,12 @@ import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();
@@ -113,7 +113,8 @@ public class TestDistributedFileSystem {
   TestDistributedFileSystem.class);
 
   static {
-GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(LeaseRenewer.LOG, Level.DEBUG);
   }
 
   private boolean dualPortTesting = false;





[11/50] [abbrv] hadoop git commit: HDDS-359. RocksDB Profiles support. Contributed by Anu Engineer.

2018-09-04 Thread ehiggs
HDDS-359. RocksDB Profiles support. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c61824a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c61824a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c61824a1

Branch: refs/heads/HDFS-12090
Commit: c61824a18940ef37dc7201717a3115a78bf942d4
Parents: df21e1b
Author: Márton Elek 
Authored: Tue Aug 28 19:22:30 2018 +0200
Committer: Márton Elek 
Committed: Tue Aug 28 19:33:13 2018 +0200

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   6 +
 .../hadoop/utils/db/DBConfigFromFile.java   | 134 +
 .../org/apache/hadoop/utils/db/DBProfile.java   | 120 +++
 .../apache/hadoop/utils/db/DBStoreBuilder.java  | 201 +++
 .../org/apache/hadoop/utils/db/RDBStore.java|  32 +--
 .../org/apache/hadoop/utils/db/TableConfig.java |  93 +
 .../common/src/main/resources/ozone-default.xml |  10 +
 .../hadoop/utils/db/TestDBConfigFromFile.java   | 116 +++
 .../hadoop/utils/db/TestDBStoreBuilder.java | 174 
 .../apache/hadoop/utils/db/TestRDBStore.java|  17 +-
 .../hadoop/utils/db/TestRDBTableStore.java  |  11 +-
 .../common/src/test/resources/test.db.ini   | 145 +
 hadoop-hdds/pom.xml |   1 +
 13 files changed, 1040 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61824a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index d25af80..8272ed7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds;
 
+import org.apache.hadoop.utils.db.DBProfile;
+
 /**
  * This class contains constants for configuration keys and default values
  * used in hdds.
@@ -58,4 +60,8 @@ public final class HddsConfigKeys {
   public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
   "hdds.datanode.volume.choosing.policy";
 
+  // DB Profiles used by RocksDB instances.
+  public static final String HDDS_DB_PROFILE = "hdds.db.profile";
+  public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.SSD;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61824a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
new file mode 100644
index 000..753a460
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db;
+
+import com.google.common.base.Preconditions;
+import org.eclipse.jetty.util.StringUtil;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.DBOptions;
+import org.rocksdb.Env;
+import org.rocksdb.OptionsUtil;
+import org.rocksdb.RocksDBException;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+/**
+ * A Class that controls the standard config options of RocksDB.
+ * 
+ * Important : Some of the functions in this file are magic functions designed
+ * for the use of OZONE developers only. Due to that this information is
+ * documented in this file only and is *not* intended for end user consumption.
+ * Please do not use this information to tune your production environments.
+ * Please remember the SpiderMan principle; with great 

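DBConfigFromFile's imports (OptionsUtil, DBOptions, ColumnFamilyDescriptor, Env) suggest it delegates to RocksDB's own ini loader. A hedged, standalone sketch of that underlying RocksDB call, bypassing the HDDS wrapper entirely; the file path is an example and the expected input is an options file in the same format as the test.db.ini resource listed above:

import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.OptionsUtil;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class RocksDbIniExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    DBOptions options = new DBOptions();
    List<ColumnFamilyDescriptor> families = new ArrayList<>();

    // Reads DBOptions plus one ColumnFamilyDescriptor per [CFOptions "..."]
    // section from the ini file.
    OptionsUtil.loadOptionsFromFile(
        "/etc/hadoop/om.db.ini", Env.getDefault(), options, families);

    System.out.println("column families in ini: " + families.size());
  }
}
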
[41/50] [abbrv] hadoop git commit: Revert "HDDS-98. Adding Ozone Manager Audit Log."

2018-09-04 Thread ehiggs
Revert "HDDS-98. Adding Ozone Manager Audit Log."

This reverts commit 630b64ec7e963968a5bdcd1d625fc78746950137.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6edf3d2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6edf3d2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6edf3d2e

Branch: refs/heads/HDFS-12090
Commit: 6edf3d2ea3de3629629c21cec3b4435bd71753ba
Parents: 19abaac
Author: Nanda kumar 
Authored: Sun Sep 2 00:18:13 2018 +0530
Committer: Nanda kumar 
Committed: Sun Sep 2 00:18:13 2018 +0530

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 ---
 hadoop-ozone/common/src/main/bin/ozone  |   2 -
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  25 +--
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 +--
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 218 +--
 10 files changed, 18 insertions(+), 466 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6edf3d2e/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 21127f8..a1828a3 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,40 +31,3 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd 
HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-
-#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
-LOG4J2.PROPERTIES_monitorInterval=30
-LOG4J2.PROPERTIES_filter=read,write
-LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.read.marker=READ
-LOG4J2.PROPERTIES_filter.read.onMatch=DENY
-LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.write.marker=WRITE
-LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_appenders=console, rolling
-LOG4J2.PROPERTIES_appender.console.type=Console
-LOG4J2.PROPERTIES_appender.console.name=STDOUT
-LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
-LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{-MM-dd-HH-mm-ss}-%i.log.gz
-LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
-LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
-LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
-LOG4J2.PROPERTIES_loggers=audit
-LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
-LOG4J2.PROPERTIES_logger.audit.name=OMAudit
-LOG4J2.PROPERTIES_logger.audit.level=INFO
-LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
-LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
-LOG4J2.PROPERTIES_rootLogger.level=INFO
-LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
-LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6edf3d2e/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 9645c02..15366fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -184,36 +184,4 @@ public final 

[49/50] [abbrv] hadoop git commit: HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. Original patch contributed by Ewan Higgs. Followup work and fixed contributed by Virajith

2018-09-04 Thread ehiggs
HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. 
Original patch contributed by Ewan Higgs. Followup work and fixed contributed 
by Virajith Jalaparthi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cdd033a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cdd033a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cdd033a

Branch: refs/heads/HDFS-12090
Commit: 8cdd033a06693020c620c8057200e2da5b469ffc
Parents: 211034a
Author: Ewan Higgs 
Authored: Mon Jul 23 13:14:04 2018 +0200
Committer: Ewan Higgs 
Committed: Mon Sep 3 14:40:50 2018 +0200

--
 .../BlockSyncTaskExecutionFeedback.java |  67 ++
 .../protocol/SyncTaskExecutionOutcome.java  |  25 +++
 .../protocol/SyncTaskExecutionResult.java   |  46 
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 208 ++-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/datanode/BPServiceActor.java|   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   8 +-
 .../hdfs/server/protocol/BlockSyncTask.java |  83 
 .../protocol/BulkSyncTaskExecutionFeedback.java |  36 
 .../hdfs/server/protocol/DatanodeProtocol.java  |  20 +-
 .../hdfs/server/protocol/SyncCommand.java   |  39 
 .../src/main/proto/DatanodeProtocol.proto   |  88 +++-
 .../blockmanagement/TestDatanodeManager.java|   2 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../datanode/InternalDataNodeTestUtils.java |   3 +-
 .../server/datanode/TestBPOfferService.java |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   9 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 27 files changed, 658 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdd033a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 000..2e5393e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+  SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+  String syncMountId) {
+this.syncTaskId = syncTaskId;
+this.outcome = outcome;
+this.result = result;
+this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+  UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback 

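A short sketch of how a datanode-side caller might wrap a finished task's result using the factory shown above; SyncTaskExecutionResult is taken as a parameter because its constructor is not visible in this excerpt:

import java.util.UUID;

import org.apache.hadoop.hdfs.server.protocol.BlockSyncTaskExecutionFeedback;
import org.apache.hadoop.hdfs.server.protocol.SyncTaskExecutionResult;

public final class SyncFeedbackExample {

  /** Wraps a finished task's result in the feedback sent back to the NameNode. */
  static BlockSyncTaskExecutionFeedback reportSuccess(
      UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
    // Uses the static factory added in this patch; the failure-side factory
    // (cut off above) would be used on the error path instead.
    return BlockSyncTaskExecutionFeedback.finishedSuccessfully(
        syncTaskId, syncMountId, result);
  }
}
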
[05/50] [abbrv] hadoop git commit: HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot enabled" status. Contributed by Siyao Meng.

2018-09-04 Thread ehiggs
HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot 
enabled" status. Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26c2a97c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26c2a97c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26c2a97c

Branch: refs/heads/HDFS-12090
Commit: 26c2a97c566969f50eb8e8432009724c51152a98
Parents: 602d138
Author: Wei-Chiu Chuang 
Authored: Mon Aug 27 16:02:35 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Aug 27 16:02:35 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/web/JsonUtilClient.java |  4 
 .../java/org/apache/hadoop/hdfs/web/TestWebHDFS.java| 12 
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26c2a97c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 9bb1846..a685573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -133,6 +133,7 @@ class JsonUtilClient {
 Boolean aclBit = (Boolean) m.get("aclBit");
 Boolean encBit = (Boolean) m.get("encBit");
 Boolean erasureBit  = (Boolean) m.get("ecBit");
+Boolean snapshotEnabledBit  = (Boolean) m.get("snapshotEnabled");
 EnumSet f =
 EnumSet.noneOf(HdfsFileStatus.Flags.class);
 if (aclBit != null && aclBit) {
@@ -144,6 +145,9 @@ class JsonUtilClient {
 if (erasureBit != null && erasureBit) {
   f.add(HdfsFileStatus.Flags.HAS_EC);
 }
+if (snapshotEnabledBit != null && snapshotEnabledBit) {
+  f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
+}
 
 Map ecPolicyObj = (Map) m.get("ecPolicyObj");
 ErasureCodingPolicy ecPolicy = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26c2a97c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index cbc428a..9152636 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -482,6 +482,9 @@ public class TestWebHDFS {
 
   // allow snapshots on /bar using webhdfs
   webHdfs.allowSnapshot(bar);
+  // check if snapshot status is enabled
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   webHdfs.createSnapshot(bar, "s1");
   final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
   Assert.assertTrue(webHdfs.exists(s1path));
@@ -491,15 +494,24 @@ public class TestWebHDFS {
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   dfs.deleteSnapshot(bar, "s1");
   dfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
 
   // disallow snapshots on /bar using webhdfs
   dfs.allowSnapshot(bar);
+  // check if snapshot status is enabled, again
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertEquals(1, snapshottableDirs.length);
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   webHdfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled, again
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
   try {


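For quick reference, the behaviour this patch guarantees can be sketched as follows. This is an illustrative check only: the class and method names below are invented for the example, and the dfs/webHdfs instances are assumed to come from a test harness such as the MiniDFSCluster setup used in TestWebHDFS; only allowSnapshot(), getFileStatus() and isSnapshotEnabled() are the calls exercised by the patch.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotFlagParityCheck {
  /** Fails if the RPC and WebHDFS views of the snapshot flag disagree. */
  public static void verify(DistributedFileSystem dfs, FileSystem webHdfs,
      Path dir) throws Exception {
    dfs.allowSnapshot(dir);
    FileStatus viaRpc = dfs.getFileStatus(dir);
    FileStatus viaWebHdfs = webHdfs.getFileStatus(dir);
    if (viaRpc.isSnapshotEnabled() != viaWebHdfs.isSnapshotEnabled()) {
      throw new AssertionError(
          "snapshotEnabled differs between RPC and WebHDFS for " + dir);
    }
  }
}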



[23/50] [abbrv] hadoop git commit: HDDS-380. Remove synchronization from ChunkGroupOutputStream and ChunkOutputStream. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-380. Remove synchronization from ChunkGroupOutputStream and 
ChunkOutputStream. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bd42171
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bd42171
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bd42171

Branch: refs/heads/HDFS-12090
Commit: 0bd4217194ae50ec30e386b200fcfa54c069f042
Parents: 3fa4639
Author: Nanda kumar 
Authored: Wed Aug 29 13:31:19 2018 +0530
Committer: Nanda kumar 
Committed: Wed Aug 29 13:31:19 2018 +0530

--
 .../hadoop/hdds/scm/storage/ChunkOutputStream.java  | 16 
 .../ozone/client/io/ChunkGroupOutputStream.java | 12 ++--
 2 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index f2df3fa..8d311d0 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -99,7 +99,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(int b) throws IOException {
+  public void write(int b) throws IOException {
 checkOpen();
 int rollbackPosition = buffer.position();
 int rollbackLimit = buffer.limit();
@@ -110,7 +110,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(byte[] b, int off, int len)
+  public void write(byte[] b, int off, int len)
   throws IOException {
 if (b == null) {
   throw new NullPointerException();
@@ -137,7 +137,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void flush() throws IOException {
+  public void flush() throws IOException {
 checkOpen();
 if (buffer.position() > 0) {
   int rollbackPosition = buffer.position();
@@ -147,7 +147,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void close() throws IOException {
+  public void close() throws IOException {
 if (xceiverClientManager != null && xceiverClient != null
 && buffer != null) {
   if (buffer.position() > 0) {
@@ -164,7 +164,7 @@ public class ChunkOutputStream extends OutputStream {
 }
   }
 
-  public synchronized void cleanup() {
+  public void cleanup() {
 xceiverClientManager.releaseClient(xceiverClient);
 xceiverClientManager = null;
 xceiverClient = null;
@@ -176,7 +176,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if stream is closed
*/
-  private synchronized void checkOpen() throws IOException {
+  private void checkOpen() throws IOException {
 if (xceiverClient == null) {
   throw new IOException("ChunkOutputStream has been closed.");
 }
@@ -191,7 +191,7 @@ public class ChunkOutputStream extends OutputStream {
* @param rollbackLimit limit to restore in buffer if write fails
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void flushBufferToChunk(int rollbackPosition,
+  private void flushBufferToChunk(int rollbackPosition,
   int rollbackLimit) throws IOException {
 boolean success = false;
 try {
@@ -213,7 +213,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void writeChunkToContainer() throws IOException {
+  private void writeChunkToContainer() throws IOException {
 buffer.flip();
 ByteString data = ByteString.copyFrom(buffer);
 ChunkInfo chunk = ChunkInfo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 988af07..00624d5 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -105,7 +105,7 @@ public 

[31/50] [abbrv] hadoop git commit: HADOOP-15680. ITestNativeAzureFileSystemConcurrencyLive times out. Contributed by Andras Bokor.

2018-09-04 Thread ehiggs
HADOOP-15680. ITestNativeAzureFileSystemConcurrencyLive times out.
Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8d138ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8d138ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8d138ca

Branch: refs/heads/HDFS-12090
Commit: e8d138ca7c1b695688515d816ac693437c87df62
Parents: 2e6c110
Author: Steve Loughran 
Authored: Thu Aug 30 14:36:00 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:36:00 2018 +0100

--
 .../hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d138ca/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
index 87cac15..1c868ea 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
@@ -39,7 +39,7 @@ public class ITestNativeAzureFileSystemConcurrencyLive
 extends AbstractWasbTestBase {
 
   private static final int THREAD_COUNT = 102;
-  private static final int TEST_EXECUTION_TIMEOUT = 5000;
+  private static final int TEST_EXECUTION_TIMEOUT = 3;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {





[09/50] [abbrv] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
--
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 2200cd8..f56cbe8 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -1,64 +1,58 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
-import static 
com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-
 import com.sun.jersey.api.container.ContainerFactory;
 import com.sun.jersey.api.core.ApplicationAdapter;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import 
org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import 
org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.ObjectStoreApplication;
 import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.hdds.scm.protocolPB
-.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocolPB
-.StorageContainerLocationProtocolClientSideTranslatorPB;
-import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import 

[07/50] [abbrv] hadoop git commit: HDFS-13858. RBF: Add check to have single valid argument to safemode command. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13858. RBF: Add check to have single valid argument to safemode command. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75691ad6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75691ad6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75691ad6

Branch: refs/heads/HDFS-12090
Commit: 75691ad600473d4d315434b0876d6d10d3050a6b
Parents: 3974427
Author: Vinayakumar B 
Authored: Tue Aug 28 09:21:07 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Aug 28 09:21:07 2018 +0530

--
 .../hadoop/hdfs/tools/federation/RouterAdmin.java |  6 ++
 .../server/federation/router/TestRouterAdminCLI.java  | 14 ++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75691ad6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 91e1669..f88d0a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -218,6 +218,10 @@ public class RouterAdmin extends Configured implements 
Tool {
   "Successfully clear quota for mount point " + argv[i]);
 }
   } else if ("-safemode".equals(cmd)) {
+if (argv.length > 2) {
+  throw new IllegalArgumentException(
+  "Too many arguments, Max=1 argument allowed only");
+}
 manageSafeMode(argv[i]);
   } else if ("-nameservice".equals(cmd)) {
 String subcmd = argv[i];
@@ -712,6 +716,8 @@ public class RouterAdmin extends Configured implements Tool 
{
 } else if (cmd.equals("get")) {
   boolean result = getSafeMode();
   System.out.println("Safe Mode: " + result);
+} else {
+  throw new IllegalArgumentException("Invalid argument: " + cmd);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75691ad6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 2da5fb9..2682e9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -519,6 +519,7 @@ public class TestRouterAdminCLI {
 assertTrue(routerContext.getRouter().getSafemodeService().isInSafeMode());
 
 System.setOut(new PrintStream(out));
+System.setErr(new PrintStream(err));
 assertEquals(0, ToolRunner.run(admin,
 new String[] {"-safemode", "get"}));
 assertTrue(out.toString().contains("true"));
@@ -534,6 +535,19 @@ public class TestRouterAdminCLI {
 assertEquals(0, ToolRunner.run(admin,
 new String[] {"-safemode", "get"}));
 assertTrue(out.toString().contains("false"));
+
+out.reset();
+assertEquals(-1, ToolRunner.run(admin,
+new String[] {"-safemode", "get", "-random", "check" }));
+assertTrue(err.toString(), err.toString()
+.contains("safemode: Too many arguments, Max=1 argument allowed 
only"));
+err.reset();
+
+assertEquals(-1,
+ToolRunner.run(admin, new String[] {"-safemode", "check" }));
+assertTrue(err.toString(),
+err.toString().contains("safemode: Invalid argument: check"));
+err.reset();
   }
 
   @Test


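A minimal sketch of how the new validation surfaces to callers of the router admin tool. The wrapper class and main method are illustrative only; the expected return codes mirror the assertions added to TestRouterAdminCLI above, and a reachable router admin RPC endpoint is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.util.ToolRunner;

public class SafemodeArgumentExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RouterAdmin admin = new RouterAdmin(conf);

    // Exactly one argument after -safemode is accepted (expected exit code 0).
    int ok = ToolRunner.run(admin, new String[] {"-safemode", "get"});

    // More than one argument is now rejected (expected exit code -1).
    int tooMany = ToolRunner.run(admin,
        new String[] {"-safemode", "get", "-random"});

    // An unknown sub-command is now rejected as well (expected exit code -1).
    int invalid = ToolRunner.run(admin, new String[] {"-safemode", "check"});

    System.out.println(ok + " " + tooMany + " " + invalid);
  }
}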



[33/50] [abbrv] hadoop git commit: HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD (Contributed by Laszlo Kollar via Daniel Templeton)

2018-09-04 Thread ehiggs
HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD
(Contributed by Laszlo Kollar via Daniel Templeton)

Change-Id: I6e2459d0700df7f3bad4eac8297a11690191c3ba


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2c2a68e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2c2a68e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2c2a68e

Branch: refs/heads/HDFS-12090
Commit: f2c2a68ec208f640e778fc41f95f0284fcc44729
Parents: 5a0babf
Author: Daniel Templeton 
Authored: Thu Aug 30 09:12:36 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 30 09:12:36 2018 -0700

--
 .../hadoop-common/src/site/markdown/Compatibility.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2c2a68e/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 6b17c62..03d162a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -187,7 +187,7 @@ existing documentation and tests and/or adding new 
documentation or tests.
 
  Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
 
-Apache Hadoop revisions SHOUD retain binary compatability such that end-user
+Apache Hadoop revisions SHOULD retain binary compatability such that end-user
 applications continue to work without any modifications. Minor Apache Hadoop
 revisions within the same major revision MUST retain compatibility such that
 existing MapReduce applications (e.g. end-user applications and projects such





[27/50] [abbrv] hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-09-04 Thread ehiggs
HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d53a10b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d53a10b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d53a10b0

Branch: refs/heads/HDFS-12090
Commit: d53a10b0a552155de700e396fd7f450a4c5f9c22
Parents: 692736f
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 13:59:32 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d53a10b0/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
  Evolving
 





[44/50] [abbrv] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-04 Thread ehiggs
HDDS-357. Use DBStore and TableStore for OzoneManager non-background service.
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff036e49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff036e49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff036e49

Branch: refs/heads/HDFS-12090
Commit: ff036e49ff967d5dacf4b2d9d5376e57578ef391
Parents: eed8415
Author: Anu Engineer 
Authored: Sun Sep 2 11:47:32 2018 -0700
Committer: Anu Engineer 
Committed: Sun Sep 2 11:47:32 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   2 +-
 .../org/apache/hadoop/utils/db/DBStore.java |  22 +
 .../org/apache/hadoop/utils/db/RDBStore.java|  26 +-
 .../common/src/main/resources/ozone-default.xml |   2 +-
 .../apache/hadoop/hdds/server/ServerUtils.java  |   5 +
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 +-
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   6 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java |  11 +-
 ...neManagerProtocolClientSideTranslatorPB.java |   8 +-
 .../src/main/proto/OzoneManagerProtocol.proto   |   6 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  37 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   4 +
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|   7 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  37 +-
 .../hadoop/ozone/web/client/TestVolume.java |   6 +
 .../hadoop/ozone/om/BucketManagerImpl.java  |  57 ++-
 .../org/apache/hadoop/ozone/om/KeyManager.java  |   6 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  | 276 +-
 .../hadoop/ozone/om/OMMetadataManager.java  | 222 
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  | 509 +++
 .../apache/hadoop/ozone/om/OzoneManager.java| 209 
 .../hadoop/ozone/om/VolumeManagerImpl.java  | 156 +++---
 ...neManagerProtocolServerSideTranslatorPB.java |   7 +-
 .../hadoop/ozone/om/TestBucketManagerImpl.java  | 208 
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  12 +-
 26 files changed, 978 insertions(+), 873 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..8ea4d7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -92,7 +92,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String BLOCK_DB = "block.db";
   public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
@@ -113,8 +112,6 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String OPEN_KEY_PREFIX = "#open#";
-  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
 
   /**
* OM LevelDB prefixes.
@@ -138,8 +135,7 @@ public final class OzoneConsts {
*  | #deleting#/volumeName/bucketName/keyName |  KeyInfo|
*  --
*/
-  public static final String OM_VOLUME_PREFIX = "/#";
-  public static final String OM_BUCKET_PREFIX = "/#";
+
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
index b243e3d..379d9e9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
@@ -94,7 +94,7 @@ public class RocksDBStore implements MetadataStore {
 }
   }
 
-  private IOException toIOException(String msg, RocksDBException e) {
+  public static IOException 

[26/50] [abbrv] hadoop git commit: HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/692736f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/692736f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/692736f7

Branch: refs/heads/HDFS-12090
Commit: 692736f7cfb72b8932dc2eb4f4faa995dc6521f8
Parents: 7362516
Author: Mukul Kumar Singh 
Authored: Thu Aug 30 02:21:24 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Aug 30 02:21:24 2018 +0530

--
 dev-support/bin/ozone-dist-layout-stitching   |  6 +++---
 dev-support/bin/ozone-dist-tar-stitching  |  9 ++---
 hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh |  2 +-
 .../acceptance-test/dev-support/bin/robot-dnd-all.sh  | 10 ++
 hadoop-ozone/acceptance-test/dev-support/bin/robot.sh |  7 ---
 hadoop-ozone/acceptance-test/pom.xml  |  7 +++
 .../src/test/acceptance/basic/ozone-shell.robot   |  1 -
 .../acceptance-test/src/test/acceptance/commonlib.robot   |  2 +-
 hadoop-ozone/common/pom.xml   |  5 +
 hadoop-ozone/docs/content/GettingStarted.md   |  3 ++-
 hadoop-ozone/pom.xml  |  5 +
 11 files changed, 24 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 2ba7791..1ba652c 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -117,9 +117,9 @@ ROOT=$(cd "${BASEDIR}"/../..;pwd)
 echo
 echo "Current directory $(pwd)"
 echo
-run rm -rf "ozone"
-run mkdir "ozone"
-run cd "ozone"
+run rm -rf "ozone-${HDDS_VERSION}"
+run mkdir "ozone-${HDDS_VERSION}"
+run cd "ozone-${HDDS_VERSION}"
 run cp -p "${ROOT}/LICENSE.txt" .
 run cp -p "${ROOT}/NOTICE.txt" .
 run cp -p "${ROOT}/README.txt" .

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index d1116e4..93d0525 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -36,13 +36,8 @@ function run()
   fi
 }
 
-#To make the final dist directory easily mountable from docker we don't use
-#version name in the directory name.
-#To include the version name in the root directory of the tar file
-# we create a symbolic link and dereference it during the tar creation
-ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
+run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
-echo
\ No newline at end of file
+echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
index ee9c6b8..87b7137 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-$DIR/robot.sh $DIR/../../src/test/acceptance
+"$DIR/robot.sh" "$DIR/../../src/test/acceptance"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
index 9f1d367..052ffb3 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
@@ -18,15 +18,9 @@ set -x
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
-#Dir od the definition of the dind based test exeucution container
-DOCKERDIR="$DIR/../docker"
-
 #Dir to save the results
 TARGETDIR="$DIR/../../target/dnd"
 
-#Dir to mount the distribution from
-OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone"
-
 #Name and imagename of the temporary, dind based test containers
 DOCKER_IMAGE_NAME=ozoneacceptance
 

[37/50] [abbrv] hadoop git commit: HDDS-388. Fix the name of the db profile configuration key. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-388. Fix the name of the db profile configuration key.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50d2e3ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50d2e3ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50d2e3ec

Branch: refs/heads/HDFS-12090
Commit: 50d2e3ec41c73f9a0198d4a4e3d6f308d3030b8a
Parents: 630b64e
Author: Anu Engineer 
Authored: Fri Aug 31 14:30:29 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:30:29 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d2e3ec/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 6d2ee09..d3ec4a5 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1100,7 +1100,7 @@
   
 
   
-ozone.db.profile
+hdds.db.profile
 DBProfile.SSD
 OZONE, OM, PERFORMANCE, REQUIRED
 This property allows user to pick a configuration


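A small sketch of how the renamed key can be set or read programmatically. The class name is illustrative; the default value string is the one advertised in ozone-default.xml above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class DbProfileConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The key is now hdds.db.profile (formerly ozone.db.profile).
    conf.setIfUnset("hdds.db.profile", "DBProfile.SSD");
    System.out.println("hdds.db.profile = " + conf.get("hdds.db.profile"));
  }
}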



[12/50] [abbrv] hadoop git commit: YARN-8488. Added SUCCEEDED/FAILED states to YARN service. Contributed by Suma Shivaprasad

2018-09-04 Thread ehiggs
YARN-8488.  Added SUCCEEDED/FAILED states to YARN service.
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd089caf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd089caf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd089caf

Branch: refs/heads/HDFS-12090
Commit: fd089caf69cf608a91564c9c3d20cbf84e7fd60c
Parents: c61824a
Author: Eric Yang 
Authored: Tue Aug 28 13:55:28 2018 -0400
Committer: Eric Yang 
Committed: Tue Aug 28 13:55:28 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   | 100 ++---
 .../service/api/records/ComponentState.java |   2 +-
 .../service/api/records/ContainerState.java |   3 +-
 .../yarn/service/api/records/ServiceState.java  |   2 +-
 .../component/instance/ComponentInstance.java   | 144 ++-
 .../timelineservice/ServiceTimelineEvent.java   |   5 +-
 .../ServiceTimelinePublisher.java   |  33 -
 .../yarn/service/MockRunningServiceContext.java |  18 ++-
 .../hadoop/yarn/service/ServiceTestUtils.java   |   9 +-
 .../yarn/service/component/TestComponent.java   |  55 ++-
 .../component/TestComponentRestartPolicy.java   |   1 -
 .../instance/TestComponentInstance.java |  35 ++---
 .../TestServiceTimelinePublisher.java   |   4 +-
 13 files changed, 322 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd089caf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 384659f..b49ef2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
@@ -80,6 +81,8 @@ import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.apache.hadoop.yarn.util.BoundedAppender;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -102,7 +105,8 @@ import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
-import static 
org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_AFTER_APP_COMPLETION;
+import static org.apache.hadoop.yarn.api.records.ContainerExitStatus
+.KILLED_AFTER_APP_COMPLETION;
 import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
 import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
 import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes
@@ -137,6 +141,8 @@ public class ServiceScheduler extends CompositeService {
 
   private ServiceTimelinePublisher serviceTimelinePublisher;
 
+  private boolean timelineServiceEnabled;
+
   // Global diagnostics that will be reported to RM on eRxit.
   // The unit the number of characters. This will be limited to 64 * 1024
   // characters.
@@ -169,6 +175,8 @@ public class ServiceScheduler extends CompositeService {
   private volatile FinalApplicationStatus finalApplicationStatus =
   FinalApplicationStatus.ENDED;
 
+  private Clock systemClock;
+
   // For unit test override since we don't want to terminate UT process.
   private ServiceUtils.ProcessTerminationHandler
   terminationHandler = new ServiceUtils.ProcessTerminationHandler();
@@ 

[08/50] [abbrv] hadoop git commit: HDDS-381. Fix TestKeys#testPutAndGetKeyWithDnRestart. Contributed by Mukul Kumar Singh.

2018-09-04 Thread ehiggs
HDDS-381. Fix TestKeys#testPutAndGetKeyWithDnRestart. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2172399c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2172399c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2172399c

Branch: refs/heads/HDFS-12090
Commit: 2172399c55b481ea0da8cf2e2cb91ea6d8140b27
Parents: 75691ad
Author: Nanda kumar 
Authored: Tue Aug 28 22:19:52 2018 +0530
Committer: Nanda kumar 
Committed: Tue Aug 28 22:19:52 2018 +0530

--
 .../common/transport/server/GrpcXceiverService.java|  8 +++-
 .../java/org/apache/hadoop/ozone/MiniOzoneCluster.java |  3 ++-
 .../org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 13 +++--
 .../statemachine/commandhandler/TestBlockDeletion.java |  9 +++--
 .../org/apache/hadoop/ozone/web/client/TestKeys.java   | 11 ---
 5 files changed, 27 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
index df6220c..db4a86a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
@@ -56,10 +56,8 @@ public class GrpcXceiverService extends
   ContainerCommandResponseProto resp = dispatcher.dispatch(request);
   responseObserver.onNext(resp);
 } catch (Throwable e) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("{} got exception when processing"
+  LOG.error("{} got exception when processing"
 + " ContainerCommandRequestProto {}: {}", request, e);
-  }
   responseObserver.onError(e);
 }
   }
@@ -67,13 +65,13 @@ public class GrpcXceiverService extends
   @Override
   public void onError(Throwable t) {
 // for now we just log a msg
-LOG.info("{}: ContainerCommand send on error. Exception: {}", t);
+LOG.error("{}: ContainerCommand send on error. Exception: {}", t);
   }
 
   @Override
   public void onCompleted() {
 if (isClosed.compareAndSet(false, true)) {
-  LOG.info("{}: ContainerCommand send completed");
+  LOG.debug("{}: ContainerCommand send completed");
   responseObserver.onCompleted();
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index b568672..ae6a91e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -152,7 +152,8 @@ public interface MiniOzoneCluster {
*
* @param i index of HddsDatanode in the MiniOzoneCluster
*/
-  void restartHddsDatanode(int i);
+  void restartHddsDatanode(int i) throws InterruptedException,
+  TimeoutException;
 
   /**
* Shutdown a particular HddsDatanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 9b7e399..e06e2f6 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -216,7 +216,8 @@ public final class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
   }
 
   @Override
-  public void restartHddsDatanode(int i) {
+  public void restartHddsDatanode(int i) throws InterruptedException,
+  TimeoutException {
 HddsDatanodeService datanodeService = 

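A sketch of what test code now has to do, since restartHddsDatanode declares checked exceptions after this change. The wrapper class and method are illustrative; only MiniOzoneCluster#restartHddsDatanode(int) and its throws clause come from the patch.

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.ozone.MiniOzoneCluster;

public class RestartDatanodeExample {
  /** Restarts the first datanode, turning a failed restart into a test failure. */
  static void restartFirstDatanode(MiniOzoneCluster cluster) {
    try {
      cluster.restartHddsDatanode(0);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new AssertionError("Interrupted while restarting datanode", e);
    } catch (TimeoutException e) {
      throw new AssertionError("Datanode restart did not complete in time", e);
    }
  }
}
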
[06/50] [abbrv] hadoop git commit: HDDS-247. Handle CLOSED_CONTAINER_IO exception in ozoneClient. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-247. Handle CLOSED_CONTAINER_IO exception in ozoneClient. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3974427f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3974427f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3974427f

Branch: refs/heads/HDFS-12090
Commit: 3974427f67299496e13b04f0d006d367b705fcb5
Parents: 26c2a97
Author: Mukul Kumar Singh 
Authored: Tue Aug 28 07:11:36 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Aug 28 07:12:07 2018 +0530

--
 .../hdds/scm/storage/ChunkOutputStream.java |  28 +-
 .../ozone/client/io/ChunkGroupOutputStream.java | 195 +++--
 .../hadoop/ozone/om/helpers/OmKeyInfo.java  |  23 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 408 +++
 .../ozone/container/ContainerTestHelper.java|  21 +
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  16 +-
 6 files changed, 630 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3974427f/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 779e636..7309434 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -94,6 +94,10 @@ public class ChunkOutputStream extends OutputStream {
 this.chunkIndex = 0;
   }
 
+  public ByteBuffer getBuffer() {
+return buffer;
+  }
+
   @Override
   public synchronized void write(int b) throws IOException {
 checkOpen();
@@ -106,7 +110,8 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public void write(byte[] b, int off, int len) throws IOException {
+  public synchronized void write(byte[] b, int off, int len)
+  throws IOException {
 if (b == null) {
   throw new NullPointerException();
 }
@@ -143,24 +148,27 @@ public class ChunkOutputStream extends OutputStream {
 
   @Override
   public synchronized void close() throws IOException {
-if (xceiverClientManager != null && xceiverClient != null &&
-buffer != null) {
+if (xceiverClientManager != null && xceiverClient != null
+&& buffer != null) {
+  if (buffer.position() > 0) {
+writeChunkToContainer();
+  }
   try {
-if (buffer.position() > 0) {
-  writeChunkToContainer();
-}
 putKey(xceiverClient, containerKeyData.build(), traceID);
   } catch (IOException e) {
 throw new IOException(
 "Unexpected Storage Container Exception: " + e.toString(), e);
   } finally {
-xceiverClientManager.releaseClient(xceiverClient);
-xceiverClientManager = null;
-xceiverClient = null;
-buffer = null;
+cleanup();
   }
 }
+  }
 
+  public synchronized void cleanup() {
+xceiverClientManager.releaseClient(xceiverClient);
+xceiverClientManager = null;
+xceiverClient = null;
+buffer = null;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3974427f/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 83b4dfd..988af07 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -46,8 +47,10 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import 

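For readability, the close() path of ChunkOutputStream as it stands after this patch, reassembled from the hunk above; fields and helpers such as buffer, xceiverClient, containerKeyData, writeChunkToContainer() and cleanup() belong to the enclosing class and are not repeated here.

  @Override
  public synchronized void close() throws IOException {
    if (xceiverClientManager != null && xceiverClient != null
        && buffer != null) {
      if (buffer.position() > 0) {
        writeChunkToContainer();   // flush any bytes still sitting in the buffer
      }
      try {
        putKey(xceiverClient, containerKeyData.build(), traceID);
      } catch (IOException e) {
        throw new IOException(
            "Unexpected Storage Container Exception: " + e.toString(), e);
      } finally {
        cleanup();                 // always release the xceiver client and buffer
      }
    }
  }
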
[02/50] [abbrv] hadoop git commit: HDDS-375. ContainerReportHandler should not send replication events for open containers. Contributed by Ajay Kumar.

2018-09-04 Thread ehiggs
HDDS-375. ContainerReportHandler should not send replication events for open 
containers. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63956
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63956
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63956

Branch: refs/heads/HDFS-12090
Commit: c9b63956d97521ec21a051bfcbbf4b79262ea16f
Parents: f152582
Author: Xiaoyu Yao 
Authored: Mon Aug 27 10:39:30 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon Aug 27 10:40:33 2018 -0700

--
 .../scm/container/ContainerReportHandler.java   |  4 ++
 .../container/TestContainerReportHandler.java   | 40 +++-
 2 files changed, 34 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63956/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 5a9e726..5ca2bcb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -129,6 +129,10 @@ public class ContainerReportHandler implements
   "Container is missing from containerStateManager. Can't request "
   + "replication. {}",
   containerID);
+  return;
+}
+if (container.isContainerOpen()) {
+  return;
 }
 if (replicationStatus.isReplicationEnabled()) {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63956/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index e7b6cd9..443b4b2 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -84,6 +84,7 @@ public class TestContainerReportHandler implements 
EventPublisher {
 new Builder()
 .setReplicationFactor(ReplicationFactor.THREE)
 .setContainerID((Long) invocation.getArguments()[0])
+.setState(LifeCycleState.CLOSED)
 .build()
 );
 
@@ -116,26 +117,45 @@ public class TestContainerReportHandler implements 
EventPublisher {
 when(pipelineSelector.getReplicationPipeline(ReplicationType.STAND_ALONE,
 ReplicationFactor.THREE)).thenReturn(pipeline);
 
-long c1 = containerStateManager
+ContainerInfo cont1 = containerStateManager
 .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
-ReplicationFactor.THREE, "root").getContainerInfo()
-.getContainerID();
-
-long c2 = containerStateManager
+ReplicationFactor.THREE, "root").getContainerInfo();
+ContainerInfo cont2 = containerStateManager
 .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
-ReplicationFactor.THREE, "root").getContainerInfo()
-.getContainerID();
-
+ReplicationFactor.THREE, "root").getContainerInfo();
+// Open Container
+ContainerInfo cont3 = containerStateManager
+.allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+ReplicationFactor.THREE, "root").getContainerInfo();
+
+long c1 = cont1.getContainerID();
+long c2 = cont2.getContainerID();
+long c3 = cont3.getContainerID();
+
+// Close remaining containers
+try {
+  containerStateManager.getContainerStateMap()
+  .updateState(cont1, cont1.getState(), LifeCycleState.CLOSING);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont1, cont1.getState(), LifeCycleState.CLOSED);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont2, cont2.getState(), LifeCycleState.CLOSING);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont2, cont2.getState(), LifeCycleState.CLOSED);
+
+} catch (IOException e) {
+  LOG.info("Failed to change state of open containers.", e);
+}
 //when
 
 //initial reports before replication is enabled. 2 

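The guard order the patch introduces can be condensed as below. The wrapper class, method name and the boolean parameter are illustrative; the null check and the isContainerOpen() check are the ones added to ContainerReportHandler, and the import path is the one in use elsewhere in this patch.

import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

final class ReplicationGuardSketch {
  /** Returns true only when a replication request should be fired. */
  static boolean shouldRequestReplication(ContainerInfo container,
      boolean replicationEnabled) {
    if (container == null) {
      return false;   // container unknown to containerStateManager
    }
    if (container.isContainerOpen()) {
      return false;   // open containers are still being written to; skip
    }
    return replicationEnabled;
  }
}
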
[19/50] [abbrv] hadoop git commit: YARN-8697. LocalityMulticastAMRMProxyPolicy should fallback to random sub-cluster when cannot resolve resource. Contributed by Botong Huang.

2018-09-04 Thread ehiggs
YARN-8697. LocalityMulticastAMRMProxyPolicy should fallback to random 
sub-cluster when cannot resolve resource. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ed458b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ed458b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ed458b2

Branch: refs/heads/HDFS-12090
Commit: 7ed458b255e492fd5bc2ca36f216ff1b16054db7
Parents: 3e18b95
Author: Giovanni Matteo Fumarola 
Authored: Tue Aug 28 16:01:35 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Tue Aug 28 16:01:35 2018 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   | 105 +++
 .../TestLocalityMulticastAMRMProxyPolicy.java   |  53 --
 2 files changed, 125 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ed458b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 1ccd61c..e5f26d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -21,8 +21,11 @@ package 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
@@ -123,6 +126,8 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   public static final Logger LOG =
   LoggerFactory.getLogger(LocalityMulticastAMRMProxyPolicy.class);
 
+  private static Random rand = new Random();
+
   private Map weights;
   private SubClusterResolver resolver;
 
@@ -275,26 +280,18 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   }
 
   // Handle node/rack requests that the SubClusterResolver cannot map to
-  // any cluster. Defaulting to home subcluster.
+  // any cluster. Pick a random sub-cluster from active and enabled ones.
+  targetId = getSubClusterForUnResolvedRequest(bookkeeper,
+  rr.getAllocationRequestId());
   if (LOG.isDebugEnabled()) {
 LOG.debug("ERROR resolving sub-cluster for resourceName: "
-+ rr.getResourceName() + " we are falling back to homeSubCluster:"
-+ homeSubcluster);
++ rr.getResourceName() + ", picked a random subcluster to forward:"
++ targetId);
   }
-
-  // If home-subcluster is not active, ignore node/rack request
-  if (bookkeeper.isActiveAndEnabled(homeSubcluster)) {
-if (targetIds != null && targetIds.size() > 0) {
-  bookkeeper.addRackRR(homeSubcluster, rr);
-} else {
-  bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
-}
+  if (targetIds != null && targetIds.size() > 0) {
+bookkeeper.addRackRR(targetId, rr);
   } else {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The homeSubCluster (" + homeSubcluster + ") we are "
-  + "defaulting to is not active, the ResourceRequest "
-  + "will be ignored.");
-}
+bookkeeper.addLocalizedNodeRR(targetId, rr);
   }
 }
 
@@ -314,6 +311,14 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   }
 
   /**
+   * For unit test to override.
+   */
+  protected SubClusterId getSubClusterForUnResolvedRequest(
+  AllocationBookkeeper bookKeeper, long allocationId) {
+return bookKeeper.getSubClusterForUnResolvedRequest(allocationId);
+  }
+
+  /**
* It splits a list of non-localized resource requests among sub-clusters.
*/
   private void splitAnyRequests(List originalResourceRequests,
@@ -512,10 +517,11 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
* This 

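A generic illustration of the fallback described above: when a node or rack name cannot be resolved, a sub-cluster is picked at random from the active and enabled set instead of always defaulting to the home sub-cluster. The class and method names here are invented for the example; in the patch the actual selection is done by AllocationBookkeeper#getSubClusterForUnResolvedRequest.

import java.util.List;
import java.util.Random;

final class RandomFallbackSketch {
  private static final Random RAND = new Random();

  /** Picks one element at random from the active, enabled sub-clusters. */
  static <T> T pickRandom(List<T> activeAndEnabled) {
    if (activeAndEnabled.isEmpty()) {
      throw new IllegalStateException("no active sub-clusters to fall back to");
    }
    return activeAndEnabled.get(RAND.nextInt(activeAndEnabled.size()));
  }
}
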
[36/50] [abbrv] hadoop git commit: HDDS-98. Adding Ozone Manager Audit Log. Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-98. Adding Ozone Manager Audit Log.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/630b64ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/630b64ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/630b64ec

Branch: refs/heads/HDFS-12090
Commit: 630b64ec7e963968a5bdcd1d625fc78746950137
Parents: 8aa6c4f
Author: Anu Engineer 
Authored: Fri Aug 31 14:20:56 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:20:56 2018 -0700

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 +++
 hadoop-ozone/common/src/main/bin/ozone  |   2 +
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 218 ++-
 10 files changed, 466 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index a1828a3..21127f8 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,3 +31,40 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..9645c02 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -184,4 +184,36 @@ public final class OzoneConsts {
   public static 

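The MarkerFilter entries above drop READ-marked audit events and let WRITE-marked ones through to the async "OMAudit" logger. A small, hypothetical Log4j2 snippet showing how code would emit events that this configuration filters (this is not the Ozone AuditLogger itself):

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

/** Illustrative only: with the MarkerFilter config above, READ-marked events
 *  are denied and WRITE-marked events reach the rolling om-audit log. */
public final class AuditMarkerSketch {
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");
  private static final Marker READ = MarkerManager.getMarker("READ");

  public static void main(String[] args) {
    AUDIT.info(WRITE, "user=hadoop | op=CREATE_VOLUME | vol=vol1 | ret=SUCCESS");
    AUDIT.info(READ, "user=hadoop | op=INFO_VOLUME | vol=vol1 | ret=SUCCESS"); // dropped by the filter
  }
}
```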
[50/50] [abbrv] hadoop git commit: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2018-09-04 Thread ehiggs
HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06477abc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06477abc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06477abc

Branch: refs/heads/HDFS-12090
Commit: 06477abcd93eb988b4afd0a2dff549e67e0dbd85
Parents: 8cdd033
Author: Virajith Jalaparti 
Authored: Wed Aug 1 12:13:31 2018 -0700
Committer: Ewan Higgs 
Committed: Mon Sep 3 14:42:33 2018 +0200

--
 .../apache/hadoop/hdfs/BlockInputStream.java|  52 
 .../hdfs/server/datanode/BPOfferService.java|   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  18 +++
 .../SyncServiceSatisfierDatanodeWorker.java |  97 +++
 .../SyncTaskExecutionFeedbackCollector.java |  54 
 .../executor/BlockSyncOperationExecutor.java| 122 +++
 .../executor/BlockSyncReaderFactory.java|  92 ++
 .../executor/BlockSyncTaskRunner.java   |  69 +++
 .../hadoop/hdfs/TestBlockInputStream.java   |  84 +
 .../TestBlockSyncOperationExecutor.java |  94 ++
 10 files changed, 688 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06477abc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06477abc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index a25f6a9..b8eef5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -795,6 +795,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierDatanodeWorker().processSyncTasks(backupTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }

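As a rough usage sketch, the new facade lets a block's bytes be pumped into any OutputStream with the standard stream utilities. Obtaining the BlockReader (handled by BlockSyncReaderFactory in this patch) is out of scope here, and the helper below is hypothetical:

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.BlockInputStream;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.io.IOUtils;

/** Illustrative only: stream a block's bytes to a backup target through the
 *  new InputStream facade. The helper name and buffer size are made up. */
final class BlockBackupSketch {
  static void copyBlock(BlockReader reader, OutputStream target) throws IOException {
    try (InputStream in = new BlockInputStream(reader)) {
      // 64 KB buffer; 'false' tells copyBytes not to close the streams itself.
      IOUtils.copyBytes(in, target, 64 * 1024, false);
    }
  }
}
```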

[10/50] [abbrv] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-09-04 Thread ehiggs
HDDS-332. Remove the ability to configure ozone.handler.type
Contributed by Nandakumar and Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df21e1b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df21e1b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df21e1b1

Branch: refs/heads/HDFS-12090
Commit: df21e1b1ddcc8439b5fa1bb79388403f87742e65
Parents: 2172399
Author: Anu Engineer 
Authored: Tue Aug 28 09:56:02 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 09:56:02 2018 -0700

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|7 -
 .../org/apache/hadoop/ozone/OzoneConsts.java|1 -
 .../common/src/main/resources/ozone-default.xml |   21 -
 .../apache/hadoop/ozone/RatisTestHelper.java|8 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |7 +-
 .../rpc/TestCloseContainerHandlingByClient.java |2 -
 .../ozone/client/rpc/TestOzoneRpcClient.java|9 +-
 .../ozone/container/ContainerTestHelper.java|   10 -
 .../TestContainerDeletionChoosingPolicy.java|8 +-
 .../common/impl/TestContainerPersistence.java   |  116 +-
 .../commandhandler/TestBlockDeletion.java   |8 +-
 .../TestCloseContainerByPipeline.java   |   35 +-
 .../container/ozoneimpl/TestOzoneContainer.java |2 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |2 -
 .../container/ozoneimpl/TestRatisManager.java   |2 -
 .../hadoop/ozone/freon/TestDataValidate.java|7 +-
 .../apache/hadoop/ozone/freon/TestFreon.java|3 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   12 +-
 .../om/TestMultipleContainerReadWrite.java  |5 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |7 +-
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |7 +-
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|6 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |5 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   20 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |  188 ---
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |  187 ---
 .../hadoop/ozone/web/TestOzoneVolumes.java  |  183 +++
 .../hadoop/ozone/web/TestOzoneWebAccess.java|   10 +-
 .../hadoop/ozone/web/client/TestBuckets.java|9 +-
 .../hadoop/ozone/web/client/TestKeysRatis.java  |4 +-
 .../ozone/web/client/TestOzoneClient.java   |3 -
 .../hadoop/ozone/web/client/TestVolume.java |   11 +-
 .../ozone/web/client/TestVolumeRatis.java   |3 -
 .../server/datanode/ObjectStoreHandler.java |  182 ++-
 .../web/handlers/StorageHandlerBuilder.java |   18 +-
 .../web/localstorage/LocalStorageHandler.java   |  385 --
 .../web/localstorage/OzoneMetadataManager.java  | 1138 --
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |6 +-
 38 files changed, 363 insertions(+), 2274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 92f0c41..6ad9085 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -66,16 +66,9 @@ public final class OzoneConfigKeys {
   "dfs.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
   false;
-
-  public static final String OZONE_LOCALSTORAGE_ROOT =
-  "ozone.localstorage.root";
-  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
   public static final String OZONE_ENABLED =
   "ozone.enabled";
   public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_HANDLER_TYPE_KEY =
-  "ozone.handler.type";
-  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
   public static final String OZONE_TRACE_ENABLED_KEY =
   "ozone.trace.enabled";
   public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 320a3ed..ab6df92 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ 

[40/50] [abbrv] hadoop git commit: HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework. Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19abaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19abaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19abaacd

Branch: refs/heads/HDFS-12090
Commit: 19abaacdad84b03fc790341b4b5bcf1c4d41f1fb
Parents: 76bae4c
Author: Anu Engineer 
Authored: Fri Aug 31 22:24:30 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 22:24:30 2018 -0700

--
 .../main/java/org/apache/hadoop/ozone/audit/package-info.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19abaacd/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
index 48de3f7..9c00ef7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -50,8 +50,10 @@ package org.apache.hadoop.ozone.audit;
  * The implementing class must override toAuditMap() to return an
  * instance of Map where both Key and Value are String.
  *
- * Key: must not contain any spaces. If the key is multi word then use
- * camel case.
+ * Key: must contain printable US ASCII characters
+ * May not contain a space, =, ], or "
+ * If the key is multi word then use camel case.
+ *
  * Value: if it is a collection/array, then it must be converted to a comma
  * delimited string
  *





[22/50] [abbrv] hadoop git commit: YARN-8723. Fix a typo in CS init error message when resource calculator is not correctly set. Contributed by Abhishek Modi.

2018-09-04 Thread ehiggs
YARN-8723. Fix a typo in CS init error message when resource calculator is not 
correctly set. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fa46394
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fa46394
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fa46394

Branch: refs/heads/HDFS-12090
Commit: 3fa46394214181ed1cc7f06b886282bbdf67a10f
Parents: 64ad029
Author: Weiwei Yang 
Authored: Wed Aug 29 10:46:13 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Aug 29 11:13:44 2018 +0800

--
 .../resourcemanager/scheduler/capacity/CapacityScheduler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa46394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index dec1301..81dcf86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -348,7 +348,7 @@ public class CapacityScheduler extends
 throw new YarnRuntimeException("RM uses DefaultResourceCalculator 
which"
 + " used only memory as resource-type but invalid resource-types"
 + " specified " + ResourceUtils.getResourceTypes() + ". Use"
-+ " DomainantResourceCalculator instead to make effective use of"
++ " DominantResourceCalculator instead to make effective use of"
 + " these resource-types");
   }
   this.usePortForNodeName = this.conf.getUsePortForNodeName();





[32/50] [abbrv] hadoop git commit: HADOOP-15107. Stabilize/tune S3A committers; review correctness & docs. Contributed by Steve Loughran.

2018-09-04 Thread ehiggs
HADOOP-15107. Stabilize/tune S3A committers; review correctness & docs.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a0babf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a0babf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a0babf7

Branch: refs/heads/HDFS-12090
Commit: 5a0babf76550f63dad4c17173c4da2bf335c6532
Parents: e8d138c
Author: Steve Loughran 
Authored: Thu Aug 30 14:49:53 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:49:53 2018 +0100

--
 .../lib/output/PathOutputCommitter.java |  12 +-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |  15 +-
 .../fs/s3a/commit/AbstractS3ACommitter.java |  16 +-
 .../fs/s3a/commit/S3ACommitterFactory.java  |  18 +-
 .../s3a/commit/magic/MagicS3GuardCommitter.java |   7 +
 .../staging/DirectoryStagingCommitter.java  |   8 +-
 .../staging/PartitionedStagingCommitter.java|   9 +-
 .../hadoop/fs/s3a/commit/staging/Paths.java |  14 +-
 .../fs/s3a/commit/staging/StagingCommitter.java |  50 -
 .../tools/hadoop-aws/committer_architecture.md  |  94 ++---
 .../markdown/tools/hadoop-aws/committers.md |   2 +-
 .../fs/s3a/commit/AbstractCommitITest.java  |  19 ++
 .../fs/s3a/commit/AbstractITCommitMRJob.java|   5 +-
 .../fs/s3a/commit/AbstractITCommitProtocol.java |  63 --
 .../fs/s3a/commit/ITestS3ACommitterFactory.java | 200 +++
 .../fs/s3a/commit/magic/ITMagicCommitMRJob.java |   6 +-
 .../commit/magic/ITestMagicCommitProtocol.java  |  25 ++-
 .../ITStagingCommitMRJobBadDest.java|  62 ++
 .../integration/ITestStagingCommitProtocol.java |  13 ++
 19 files changed, 542 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a0babf7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
index 3679d9f..5e25f50 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
@@ -57,8 +57,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
   protected PathOutputCommitter(Path outputPath,
   TaskAttemptContext context) throws IOException {
 this.context = Preconditions.checkNotNull(context, "Null context");
-LOG.debug("Creating committer with output path {} and task context"
-+ " {}", outputPath, context);
+LOG.debug("Instantiating committer {} with output path {} and task context"
++ " {}", this, outputPath, context);
   }
 
   /**
@@ -71,8 +71,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
   protected PathOutputCommitter(Path outputPath,
   JobContext context) throws IOException {
 this.context = Preconditions.checkNotNull(context, "Null context");
-LOG.debug("Creating committer with output path {} and job context"
-+ " {}", outputPath, context);
+LOG.debug("Instantiating committer {} with output path {} and job context"
++ " {}", this, outputPath, context);
   }
 
   /**
@@ -103,6 +103,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
 
   @Override
   public String toString() {
-return "PathOutputCommitter{context=" + context + '}';
+return "PathOutputCommitter{context=" + context
++ "; " + super.toString()
++ '}';
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a0babf7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
index a007ba1..45912a0 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
@@ -130,8 +130,9 @@ public class Invoker {
   }
 
   /**
-   * Execute an operation and ignore all raised IOExceptions; log at INFO.
-   * @param log log to log at info.
+   * 

[01/50] [abbrv] hadoop git commit: YARN-8705. Refactor the UAM heartbeat thread in preparation for YARN-8696. Contributed by Botong Huang. [Forced Update!]

2018-09-04 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12090 959f49b48 -> 06477abcd (forced update)


YARN-8705. Refactor the UAM heartbeat thread in preparation for YARN-8696. 
Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1525825
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1525825
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1525825

Branch: refs/heads/HDFS-12090
Commit: f1525825623a1307b5aa55c456b6afa3e0c61135
Parents: 7b1fa56
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 27 10:32:22 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 27 10:32:22 2018 -0700

--
 .../yarn/server/AMHeartbeatRequestHandler.java  | 227 +
 .../server/uam/UnmanagedApplicationManager.java | 170 ++---
 .../amrmproxy/FederationInterceptor.java| 245 +--
 3 files changed, 358 insertions(+), 284 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1525825/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
new file mode 100644
index 000..42227bb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -0,0 +1,227 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.AsyncCallback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Extends Thread and provides an implementation that is used for processing 
the
+ * AM heart beat request asynchronously and sending back the response using the
+ * callback method registered with the system.
+ */
+public class AMHeartbeatRequestHandler extends Thread {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AMHeartbeatRequestHandler.class);
+
+  // Indication flag for the thread to keep running
+  private volatile boolean keepRunning;
+
+  private Configuration conf;
+  private ApplicationId applicationId;
+
+  private BlockingQueue requestQueue;
+  private AMRMClientRelayer rmProxyRelayer;
+  private UserGroupInformation userUgi;
+  private int lastResponseId;
+
+  public AMHeartbeatRequestHandler(Configuration conf,
+  ApplicationId applicationId) {
+super("AMHeartbeatRequestHandler Heartbeat Handler Thread");
+this.setUncaughtExceptionHandler(
+new HeartBeatThreadUncaughtExceptionHandler());
+this.keepRunning = true;
+
+this.conf = conf;
+this.applicationId = applicationId;
+this.requestQueue = new LinkedBlockingQueue<>();
+
+resetLastResponseId();
+  }
+
+  /**
+   * Shutdown the thread.
+   */
+  public void shutdown() {
+this.keepRunning = false;
+this.interrupt();
+  }
+
+  @Override
+  public void run() {
+while (keepRunning) {
+  AsyncAllocateRequestInfo requestInfo;
+  try {
+requestInfo = 

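The class above follows a common producer/consumer shape: callers enqueue allocate requests, and the thread drains the queue and hands each response to a registered callback. A simplified, generic sketch of that shape (not the actual YARN implementation):

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Consumer;
import java.util.function.Function;

/** Illustrative only: a simplified heartbeat thread that drains an async
 *  request queue and delivers each response to a registered callback. */
class HeartbeatLoopSketch<Q, R> extends Thread {
  private final BlockingQueue<Q> queue = new LinkedBlockingQueue<>();
  private final Function<Q, R> transport;   // stand-in for the RPC/relayer call
  private final Consumer<R> callback;
  private volatile boolean keepRunning = true;

  HeartbeatLoopSketch(Function<Q, R> transport, Consumer<R> callback) {
    super("Heartbeat handler sketch");
    this.transport = transport;
    this.callback = callback;
  }

  void submit(Q request) throws InterruptedException {
    queue.put(request);                     // producers enqueue and return immediately
  }

  void shutdown() {
    keepRunning = false;
    interrupt();                            // wake the thread if it is blocked in take()
  }

  @Override
  public void run() {
    while (keepRunning) {
      try {
        Q next = queue.take();              // block until a heartbeat is queued
        callback.accept(transport.apply(next));
      } catch (InterruptedException e) {
        // shutdown() interrupts take(); the loop re-checks keepRunning and exits
      }
    }
  }
}
```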
[29/50] [abbrv] hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-09-04 Thread ehiggs
HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/781437c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/781437c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/781437c2

Branch: refs/heads/HDFS-12090
Commit: 781437c219dc3422797a32dc7ba72cd4f5ee38e2
Parents: 582cb10
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:07:49 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
 Configuration.addDefaultResource(KMS_DEFAULT_XML);
 Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
 }
 return newer;
   }
+
+  public static void initLogging() {
+String confDir = System.getProperty(KMS_CONFIG_DIR);
+if (confDir == null) {
+  throw new RuntimeException("System property '" +
+  KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+}
+if (System.getProperty("log4j.configuration") == null) {
+  System.setProperty("log4j.defaultInitOverride", "true");
+  boolean fromClasspath = true;
+  File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+  if (log4jConf.exists()) {
+PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+fromClasspath = false;
+  } else {
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+if (log4jUrl != null) {
+  PropertyConfigurator.configure(log4jUrl);
+}
+  }
+  LOG.debug("KMS log starting");
+  if (fromClasspath) {
+LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+LOG.warn("Logging with INFO level to standard output");
+  }
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class KMSWebApp implements ServletContextListener {
 
-  private static final 

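The intent is that initLogging() runs once, early in server startup, before the rest of the KMS starts logging. A minimal, hypothetical bootstrap sequence illustrating that ordering:

```java
import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: configure log4j before anything else in the process
 *  starts logging. Requires the KMS config-dir system property
 *  (KMSConfiguration.KMS_CONFIG_DIR) to point at the directory holding
 *  kms-log4j.properties, otherwise initLogging() throws a RuntimeException. */
final class KmsLoggingBootstrapSketch {
  public static void main(String[] args) {
    KMSConfiguration.initLogging();
    Logger log = LoggerFactory.getLogger(KmsLoggingBootstrapSketch.class);
    log.info("KMS logging configured");
  }
}
```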
[39/50] [abbrv] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76bae4cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76bae4cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76bae4cc

Branch: refs/heads/HDFS-12090
Commit: 76bae4ccb1d929260038b1869be8070c2320b617
Parents: 50d2e3e
Author: Anu Engineer 
Authored: Fri Aug 31 18:11:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 18:11:01 2018 -0700

--
 .../common/dev-support/findbugsExcludeFile.xml  |   4 +
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  82 +++
 .../hadoop/hdds/cli/HddsVersionProvider.java|  35 ++
 .../apache/hadoop/hdds/cli/package-info.java|  22 +
 hadoop-hdds/pom.xml |   5 +
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java   |  43 --
 .../hdds/scm/cli/OzoneCommandHandler.java   |  87 
 .../apache/hadoop/hdds/scm/cli/ResultCode.java  |  31 --
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  | 246 +++--
 .../cli/container/CloseContainerHandler.java|  85 ---
 .../hdds/scm/cli/container/CloseSubcommand.java |  54 ++
 .../cli/container/ContainerCommandHandler.java  | 128 -
 .../cli/container/CreateContainerHandler.java   |  67 ---
 .../scm/cli/container/CreateSubcommand.java |  65 +++
 .../cli/container/DeleteContainerHandler.java   |  95 
 .../scm/cli/container/DeleteSubcommand.java |  60 +++
 .../scm/cli/container/InfoContainerHandler.java | 114 
 .../hdds/scm/cli/container/InfoSubcommand.java  |  94 
 .../scm/cli/container/ListContainerHandler.java | 117 -
 .../hdds/scm/cli/container/ListSubcommand.java  |  83 +++
 .../hdds/scm/cli/container/package-info.java|   3 +
 .../hadoop/hdds/scm/cli/package-info.java   |  12 +-
 hadoop-ozone/common/src/main/bin/ozone  |   2 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 518 ---
 24 files changed, 596 insertions(+), 1456 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
index daf6fec..c7db679 100644
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
@@ -21,4 +21,8 @@
   
 
   
+  
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
new file mode 100644
index 000..2b3e6c0
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.hdds.cli;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import picocli.CommandLine;
+import picocli.CommandLine.ExecutionException;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParameterException;
+import picocli.CommandLine.RunLast;
+
+/**
+ * This is a generic parent class for all the ozone related cli tools.
+ */
+public class GenericCli implements Callable {
+
+  @Option(names = {"--verbose"},
+  description = "More verbose output. Show the stack trace of the errors.")
+  private boolean verbose;
+
+  @Option(names = {"-D", "--set"})
+  private Map configurationOverrides = new HashMap<>();
+
+  private final CommandLine cmd;
+
+  public GenericCli() {
+cmd = new CommandLine(this);
+  }
+
+  public void 

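For readers new to picocli, the style this change moves to is annotated option fields plus a Callable body. A tiny, hypothetical command in the same style (not part of the patch, and using picocli's generic CommandLine.call entry point):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

/** Illustrative only: a tiny picocli command in the same style as GenericCli,
 *  with a -D/--set map option and a --verbose flag. */
@Command(name = "demo-cli", description = "Prints the supplied overrides.")
public class DemoCliSketch implements Callable<Void> {

  @Option(names = {"--verbose"}, description = "More verbose output.")
  private boolean verbose;

  @Option(names = {"-D", "--set"}, description = "Configuration overrides as key=value.")
  private Map<String, String> overrides = new HashMap<>();

  @Override
  public Void call() {
    overrides.forEach((k, v) -> System.out.println(k + "=" + v));
    if (verbose) {
      System.out.println(overrides.size() + " override(s) supplied");
    }
    return null;
  }

  public static void main(String[] args) {
    // Parses args, runs call(), and prints usage on bad input.
    CommandLine.call(new DemoCliSketch(), args);
  }
}
```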
[30/50] [abbrv] hadoop git commit: HADOOP-15667. FileSystemMultipartUploader should verify that UploadHandle has non-0 length. Contributed by Ewan Higgs

2018-09-04 Thread ehiggs
HADOOP-15667. FileSystemMultipartUploader should verify that UploadHandle has 
non-0 length.
Contributed by Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e6c1109
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e6c1109
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e6c1109

Branch: refs/heads/HDFS-12090
Commit: 2e6c1109dcdeedb59a3345047e9201271c9a0b27
Parents: 781437c
Author: Steve Loughran 
Authored: Thu Aug 30 14:33:16 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:33:16 2018 +0100

--
 .../hadoop/fs/FileSystemMultipartUploader.java  |  6 ++-
 .../org/apache/hadoop/fs/MultipartUploader.java | 11 +
 .../AbstractContractMultipartUploaderTest.java  | 43 
 .../hadoop/fs/s3a/S3AMultipartUploader.java | 10 ++---
 4 files changed, 61 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index a700a9f..f13b50b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -68,6 +68,7 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
   throws IOException {
 
 byte[] uploadIdByteArray = uploadId.toByteArray();
+checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
 uploadIdByteArray.length, Charsets.UTF_8));
 Path partPath =
@@ -101,6 +102,8 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
   List> handles, UploadHandle multipartUploadId)
   throws IOException {
 
+checkUploadId(multipartUploadId.toByteArray());
+
 if (handles.isEmpty()) {
   throw new IOException("Empty upload");
 }
@@ -133,8 +136,7 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
   @Override
   public void abort(Path filePath, UploadHandle uploadId) throws IOException {
 byte[] uploadIdByteArray = uploadId.toByteArray();
-Preconditions.checkArgument(uploadIdByteArray.length != 0,
-"UploadId is empty");
+checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
 uploadIdByteArray.length, Charsets.UTF_8));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
index 47fd9f2..76f58d3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,4 +92,14 @@ public abstract class MultipartUploader {
   public abstract void abort(Path filePath, UploadHandle multipartUploadId)
   throws IOException;
 
+  /**
+   * Utility method to validate uploadIDs
+   * @param uploadId
+   * @throws IllegalArgumentException
+   */
+  protected void checkUploadId(byte[] uploadId)
+  throws IllegalArgumentException {
+Preconditions.checkArgument(uploadId.length > 0,
+"Empty UploadId is not valid");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index c0e1600..85a6861 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ 

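The added check is a fail-fast guard: an empty serialized UploadHandle now raises IllegalArgumentException up front instead of turning into a bogus collector Path further down. A standalone sketch of that behaviour (illustrative only, not the Hadoop class itself):

```java
import java.nio.charset.StandardCharsets;

import com.google.common.base.Preconditions;

/** Illustrative only: the fail-fast guard added by this change, shown on its
 *  own. An empty serialized upload handle is rejected immediately. */
final class UploadHandleCheckSketch {
  static void checkUploadId(byte[] uploadId) {
    Preconditions.checkArgument(uploadId != null && uploadId.length > 0,
        "Empty UploadId is not valid");
  }

  public static void main(String[] args) {
    checkUploadId("upload-0001".getBytes(StandardCharsets.UTF_8)); // passes
    try {
      checkUploadId(new byte[0]);                                  // rejected
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}
```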
[03/50] [abbrv] hadoop git commit: YARN-8675. Remove default hostname for docker containers when net=host. Contributed by Suma Shivaprasad

2018-09-04 Thread ehiggs
YARN-8675. Remove default hostname for docker containers when net=host. 
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05b2bbeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05b2bbeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05b2bbeb

Branch: refs/heads/HDFS-12090
Commit: 05b2bbeb357d4fa03e71f2bfd5d8eeb0ea6c3f60
Parents: c9b6395
Author: Billie Rinaldi 
Authored: Mon Aug 27 11:34:33 2018 -0700
Committer: Billie Rinaldi 
Committed: Mon Aug 27 11:34:33 2018 -0700

--
 .../runtime/DockerLinuxContainerRuntime.java| 49 
 1 file changed, 29 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05b2bbeb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 1872830..00771ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -134,8 +134,8 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME} sets the
  * hostname to be used by the Docker container. If not specified, a
- * hostname will be derived from the container ID.  This variable is
- * ignored if the network is 'host' and Registry DNS is not enabled.
+ * hostname will be derived from the container ID and set as default
+ * hostname for networks other than 'host'.
  *   
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER}
@@ -549,22 +549,34 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 }
   }
 
-  /** Set a DNS friendly hostname. */
-  private void setHostname(DockerRunCommand runCommand, String
-  containerIdStr, String name)
+  /** Set a DNS friendly hostname.
+   *  Only add hostname if network is not host or if hostname is
+   *  specified via YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME
+   *  in host network mode
+   */
+  private void setHostname(DockerRunCommand runCommand,
+  String containerIdStr, String network, String name)
   throws ContainerExecutionException {
-if (name == null || name.isEmpty()) {
-  name = RegistryPathUtils.encodeYarnID(containerIdStr);
 
-  String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN);
-  if (domain != null) {
-name += ("." + domain);
+if (network.equalsIgnoreCase("host")) {
+  if (name != null && !name.isEmpty()) {
+LOG.info("setting hostname in container to: " + name);
+runCommand.setHostname(name);
   }
-  validateHostname(name);
-}
+} else {
+  //get default hostname
+  if (name == null || name.isEmpty()) {
+name = RegistryPathUtils.encodeYarnID(containerIdStr);
 
-LOG.info("setting hostname in container to: " + name);
-runCommand.setHostname(name);
+String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN);
+if (domain != null) {
+  name += ("." + domain);
+}
+validateHostname(name);
+  }
+  LOG.info("setting hostname in container to: " + name);
+  runCommand.setHostname(name);
+}
   }
 
   /**
@@ -823,12 +835,9 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
 dockerRunAsUser, imageName)
 .setNetworkType(network);
-// Only add hostname if network is not host or if Registry DNS is enabled.
-if (!network.equalsIgnoreCase("host") ||
-conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
-RegistryConstants.DEFAULT_DNS_ENABLED)) {
-  setHostname(runCommand, containerIdStr, hostname);
-}
+
+setHostname(runCommand, containerIdStr, network, hostname);
+
 
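Restated as a pure function, the new rule is: with net=host, set a hostname only if one was given explicitly; for any other network, fall back to a name derived from the container id plus the registry DNS domain when configured. A hypothetical sketch (the "ctr-" prefix stands in for RegistryPathUtils.encodeYarnID):

```java
import java.util.Optional;

/** Illustrative only: the hostname decision restated as a pure function. */
final class DockerHostnameSketch {
  static Optional<String> chooseHostname(String network, String explicitName,
      String containerId, String dnsDomain) {
    boolean hasExplicit = explicitName != null && !explicitName.isEmpty();
    if ("host".equalsIgnoreCase(network)) {
      // net=host: no default hostname any more, only an explicit one is set.
      return hasExplicit ? Optional.of(explicitName) : Optional.empty();
    }
    if (hasExplicit) {
      return Optional.of(explicitName);
    }
    String derived = "ctr-" + containerId;
    return Optional.of(dnsDomain == null ? derived : derived + "." + dnsDomain);
  }
}
```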

[18/50] [abbrv] hadoop git commit: HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints. Contributed by Zsolt Venczel.

2018-09-04 Thread ehiggs
HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException 
during processCheckpoints. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e18b957
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e18b957
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e18b957

Branch: refs/heads/HDFS-12090
Commit: 3e18b957ebdf20925224ab9c28e6c2f4b6bbdb24
Parents: c5629d5
Author: Zsolt Venczel 
Authored: Tue Aug 28 15:11:58 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 15:13:43 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  6 +--
 .../server/namenode/ReencryptionUpdater.java| 52 ++--
 2 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index c8c8d68..a8acccd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -714,10 +714,10 @@ public class ReencryptionHandler implements Runnable {
   zst = new ZoneSubmissionTracker();
   submissions.put(zoneId, zst);
 }
+Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
+currentBatch, reencryptionHandler));
+zst.addTask(future);
   }
-  Future future = batchService.submit(new EDEKReencryptCallable(zoneId,
-  currentBatch, reencryptionHandler));
-  zst.addTask(future);
   LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
   currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
   currentBatch = new ReencryptionBatch(reencryptBatchSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index a5923a7..15cfa92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -383,32 +383,34 @@ public final class ReencryptionUpdater implements 
Runnable {
 final LinkedList tasks = tracker.getTasks();
 final List xAttrs = Lists.newArrayListWithCapacity(1);
 ListIterator iter = tasks.listIterator();
-while (iter.hasNext()) {
-  Future curr = iter.next();
-  if (curr.isCancelled()) {
-break;
-  }
-  if (!curr.isDone() || !curr.get().processed) {
-// still has earlier tasks not completed, skip here.
-break;
-  }
-  ReencryptionTask task = curr.get();
-  LOG.debug("Updating re-encryption checkpoint with completed task."
-  + " last: {} size:{}.", task.lastFile, task.batch.size());
-  assert zoneId == task.zoneId;
-  try {
-final XAttr xattr = FSDirEncryptionZoneOp
-.updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
-task.numFilesUpdated, task.numFailures);
-xAttrs.clear();
-xAttrs.add(xattr);
-  } catch (IOException ie) {
-LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
-zonePath, ie);
-++task.numFailures;
+synchronized (handler) {
+  while (iter.hasNext()) {
+Future curr = iter.next();
+if (curr.isCancelled()) {
+  break;
+}
+if (!curr.isDone() || !curr.get().processed) {
+  // still has earlier tasks not completed, skip here.
+  break;
+}
+ReencryptionTask task = curr.get();
+LOG.debug("Updating re-encryption checkpoint with completed task."
++ " last: {} size:{}.", task.lastFile, task.batch.size());
+assert zoneId == task.zoneId;
+try {
+  final XAttr xattr = FSDirEncryptionZoneOp
+  .updateReencryptionProgress(dir, zoneNode, status, 

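The root cause is a plain LinkedList of futures shared between the handler (which appends) and the updater (which iterates); without a common lock the iteration can hit ConcurrentModificationException. A minimal generic illustration of the hazard and the fix:

```java
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

/** Illustrative only: two threads share a plain LinkedList; iterating it while
 *  the other thread appends throws ConcurrentModificationException unless both
 *  sides synchronize on the same lock (the handler object in the patch). */
final class SharedTaskListSketch {
  private final List<String> tasks = new LinkedList<>();
  private final Object lock = new Object();   // stands in for the handler

  void submit(String task) {                  // called from the producing thread
    synchronized (lock) {
      tasks.add(task);
    }
  }

  int drainCompleted() {                      // called from the consuming thread
    synchronized (lock) {                     // without this, add() during iteration fails
      int processed = 0;
      for (Iterator<String> it = tasks.iterator(); it.hasNext();) {
        it.next();
        it.remove();
        processed++;
      }
      return processed;
    }
  }
}
```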
[25/50] [abbrv] hadoop git commit: YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by Craig Condit

2018-09-04 Thread ehiggs
YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by 
Craig Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73625168
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73625168
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73625168

Branch: refs/heads/HDFS-12090
Commit: 73625168c0f29aa646d7a715c9fb15e43d6c7e05
Parents: a0ebb6b
Author: Shane Kumpf 
Authored: Wed Aug 29 07:08:37 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 29 07:08:37 2018 -0600

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../runtime/DockerLinuxContainerRuntime.java|  38 +
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../container-executor/impl/utils/docker-util.c |  42 ++
 .../container-executor/impl/utils/docker-util.h |   3 +-
 .../test/utils/test_docker_util.cc  |  64 
 .../runtime/TestDockerContainerRuntime.java | 149 +++
 .../runtime/docker/TestDockerRunCommand.java|   5 +-
 .../src/site/markdown/DockerContainers.md   |   1 +
 10 files changed, 317 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 148edb9..d525e4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2012,6 +2012,11 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
   DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
 
+  /** The default list of tmpfs mounts to be mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_TMPFS_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-tmpfs-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 72e42d8..4262436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1828,6 +1828,13 @@
   
 
   
+The default list of tmpfs mounts to be mounted into all Docker
+  containers that use DockerContainerRuntime.
+yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts
+
+  
+
+  
 The mode in which the Java Container Sandbox should run 
detailed by
   the JavaSandboxLinuxContainerRuntime.
 yarn.nodemanager.runtime.linux.sandbox-mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 00771ff..0ae3d0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

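On the configuration side, the new key holds an admin-supplied list of tmpfs mount points. A small, hypothetical sketch of reading it back out of the NM configuration (the actual parsing inside DockerLinuxContainerRuntime is not shown in this excerpt):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/** Illustrative only: read the default tmpfs list from the NM configuration. */
final class TmpfsConfigSketch {
  static String[] defaultTmpfsMounts(Configuration conf) {
    // e.g. yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts=/run,/var/cache
    return conf.getTrimmedStrings(YarnConfiguration.NM_DOCKER_DEFAULT_TMPFS_MOUNTS);
  }
}
```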
[14/50] [abbrv] hadoop git commit: HDDS-376. Create custom message structure for use in AuditLogging Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-376. Create custom message structure for use in AuditLogging
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac515d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac515d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac515d22

Branch: refs/heads/HDFS-12090
Commit: ac515d22d84478acbed92ef4024d9a3d3f329c8a
Parents: cb9d371
Author: Anu Engineer 
Authored: Tue Aug 28 12:59:08 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 12:59:08 2018 -0700

--
 .../apache/hadoop/ozone/audit/AuditLogger.java  |  66 --
 .../apache/hadoop/ozone/audit/AuditMessage.java |  64 ++
 .../apache/hadoop/ozone/audit/package-info.java |  19 ++-
 .../ozone/audit/TestOzoneAuditLogger.java   | 124 ---
 4 files changed, 177 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac515d22/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index 46ffaab..ee20c66 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -21,10 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.message.StructuredDataMessage;
 import org.apache.logging.log4j.spi.ExtendedLogger;
 
-import java.util.Map;
 
 /**
  * Class to define Audit Logger for Ozone.
@@ -32,16 +30,13 @@ import java.util.Map;
 public class AuditLogger {
 
   private ExtendedLogger logger;
-
-  private static final String SUCCESS = AuditEventStatus.SUCCESS.getStatus();
-  private static final String FAILURE = AuditEventStatus.FAILURE.getStatus();
   private static final String FQCN = AuditLogger.class.getName();
   private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
   private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
 
   /**
* Parametrized Constructor to initialize logger.
-   * @param type
+   * @param type Audit Logger Type
*/
   public AuditLogger(AuditLoggerType type){
 initializeLogger(type);
@@ -60,68 +55,53 @@ public class AuditLogger {
 return logger;
   }
 
-  public void logWriteSuccess(AuditAction type, Map<String, String> data) {
-    logWriteSuccess(type, data, Level.INFO);
+  public void logWriteSuccess(AuditMessage msg) {
+    logWriteSuccess(Level.INFO, msg);
   }
 
-  public void logWriteSuccess(AuditAction type, Map<String, String> data, Level
-      level) {
-    StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-        type.getAction(), data);
+  public void logWriteSuccess(Level level, AuditMessage msg) {
     this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, null);
   }
 
-
-  public void logWriteFailure(AuditAction type, Map<String, String> data) {
-    logWriteFailure(type, data, Level.INFO, null);
+  public void logWriteFailure(AuditMessage msg) {
+    logWriteFailure(Level.ERROR, msg);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
-      level) {
-    logWriteFailure(type, data, level, null);
+  public void logWriteFailure(Level level, AuditMessage msg) {
+    logWriteFailure(level, msg, null);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data,
-      Throwable exception) {
-    logWriteFailure(type, data, Level.INFO, exception);
+  public void logWriteFailure(AuditMessage msg, Throwable exception) {
+    logWriteFailure(Level.ERROR, msg, exception);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
-      level, Throwable exception) {
-    StructuredDataMessage msg = new StructuredDataMessage("", FAILURE,
-        type.getAction(), data);
+  public void logWriteFailure(Level level, AuditMessage msg,
+      Throwable exception) {
     this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, exception);
   }
 
-  public void logReadSuccess(AuditAction type, Map<String, String> data) {
-    logReadSuccess(type, data, Level.INFO);
+  public void logReadSuccess(AuditMessage msg) {
+    logReadSuccess(Level.INFO, msg);
   }
 
-  public void logReadSuccess(AuditAction type, Map<String, String> data, Level
-      level) {
-    StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-        type.getAction(), data);
+  public void logReadSuccess(Level level, AuditMessage msg) {
     this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, null);
   }
 
-  public void 
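
The change above is mostly mechanical: call sites stop building a Map plus
StructuredDataMessage and instead hand a ready-made AuditMessage to
logIfEnabled(), which accepts any implementation of Log4j2's Message
interface. A minimal, hypothetical implementation of such a message type is
sketched below; the class name, constructor and field layout are invented for
illustration and do not match the actual HDDS-376 AuditMessage.

import org.apache.logging.log4j.message.Message;

/** Illustrative audit message; the real AuditMessage in HDDS-376 differs. */
public class SimpleAuditMessage implements Message {
  private final String formatted;

  public SimpleAuditMessage(String user, String ip, String op,
      String params, String status) {
    // Pre-render the audit line once; Log4j2 only asks for it when enabled.
    this.formatted = user + " | " + ip + " | " + op + " | " + params
        + " | " + status;
  }

  @Override
  public String getFormattedMessage() {
    return formatted;
  }

  @Override
  public String getFormat() {
    return formatted;
  }

  @Override
  public Object[] getParameters() {
    return null;
  }

  @Override
  public Throwable getThrowable() {
    return null;
  }
}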

[24/50] [abbrv] hadoop git commit: HDFS-13634. RBF: Configurable value in xml for async connection request queue size. Contributed by CR Hota.

2018-09-04 Thread ehiggs
HDFS-13634. RBF: Configurable value in xml for async connection request queue 
size. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0ebb6b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0ebb6b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0ebb6b3

Branch: refs/heads/HDFS-12090
Commit: a0ebb6b39f2932d3ea2fb5e287f52b841e108428
Parents: 0bd4217
Author: Yiqun Lin 
Authored: Wed Aug 29 16:15:22 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Aug 29 16:15:22 2018 +0800

--
 .../federation/router/ConnectionManager.java  | 18 +++---
 .../server/federation/router/RBFConfigKeys.java   |  5 +
 .../src/main/resources/hdfs-rbf-default.xml   |  8 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 0b50845..9fb83e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -49,9 +49,6 @@ public class ConnectionManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ConnectionManager.class);
 
-  /** Number of parallel new connections to create. */
-  protected static final int MAX_NEW_CONNECTIONS = 100;
-
   /** Minimum amount of active connections: 50%. */
   protected static final float MIN_ACTIVE_RATIO = 0.5f;
 
@@ -77,8 +74,10 @@ public class ConnectionManager {
   private final Lock writeLock = readWriteLock.writeLock();
 
   /** Queue for creating new connections. */
-  private final BlockingQueue<ConnectionPool> creatorQueue =
-      new ArrayBlockingQueue<>(MAX_NEW_CONNECTIONS);
+  private final BlockingQueue<ConnectionPool> creatorQueue;
+  /** Max size of queue for creating new connections. */
+  private final int creatorQueueMaxSize;
+
   /** Create new connections asynchronously. */
   private final ConnectionCreator creator;
   /** Periodic executor to remove stale connection pools. */
@@ -106,7 +105,12 @@ public class ConnectionManager {
 this.pools = new HashMap<>();
 
 // Create connections in a thread asynchronously
-this.creator = new ConnectionCreator(creatorQueue);
+this.creatorQueueMaxSize = this.conf.getInt(
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT
+);
+this.creatorQueue = new ArrayBlockingQueue<>(this.creatorQueueMaxSize);
+this.creator = new ConnectionCreator(this.creatorQueue);
 this.creator.setDaemon(true);
 
 // Cleanup periods
@@ -213,7 +217,7 @@ public class ConnectionManager {
 if (conn == null || !conn.isUsable()) {
   if (!this.creatorQueue.offer(pool)) {
 LOG.error("Cannot add more than {} connections at the same time",
-MAX_NEW_CONNECTIONS);
+this.creatorQueueMaxSize);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 87df5d2..997e1dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -93,6 +93,11 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router NN client
+  public static final String
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.creator.queue-size";
+  public static final int
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT = 100;
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
   FEDERATION_ROUTER_PREFIX + "connection.pool-size";
   public static final 
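
The pattern in this patch, replacing a hard-coded constant with a key/default
pair in RBFConfigKeys and reading it through Configuration.getInt(), is easy
to reuse. A rough, self-contained sketch follows; the key name and the queue
element type are made up for illustration.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.apache.hadoop.conf.Configuration;

public class BoundedCreatorQueueSketch {
  // Hypothetical key and default, mirroring the RBFConfigKeys additions.
  static final String QUEUE_SIZE_KEY = "example.connection.creator.queue-size";
  static final int QUEUE_SIZE_DEFAULT = 100;

  private final int creatorQueueMaxSize;
  private final BlockingQueue<Runnable> creatorQueue;

  public BoundedCreatorQueueSketch(Configuration conf) {
    // The queue capacity now comes from configuration instead of a constant.
    this.creatorQueueMaxSize = conf.getInt(QUEUE_SIZE_KEY, QUEUE_SIZE_DEFAULT);
    this.creatorQueue = new ArrayBlockingQueue<>(creatorQueueMaxSize);
  }

  public boolean submit(Runnable task) {
    // offer() is non-blocking and returns false when the queue is full.
    boolean accepted = creatorQueue.offer(task);
    if (!accepted) {
      System.err.println("Cannot add more than " + creatorQueueMaxSize
          + " pending tasks at the same time");
    }
    return accepted;
  }
}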

[28/50] [abbrv] hadoop git commit: HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by Fei Hui.

2018-09-04 Thread ehiggs
HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by 
Fei Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582cb10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582cb10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582cb10e

Branch: refs/heads/HDFS-12090
Commit: 582cb10ec74ed5666946a3769002ceb80ba660cb
Parents: d53a10b
Author: Yiqun Lin 
Authored: Thu Aug 30 11:21:13 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Aug 30 11:21:13 2018 +0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582cb10e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d7f133e..27196c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1397,6 +1397,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   datanode.getMetrics().incrRamDiskBlocksWrite();
 } catch (DiskOutOfSpaceException de) {
   // Ignore the exception since we just fall back to persistent 
storage.
+  LOG.warn("Insufficient space for placing the block on a transient "
+  + "volume, fall back to persistent storage: "
+  + de.getMessage());
 } finally {
   if (ref == null) {
 cacheManager.release(b.getNumBytes());
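
The change itself is three lines, but the pattern is worth spelling out: log a
WARN with the exception message before falling back, instead of swallowing the
error silently. A hedged sketch of the same control flow is below; the
WriteAction interface is invented here and FsDatasetImpl does not look like
this.

import java.io.IOException;

import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TransientWriteSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(TransientWriteSketch.class);

  /** Hypothetical write action that may run out of transient (RAM disk) space. */
  interface WriteAction {
    void run() throws IOException;
  }

  void writeBlock(WriteAction transientWrite, WriteAction persistentWrite)
      throws IOException {
    try {
      transientWrite.run();
    } catch (DiskOutOfSpaceException e) {
      // The patch adds this WARN instead of silently swallowing the exception,
      // so operators can see why a block ended up on persistent storage.
      LOG.warn("Insufficient space for placing the block on a transient "
          + "volume, falling back to persistent storage: {}", e.getMessage());
      persistentWrite.run();
    }
  }
}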





[21/50] [abbrv] hadoop git commit: HDFS-13854. RBF: The ProcessingAvgTime and ProxyAvgTime should display by JMX with ms unit. Contributed by yanghuafeng.

2018-09-04 Thread ehiggs
HDFS-13854. RBF: The ProcessingAvgTime and ProxyAvgTime should display by JMX 
with ms unit. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64ad0298
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64ad0298
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64ad0298

Branch: refs/heads/HDFS-12090
Commit: 64ad0298d441559951bc9589a40f8aab17c93a5f
Parents: 2651e2c
Author: Brahma Reddy Battula 
Authored: Wed Aug 29 08:29:50 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Aug 29 08:29:50 2018 +0530

--
 .../federation/metrics/FederationRPCMetrics.java | 13 ++---
 .../metrics/FederationRPCPerformanceMonitor.java | 15 +--
 2 files changed, 7 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ad0298/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 9ab4e5a..cce4b86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -86,15 +86,6 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
   }
 
   /**
-   * Convert nanoseconds to milliseconds.
-   * @param ns Time in nanoseconds.
-   * @return Time in milliseconds.
-   */
-  private static double toMs(double ns) {
-    return ns / 1000000;
-  }
-
-  /**
* Reset the metrics system.
*/
   public static void reset() {
@@ -230,7 +221,7 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
 
   @Override
   public double getProxyAvg() {
-return toMs(proxy.lastStat().mean());
+return proxy.lastStat().mean();
   }
 
   @Override
@@ -250,7 +241,7 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
 
   @Override
   public double getProcessingAvg() {
-return toMs(processing.lastStat().mean());
+return processing.lastStat().mean();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ad0298/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 2c2741e..15725d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -35,6 +35,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 /**
  * Customizable RPC performance monitor. Receives events from the RPC server
  * and aggregates them via JMX.
@@ -120,12 +122,12 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
 
   @Override
   public void startOp() {
-START_TIME.set(this.getNow());
+START_TIME.set(monotonicNow());
   }
 
   @Override
   public long proxyOp() {
-PROXY_TIME.set(this.getNow());
+PROXY_TIME.set(monotonicNow());
 long processingTime = getProcessingTime();
 if (processingTime >= 0) {
   metrics.addProcessingTime(processingTime);
@@ -188,13 +190,6 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
 metrics.incrRouterFailureLocked();
   }
 
-  /**
-   * Get current time.
-   * @return Current time in nanoseconds.
-   */
-  private long getNow() {
-return System.nanoTime();
-  }
 
   /**
* Get time between we receiving the operation and sending it to the 
Namenode.
@@ -214,7 +209,7 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
*/
   private long getProxyTime() {
 if (PROXY_TIME.get() != null && PROXY_TIME.get() > 0) {
-  return getNow() - PROXY_TIME.get();
+  return monotonicNow() - PROXY_TIME.get();
 }
 return -1;
   }
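
The essence of the fix: Time.monotonicNow() already returns milliseconds from
a monotonic clock, so the stored timestamps and the JMX averages end up in the
same unit and the old nanosecond-to-millisecond conversion can be dropped. A
small sketch of that measurement pattern, under the same assumption:

import static org.apache.hadoop.util.Time.monotonicNow;

public class ElapsedMillisSketch {
  private Long startTime;

  public void startOp() {
    // Milliseconds from a monotonic clock; safe against wall-clock jumps.
    startTime = monotonicNow();
  }

  /** Returns elapsed milliseconds, or -1 if startOp() was never called. */
  public long elapsedMs() {
    if (startTime != null && startTime > 0) {
      return monotonicNow() - startTime;
    }
    return -1;
  }
}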



[13/50] [abbrv] hadoop git commit: HDFS-13861. RBF: Illegal Router Admin command leads to printing usage for all commands. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13861. RBF: Illegal Router Admin command leads to printing usage for all 
commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb9d371a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb9d371a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb9d371a

Branch: refs/heads/HDFS-12090
Commit: cb9d371ae2cda1624fc83316ddc09de37d8d0bd3
Parents: fd089ca
Author: Brahma Reddy Battula 
Authored: Wed Aug 29 00:29:05 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Aug 29 00:29:05 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 92 +---
 .../federation/router/TestRouterAdminCLI.java   | 68 +++
 2 files changed, 130 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb9d371a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index f88d0a6..46be373 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -94,25 +94,58 @@ public class RouterAdmin extends Configured implements Tool 
{
* Print the usage message.
*/
   public void printUsage() {
-String usage = "Federation Admin Tools:\n"
-+ "\t[-add"
-+ "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
-+ "-owner  -group  -mode ]\n"
-+ "\t[-update
"
-+ "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
-+ "-owner  -group  -mode ]\n"
-+ "\t[-rm ]\n"
-+ "\t[-ls ]\n"
-+ "\t[-setQuota  -nsQuota  -ssQuota "
-+ "]\n"
-+ "\t[-clrQuota ]\n"
-+ "\t[-safemode enter | leave | get]\n"
-+ "\t[-nameservice enable | disable ]\n"
-+ "\t[-getDisabledNameservices]\n";
+String usage = getUsage(null);
+System.out.println(usage);
+  }
 
+  private void printUsage(String cmd) {
+String usage = getUsage(cmd);
 System.out.println(usage);
   }
 
+  private String getUsage(String cmd) {
+if (cmd == null) {
+  String[] commands =
+  {"-add", "-update", "-rm", "-ls", "-setQuota", "-clrQuota",
+  "-safemode", "-nameservice", "-getDisabledNameservices"};
+  StringBuilder usage = new StringBuilder();
+  usage.append("Usage: hdfs routeradmin :\n");
+  for (int i = 0; i < commands.length; i++) {
+usage.append(getUsage(commands[i]));
+if (i + 1 < commands.length) {
+  usage.append("\n");
+}
+  }
+  return usage.toString();
+}
+if (cmd.equals("-add")) {
+  return "\t[-add
"
+  + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+  + "-owner  -group  -mode ]";
+} else if (cmd.equals("-update")) {
+  return "\t[-update   "
+  + " "
+  + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+  + "-owner  -group  -mode ]";
+} else if (cmd.equals("-rm")) {
+  return "\t[-rm ]";
+} else if (cmd.equals("-ls")) {
+  return "\t[-ls ]";
+} else if (cmd.equals("-setQuota")) {
+  return "\t[-setQuota  -nsQuota  -ssQuota "
+  + "]";
+} else if (cmd.equals("-clrQuota")) {
+  return "\t[-clrQuota ]";
+} else if (cmd.equals("-safemode")) {
+  return "\t[-safemode enter | leave | get]";
+} else if (cmd.equals("-nameservice")) {
+  return "\t[-nameservice enable | disable ]";
+} else if (cmd.equals("-getDisabledNameservices")) {
+  return "\t[-getDisabledNameservices]";
+}
+return getUsage(null);
+  }
+
   @Override
   public int run(String[] argv) throws Exception {
 if (argv.length < 1) {
@@ -129,43 +162,43 @@ public class RouterAdmin extends Configured implements 
Tool {
 if ("-add".equals(cmd)) {
   if (argv.length < 4) {
 System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage();
+printUsage(cmd);
 return exitCode;
   }
 } else if ("-update".equals(cmd)) {
   if (argv.length < 4) {
 System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage();
+printUsage(cmd);
 return exitCode;
   }
-} else if ("-rm".equalsIgnoreCase(cmd)) {
+} else if ("-rm".equals(cmd)) {
   if (argv.length < 2) {
 System.err.println("Not enough parameters 

[16/50] [abbrv] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 3b4426c..b652b6b 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -51,9 +51,9 @@ public class TestSCMContainerPlacementRandom {
 .thenReturn(new ArrayList<>(datanodes));
 
 when(mockNodeManager.getNodeStat(anyObject()))
-.thenReturn(new SCMNodeMetric(100l, 0l, 100l));
+.thenReturn(new SCMNodeMetric(100L, 0L, 100L));
 when(mockNodeManager.getNodeStat(datanodes.get(2)))
-.thenReturn(new SCMNodeMetric(100l, 90l, 10l));
+.thenReturn(new SCMNodeMetric(100L, 90L, 10L));
 
 SCMContainerPlacementRandom scmContainerPlacementRandom =
 new SCMContainerPlacementRandom(mockNodeManager, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index fa87706..da05913 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -132,7 +131,7 @@ public class TestReplicationManager {
   //WHEN
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);
@@ -159,10 +158,8 @@ public class TestReplicationManager {
   leaseManager.start();
 
   ReplicationManager replicationManager =
-  new ReplicationManager(containerPlacementPolicy, 
containerStateManager,
-
-
-  queue, leaseManager) {
+  new ReplicationManager(containerPlacementPolicy,
+  containerStateManager, queue, leaseManager) {
 @Override
 protected List getCurrentReplicas(
 ReplicationRequest request) throws IOException {
@@ -172,7 +169,7 @@ public class TestReplicationManager {
   replicationManager.start();
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
index a593718..9dd4fe3 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
@@ -92,8 +92,8 @@ public class TestReplicationQueue {
 1, replicationQueue.size());
 Assert.assertEquals(temp, msg5);
 
-// Message 2 should be ordered before message 5 as both have same 
replication
-// number but message 2 has earlier timestamp.
+// Message 2 should be ordered before message 5 as both have same
+// replication number but message 2 has earlier timestamp.
 temp = replicationQueue.take();
 Assert.assertEquals("Should have 0 objects",
 replicationQueue.size(), 0);
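
Much of this hunk is the checkstyle rule about long literals: a lowercase 'l'
suffix is easily misread as the digit one. A two-line illustration:

public class LongLiteralSketch {
  public static void main(String[] args) {
    long lowercase = 100l;  // reads like "1001" at a glance
    long uppercase = 100L;  // unambiguous; what checkstyle's UpperEll check wants
    System.out.println(lowercase == uppercase);  // true - same value either way
  }
}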


[04/50] [abbrv] hadoop git commit: HADOOP-15699. Fix some of testContainerManager failures in Windows. Contributed by Botong Huang.

2018-09-04 Thread ehiggs
HADOOP-15699. Fix some of testContainerManager failures in Windows. Contributed 
by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/602d1384
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/602d1384
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/602d1384

Branch: refs/heads/HDFS-12090
Commit: 602d13844a8d4c7b08ce185da01fde098ff8b9a6
Parents: 05b2bbe
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 27 12:25:46 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 27 12:25:46 2018 -0700

--
 .../containermanager/TestContainerManager.java| 18 ++
 1 file changed, 6 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/602d1384/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index ee5259f..d28340b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -320,9 +320,8 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 
   @Test (timeout = 1L)
   public void testAuxPathHandler() throws Exception {
-File testDir = GenericTestUtils.getTestDir(GenericTestUtils.getTestDir(
-TestContainerManager.class.getSimpleName() + "LocDir").
-getAbsolutePath());
+File testDir = GenericTestUtils
+.getTestDir(TestContainerManager.class.getSimpleName() + "LocDir");
 testDir.mkdirs();
 File testFile = new File(testDir, "test");
 testFile.createNewFile();
@@ -1977,15 +1976,11 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 Signal signal = ContainerLaunch.translateCommandToSignal(command);
 containerManager.start();
 
-File scriptFile = new File(tmpDir, "scriptFile.sh");
+File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
 PrintWriter fileWriter = new PrintWriter(scriptFile);
 File processStartFile =
 new File(tmpDir, "start_file.txt").getAbsoluteFile();
-fileWriter.write("\numask 0"); // So that start file is readable by the 
test
-fileWriter.write("\necho Hello World! > " + processStartFile);
-fileWriter.write("\necho $$ >> " + processStartFile);
-fileWriter.write("\nexec sleep 1000s");
-fileWriter.close();
+writeScriptFile(fileWriter, "Hello world!", processStartFile, null, false);
 
 ContainerLaunchContext containerLaunchContext =
 recordFactory.newRecordInstance(ContainerLaunchContext.class);
@@ -2008,9 +2003,8 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 new HashMap();
 localResources.put(destinationFile, rsrc_alpha);
 containerLaunchContext.setLocalResources(localResources);
-List commands = new ArrayList<>();
-commands.add("/bin/bash");
-commands.add(scriptFile.getAbsolutePath());
+List commands =
+Arrays.asList(Shell.getRunScriptCommand(scriptFile));
 containerLaunchContext.setCommands(commands);
 StartContainerRequest scRequest =
 StartContainerRequest.newInstance(
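
The Windows fix above leans on two helpers in org.apache.hadoop.util.Shell
rather than hard-coded ".sh" names and "/bin/bash". A small standalone sketch
of the same idea; the script contents here are placeholders.

import java.io.File;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.util.Shell;

public class PortableScriptSketch {
  public static void main(String[] args) throws Exception {
    File tmpDir = new File(System.getProperty("java.io.tmpdir"));
    // Picks ".cmd" on Windows and ".sh" elsewhere instead of hard-coding ".sh".
    File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
    try (PrintWriter writer = new PrintWriter(scriptFile)) {
      writer.println(Shell.WINDOWS ? "@echo Hello World!" : "echo Hello World!");
    }
    // A cmd invocation on Windows, a bash invocation elsewhere - no "/bin/bash".
    List<String> command = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
    System.out.println(command);
  }
}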





[38/50] [abbrv] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
deleted file mode 100644
index 722c1a5..000
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.ResultCode;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
-import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-
-import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-/**
- * This class tests the CLI of SCM.
- */
-@Ignore ("Needs to be fixed for new SCM and Storage design")
-public class TestSCMCli {
-  private static SCMCLI cli;
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-  storageContainerLocationClient;
-
-  private static StorageContainerManager scm;
-  private static ScmClient containerOperationClient;
-
-  private static ByteArrayOutputStream outContent;
-  private static PrintStream outStream;
-  private static ByteArrayOutputStream errContent;
-  private static PrintStream errStream;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(3);
-
-  @BeforeClass
-  public static void setup() throws Exception {
-conf = new OzoneConfiguration();
-cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-cluster.waitForClusterToBeReady();
-xceiverClientManager = new XceiverClientManager(conf);
-storageContainerLocationClient =
-cluster.getStorageContainerLocationClient();
-containerOperationClient = new ContainerOperationClient(
-storageContainerLocationClient, new XceiverClientManager(conf));
-outContent = new ByteArrayOutputStream();
-outStream = new PrintStream(outContent);
-errContent = new ByteArrayOutputStream();
-errStream = new 

[17/50] [abbrv] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-09-04 Thread ehiggs
HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix 
checkstyle in ContainerTestHelper, GenericTestUtils
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5629d54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5629d54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5629d54

Branch: refs/heads/HDFS-12090
Commit: c5629d546d64091a14560df488a7f797a150337e
Parents: 33f42ef
Author: Anu Engineer 
Authored: Tue Aug 28 14:06:19 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 14:06:19 2018 -0700

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   |  6 +--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  6 +--
 .../hadoop/hdds/scm/XceiverClientManager.java   |  2 +-
 .../hdds/scm/storage/ChunkInputStream.java  |  7 +--
 .../hdds/scm/storage/ChunkOutputStream.java |  4 +-
 .../org/apache/hadoop/hdds/client/BlockID.java  |  5 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  2 -
 .../common/helpers/AllocatedBlock.java  |  4 +-
 .../container/common/helpers/ContainerInfo.java | 12 ++---
 .../common/helpers/ContainerWithPipeline.java   |  7 +--
 .../scm/container/common/helpers/Pipeline.java  | 11 ++---
 .../StorageContainerLocationProtocol.java   |  6 ++-
 ...rLocationProtocolClientSideTranslatorPB.java | 21 
 .../scm/storage/ContainerProtocolCalls.java |  6 +--
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 --
 .../ozone/container/common/helpers/KeyData.java |  8 ++--
 .../apache/hadoop/utils/HddsVersionInfo.java|  6 ++-
 .../apache/hadoop/utils/TestMetadataStore.java  |  1 -
 .../hadoop/ozone/HddsDatanodeService.java   |  3 +-
 .../common/helpers/ContainerUtils.java  | 22 -
 .../container/common/impl/ContainerSet.java |  2 +-
 .../common/impl/OpenContainerBlockMap.java  | 19 
 .../server/ratis/XceiverServerRatis.java|  6 +--
 .../keyvalue/interfaces/KeyManager.java |  4 +-
 .../ozone/protocol/commands/CommandStatus.java  | 16 +++
 .../ozone/container/common/ScmTestMock.java |  6 ++-
 .../common/interfaces/TestHandler.java  |  7 ---
 .../endpoint/TestHeartbeatEndpointTask.java |  2 -
 .../TestRoundRobinVolumeChoosingPolicy.java |  5 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  3 +-
 .../hadoop/hdds/server/events/EventWatcher.java |  6 ++-
 .../hdds/server/events/TestEventQueue.java  |  3 --
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 18 +++
 .../hdds/scm/block/DeletedBlockLogImpl.java |  3 +-
 .../hdds/scm/block/SCMBlockDeletingService.java |  4 +-
 .../container/CloseContainerEventHandler.java   |  4 +-
 .../hdds/scm/container/ContainerMapping.java|  4 +-
 .../scm/container/ContainerStateManager.java|  7 +--
 .../replication/ReplicationManager.java |  2 +-
 .../scm/container/states/ContainerStateMap.java |  2 +-
 .../hdds/scm/node/states/Node2ContainerMap.java |  4 +-
 .../scm/node/states/NodeNotFoundException.java  |  2 -
 .../hdds/scm/node/states/ReportResult.java  |  3 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java| 50 +---
 .../hdds/scm/pipelines/PipelineManager.java |  6 +--
 .../hdds/scm/pipelines/PipelineSelector.java|  7 +--
 .../scm/server/SCMClientProtocolServer.java |  3 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  8 ++--
 .../hadoop/hdds/scm/block/TestBlockManager.java |  1 -
 .../hdds/scm/block/TestDeletedBlockLog.java |  7 +--
 .../command/TestCommandStatusReportHandler.java | 22 -
 .../TestCloseContainerEventHandler.java |  1 -
 .../scm/container/TestContainerMapping.java |  7 +--
 .../container/TestContainerReportHandler.java   |  2 +-
 .../TestSCMContainerPlacementCapacity.java  |  8 ++--
 .../TestSCMContainerPlacementRandom.java|  4 +-
 .../replication/TestReplicationManager.java | 11 ++---
 .../replication/TestReplicationQueue.java   |  4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  5 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  3 +-
 .../hdds/scm/node/TestNodeReportHandler.java|  3 +-
 .../ozone/container/common/TestEndPoint.java|  9 ++--
 .../placement/TestContainerPlacement.java   |  6 ++-
 .../apache/hadoop/ozone/client/ObjectStore.java |  7 ++-
 .../hdds/scm/pipeline/TestPipelineClose.java|  4 --
 .../apache/hadoop/ozone/RatisTestHelper.java|  8 ++--
 .../TestStorageContainerManagerHelper.java  |  2 -
 .../rpc/TestCloseContainerHandlingByClient.java |  3 +-
 .../ozone/container/ContainerTestHelper.java|  2 -
 .../common/impl/TestContainerPersistence.java   |  1 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |  3 +-
 .../container/ozoneimpl/TestRatisManager.java   |  4 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |  2 -
 

hadoop git commit: HADOOP-15645. ITestS3GuardToolLocal.testDiffCommand fails if bucket has per-bucket binding to DDB. Contributed by Steve Loughran.

2018-08-13 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/trunk 475bff6e8 -> a13929ddc


HADOOP-15645. ITestS3GuardToolLocal.testDiffCommand fails if bucket has 
per-bucket binding to DDB. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a13929dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a13929dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a13929dd

Branch: refs/heads/trunk
Commit: a13929ddcb3b90044350ae1c23a1150e8b4b975b
Parents: 475bff6
Author: Ewan Higgs 
Authored: Mon Aug 13 12:57:45 2018 +0200
Committer: Ewan Higgs 
Committed: Mon Aug 13 12:57:45 2018 +0200

--
 .../fs/s3a/s3guard/AbstractS3GuardToolTestBase.java| 13 ++---
 .../fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java   |  7 ---
 .../hadoop/fs/s3a/s3guard/ITestS3GuardToolLocal.java   | 11 +++
 3 files changed, 25 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a13929dd/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 9185fc5..96aac15 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -56,6 +56,7 @@ import static 
org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
 import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
+import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
@@ -142,12 +143,14 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   public void setup() throws Exception {
 super.setup();
 S3ATestUtils.assumeS3GuardState(true, getConfiguration());
-ms = getFileSystem().getMetadataStore();
+S3AFileSystem fs = getFileSystem();
+ms = fs.getMetadataStore();
 
 // Also create a "raw" fs without any MetadataStore configured
 Configuration conf = new Configuration(getConfiguration());
-URI fsUri = getFileSystem().getUri();
+clearBucketOption(conf, fs.getBucket(), S3_METADATA_STORE_IMPL);
 conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+URI fsUri = fs.getUri();
 S3AUtils.setBucketOption(conf,fsUri.getHost(),
 METADATASTORE_AUTHORITATIVE,
 S3GUARD_METASTORE_NULL);
@@ -394,13 +397,17 @@ public abstract class AbstractS3GuardToolTestBase extends 
AbstractS3ATestBase {
   }
 
   @Test
-  public void testDiffCommand() throws Exception {
+  public void
+  testDiffCommand() throws Exception {
 S3AFileSystem fs = getFileSystem();
 ms = getMetadataStore();
 Set filesOnS3 = new HashSet<>(); // files on S3.
 Set filesOnMS = new HashSet<>(); // files on metadata store.
 
 Path testPath = path("test-diff");
+// clean up through the store and behind it.
+fs.delete(testPath, true);
+rawFs.delete(testPath, true);
 mkdirs(testPath, true, true);
 
 Path msOnlyPath = new Path(testPath, "ms_only");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a13929dd/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
index 1a59bf1..c96cbd0 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java
@@ -50,9 +50,10 @@ public class ITestS3GuardToolDynamoDB extends 
AbstractS3GuardToolTestBase {
   @Override
   public void setup() throws Exception {
 super.setup();
-Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
-getConfiguration().get(Constants.S3_METADATA_STORE_IMPL).equals(
-Constants.S3GUARD_METASTORE_DYNAMO));
+MetadataStore ms = 
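
The interesting part of the fix is that S3A per-bucket options
(fs.s3a.bucket.BUCKET.*) override the base keys, so a test that wants the null
metadata store has to clear the per-bucket binding first. A rough sketch of
that interaction; the bucket name and values below are placeholders, and only
the clearBucketOption() call mirrors the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.S3AUtils;

public class ClearBucketOptionSketch {
  public static void main(String[] args) {
    String bucket = "example-bucket";             // placeholder bucket name
    String key = "fs.s3a.metadatastore.impl";     // base S3Guard store key
    String perBucketKey = "fs.s3a.bucket." + bucket + ".metadatastore.impl";

    Configuration conf = new Configuration(false);
    // Simulate a site file that binds this bucket to DynamoDB for S3Guard.
    conf.set(perBucketKey,
        "org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore");

    // Setting only the base key leaves the per-bucket override in place, and
    // the override wins when bucket options are propagated for this bucket.
    conf.set(key, "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore");

    // The fix: drop the per-bucket override before relying on the base key.
    S3AUtils.clearBucketOption(conf, bucket, key);
    System.out.println(conf.get(perBucketKey));   // null once cleared
  }
}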

[16/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 
upgrade. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64901abd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64901abd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64901abd

Branch: refs/heads/HDFS-12090
Commit: 64901abdfac72c22f6b002ff45b1107174e82207
Parents: 2ec97ab
Author: Sunil G 
Authored: Wed Aug 8 19:43:29 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 8 19:43:29 2018 +0530

--
 LICENSE.txt |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java   |   4 +-
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 +++
 .../webapps/static/dt-1.10.7/css/jui-dt.css | 322 +++
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 0 -> 27490 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 0 -> 612 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg| Bin 0 -> 807 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 0 -> 894 bytes
 .../dt-1.10.7/images/forward_disabled.jpg   | Bin 0 -> 635 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 0 -> 852 bytes
 .../static/dt-1.10.7/images/sort_asc.png| Bin 0 -> 263 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png  | Bin 0 -> 252 bytes
 .../static/dt-1.10.7/images/sort_both.png   | Bin 0 -> 282 bytes
 .../static/dt-1.10.7/images/sort_desc.png   | Bin 0 -> 260 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png | Bin 0 -> 251 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js   | 160 ++
 .../webapps/static/dt-1.9.4/css/demo_page.css   | 110 
 .../webapps/static/dt-1.9.4/css/demo_table.css  | 538 ---
 .../webapps/static/dt-1.9.4/css/jui-dt.css  | 322 ---
 .../static/dt-1.9.4/images/Sorting icons.psd| Bin 27490 -> 0 bytes
 .../static/dt-1.9.4/images/back_disabled.jpg| Bin 612 -> 0 bytes
 .../static/dt-1.9.4/images/back_enabled.jpg | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/favicon.ico  | Bin 894 -> 0 bytes
 .../static/dt-1.9.4/images/forward_disabled.jpg | Bin 635 -> 0 bytes
 .../static/dt-1.9.4/images/forward_enabled.jpg  | Bin 852 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/sort_asc.png | Bin 263 -> 0 bytes
 .../dt-1.9.4/images/sort_asc_disabled.png   | Bin 252 -> 0 bytes
 .../static/dt-1.9.4/images/sort_both.png| Bin 282 -> 0 bytes
 .../static/dt-1.9.4/images/sort_desc.png| Bin 260 -> 0 bytes
 .../dt-1.9.4/images/sort_desc_disabled.png  | Bin 251 -> 0 bytes
 .../static/dt-1.9.4/js/jquery.dataTables.min.js | 157 --
 33 files changed, 1137 insertions(+), 1134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64901abd/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index f8de86a..393ed0e 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
 

 Copyright (C) 2008-2016, SpryMedia Ltd.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64901abd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index eddcbaa..685eac9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,10 +237,10 @@
 <exclude>src/main/resources/webapps/test/.keep</exclude>
 <exclude>src/main/resources/webapps/proxy/.keep</exclude>
 <exclude>src/main/resources/webapps/node/.keep</exclude>
-<exclude>src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css</exclude>
-<exclude>src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css</exclude>
-<exclude>src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd</exclude>
-<exclude>src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js</exclude>
+<exclude>src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css</exclude>
+

[46/50] [abbrv] hadoop git commit: HDDS-245. Handle ContainerReports in the SCM. Contributed by Elek Marton.

2018-08-10 Thread ehiggs
HDDS-245. Handle ContainerReports in the SCM. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5dbbfe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5dbbfe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5dbbfe2

Branch: refs/heads/HDFS-12090
Commit: f5dbbfe2e97a8c11e3df0f95ae4a493f11fdbc28
Parents: b2517dd
Author: Xiaoyu Yao 
Authored: Thu Aug 9 16:55:13 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Aug 9 16:55:39 2018 -0700

--
 .../hadoop/hdds/server/events/EventQueue.java   |   7 +-
 .../scm/container/ContainerReportHandler.java   | 107 +-
 .../replication/ReplicationActivityStatus.java  |  86 +
 .../ReplicationActivityStatusMXBean.java|  28 ++
 .../replication/ReplicationRequest.java |  28 +-
 .../hadoop/hdds/scm/events/SCMEvents.java   |   9 +
 .../hdds/scm/node/states/Node2ContainerMap.java |  10 +-
 .../hdds/scm/node/states/ReportResult.java  |  18 +-
 .../scm/server/StorageContainerManager.java |  27 +-
 .../container/TestContainerReportHandler.java   | 228 +
 .../scm/node/states/Node2ContainerMapTest.java  | 308 -
 .../scm/node/states/TestNode2ContainerMap.java  | 328 +++
 12 files changed, 859 insertions(+), 325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index f93c54b..b2b0df2 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -147,7 +147,12 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
 
 for (EventHandler handler : executorAndHandlers.getValue()) {
   queuedCount.incrementAndGet();
-
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Delivering event {} to executor/handler {}: {}",
+event.getName(),
+executorAndHandlers.getKey().getName(),
+payload);
+  }
   executorAndHandlers.getKey()
   .onMessage(handler, payload, this);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5dbbfe2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 486162e..b26eed2 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -18,30 +18,131 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
+import java.io.IOException;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.replication
+.ReplicationActivityStatus;
+import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
+import org.apache.hadoop.hdds.scm.node.states.ReportResult;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Handles container reports from datanode.
  */
 public class ContainerReportHandler implements
 EventHandler {
 
-  private final Mapping containerMapping;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContainerReportHandler.class);
+
   private final Node2ContainerMap node2ContainerMap;
 
+  private final Mapping containerMapping;
+
+  private ContainerStateManager containerStateManager;
+
+  private 
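
One small but typical detail in the EventQueue change: the delivery trace is
wrapped in LOG.isDebugEnabled(). With parameterized logging the message is
only formatted when the level is enabled, so the guard mainly keeps the
varargs allocation and any argument computation off the hot dispatch path. A
tiny sketch, assuming nothing about the real executor or handler types:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedDebugLogSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedDebugLogSketch.class);

  void deliver(String eventName, String executorName, Object payload) {
    // Guarding keeps the dispatch path free of logging work when debug is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Delivering event {} to executor/handler {}: {}",
          eventName, executorName, payload);
    }
    // ... hand the payload to the handler here ...
  }
}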

[49/50] [abbrv] hadoop git commit: HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. Original patch contributed by Ewan Higgs. Followup work and fixed contributed by Virajith

2018-08-10 Thread ehiggs
HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. 
Original patch contributed by Ewan Higgs. Followup work and fixed contributed 
by Virajith Jalaparthi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c52ff5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c52ff5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c52ff5d

Branch: refs/heads/HDFS-12090
Commit: 2c52ff5d61dc3c8d6b71e69c66bb5e105dbc7b46
Parents: 0a71bf1
Author: Ewan Higgs 
Authored: Mon Jul 23 13:14:04 2018 +0200
Committer: Ewan Higgs 
Committed: Fri Aug 10 13:34:15 2018 +0200

--
 .../BlockSyncTaskExecutionFeedback.java |  67 ++
 .../protocol/SyncTaskExecutionOutcome.java  |  25 +++
 .../protocol/SyncTaskExecutionResult.java   |  46 
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 208 ++-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/datanode/BPServiceActor.java|   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../hdfs/server/protocol/BlockSyncTask.java |  83 
 .../protocol/BulkSyncTaskExecutionFeedback.java |  36 
 .../hdfs/server/protocol/DatanodeProtocol.java  |  22 +-
 .../hdfs/server/protocol/SyncCommand.java   |  39 
 .../src/main/proto/DatanodeProtocol.proto   |  88 +++-
 .../blockmanagement/TestDatanodeManager.java|   2 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../datanode/InternalDataNodeTestUtils.java |   3 +-
 .../server/datanode/TestBPOfferService.java |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   9 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 27 files changed, 662 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c52ff5d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 000..2e5393e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+  SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+  String syncMountId) {
+this.syncTaskId = syncTaskId;
+this.outcome = outcome;
+this.result = result;
+this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+  UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback 

[34/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
deleted file mode 100644
index 61acb9b..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * File:jquery.dataTables.min.js
- * Version: 1.9.4
- * Author:  Allan Jardine (www.sprymedia.co.uk)
- * Info:www.datatables.net
- *
- * Copyright 2008-2012 Allan Jardine, all rights reserved.
- *
- * This source file is free software, under either the GPL v2 license or a
- * BSD style license, available at:
- *   http://datatables.net/license_gpl2
- *   http://datatables.net/license_bsd
- *
- * This source file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
- */
-[... remainder of the minified DataTables 1.9.4 source (jquery.dataTables.min.js) elided ...]

[20/50] [abbrv] hadoop git commit: Revert "YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB."

2018-08-10 Thread ehiggs
Revert "YARN-8633. Update DataTables version in yarn-common in line with JQuery 
3 upgrade. Contributed by Akhil PB."

This reverts commit 64901abdfac72c22f6b002ff45b1107174e82207.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b898c17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b898c17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b898c17

Branch: refs/heads/HDFS-12090
Commit: 5b898c176ffb41e6fa3c605feb8ed3fcb60a5fe8
Parents: 64901ab
Author: Sunil G 
Authored: Wed Aug 8 19:48:49 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 8 19:48:49 2018 +0530

--
 LICENSE.txt |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java   |   4 +-
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 ---
 .../webapps/static/dt-1.10.7/css/jui-dt.css | 322 ---
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 27490 -> 0 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 612 -> 0 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg| Bin 807 -> 0 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 894 -> 0 bytes
 .../dt-1.10.7/images/forward_disabled.jpg   | Bin 635 -> 0 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 852 -> 0 bytes
 .../static/dt-1.10.7/images/sort_asc.png| Bin 263 -> 0 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png  | Bin 252 -> 0 bytes
 .../static/dt-1.10.7/images/sort_both.png   | Bin 282 -> 0 bytes
 .../static/dt-1.10.7/images/sort_desc.png   | Bin 260 -> 0 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png | Bin 251 -> 0 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js   | 160 --
 .../webapps/static/dt-1.9.4/css/demo_page.css   | 110 
 .../webapps/static/dt-1.9.4/css/demo_table.css  | 538 +++
 .../webapps/static/dt-1.9.4/css/jui-dt.css  | 322 +++
 .../static/dt-1.9.4/images/Sorting icons.psd| Bin 0 -> 27490 bytes
 .../static/dt-1.9.4/images/back_disabled.jpg| Bin 0 -> 612 bytes
 .../static/dt-1.9.4/images/back_enabled.jpg | Bin 0 -> 807 bytes
 .../webapps/static/dt-1.9.4/images/favicon.ico  | Bin 0 -> 894 bytes
 .../static/dt-1.9.4/images/forward_disabled.jpg | Bin 0 -> 635 bytes
 .../static/dt-1.9.4/images/forward_enabled.jpg  | Bin 0 -> 852 bytes
 .../webapps/static/dt-1.9.4/images/sort_asc.png | Bin 0 -> 263 bytes
 .../dt-1.9.4/images/sort_asc_disabled.png   | Bin 0 -> 252 bytes
 .../static/dt-1.9.4/images/sort_both.png| Bin 0 -> 282 bytes
 .../static/dt-1.9.4/images/sort_desc.png| Bin 0 -> 260 bytes
 .../dt-1.9.4/images/sort_desc_disabled.png  | Bin 0 -> 251 bytes
 .../static/dt-1.9.4/js/jquery.dataTables.min.js | 157 ++
 33 files changed, 1134 insertions(+), 1137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b898c17/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 393ed0e..f8de86a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
 

 Copyright (C) 2008-2016, SpryMedia Ltd.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b898c17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 685eac9..eddcbaa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,10 +237,10 @@
 src/main/resources/webapps/test/.keep
 src/main/resources/webapps/proxy/.keep
 src/main/resources/webapps/node/.keep
-
src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
-
src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
-
src/main/resources/webapps/static/dt-1.10.7/images/Sorting 
icons.psd
-
src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
+

[15/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64901abd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
new file mode 100644
index 000..85dd817
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
@@ -0,0 +1,160 @@
+/*! DataTables 1.10.7
+ * ©2008-2015 SpryMedia Ltd - datatables.net/license
+ */
+[... remainder of the minified DataTables 1.10.7 source (jquery.dataTables.min.js) elided ...]

[07/50] [abbrv] hadoop git commit: HDDS-124. Validate all required configs needed for ozone-site.xml and reflect the changes in ozone-default.xml Contributed by Dinesh Chitlangia.

2018-08-10 Thread ehiggs
HDDS-124. Validate all required configs needed for ozone-site.xml and reflect 
the changes in ozone-default.xml
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38784f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38784f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38784f95

Branch: refs/heads/HDFS-12090
Commit: 38784f95fecd02c2f94344c1967cccf0799ec074
Parents: 0f8cb12
Author: Anu Engineer 
Authored: Tue Aug 7 16:40:33 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 7 16:40:33 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38784f95/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5099bbe..568e38d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -815,7 +815,7 @@
   
 ozone.scm.names
 
-OZONE
+OZONE, REQUIRED
 
   The value of this property is a set of DNS | DNS:PORT | IP
   Address | IP:PORT. Written as a comma separated string. e.g. scm1,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: HDDS-344. Remove multibyte characters from OzoneAcl. Contributed by Takanobu Asanuma.

2018-08-10 Thread ehiggs
HDDS-344. Remove multibyte characters from OzoneAcl. Contributed by Takanobu 
Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/778369ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/778369ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/778369ea

Branch: refs/heads/HDFS-12090
Commit: 778369ea0204e75ce86fc7da3321b046f8139d9a
Parents: 3d96bc6
Author: Márton Elek 
Authored: Thu Aug 9 14:23:41 2018 +0200
Committer: Márton Elek 
Committed: Thu Aug 9 14:26:37 2018 +0200

--
 .../src/main/java/org/apache/hadoop/ozone/OzoneAcl.java  | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/778369ea/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
index ff0ac4e..1827b23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -25,9 +25,11 @@ import java.util.Objects;
  * OzoneACL classes define bucket ACLs used in OZONE.
  *
  * ACLs in Ozone follow this pattern.
- * • user:name:rw
- * • group:name:rw
- * • world::rw
+ * 
+ * user:name:rw
+ * group:name:rw
+ * world::rw
+ * 
  */
 public class OzoneAcl {
   private OzoneACLType type;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: Make 3.1.1 awared by other branches - adding missing files

2018-08-10 Thread ehiggs
Make 3.1.1 awared by other branches - adding missing files


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3214cd75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3214cd75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3214cd75

Branch: refs/heads/HDFS-12090
Commit: 3214cd75acd0474373951870e1ba2ec11833a3da
Parents: 49c6876
Author: Wangda Tan 
Authored: Wed Aug 8 13:05:24 2018 -0700
Committer: Wangda Tan 
Committed: Wed Aug 8 13:05:24 2018 -0700

--
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.1.xml   | 3327 ++
 1 file changed, 3327 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: HADOOP-15583. Stabilize S3A Assumed Role support. Contributed by Steve Loughran.

2018-08-10 Thread ehiggs
HADOOP-15583. Stabilize S3A Assumed Role support.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da9a39ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da9a39ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da9a39ee

Branch: refs/heads/HDFS-12090
Commit: da9a39eed138210de29b59b90c449b28da1c04f9
Parents: d81cd36
Author: Steve Loughran 
Authored: Wed Aug 8 22:57:10 2018 -0700
Committer: Steve Loughran 
Committed: Wed Aug 8 22:57:24 2018 -0700

--
 .../src/main/resources/core-default.xml |  18 +-
 .../fs/s3a/AWSCredentialProviderList.java   | 101 ++--
 .../org/apache/hadoop/fs/s3a/Constants.java |  19 +-
 .../hadoop/fs/s3a/DefaultS3ClientFactory.java   | 190 --
 .../fs/s3a/InconsistentAmazonS3Client.java  |  10 +
 .../fs/s3a/InconsistentS3ClientFactory.java |  11 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  35 ++-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java|   4 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 245 +--
 .../apache/hadoop/fs/s3a/S3ClientFactory.java   |   7 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java |  78 +-
 .../fs/s3a/auth/NoAuthWithAWSException.java |  37 +++
 .../apache/hadoop/fs/s3a/auth/RoleModel.java|   8 +
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 143 +--
 .../hadoop/fs/s3a/auth/STSClientFactory.java|  78 ++
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   |  18 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |  62 -
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 191 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |   6 +-
 .../hadoop/fs/s3a/ITestS3AConfiguration.java| 117 -
 .../fs/s3a/ITestS3ATemporaryCredentials.java|  71 +++---
 .../fs/s3a/ITestS3GuardListConsistency.java |  68 +++--
 .../hadoop/fs/s3a/ITestS3GuardWriteBack.java|  57 +++--
 .../hadoop/fs/s3a/MockS3ClientFactory.java  |   6 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  76 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 151 ++--
 .../auth/ITestAssumedRoleCommitOperations.java  |   5 +-
 .../hadoop/fs/s3a/auth/RoleTestUtils.java   |  24 +-
 .../s3guard/AbstractS3GuardToolTestBase.java|   7 +-
 .../s3a/s3guard/ITestS3GuardConcurrentOps.java  | 147 ++-
 30 files changed, 1461 insertions(+), 529 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 75acf48..29c2bc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1033,7 +1033,19 @@
   fs.s3a.assumed.role.sts.endpoint
   
   
-AWS Simple Token Service Endpoint. If unset, uses the default endpoint.
+AWS Security Token Service Endpoint.
+If unset, uses the default endpoint.
+Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  
+
+
+
+  fs.s3a.assumed.role.sts.endpoint.region
+  us-west-1
+  
+AWS Security Token Service Endpoint's region;
+Needed if fs.s3a.assumed.role.sts.endpoint points to an endpoint
+other than the default one and the v4 signature is used.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   
 
@@ -1058,7 +1070,9 @@
 
   fs.s3a.connection.ssl.enabled
   true
-  Enables or disables SSL connections to S3.
+  Enables or disables SSL connections to AWS services.
+Also sets the default port to use for the s3a proxy settings,
+when not explicitly set in fs.s3a.proxy.port.
 
 
 

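An illustrative sketch (not part of the patch above): the two assumed-role STS
properties documented above can also be set programmatically. The endpoint and
region values below are examples only, not defaults.

import org.apache.hadoop.conf.Configuration;

class AssumedRoleStsConfigExample {
  static Configuration withCustomStsEndpoint() {
    Configuration conf = new Configuration();
    // A non-default STS endpoint must be paired with its signing region,
    // as the new description above requires.
    conf.set("fs.s3a.assumed.role.sts.endpoint", "sts.eu-west-1.amazonaws.com");
    conf.set("fs.s3a.assumed.role.sts.endpoint.region", "eu-west-1");
    return conf;
  }
}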
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
index 10201f0..f9052fa 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
@@ -18,25 +18,29 @@
 
 package org.apache.hadoop.fs.s3a;
 
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;

[35/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
deleted file mode 100644
index 3bc0433..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- *  File: demo_table.css
- *  CVS:  $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:   Allan Jardine
- *  Created:  Tue May 12 06:47:22 BST 2009
- *  Modified: $Date$ by $Author$
- *  Language: CSS
- *  Project:  DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the 
standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify 
these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to 
ensure that there is
- * no conflict between the two pagination types. If you want to use 
full_numbers pagination
- * ensure that you either have "example_alt_pagination" as a body class 
name, or better yet,
- * modify that selector.
- *   Note that the path used for Images is relative. All images are by default 
located in
- * ../images/ - relative to this CSS file.
- */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-   position: relative;
-   min-height: 302px;
-   clear: both;
-   _height: 302px;
-   zoom: 1; /* Feeling sorry for IE */
-}
-
-.dataTables_processing {
-   position: absolute;
-   top: 50%;
-   left: 50%;
-   width: 250px;
-   height: 30px;
-   margin-left: -125px;
-   margin-top: -15px;
-   padding: 14px 0 2px 0;
-   border: 1px solid #ddd;
-   text-align: center;
-   color: #999;
-   font-size: 14px;
-   background-color: white;
-}
-
-.dataTables_length {
-   width: 40%;
-   float: left;
-}
-
-.dataTables_filter {
-   width: 50%;
-   float: right;
-   text-align: right;
-}
-
-.dataTables_info {
-   width: 60%;
-   float: left;
-}
-
-.dataTables_paginate {
-   width: 44px;
-   * width: 50px;
-   float: right;
-   text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, 
.paginate_disabled_next, .paginate_enabled_next {
-   height: 19px;
-   width: 19px;
-   margin-left: 3px;
-   float: left;
-}
-
-.paginate_disabled_previous {
-   background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-   background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-   background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-   background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-   margin: 0 auto;
-   clear: both;
-   width: 100%;
-   
-   /* Note Firefox 3.5 and before have a bug with border-collapse
-* ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 ) 
-* border-spacing: 0; is one possible option. Conditional-css.com is
-* useful for this kind of thing
-*
-* Further note IE 6/7 has problems when calculating widths with border 
width.
-* It subtracts one px relative to the other browsers from the first 
column, and
-* adds one to the end...
-*
-* If you want that effect I'd suggest setting a border-top/left on 
th/td's and 
-* then filling in the gaps with other borders.
-*/
-}
-
-table.display thead th {
-   padding: 3px 18px 3px 10px;
-   border-bottom: 1px solid black;
-   font-weight: bold;
-   cursor: pointer;
-   * cursor: hand;
-}
-
-table.display tfoot th {
-   padding: 3px 18px 3px 10px;
-   border-top: 1px solid black;
-   font-weight: bold;
-}
-
-table.display tr.heading2 td {
-   border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-   padding: 3px 10px;
-}
-
-table.display td.center {
-   text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-   

[05/50] [abbrv] hadoop git commit: YARN-8626. Create HomePolicyManager that sends all the requests to the home subcluster. Contributed by Inigo Goiri.

2018-08-10 Thread ehiggs
YARN-8626. Create HomePolicyManager that sends all the requests to the home 
subcluster. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d838179d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d838179d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d838179d

Branch: refs/heads/HDFS-12090
Commit: d838179d8dc257e582e8c7bb1cf312d4c0d3f733
Parents: 861095f
Author: Giovanni Matteo Fumarola 
Authored: Tue Aug 7 15:33:16 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Tue Aug 7 15:33:16 2018 -0700

--
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |   8 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |   7 --
 .../policies/amrmproxy/HomeAMRMProxyPolicy.java |  74 +
 .../amrmproxy/RejectAMRMProxyPolicy.java|   8 --
 .../policies/manager/HomePolicyManager.java |  61 ++
 .../amrmproxy/TestHomeAMRMProxyPolicy.java  | 110 +++
 .../policies/manager/TestHomePolicyManager.java |  39 +++
 .../utils/FederationPoliciesTestUtil.java   |  16 ++-
 8 files changed, 305 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d838179d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
index e853744..07cd6db 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java
@@ -20,9 +20,12 @@ package 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 
 import java.util.Map;
 
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.AbstractConfigurableFederationPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
@@ -44,4 +47,9 @@ public abstract class AbstractAMRMProxyPolicy extends
 }
   }
 
+  @Override
+  public void notifyOfResponse(SubClusterId subClusterId,
+  AllocateResponse response) throws YarnException {
+// By default, a stateless policy does not care about responses
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d838179d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
index 7fddb8e..eb83baa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
@@ -22,7 +22,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -65,10 +64,4 @@ public class BroadcastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 return 

[21/50] [abbrv] hadoop git commit: HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.

2018-08-10 Thread ehiggs
HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36c0d742
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36c0d742
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36c0d742

Branch: refs/heads/HDFS-12090
Commit: 36c0d742d484f8bf01d7cb01c7b1c9e3627625dc
Parents: 5b898c1
Author: Márton Elek 
Authored: Wed Aug 8 17:27:57 2018 +0200
Committer: Márton Elek 
Committed: Wed Aug 8 17:31:55 2018 +0200

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36c0d742/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 6985f55..d00f961 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -72,11 +72,11 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   .withInitial(() -> new HashMap());
 
   private enum NodeNotChosenReason {
-NOT_IN_SERVICE("the node isn't in service"),
+NOT_IN_SERVICE("the node is not in service"),
 NODE_STALE("the node is stale"),
 NODE_TOO_BUSY("the node is too busy"),
 TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
-NOT_ENOUGH_STORAGE_SPACE("no enough storage space to place the block");
+NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
 
 private final String text;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2018-08-10 Thread ehiggs
HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/959f49b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/959f49b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/959f49b4

Branch: refs/heads/HDFS-12090
Commit: 959f49b4803bae374da086b1891655f90e5502e5
Parents: 2c52ff5
Author: Virajith Jalaparti 
Authored: Wed Aug 1 12:13:31 2018 -0700
Committer: Ewan Higgs 
Committed: Fri Aug 10 13:34:28 2018 +0200

--
 .../apache/hadoop/hdfs/BlockInputStream.java|  52 
 .../hdfs/server/datanode/BPOfferService.java|   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  20 ++-
 .../SyncServiceSatisfierDatanodeWorker.java |  97 +++
 .../SyncTaskExecutionFeedbackCollector.java |  54 
 .../executor/BlockSyncOperationExecutor.java| 122 +++
 .../executor/BlockSyncReaderFactory.java|  92 ++
 .../executor/BlockSyncTaskRunner.java   |  69 +++
 .../hadoop/hdfs/TestBlockInputStream.java   |  84 +
 .../TestBlockSyncOperationExecutor.java |  94 ++
 10 files changed, 689 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/959f49b4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}
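An illustrative sketch (not part of the patch above): with the facade in place,
block data can be consumed through standard java.io code. The BlockReader is
assumed to have been opened elsewhere (for example by the datanode-side sync
executor introduced in this commit); obtaining one is not shown here.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.BlockInputStream;
import org.apache.hadoop.hdfs.BlockReader;

class BlockCopySketch {
  // Streams one block's data into any OutputStream via the InputStream view.
  static long copyBlock(BlockReader reader, OutputStream out) throws IOException {
    long copied = 0;
    try (InputStream in = new BlockInputStream(reader)) {
      byte[] buf = new byte[64 * 1024];
      int n;
      while ((n = in.read(buf, 0, buf.length)) > 0) {
        out.write(buf, 0, n);
        copied += n;
      }
    }
    return copied;
  }
}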

http://git-wip-us.apache.org/repos/asf/hadoop/blob/959f49b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index a25f6a9..b8eef5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -795,6 +795,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierDatanodeWorker().processSyncTasks(backupTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }

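An illustrative sketch (not part of the patch above): the worker called in this
hunk is defined elsewhere in the commit. The general shape of such a handler --
fanning the received sync tasks out to a bounded thread pool -- looks roughly
like the following; all names and the pool size are hypothetical.

import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class SyncTaskFanOutSketch {
  // Small bounded pool so a burst of backup commands cannot exhaust
  // datanode threads; the size here is arbitrary.
  private final ExecutorService pool = Executors.newFixedThreadPool(4);

  // Each task runs asynchronously; per-task feedback would be collected
  // elsewhere and reported on a later heartbeat.
  void processSyncTasks(Collection<Runnable> tasks) {
    for (Runnable task : tasks) {
      pool.submit(task);
    }
  }
}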

[06/50] [abbrv] hadoop git commit: HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contri

2018-08-10 Thread ehiggs
HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to 
missing synchronization
between rollEditsRpcExecutor and tailerThread shutdown. Contributed 
by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f8cb127
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f8cb127
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f8cb127

Branch: refs/heads/HDFS-12090
Commit: 0f8cb127cd759cdc6422d19d8b28f21198ddfd61
Parents: d838179
Author: Xiao Chen 
Authored: Tue Aug 7 16:11:37 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 7 16:13:41 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8cb127/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 2003f94..b306b8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -234,7 +234,6 @@ public class EditLogTailer {
   }
   
   public void stop() throws IOException {
-rollEditsRpcExecutor.shutdown();
 tailerThread.setShouldRun(false);
 tailerThread.interrupt();
 try {
@@ -242,6 +241,8 @@ public class EditLogTailer {
 } catch (InterruptedException e) {
   LOG.warn("Edit log tailer thread exited with an exception");
   throw new IOException(e);
+} finally {
+  rollEditsRpcExecutor.shutdown();
 }
   }
   

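An illustrative sketch (not part of the patch above) of the pattern this fix
applies: when stopping a worker thread, the executor shutdown moves into a
finally block so it still runs if joining the thread throws. Class and field
names below are invented for the example.

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class TailerShutdownSketch {
  private final ExecutorService rpcExecutor = Executors.newSingleThreadExecutor();
  private final Thread tailer = new Thread(() -> { /* tail edits until interrupted */ });

  void start() {
    tailer.start();
  }

  void stop() throws IOException {
    tailer.interrupt();
    try {
      tailer.join();
    } catch (InterruptedException e) {
      throw new IOException(e);
    } finally {
      // Runs whether or not join() threw, mirroring the change above.
      rpcExecutor.shutdown();
    }
  }
}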

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: Revert "YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB."

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b898c17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
new file mode 100644
index 000..3bc0433
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
@@ -0,0 +1,538 @@
+/*
+ *  File: demo_table.css
+ *  CVS:  $Id$
+ *  Description:  CSS descriptions for DataTables demo pages
+ *  Author:   Allan Jardine
+ *  Created:  Tue May 12 06:47:22 BST 2009
+ *  Modified: $Date$ by $Author$
+ *  Language: CSS
+ *  Project:  DataTables
+ *
+ *  Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the 
standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify 
these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ *   'full_numbers' pagination - I use an extra selector on the body tag to 
ensure that there is
+ * no conflict between the two pagination types. If you want to use 
full_numbers pagination
+ * ensure that you either have "example_alt_pagination" as a body class 
name, or better yet,
+ * modify that selector.
+ *   Note that the path used for Images is relative. All images are by default 
located in
+ * ../images/ - relative to this CSS file.
+ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+   position: relative;
+   min-height: 302px;
+   clear: both;
+   _height: 302px;
+   zoom: 1; /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+   position: absolute;
+   top: 50%;
+   left: 50%;
+   width: 250px;
+   height: 30px;
+   margin-left: -125px;
+   margin-top: -15px;
+   padding: 14px 0 2px 0;
+   border: 1px solid #ddd;
+   text-align: center;
+   color: #999;
+   font-size: 14px;
+   background-color: white;
+}
+
+.dataTables_length {
+   width: 40%;
+   float: left;
+}
+
+.dataTables_filter {
+   width: 50%;
+   float: right;
+   text-align: right;
+}
+
+.dataTables_info {
+   width: 60%;
+   float: left;
+}
+
+.dataTables_paginate {
+   width: 44px;
+   * width: 50px;
+   float: right;
+   text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous, 
.paginate_disabled_next, .paginate_enabled_next {
+   height: 19px;
+   width: 19px;
+   margin-left: 3px;
+   float: left;
+}
+
+.paginate_disabled_previous {
+   background-image: url('../images/back_disabled.jpg');
+}
+
+.paginate_enabled_previous {
+   background-image: url('../images/back_enabled.jpg');
+}
+
+.paginate_disabled_next {
+   background-image: url('../images/forward_disabled.jpg');
+}
+
+.paginate_enabled_next {
+   background-image: url('../images/forward_enabled.jpg');
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+   margin: 0 auto;
+   clear: both;
+   width: 100%;
+   
+   /* Note Firefox 3.5 and before have a bug with border-collapse
+* ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 ) 
+* border-spacing: 0; is one possible option. Conditional-css.com is
+* useful for this kind of thing
+*
+* Further note IE 6/7 has problems when calculating widths with border 
width.
+* It subtracts one px relative to the other browsers from the first 
column, and
+* adds one to the end...
+*
+* If you want that effect I'd suggest setting a border-top/left on 
th/td's and 
+* then filling in the gaps with other borders.
+*/
+}
+
+table.display thead th {
+   padding: 3px 18px 3px 10px;
+   border-bottom: 1px solid black;
+   font-weight: bold;
+   cursor: pointer;
+   * cursor: hand;
+}
+
+table.display tfoot th {
+   padding: 3px 18px 3px 10px;
+   border-top: 1px solid black;
+   font-weight: bold;
+}
+
+table.display tr.heading2 td {
+   border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+   padding: 3px 10px;
+}
+
+table.display td.center {
+   text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+   background: 

[12/50] [abbrv] hadoop git commit: HADOOP-15576. S3A Multipart Uploader to work with S3Guard and encryption Originally contributed by Ewan Higgs with refinements by Steve Loughran.

2018-08-10 Thread ehiggs
HADOOP-15576. S3A Multipart Uploader to work with S3Guard and encryption 
Originally contributed by Ewan Higgs with refinements by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ec97abb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ec97abb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ec97abb

Branch: refs/heads/HDFS-12090
Commit: 2ec97abb2e93c1a8127e7a146c08e26454b583fa
Parents: 4203bc7
Author: Ewan Higgs 
Authored: Wed Aug 8 13:50:23 2018 +0200
Committer: Ewan Higgs 
Committed: Wed Aug 8 13:50:23 2018 +0200

--
 .../hadoop/fs/FileSystemMultipartUploader.java  |  69 +++--
 .../org/apache/hadoop/fs/MultipartUploader.java |  32 +-
 .../java/org/apache/hadoop/fs/PartHandle.java   |   8 +-
 .../java/org/apache/hadoop/fs/PathHandle.java   |   9 +-
 .../fs/AbstractSystemMultipartUploaderTest.java | 143 -
 .../TestLocalFileSystemMultipartUploader.java   |  65 
 .../AbstractContractMultipartUploaderTest.java  | 300 +++
 .../TestLocalFSContractMultipartUploader.java   |  43 +++
 .../hadoop/fs/TestHDFSMultipartUploader.java|  76 -
 .../hdfs/TestHDFSContractMultipartUploader.java |  58 
 .../hadoop/fs/s3a/S3AMultipartUploader.java | 177 +++
 .../hadoop/fs/s3a/WriteOperationHelper.java |   4 +
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  15 -
 ...rg.apache.hadoop.fs.MultipartUploaderFactory |  15 +
 .../s3a/ITestS3AContractMultipartUploader.java  | 116 +++
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |   5 +
 .../fs/s3a/TestS3AMultipartUploaderSupport.java |  84 ++
 .../TestStagingPartitionedJobCommit.java|   4 +-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |   4 +-
 .../src/test/resources/contract/s3a.xml |   5 +
 20 files changed, 831 insertions(+), 401 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec97abb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index b57ff3d..a700a9f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -16,12 +16,6 @@
  */
 package org.apache.hadoop.fs;
 
-import com.google.common.base.Charsets;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsPermission;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
@@ -29,13 +23,26 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.stream.Collectors;
 
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import static org.apache.hadoop.fs.Path.mergePaths;
+
 /**
  * A MultipartUploader that uses the basic FileSystem commands.
  * This is done in three stages:
- * Init - create a temp _multipart directory.
- * PutPart - copying the individual parts of the file to the temp directory.
- * Complete - use {@link FileSystem#concat} to merge the files; and then delete
- * the temp directory.
+ * 
+ *   Init - create a temp {@code _multipart} directory.
+ *   PutPart - copying the individual parts of the file to the temp
+ *   directory.
+ *   Complete - use {@link FileSystem#concat} to merge the files;
+ *   and then delete the temp directory.
+ * 
  */
 public class FileSystemMultipartUploader extends MultipartUploader {
 
@@ -64,28 +71,44 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
 uploadIdByteArray.length, Charsets.UTF_8));
 Path partPath =
-Path.mergePaths(collectorPath, Path.mergePaths(new 
Path(Path.SEPARATOR),
+mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR),
 new Path(Integer.toString(partNumber) + ".part")));
-FSDataOutputStreamBuilder outputStream = fs.createFile(partPath);
-FSDataOutputStream fsDataOutputStream = outputStream.build();
-IOUtils.copy(inputStream, fsDataOutputStream, 4096);
-fsDataOutputStream.close();
+try(FSDataOutputStream fsDataOutputStream =
+ 

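An illustrative sketch (not part of the patch above): the javadoc in this change
describes a three-stage flow -- init a temporary directory, put each part into
it, then merge and clean up. The sketch below re-expresses that flow with plain
java.nio, deliberately avoiding the Hadoop MultipartUploader API itself; paths
and helper names are invented for the example.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;

class ThreeStageUploadSketch {
  // Stage 1: create a temporary staging directory next to the target file.
  static Path init(Path target) throws IOException {
    return Files.createDirectories(
        target.resolveSibling(target.getFileName() + "._multipart"));
  }

  // Stage 2: each part is written as its own numbered file in the staging area.
  static Path putPart(Path staging, int partNumber, byte[] data) throws IOException {
    return Files.write(staging.resolve(partNumber + ".part"), data);
  }

  // Stage 3: concatenate the parts in order into the target, then clean up.
  static void complete(Path staging, Path target, List<Path> partsInOrder)
      throws IOException {
    try (OutputStream out = Files.newOutputStream(target,
        StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
      for (Path part : partsInOrder) {
        Files.copy(part, out);
        Files.delete(part);
      }
    }
    Files.delete(staging);
  }
}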
[44/50] [abbrv] hadoop git commit: YARN-8588. Logging improvements for better debuggability. (Suma Shivaprasad via wangda)

2018-08-10 Thread ehiggs
YARN-8588. Logging improvements for better debuggability. (Suma Shivaprasad via 
wangda)

Change-Id: I66aa4b0ec031ae5ce0fae558e2f8cbcbbfebc442


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/344c335a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/344c335a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/344c335a

Branch: refs/heads/HDFS-12090
Commit: 344c335a920e6f32a35ebace0a118a9dc4a22fb7
Parents: 5326a79
Author: Wangda Tan 
Authored: Thu Aug 9 11:03:00 2018 -0700
Committer: Wangda Tan 
Committed: Thu Aug 9 11:04:02 2018 -0700

--
 .../capacity/AutoCreatedLeafQueueConfig.java|  5 ++
 .../capacity/QueueManagementChange.java |  2 +-
 .../QueueManagementDynamicEditPolicy.java   | 36 ++
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 50 
 4 files changed, 52 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
index 5952250..87ef1c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedLeafQueueConfig.java
@@ -63,4 +63,9 @@ public class AutoCreatedLeafQueueConfig {
   public CapacitySchedulerConfiguration getLeafQueueConfigs() {
 return leafQueueConfigs;
   }
+
+  @Override public String toString() {
+return "AutoCreatedLeafQueueConfig{" + "queueCapacities=" + queueCapacities
++ ", leafQueueConfigs=" + leafQueueConfigs + '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
index 74d9b23..64ba578 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementChange.java
@@ -124,7 +124,7 @@ public abstract class QueueManagementChange {
 
   @Override
   public String toString() {
-return "QueueManagementChange{" + "queue=" + queue
+return "QueueManagementChange{" + "queue=" + queue.getQueueName()
 + ", updatedEntitlementsByPartition=" + queueTemplateUpdate
 + ", queueAction=" + queueAction + ", transitionToQueueState="
 + transitionToQueueState + '}';

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344c335a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
index 9b0cf7b..ea43ac8 100644
--- 

[37/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 
upgrade. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00013d6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00013d6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00013d6e

Branch: refs/heads/HDFS-12090
Commit: 00013d6ef7fdf65fa8a0f6eb56c0aef2f6e19444
Parents: da9a39e
Author: Sunil G 
Authored: Thu Aug 9 12:18:32 2018 +0530
Committer: Sunil G 
Committed: Thu Aug 9 12:18:32 2018 +0530

--
 LICENSE.txt |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java   |   4 +-
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 +++
 .../webapps/static/dt-1.10.7/css/jui-dt.css | 322 +++
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 0 -> 27490 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 0 -> 612 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg| Bin 0 -> 807 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 0 -> 894 bytes
 .../dt-1.10.7/images/forward_disabled.jpg   | Bin 0 -> 635 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 0 -> 852 bytes
 .../static/dt-1.10.7/images/sort_asc.png| Bin 0 -> 263 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png  | Bin 0 -> 252 bytes
 .../static/dt-1.10.7/images/sort_both.png   | Bin 0 -> 282 bytes
 .../static/dt-1.10.7/images/sort_desc.png   | Bin 0 -> 260 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png | Bin 0 -> 251 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js   | 160 ++
 .../webapps/static/dt-1.9.4/css/demo_page.css   | 110 
 .../webapps/static/dt-1.9.4/css/demo_table.css  | 538 ---
 .../webapps/static/dt-1.9.4/css/jui-dt.css  | 322 ---
 .../static/dt-1.9.4/images/Sorting icons.psd| Bin 27490 -> 0 bytes
 .../static/dt-1.9.4/images/back_disabled.jpg| Bin 612 -> 0 bytes
 .../static/dt-1.9.4/images/back_enabled.jpg | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/favicon.ico  | Bin 894 -> 0 bytes
 .../static/dt-1.9.4/images/forward_disabled.jpg | Bin 635 -> 0 bytes
 .../static/dt-1.9.4/images/forward_enabled.jpg  | Bin 852 -> 0 bytes
 .../webapps/static/dt-1.9.4/images/sort_asc.png | Bin 263 -> 0 bytes
 .../dt-1.9.4/images/sort_asc_disabled.png   | Bin 252 -> 0 bytes
 .../static/dt-1.9.4/images/sort_both.png| Bin 282 -> 0 bytes
 .../static/dt-1.9.4/images/sort_desc.png| Bin 260 -> 0 bytes
 .../dt-1.9.4/images/sort_desc_disabled.png  | Bin 251 -> 0 bytes
 .../static/dt-1.9.4/js/jquery.dataTables.min.js | 157 --
 33 files changed, 1137 insertions(+), 1134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index f8de86a..393ed0e 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
 
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
 

 Copyright (C) 2008-2016, SpryMedia Ltd.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index eddcbaa..685eac9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,10 +237,10 @@
 src/main/resources/webapps/test/.keep
 src/main/resources/webapps/proxy/.keep
 src/main/resources/webapps/node/.keep
-
src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css
-
src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
-src/main/resources/webapps/static/dt-1.9.4/images/Sorting 
icons.psd
-
src/main/resources/webapps/static/dt-1.9.4/js/jquery.dataTables.min.js
+
src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
+

[38/50] [abbrv] hadoop git commit: HDDS-219. Generate version-info.properties for hadoop and ozone. Contributed by Sandeep Nemuri.

2018-08-10 Thread ehiggs
HDDS-219. Generate version-info.properties for hadoop and ozone. Contributed 
by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d96bc6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d96bc6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d96bc6e

Branch: refs/heads/HDFS-12090
Commit: 3d96bc6e5ff098900cf07e4b30c642e961a39427
Parents: 00013d6
Author: Márton Elek 
Authored: Thu Aug 9 11:06:03 2018 +0200
Committer: Márton Elek 
Committed: Thu Aug 9 11:06:03 2018 +0200

--
 hadoop-hdds/common/pom.xml  |  34 +++
 .../apache/hadoop/utils/HddsVersionInfo.java| 182 
 .../main/resources/hdds-version-info.properties |  26 +++
 hadoop-ozone/common/pom.xml |  35 +++
 hadoop-ozone/common/src/main/bin/ozone  |   2 +-
 .../hadoop/ozone/util/OzoneVersionInfo.java | 213 +++
 .../resources/ozone-version-info.properties |  27 +++
 7 files changed, 518 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-hdds/common/pom.xml
--
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 4068522..ed29d31 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -29,10 +29,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   jar
 
   
+0.2.1-SNAPSHOT
 hdds
 true
 2.11.0
 3.4.2
+${hdds.version}
   
 
   
@@ -102,6 +104,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
   
+
+  
+${basedir}/src/main/resources
+
+  hdds-version-info.properties
+
+false
+  
+  
+${basedir}/src/main/resources
+
+  hdds-version-info.properties
+
+true
+  
+
 
   
 kr.motd.maven
@@ -170,6 +188,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 hadoop-maven-plugins
 
   
+version-info
+generate-resources
+
+  version-info
+
+
+  
+${basedir}/../
+
+  */src/main/java/**/*.java
+  */src/main/proto/*.proto
+
+  
+
+  
+  
 compile-protoc
 
   protoc

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96bc6e/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
new file mode 100644
index 000..59b9de6
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ClassUtil;
+import org.apache.hadoop.util.ThreadUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+/**
+ * This class returns build information about Hadoop components.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class HddsVersionInfo {
+  private static final Logger LOG = 
LoggerFactory.getLogger(HddsVersionInfo.class);
+
+  private Properties info;
+
+  protected HddsVersionInfo(String component) {
+info = new Properties();
+String versionInfoFile = component + "-version-info.properties";
+InputStream is = null;
+try {
+  is = 
ThreadUtil.getResourceAsStream(HddsVersionInfo.class.getClassLoader(),
+  
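
The HddsVersionInfo.java listing above is cut off by the digest. As a rough, self-contained sketch of the pattern it follows — loading a Maven-generated `*-version-info.properties` resource from the classpath and printing a couple of fields — note that the class name and the property keys (`version`, `revision`) are assumptions, not taken from the truncated listing:

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class VersionInfoSketch {
  public static void main(String[] args) throws IOException {
    // Resource name matches the file added by this commit; the keys read
    // below ("version", "revision") are illustrative assumptions.
    String resource = "hdds-version-info.properties";
    Properties info = new Properties();
    try (InputStream is = VersionInfoSketch.class.getClassLoader()
        .getResourceAsStream(resource)) {
      if (is == null) {
        throw new IOException(resource + " not found on the classpath");
      }
      info.load(is);
    }
    System.out.println("Version:  " + info.getProperty("version", "Unknown"));
    System.out.println("Revision: " + info.getProperty("revision", "Unknown"));
  }
}
```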

[09/50] [abbrv] hadoop git commit: HDFS-13786. EC: Display erasure coding policy for sub-directories is not working. Contributed by Ayush Saxena.

2018-08-10 Thread ehiggs
HDFS-13786. EC: Display erasure coding policy for sub-directories is not 
working. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b0f9772
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b0f9772
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b0f9772

Branch: refs/heads/HDFS-12090
Commit: 2b0f9772417d205e8df16bac6921c2bb8bdcf740
Parents: 7862f15
Author: Vinayakumar B 
Authored: Wed Aug 8 07:47:10 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Aug 8 07:53:17 2018 +0530

--
 .../namenode/ContentSummaryComputationContext.java|  2 ++
 .../apache/hadoop/hdfs/TestErasureCodingPolicies.java | 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0f9772/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index c81f82c..95f3fee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -191,6 +191,8 @@ public class ContentSummaryComputationContext {
   .getEnabledPolicyByName(ecPolicyName)
   .getName();
 }
+  } else if (inode.getParent() != null) {
+  return getErasureCodingPolicyName(inode.getParent());
   }
 } catch (IOException ioe) {
   LOG.warn("Encountered error getting ec policy for "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b0f9772/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 7d97cce..835d18f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -154,6 +155,19 @@ public class TestErasureCodingPolicies {
   }
 
   @Test
+  public void testContentSummaryOfECSubdir() throws IOException {
+final Path testDir = new Path("/ec");
+fs.mkdir(testDir, FsPermission.getDirDefault());
+fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
+final Path fPath = new Path("ec/file");
+fs.create(fPath).close();
+final Path subdir = new Path("/ec/sub");
+fs.mkdir(subdir, FsPermission.getDirDefault());
+ContentSummary contentSummary = fs.getContentSummary(subdir);
+assertEquals(ecPolicy.getName(),contentSummary.getErasureCodingPolicy());
+  }
+
+  @Test
   public void testBasicSetECPolicy()
   throws IOException, InterruptedException {
 final Path testDir = new Path("/ec");
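
As a hedged usage sketch of the behaviour this patch fixes: a sub-directory created under an erasure-coded directory should now report the inherited policy through `getContentSummary()`. The running cluster, the paths, and the `RS-6-3-1024k` policy being enabled are assumptions for this sketch, not part of the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcSubdirSummarySketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS cluster and that the
    // named EC policy is enabled; both are assumptions for this sketch.
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
    Path parent = new Path("/ec");
    Path subdir = new Path("/ec/sub");
    fs.mkdirs(parent);
    fs.setErasureCodingPolicy(parent, "RS-6-3-1024k");
    fs.mkdirs(subdir);
    ContentSummary summary = fs.getContentSummary(subdir);
    // Before this fix the sub-directory's summary did not carry the policy;
    // with it, the inherited policy name is reported.
    System.out.println("EC policy of " + subdir + ": "
        + summary.getErasureCodingPolicy());
  }
}
```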





[02/50] [abbrv] hadoop git commit: YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma Shivaprasad via wangda)

2018-08-10 Thread ehiggs
YARN-8629. Container cleanup fails while trying to delete Cgroups. (Suma 
Shivaprasad via wangda)

Change-Id: I392ef4f8baa84d5d7b1f2e438c560b5426b6d4f2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4258fca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4258fca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4258fca

Branch: refs/heads/HDFS-12090
Commit: d4258fcad71eabe2de3cf829cde36840200ab9b6
Parents: b1a59b1
Author: Wangda Tan 
Authored: Tue Aug 7 12:36:55 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 12:36:55 2018 -0700

--
 .../linux/resources/CGroupsHandlerImpl.java | 26 
 1 file changed, 16 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4258fca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index c3800b6..a547e8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -504,23 +504,29 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   private boolean checkAndDeleteCgroup(File cgf) throws InterruptedException {
 boolean deleted = false;
 // FileInputStream in = null;
-try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
-  if (in.read() == -1) {
+if ( cgf.exists() ) {
+  try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
+if (in.read() == -1) {
 /*
  * "tasks" file is empty, sleep a bit more and then try to delete the
  * cgroup. Some versions of linux will occasionally panic due to a race
  * condition in this area, hence the paranoia.
  */
-Thread.sleep(deleteCGroupDelay);
-deleted = cgf.delete();
-if (!deleted) {
-  LOG.warn("Failed attempt to delete cgroup: " + cgf);
+  Thread.sleep(deleteCGroupDelay);
+  deleted = cgf.delete();
+  if (!deleted) {
+LOG.warn("Failed attempt to delete cgroup: " + cgf);
+  }
+} else{
+  logLineFromTasksFile(cgf);
 }
-  } else {
-logLineFromTasksFile(cgf);
+  } catch (IOException e) {
+LOG.warn("Failed to read cgroup tasks file. ", e);
   }
-} catch (IOException e) {
-  LOG.warn("Failed to read cgroup tasks file. ", e);
+} else {
+  LOG.info("Parent Cgroups directory {} does not exist. Skipping "
+  + "deletion", cgf.getPath());
+  deleted = true;
 }
 return deleted;
   }





[23/50] [abbrv] hadoop git commit: HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed by Kitti Nanasi.

2018-08-10 Thread ehiggs
HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed 
by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9499df7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9499df7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9499df7b

Branch: refs/heads/HDFS-12090
Commit: 9499df7b81b55b488a32fd59798a543dafef4ef8
Parents: ff06bd1
Author: Xiao Chen 
Authored: Wed Aug 8 10:36:44 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 8 10:40:20 2018 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  |  2 +
 .../hadoop/hdfs/protocol/ECBlockGroupStats.java | 27 +++-
 .../hdfs/protocol/ReplicatedBlockStats.java | 28 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 21 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 10 +
 .../server/federation/router/ErasureCoding.java | 13 ++
 .../server/blockmanagement/BlockManager.java|  8 
 .../blockmanagement/LowRedundancyBlocks.java| 28 +
 .../hdfs/server/namenode/FSNamesystem.java  | 20 -
 .../hdfs/server/namenode/NameNodeMXBean.java| 18 
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 10 +
 .../TestLowRedundancyBlockQueues.java   | 43 +---
 .../namenode/metrics/TestNameNodeMetrics.java   | 12 ++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 32 +++
 15 files changed, 247 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9499df7b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4313640..83ad40a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -244,6 +244,8 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `NumStaleStorages` | Number of storages marked as content stale (after 
NameNode restart/failover before first block report is received) |
 | `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
+| `HighestPriorityLowRedundancyReplicatedBlocks` | Current number of 
non-corrupt, low redundancy replicated blocks with the highest risk of loss 
(have 0 or 1 replica). Will be recovered with the highest priority. |
+| `HighestPriorityLowRedundancyECBlocks` | Current number of non-corrupt, low 
redundancy EC blocks with the highest risk of loss. Will be recovered with the 
highest priority. |
 | `NumFilesUnderConstruction` | Current number of files under construction |
 | `NumActiveClients` | Current number of active clients holding lease |
 | `HAState` | (HA-only) Current state of the NameNode: initializing or active 
or standby or stopping state |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9499df7b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
index 9a8ad8c..3dde604 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -34,15 +34,26 @@ public final class ECBlockGroupStats {
   private final long missingBlockGroups;
   private final long bytesInFutureBlockGroups;
   private final long pendingDeletionBlocks;
+  private final Long highestPriorityLowRedundancyBlocks;
 
   public ECBlockGroupStats(long lowRedundancyBlockGroups,
   long corruptBlockGroups, long missingBlockGroups,
   long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+this(lowRedundancyBlockGroups, corruptBlockGroups, missingBlockGroups,
+bytesInFutureBlockGroups, pendingDeletionBlocks, null);
+  }
+
+  public ECBlockGroupStats(long lowRedundancyBlockGroups,
+  long corruptBlockGroups, long missingBlockGroups,
+  long bytesInFutureBlockGroups, long pendingDeletionBlocks,
+  Long highestPriorityLowRedundancyBlocks) {
 this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
 this.corruptBlockGroups = corruptBlockGroups;
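
The ECBlockGroupStats listing is truncated above. As a small illustration of the API change that is visible — a new six-argument constructor whose last parameter is a nullable `Long` — here is a hedged sketch; all figures are made up, and the printing convention merely mirrors what a reporting tool might do.

```java
import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;

public class ECBlockGroupStatsSketch {
  public static void main(String[] args) {
    // All figures are illustrative. The last constructor argument is the new
    // optional counter; a null value means the NameNode did not report it.
    Long highestPriorityLowRedundancy = 5L;
    ECBlockGroupStats stats = new ECBlockGroupStats(
        12L,  // lowRedundancyBlockGroups
        0L,   // corruptBlockGroups
        0L,   // missingBlockGroups
        0L,   // bytesInFutureBlockGroups
        3L,   // pendingDeletionBlocks
        highestPriorityLowRedundancy);
    if (highestPriorityLowRedundancy != null) {
      System.out.println("Highest-priority low redundancy EC block groups: "
          + highestPriorityLowRedundancy);
    }
    System.out.println(stats);
  }
}
```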
 

[32/50] [abbrv] hadoop git commit: HADOOP-15583. Stabilize S3A Assumed Role support. Contributed by Steve Loughran.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9a39ee/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
index 3afd63f..8af0457 100644
--- 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
+++ 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
@@ -29,7 +29,7 @@ assumed roles for different buckets.
 *IAM Assumed Roles are unlikely to be supported by third-party systems
 supporting the S3 APIs.*
 
-## Using IAM Assumed Roles
+##  Using IAM Assumed Roles
 
 ### Before You Begin
 
@@ -40,6 +40,8 @@ are, how to configure their policies, etc.
 * You need a pair of long-lived IAM User credentials, not the root account set.
 * Have the AWS CLI installed, and test that it works there.
 * Give the role access to S3, and, if using S3Guard, to DynamoDB.
+* For working with data encrypted with SSE-KMS, the role must
+have access to the appropriate KMS keys.
 
 Trying to learn how IAM Assumed Roles work by debugging stack traces from
 the S3A client is "suboptimal".
@@ -51,7 +53,7 @@ To use assumed roles, the client must be configured to use the
 in the configuration option `fs.s3a.aws.credentials.provider`.
 
 This AWS Credential provider will read in the `fs.s3a.assumed.role` options 
needed to connect to the
-Session Token Service [Assumed Role 
API](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html),
+Security Token Service [Assumed Role 
API](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html),
 first authenticating with the full credentials, then assuming the specific role
 specified. It will then refresh this login at the configured rate of
 `fs.s3a.assumed.role.session.duration`
@@ -69,7 +71,7 @@ which uses `fs.s3a.access.key` and `fs.s3a.secret.key`.
 Note: although you can list other AWS credential providers in addition to the
 Assumed Role Credential Provider, it can only cause confusion.
 
-###  Using Assumed Roles
+###  Configuring Assumed Roles
 
 To use assumed roles, the S3A client credentials provider must be set to
 the `AssumedRoleCredentialProvider`, and `fs.s3a.assumed.role.arn` to
@@ -78,7 +80,6 @@ the previously created ARN.
 ```xml
 
   fs.s3a.aws.credentials.provider
-  org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
   org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider
 
 
@@ -159,7 +160,18 @@ Here are the full set of configuration options.
   fs.s3a.assumed.role.sts.endpoint
   
   
-AWS Simple Token Service Endpoint. If unset, uses the default endpoint.
+AWS Security Token Service Endpoint. If unset, uses the default endpoint.
+Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  
+
+
+
+  fs.s3a.assumed.role.sts.endpoint.region
+  us-west-1
+  
+AWS Security Token Service Endpoint's region;
+Needed if fs.s3a.assumed.role.sts.endpoint points to an endpoint
+other than the default one and the v4 signature is used.
 Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   
 
@@ -194,39 +206,101 @@ These lists represent the minimum actions to which the 
client's principal
 must have in order to work with a bucket.
 
 
-### Read Access Permissions
+###  Read Access Permissions
 
 Permissions which must be granted when reading from a bucket:
 
 
-| Action | S3A operations |
-||--|
-| `s3:ListBucket` | `listStatus()`, `getFileStatus()` and elsewhere |
-| `s3:GetObject` | `getFileStatus()`, `open()` and elsewhere |
-| `s3:ListBucketMultipartUploads` |  Aborting/cleaning up S3A commit 
operations|
+```
+s3:Get*
+s3:ListBucket
+```
+
+When using S3Guard, the client needs the appropriate
+DynamoDB access permissions
+
+To use SSE-KMS encryption, the client needs the
+SSE-KMS Permissions to access the
+KMS key(s).
+
+###  Write Access Permissions
+
+These permissions must all be granted for write access:
+
+```
+s3:Get*
+s3:Delete*
+s3:Put*
+s3:ListBucket
+s3:ListBucketMultipartUploads
+s3:AbortMultipartUpload
+```
+
+###  SSE-KMS Permissions
+
+When reading data encrypted using SSE-KMS, the client must have
+ `kms:Decrypt` permission for the specific key a file was encrypted with.
+
+```
+kms:Decrypt
+```
+
+To write data using SSE-KMS, the client must have all the following 
permissions.
+
+```
+kms:Decrypt
+kms:GenerateDataKey
+```
 
+This includes renaming: renamed files are encrypted with the encryption key
+of the current S3A client; it must decrypt the source file first.
 
-The `s3:ListBucketMultipartUploads` is only needed when committing work
-via the [S3A committers](committers.html).
-However, it must be granted to the root path in order to safely clean up jobs.
-It is simplest to permit this in all buckets, even if it is only 
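
The documentation excerpt above is truncated mid-sentence. To make the configuration steps concrete, here is a hedged Java equivalent of the XML snippet shown earlier in the excerpt; the bucket name, role ARN, and credentials are placeholders, and only the `fs.s3a.*` keys and the provider class name come from the quoted documentation.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AssumedRoleConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Credential provider and role ARN, mirroring the XML above.
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
    conf.set("fs.s3a.assumed.role.arn",
        "arn:aws:iam::123456789012:role/example-role"); // placeholder ARN
    // Long-lived credentials used to call AssumeRole (placeholders).
    conf.set("fs.s3a.access.key", "AKIA...");
    conf.set("fs.s3a.secret.key", "...");
    try (FileSystem s3a =
        FileSystem.get(URI.create("s3a://example-bucket/"), conf)) {
      // Any filesystem call now runs with the assumed role's permissions.
      s3a.listStatus(new Path("/"));
    }
  }
}
```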

[30/50] [abbrv] hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

2018-08-10 Thread ehiggs
YARN-8568. Replace the deprecated zk-address property in the HA config example 
in ResourceManagerHA.md (bsteinbach via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8478732b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8478732b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8478732b

Branch: refs/heads/HDFS-12090
Commit: 8478732bb28e9e71061d6b4a043a3a1b5c688902
Parents: 3214cd7
Author: Robert Kanter 
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter 
Committed: Wed Aug 8 15:08:55 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8478732b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index da9f5a0..ff97328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   master2:8088
 
 
-  yarn.resourcemanager.zk-address
+  hadoop.zk.address
   zk1:2181,zk2:2181,zk3:2181
 
 ```





[19/50] [abbrv] hadoop git commit: Revert "YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB."

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b898c17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
deleted file mode 100644
index 85dd817..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
+++ /dev/null
@@ -1,160 +0,0 @@
-/*! DataTables 1.10.7
- * ©2008-2015 SpryMedia Ltd - datatables.net/license
- */
-(function(Ea,Q,k){var P=function(h){function W(a){var 
b,c,e={};h.each(a,function(d){if((b=d.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa 
ai ao as b fn i m o s ".indexOf(b[1]+" 
"))c=d.replace(b[0],b[2].toLowerCase()),e[c]=d,"o"===b[1]&(a[d])});a._hungarianMap=e}function
 H(a,b,c){a._hungarianMap||W(a);var 
e;h.each(b,function(d){e=a._hungarianMap[d];if(e!==k&&(c||b[e]===k))"o"===e.charAt(0)?(b[e]||(b[e]={}),h.extend(!0,b[e],b[d]),H(a[e],b[e],c)):b[e]=b[d]})}function
 P(a){var b=m.defaults.oLanguage,c=a.sZeroRecords;
-!a.sEmptyTable&&(c&&"No data available in 
table"===b.sEmptyTable)&(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&(c&&"Loading..."===b.sLoadingRecords)&(a,a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&(a)}function
 
eb(a){A(a,"ordering","bSort");A(a,"orderMulti","bSortMulti");A(a,"orderClasses","bSortClasses");A(a,"orderCellsTop","bSortCellsTop");A(a,"order","aaSorting");A(a,"orderFixed","aaSortingFixed");A(a,"paging","bPaginate");
-A(a,"pagingType","sPaginationType");A(a,"pageLength","iDisplayLength");A(a,"searching","bFilter");if(a=a.aoSearchCols)for(var
 b=0,c=a.length;b").css({position:"absolute",top:0,left:0,height:1,width:1,overflow:"hidden"}).append(h("").css({position:"absolute",
-top:1,left:1,width:100,overflow:"scroll"}).append(h('').css({width:"100%",height:10}))).appendTo("body"),c=b.find(".test");a.bScrollOversize=100===c[0].offsetWidth;a.bScrollbarLeft=1!==Math.round(c.offset().left);b.remove()}function
 hb(a,b,c,e,d,f){var 
g,j=!1;c!==k&&(g=c,j=!0);for(;e!==d;)a.hasOwnProperty(e)&&(g=j?b(g,a[e],e,a):a[e],j=!0,e+=f);return
 g}function Fa(a,b){var 
c=m.defaults.column,e=a.aoColumns.length,c=h.extend({},m.models.oColumn,c,{nTh:b?b:Q.createElement("th"),sTitle:c.sTitle?
-c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[e],mData:c.mData?c.mData:e,idx:e});a.aoColumns.push(c);c=a.aoPreSearchCols;c[e]=h.extend({},m.models.oSearch,c[e]);ka(a,e,h(b).data())}function
 ka(a,b,c){var 
b=a.aoColumns[b],e=a.oClasses,d=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=d.attr("width")||null;var
 
f=(d.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);f&&(b.sWidthOrig=f[1])}c!==k&!==c&&(fb(c),H(m.defaults.column,c),c.mDataProp!==k&&!c.mData&&(c.mData=c.mDataProp),c.sType&&
-(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),h.extend(b,c),E(b,c,"sWidth","sWidthOrig"),c.iDataSort!==k&&(b.aDataSort=[c.iDataSort]),E(b,c,"aDataSort"));var
 
g=b.mData,j=R(g),i=b.mRender?R(b.mRender):null,c=function(a){return"string"===typeof
 
a&&-1!==a.indexOf("@")};b._bAttrSrc=h.isPlainObject(g)&&(c(g.sort)||c(g.type)||c(g.filter));b.fnGetData=function(a,b,c){var
 e=j(a,b,k,c);return i&?i(e,b,a,c):e};b.fnSetData=function(a,b,c){return 
S(g)(a,b,c)};"number"!==typeof g&&
-(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,d.addClass(e.sSortableNone));a=-1!==h.inArray("asc",b.asSorting);c=-1!==h.inArray("desc",b.asSorting);!b.bSortable||!a&&!c?(b.sSortingClass=e.sSortableNone,b.sSortingClassJUI=""):a&&!c?(b.sSortingClass=e.sSortableAsc,b.sSortingClassJUI=e.sSortJUIAscAllowed):!a&?(b.sSortingClass=e.sSortableDesc,b.sSortingClassJUI=e.sSortJUIDescAllowed):(b.sSortingClass=e.sSortable,b.sSortingClassJUI=e.sSortJUI)}function
 X(a){if(!1!==a.oFeatures.bAutoWidth){var b=
-a.aoColumns;Ga(a);for(var 
c=0,e=b.length;cq[f])e(l.length+q[f],o);else if("string"===typeof 
q[f]){j=0;for(i=l.length;jb&[d]--; -1!=e&===k&(e,1)}function 
ca(a,b,c,e){var 
d=a.aoData[b],f,g=function(c,f){for(;c.childNodes.length;)c.removeChild(c.firstChild);c.innerHTML=x(a,b,f,"display")};if("dom"===c||(!c||"auto"===c)&&"dom"===d.src)d._aData=na(a,d,e,e===k?k:d._aData).data;else{var
 
j=d.anCells;if(j)if(e!==k)g(j[e],e);else{c=0;for(f=j.length;c").appendTo(g));b=0;for(c=l.length;btr").attr("role","row");h(g).find(">tr>th,
 >tr>td").addClass(o.sHeaderTH);
-h(j).find(">tr>th, 
>tr>td").addClass(o.sFooterTH);if(null!==j){a=a.aoFooter[0];b=0;for(c=a.length;b=a.fnRecordsDisplay()?0:g,a.iInitDisplayStart=-1);var 
g=a._iDisplayStart,o=a.fnDisplayEnd();if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,C(a,!1);else
 if(j){if(!a.bDestroying&&!kb(a))return}else 

[48/50] [abbrv] hadoop git commit: YARN-8575. Avoid committing allocation proposal to unavailable nodes in async scheduling. Contributed by Tao Yang.

2018-08-10 Thread ehiggs
YARN-8575. Avoid committing allocation proposal to unavailable nodes in async 
scheduling. Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a71bf14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a71bf14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a71bf14

Branch: refs/heads/HDFS-12090
Commit: 0a71bf145293adbd3728525ab4c36c08d51377d3
Parents: 08d5060
Author: Weiwei Yang 
Authored: Fri Aug 10 14:37:45 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Aug 10 14:37:45 2018 +0800

--
 .../scheduler/common/fica/FiCaSchedulerApp.java | 12 
 .../yarn/server/resourcemanager/MockNodes.java  |  6 +-
 .../resourcemanager/TestResourceManager.java| 16 -
 .../TestCapacitySchedulerAsyncScheduling.java   | 69 
 .../scheduler/capacity/TestUtils.java   |  2 +
 5 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 9810e98..6a5af81 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -429,6 +430,17 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
 SchedulerContainer
 schedulerContainer = allocation.getAllocatedOrReservedContainer();
 
+// Make sure node is in RUNNING state
+if (schedulerContainer.getSchedulerNode().getRMNode().getState()
+!= NodeState.RUNNING) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Failed to accept this proposal because node "
++ schedulerContainer.getSchedulerNode().getNodeID() + " is in "
++ schedulerContainer.getSchedulerNode().getRMNode().getState()
++ " state (not RUNNING)");
+  }
+  return false;
+}
 if (schedulerContainer.isAllocated()) {
   // When allocate a new container
   containerRequest =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a71bf14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 9041132..c444b6e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -347,17 +347,17 @@ public class MockNodes {
   }
 
   public static RMNode newNodeInfo(int rack, final Resource perNode, int 
hostnum) {
-return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null, 123);
+return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0", 
hostnum, null, 123);
   }
   
   public static RMNode newNodeInfo(int rack, final Resource perNode,
   int hostnum, String hostName) {
-return 

[14/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64901abd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
deleted file mode 100644
index 3bc0433..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- *  File: demo_table.css
- *  CVS:  $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:   Allan Jardine
- *  Created:  Tue May 12 06:47:22 BST 2009
- *  Modified: $Date$ by $Author$
- *  Language: CSS
- *  Project:  DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the 
standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify 
these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to 
ensure that there is
- * no conflict between the two pagination types. If you want to use 
full_numbers pagination
- * ensure that you either have "example_alt_pagination" as a body class 
name, or better yet,
- * modify that selector.
- *   Note that the path used for Images is relative. All images are by default 
located in
- * ../images/ - relative to this CSS file.
- */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-   position: relative;
-   min-height: 302px;
-   clear: both;
-   _height: 302px;
-   zoom: 1; /* Feeling sorry for IE */
-}
-
-.dataTables_processing {
-   position: absolute;
-   top: 50%;
-   left: 50%;
-   width: 250px;
-   height: 30px;
-   margin-left: -125px;
-   margin-top: -15px;
-   padding: 14px 0 2px 0;
-   border: 1px solid #ddd;
-   text-align: center;
-   color: #999;
-   font-size: 14px;
-   background-color: white;
-}
-
-.dataTables_length {
-   width: 40%;
-   float: left;
-}
-
-.dataTables_filter {
-   width: 50%;
-   float: right;
-   text-align: right;
-}
-
-.dataTables_info {
-   width: 60%;
-   float: left;
-}
-
-.dataTables_paginate {
-   width: 44px;
-   * width: 50px;
-   float: right;
-   text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, 
.paginate_disabled_next, .paginate_enabled_next {
-   height: 19px;
-   width: 19px;
-   margin-left: 3px;
-   float: left;
-}
-
-.paginate_disabled_previous {
-   background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-   background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-   background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-   background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-   margin: 0 auto;
-   clear: both;
-   width: 100%;
-   
-   /* Note Firefox 3.5 and before have a bug with border-collapse
-* ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 ) 
-* border-spacing: 0; is one possible option. Conditional-css.com is
-* useful for this kind of thing
-*
-* Further note IE 6/7 has problems when calculating widths with border 
width.
-* It subtracts one px relative to the other browsers from the first 
column, and
-* adds one to the end...
-*
-* If you want that effect I'd suggest setting a border-top/left on 
th/td's and 
-* then filling in the gaps with other borders.
-*/
-}
-
-table.display thead th {
-   padding: 3px 18px 3px 10px;
-   border-bottom: 1px solid black;
-   font-weight: bold;
-   cursor: pointer;
-   * cursor: hand;
-}
-
-table.display tfoot th {
-   padding: 3px 18px 3px 10px;
-   border-top: 1px solid black;
-   font-weight: bold;
-}
-
-table.display tr.heading2 td {
-   border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-   padding: 3px 10px;
-}
-
-table.display td.center {
-   text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-   

[36/50] [abbrv] hadoop git commit: YARN-8633. Update DataTables version in yarn-common in line with JQuery 3 upgrade. Contributed by Akhil PB.

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00013d6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
new file mode 100644
index 000..85dd817
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
@@ -0,0 +1,160 @@
+/*! DataTables 1.10.7
+ * ©2008-2015 SpryMedia Ltd - datatables.net/license
+ */
+(function(Ea,Q,k){var P=function(h){function W(a){var 
b,c,e={};h.each(a,function(d){if((b=d.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa 
ai ao as b fn i m o s ".indexOf(b[1]+" 
"))c=d.replace(b[0],b[2].toLowerCase()),e[c]=d,"o"===b[1]&(a[d])});a._hungarianMap=e}function
 H(a,b,c){a._hungarianMap||W(a);var 
e;h.each(b,function(d){e=a._hungarianMap[d];if(e!==k&&(c||b[e]===k))"o"===e.charAt(0)?(b[e]||(b[e]={}),h.extend(!0,b[e],b[d]),H(a[e],b[e],c)):b[e]=b[d]})}function
 P(a){var b=m.defaults.oLanguage,c=a.sZeroRecords;
+!a.sEmptyTable&&(c&&"No data available in 
table"===b.sEmptyTable)&(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&(c&&"Loading..."===b.sLoadingRecords)&(a,a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&(a)}function
 
eb(a){A(a,"ordering","bSort");A(a,"orderMulti","bSortMulti");A(a,"orderClasses","bSortClasses");A(a,"orderCellsTop","bSortCellsTop");A(a,"order","aaSorting");A(a,"orderFixed","aaSortingFixed");A(a,"paging","bPaginate");
+A(a,"pagingType","sPaginationType");A(a,"pageLength","iDisplayLength");A(a,"searching","bFilter");if(a=a.aoSearchCols)for(var
 b=0,c=a.length;b").css({position:"absolute",top:0,left:0,height:1,width:1,overflow:"hidden"}).append(h("").css({position:"absolute",
+top:1,left:1,width:100,overflow:"scroll"}).append(h('').css({width:"100%",height:10}))).appendTo("body"),c=b.find(".test");a.bScrollOversize=100===c[0].offsetWidth;a.bScrollbarLeft=1!==Math.round(c.offset().left);b.remove()}function
 hb(a,b,c,e,d,f){var 
g,j=!1;c!==k&&(g=c,j=!0);for(;e!==d;)a.hasOwnProperty(e)&&(g=j?b(g,a[e],e,a):a[e],j=!0,e+=f);return
 g}function Fa(a,b){var 
c=m.defaults.column,e=a.aoColumns.length,c=h.extend({},m.models.oColumn,c,{nTh:b?b:Q.createElement("th"),sTitle:c.sTitle?
+c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[e],mData:c.mData?c.mData:e,idx:e});a.aoColumns.push(c);c=a.aoPreSearchCols;c[e]=h.extend({},m.models.oSearch,c[e]);ka(a,e,h(b).data())}function
 ka(a,b,c){var 
b=a.aoColumns[b],e=a.oClasses,d=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=d.attr("width")||null;var
 
f=(d.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);f&&(b.sWidthOrig=f[1])}c!==k&!==c&&(fb(c),H(m.defaults.column,c),c.mDataProp!==k&&!c.mData&&(c.mData=c.mDataProp),c.sType&&
+(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),h.extend(b,c),E(b,c,"sWidth","sWidthOrig"),c.iDataSort!==k&&(b.aDataSort=[c.iDataSort]),E(b,c,"aDataSort"));var
 
g=b.mData,j=R(g),i=b.mRender?R(b.mRender):null,c=function(a){return"string"===typeof
 
a&&-1!==a.indexOf("@")};b._bAttrSrc=h.isPlainObject(g)&&(c(g.sort)||c(g.type)||c(g.filter));b.fnGetData=function(a,b,c){var
 e=j(a,b,k,c);return i&?i(e,b,a,c):e};b.fnSetData=function(a,b,c){return 
S(g)(a,b,c)};"number"!==typeof g&&
+(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,d.addClass(e.sSortableNone));a=-1!==h.inArray("asc",b.asSorting);c=-1!==h.inArray("desc",b.asSorting);!b.bSortable||!a&&!c?(b.sSortingClass=e.sSortableNone,b.sSortingClassJUI=""):a&&!c?(b.sSortingClass=e.sSortableAsc,b.sSortingClassJUI=e.sSortJUIAscAllowed):!a&?(b.sSortingClass=e.sSortableDesc,b.sSortingClassJUI=e.sSortJUIDescAllowed):(b.sSortingClass=e.sSortable,b.sSortingClassJUI=e.sSortJUI)}function
 X(a){if(!1!==a.oFeatures.bAutoWidth){var b=
+a.aoColumns;Ga(a);for(var 
c=0,e=b.length;cq[f])e(l.length+q[f],o);else if("string"===typeof 
q[f]){j=0;for(i=l.length;jb&[d]--; -1!=e&===k&(e,1)}function 
ca(a,b,c,e){var 
d=a.aoData[b],f,g=function(c,f){for(;c.childNodes.length;)c.removeChild(c.firstChild);c.innerHTML=x(a,b,f,"display")};if("dom"===c||(!c||"auto"===c)&&"dom"===d.src)d._aData=na(a,d,e,e===k?k:d._aData).data;else{var
 
j=d.anCells;if(j)if(e!==k)g(j[e],e);else{c=0;for(f=j.length;c").appendTo(g));b=0;for(c=l.length;btr").attr("role","row");h(g).find(">tr>th,
 >tr>td").addClass(o.sHeaderTH);
+h(j).find(">tr>th, 
>tr>td").addClass(o.sFooterTH);if(null!==j){a=a.aoFooter[0];b=0;for(c=a.length;b=a.fnRecordsDisplay()?0:g,a.iInitDisplayStart=-1);var 
g=a._iDisplayStart,o=a.fnDisplayEnd();if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,C(a,!1);else
 if(j){if(!a.bDestroying&&!kb(a))return}else 

[03/50] [abbrv] hadoop git commit: YARN-7089. Mark the log-aggregation-controller APIs as public. (Zian Chen via wangda)

2018-08-10 Thread ehiggs
YARN-7089. Mark the log-aggregation-controller APIs as public. (Zian Chen via 
wangda)

Change-Id: I37851bdc5935d623a27d0973a206c997258716eb


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0599151
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0599151
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0599151

Branch: refs/heads/HDFS-12090
Commit: c0599151bb438d3dc0c6a54af93b2670770daefd
Parents: d4258fc
Author: Wangda Tan 
Authored: Tue Aug 7 12:37:32 2018 -0700
Committer: Wangda Tan 
Committed: Tue Aug 7 12:37:32 2018 -0700

--
 .../filecontroller/LogAggregationFileController.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0599151/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 6b3c9a4..fe65288 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -35,7 +35,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -65,7 +65,7 @@ import 
org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 /**
  * Base class to implement Log Aggregation File Controller.
  */
-@Private
+@Public
 @Unstable
 public abstract class LogAggregationFileController {
 





[31/50] [abbrv] hadoop git commit: HDDS-267. Handle consistency issues during container update/close.

2018-08-10 Thread ehiggs
HDDS-267. Handle consistency issues during container update/close.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81cd361
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81cd361
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81cd361

Branch: refs/heads/HDFS-12090
Commit: d81cd3611a449bcd7970ff2f1392a5e868e28f7e
Parents: 8478732
Author: Hanisha Koneru 
Authored: Wed Aug 8 16:47:25 2018 -0700
Committer: Hanisha Koneru 
Committed: Wed Aug 8 16:47:25 2018 -0700

--
 .../container/common/impl/ContainerData.java|  1 -
 .../container/keyvalue/KeyValueContainer.java   | 54 ++-
 .../container/keyvalue/KeyValueHandler.java | 21 ++--
 .../keyvalue/TestKeyValueContainer.java | 16 --
 .../container/keyvalue/TestKeyValueHandler.java | 55 
 .../common/impl/TestContainerPersistence.java   |  8 ---
 6 files changed, 80 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 5803628..26954a7 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -257,7 +257,6 @@ public abstract class ContainerData {
* Marks this container as closed.
*/
   public synchronized void closeContainer() {
-// TODO: closed or closing here
 setState(ContainerLifeCycleState.CLOSED);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81cd361/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 353fe4f..c96f997 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -138,7 +138,7 @@ public class KeyValueContainer implements Container {
 
   // Create .container file
   File containerFile = getContainerFile();
-  writeToContainerFile(containerFile, true);
+  createContainerFile(containerFile);
 
 } catch (StorageContainerException ex) {
   if (containerMetaDataPath != null && 
containerMetaDataPath.getParentFile()
@@ -165,11 +165,11 @@ public class KeyValueContainer implements Container {
   }
 
   /**
-   * Creates .container file and checksum file.
+   * Writes to .container file.
*
-   * @param containerFile
-   * @param isCreate true if we are creating a new container file and false if
-   *we are updating an existing container file.
+   * @param containerFile container file name
+   * @param isCreate True if creating a new file. False if updating an
+   * existing container file.
* @throws StorageContainerException
*/
   private void writeToContainerFile(File containerFile, boolean isCreate)
@@ -181,19 +181,18 @@ public class KeyValueContainer implements Container {
   ContainerDataYaml.createContainerFile(
   ContainerType.KeyValueContainer, containerData, tempContainerFile);
 
+  // NativeIO.renameTo is an atomic function. But it might fail if the
+  // container file already exists. Hence, we handle the two cases
+  // separately.
   if (isCreate) {
-// When creating a new container, .container file should not exist
-// already.
 NativeIO.renameTo(tempContainerFile, containerFile);
   } else {
-// When updating a container, the .container file should exist. If
-// not, the container is in an inconsistent state.
 Files.move(tempContainerFile.toPath(), containerFile.toPath(),
 StandardCopyOption.REPLACE_EXISTING);
   }
 
 } catch (IOException ex) {
-  throw new StorageContainerException("Error during creation of " +
+  throw new StorageContainerException("Error while creating/ updating " +
   ".container file. ContainerID: " + containerId, ex,
   

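The KeyValueContainer diff above distinguishes an atomic create-time rename (via `NativeIO.renameTo`) from an update that replaces the existing `.container` file. Below is a minimal sketch of that distinction using plain `java.nio`; the file names are placeholders and this illustrates the idea only, not the Ozone code path.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class ContainerFileRenameSketch {

  // Mirrors the create-vs-update distinction discussed in the diff above:
  // a create must fail if the target already exists, an update replaces it.
  static void commitContainerFile(Path temp, Path target, boolean isCreate)
      throws IOException {
    if (isCreate) {
      Files.move(temp, target); // throws FileAlreadyExistsException if present
    } else {
      Files.move(temp, target, StandardCopyOption.REPLACE_EXISTING);
    }
  }

  public static void main(String[] args) throws IOException {
    Path temp = Files.createTempFile("container", ".tmp");
    Path target = Paths.get(temp.getParent().toString(), "example.container");
    commitContainerFile(temp, target, true);
    System.out.println("Container file written to " + target);
  }
}
```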
[22/50] [abbrv] hadoop git commit: YARN-8601. Print ExecutionType in Container report CLI. Contributed by Bilwa S T.

2018-08-10 Thread ehiggs
YARN-8601. Print ExecutionType in Container report CLI. Contributed by Bilwa S 
T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff06bd1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff06bd1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff06bd1b

Branch: refs/heads/HDFS-12090
Commit: ff06bd1be83a2a6d2ee39cb002e91499720a7243
Parents: 36c0d74
Author: bibinchundatt 
Authored: Wed Aug 8 22:42:52 2018 +0530
Committer: bibinchundatt 
Committed: Wed Aug 8 22:42:52 2018 +0530

--
 .../java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java | 2 ++
 .../test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java   | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff06bd1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 14710a4..807938c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -881,6 +881,8 @@ public class ApplicationCLI extends YarnCLI {
   containerReportStr.println(containerReport.getFinishTime());
   containerReportStr.print("\tState : ");
   containerReportStr.println(containerReport.getContainerState());
+  containerReportStr.print("\tExecution-Type : ");
+  containerReportStr.println(containerReport.getExecutionType());
   containerReportStr.print("\tLOG-URL : ");
   containerReportStr.println(containerReport.getLogUrl());
   containerReportStr.print("\tHost : ");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff06bd1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 6b823b2..526adfd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -292,6 +292,7 @@ public class TestYarnCLI {
 pw.println("\tStart-Time : 1234");
 pw.println("\tFinish-Time : 5678");
 pw.println("\tState : COMPLETE");
+pw.println("\tExecution-Type : GUARANTEED");
 pw.println("\tLOG-URL : logURL");
 pw.println("\tHost : host:1234");
 pw.println("\tNodeHttpAddress : http://host:2345");





[41/50] [abbrv] hadoop git commit: YARN-8559. Expose mutable-conf scheduler's configuration in RM /scheduler-conf endpoint. Contributed by Weiwei Yang.

2018-08-10 Thread ehiggs
YARN-8559. Expose mutable-conf scheduler's configuration in RM /scheduler-conf 
endpoint. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d352f167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d352f167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d352f167

Branch: refs/heads/HDFS-12090
Commit: d352f167ebb865a6486afbbdac8e2a5e97a7bbad
Parents: cd04e95
Author: Weiwei Yang 
Authored: Thu Aug 9 23:46:53 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Aug 9 23:46:53 2018 +0800

--
 .../scheduler/MutableConfigurationProvider.java |  7 ++
 .../conf/MutableCSConfigurationProvider.java|  5 ++
 .../resourcemanager/webapp/RMWebServices.java   | 34 +
 .../resourcemanager/webapp/dao/ConfInfo.java| 72 
 .../TestRMWebServicesConfigurationMutation.java | 40 +++
 .../src/site/markdown/ResourceManagerRest.md| 40 +++
 6 files changed, 198 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 2b9b25a..6e56f3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
@@ -59,6 +60,12 @@ public interface MutableConfigurationProvider {
   void confirmPendingMutation(boolean isValid) throws Exception;
 
   /**
+   * Returns scheduler configuration cached in this provider.
+   * @return scheduler configuration.
+   */
+  Configuration getConfiguration();
+
+  /**
* Closes the configuration provider, releasing any required resources.
* @throws IOException on failure to close
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 9c3bf9d..51de437 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -126,6 +126,11 @@ public class MutableCSConfigurationProvider implements 
CSConfigurationProvider,
   }
 
   @Override
+  public Configuration getConfiguration() {
+return new Configuration(schedConf);
+  }
+
+  @Override
   public ConfigurationMutationACLPolicy getAclMutationPolicy() {
 return aclMutationPolicy;
   }
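
With the provider now exposing its cached configuration, RMWebServices can answer GET requests on the scheduler-conf endpoint. A rough client-side sketch follows; rm-host:8088 is a placeholder, and it assumes the CapacityScheduler is running with a mutable configuration store (yarn.scheduler.configuration.store.class set to memory, leveldb or zk):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SchedulerConfDump {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://rm-host:8088/ws/v1/cluster/scheduler-conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/xml");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        // The response lists name/value pairs of the scheduler
        // configuration cached by the mutable configuration provider.
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}

Note that getConfiguration() hands back new Configuration(schedConf) rather than the cached object itself, so web-layer callers cannot mutate the provider's copy.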

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d352f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 

[11/50] [abbrv] hadoop git commit: HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code policies. Contributed by Ayush Saxena

2018-08-10 Thread ehiggs
HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code policies. Contributed by Ayush Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4203bc73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4203bc73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4203bc73

Branch: refs/heads/HDFS-12090
Commit: 4203bc738c11aaf083b6d407c6d6b7f4f22fe0d3
Parents: 6677717
Author: Vinayakumar B 
Authored: Wed Aug 8 12:42:20 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Aug 8 12:42:20 2018 +0530

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  4 ++--
 .../src/site/markdown/HDFSErasureCoding.md  |  4 ++--
 .../test/resources/testErasureCodingConf.xml| 22 +++-
 3 files changed, 25 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4203bc73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 9b9fe14..56706b2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -154,7 +154,7 @@ public class ECAdmin extends Configured implements Tool {
   listing.addRow("",
   "The path of the xml file which defines the EC policies to add");
   return getShortUsage() + "\n" +
-  "Add a list of erasure coding policies.\n" +
+  "Add a list of user defined erasure coding policies.\n" +
   listing.toString();
 }
 
@@ -268,7 +268,7 @@ public class ECAdmin extends Configured implements Tool {
   TableListing listing = AdminHelper.getOptionDescriptionListing();
   listing.addRow("", "The name of the erasure coding policy");
   return getShortUsage() + "\n" +
-  "Remove an erasure coding policy.\n" +
+  "Remove an user defined erasure coding policy.\n" +
   listing.toString();
 }
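
The ECAdmin commands above map onto DistributedFileSystem calls. Below is a hedged sketch of the programmatic equivalent (the RS 3+2 schema, 1 MiB cell size and superuser assumption are illustrative; RS-6-3-1024k is one of the built-in policies, which can only be enabled or disabled, never removed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class UserDefinedEcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Add a user defined policy: Reed-Solomon 3 data + 2 parity, 1 MiB cells.
    ErasureCodingPolicy userPolicy =
        new ErasureCodingPolicy(new ECSchema("rs", 3, 2), 1024 * 1024);
    AddErasureCodingPolicyResponse[] responses =
        dfs.addErasureCodingPolicies(new ErasureCodingPolicy[] {userPolicy});
    String name = responses[0].getPolicy().getName();

    // User defined policies may be removed again.
    dfs.removeErasureCodingPolicy(name);

    // Built-in/system policies cannot be removed; they can only be
    // enabled or disabled, which is what the updated help text clarifies.
    dfs.disableErasureCodingPolicy("RS-6-3-1024k");

    dfs.close();
  }
}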
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4203bc73/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 60fd3ab..6ae2086 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -203,7 +203,7 @@ Below are the details about each command.
 
  *  `[-addPolicies -policyFile ]`
 
- Add a list of erasure coding policies. Please refer 
etc/hadoop/user_ec_policies.xml.template for the example policy file. The 
maximum cell size is defined in property 
'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB. Currently 
HDFS allows the user to add 64 policies in total, and the added policy ID is in 
range of 64 to 127. Adding policy will fail if there are already 64 policies 
added.
+ Add a list of user defined erasure coding policies. Please refer 
etc/hadoop/user_ec_policies.xml.template for the example policy file. The 
maximum cell size is defined in property 
'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB. Currently 
HDFS allows the user to add 64 policies in total, and the added policy ID is in 
range of 64 to 127. Adding policy will fail if there are already 64 policies 
added.
 
  *  `[-listCodecs]`
 
@@ -211,7 +211,7 @@ Below are the details about each command.
 
 *  `[-removePolicy -policy ]`
 
- Remove an erasure coding policy.
+ Remove an user defined erasure coding policy.
 
 *  `[-enablePolicy -policy ]`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4203bc73/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 2f7a6a7..9070367 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -154,7 +154,7 @@
   
 
   SubstringComparator
-  Add a list of erasure coding 
policies
+  Add a list of user defined erasure coding 
policies
 
 
   SubstringComparator
@@ -164,6 

[24/50] [abbrv] hadoop git commit: Make 3.1.1 awared by other branches

2018-08-10 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/49c68760/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.1.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.1.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.1.xml
new file mode 100644
index 000..e3dbe6a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.1.xml
@@ -0,0 +1,2920 @@
[2,920 added lines: the jdiff API report for Apache Hadoop YARN Client 3.1.1; the XML element markup was not preserved by the mail archive.]

[27/50] [abbrv] hadoop git commit: Make 3.1.1 awared by other branches

2018-08-10 Thread ehiggs
Make 3.1.1 awared by other branches


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49c68760
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49c68760
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49c68760

Branch: refs/heads/HDFS-12090
Commit: 49c687608b65b772faeed614700ece8e526432e8
Parents: 9499df7
Author: Wangda Tan 
Authored: Wed Aug 8 13:01:58 2018 -0700
Committer: Wangda Tan 
Committed: Wed Aug 8 13:02:12 2018 -0700

--
 .../markdown/release/3.1.1/CHANGES.3.1.1.md |  498 +++
 .../release/3.1.1/RELEASENOTES.3.1.1.md |  498 +++
 .../jdiff/Apache_Hadoop_HDFS_3.1.1.xml  |  676 
 hadoop-project-dist/pom.xml |2 +-
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.1.xml   | 2920 ++
 5 files changed, 4593 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49c68760/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.1/CHANGES.3.1.1.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.1/CHANGES.3.1.1.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.1/CHANGES.3.1.1.md
new file mode 100644
index 000..8e2c804
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.1/CHANGES.3.1.1.md
@@ -0,0 +1,498 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.1.1 - 2018-08-02
+
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14667](https://issues.apache.org/jira/browse/HADOOP-14667) | Flexible Visual Studio support |  Major | build | Allen Wittenauer | Allen Wittenauer |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13056](https://issues.apache.org/jira/browse/HDFS-13056) | Expose file-level composite CRCs in HDFS which are comparable across different instances/layouts |  Major | datanode, distcp, erasure-coding, federation, hdfs | Dennis Huo | Dennis Huo |
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-8028](https://issues.apache.org/jira/browse/YARN-8028) | Support authorizeUserAccessToQueue in RMWebServices |  Major | . | Wangda Tan | Wangda Tan |
+| [HADOOP-15332](https://issues.apache.org/jira/browse/HADOOP-15332) | Fix typos in hadoop-aws markdown docs |  Minor | . | Gabor Bota | Gabor Bota |
+| [HADOOP-15330](https://issues.apache.org/jira/browse/HADOOP-15330) | Remove jdk1.7 profile from hadoop-annotations module |  Minor | . | Akira Ajisaka | fang zhenyi |
+| [HADOOP-15342](https://issues.apache.org/jira/browse/HADOOP-15342) | Update ADLS connector to use the current SDK version (2.2.7) |  Major | fs/adl | Atul Sikaria | Atul Sikaria |
+| [YARN-1151](https://issues.apache.org/jira/browse/YARN-1151) | Ability to configure auxiliary services from HDFS-based JAR files |  Major | nodemanager | john lilley | Xuan Gong |
+| [HDFS-13418](https://issues.apache.org/jira/browse/HDFS-13418) | NetworkTopology should be configurable when enable DFSNetworkTopology |  Major | . | Tao Jie | Tao Jie |
+| [HDFS-13439](https://issues.apache.org/jira/browse/HDFS-13439) | Add test case for read block operation when it is moved |  Major | . | Ajay Kumar | Ajay Kumar |
+| [HDFS-13462](https://issues.apache.org/jira/browse/HDFS-13462) | Add BIND\_HOST configuration for JournalNode's HTTP and RPC Servers |  Major | hdfs, journal-node | Lukas Majercak | Lukas Majercak |
+| [YARN-8140](https://issues.apache.org/jira/browse/YARN-8140) | Improve log message when launch cmd is ran for stopped yarn service |  Major | yarn-native-services | Yesha Vora | Eric Yang |
+| [MAPREDUCE-7086](https://issues.apache.org/jira/browse/MAPREDUCE-7086) | Add config to allow FileInputFormat to ignore directories when recursive=false |  Major | . | Sergey Shelukhin | Sergey Shelukhin |
+| [HDFS-12981](https://issues.apache.org/jira/browse/HDFS-12981) | renameSnapshot a Non-Existent snapshot to itself should throw error |  Minor | hdfs | Sailesh Patel | Kitti Nanasi |
+| [YARN-8239](https://issues.apache.org/jira/browse/YARN-8239) | [UI2] Clicking on Node Manager UI under AM container info / App Attempt page goes to old RM UI |  Major | yarn-ui-v2 | Sumana Sathish | Sunil Govindan |
+| [YARN-8260](https://issues.apache.org/jira/browse/YARN-8260) | [UI2] 

[42/50] [abbrv] hadoop git commit: HDFS-13735. Make QJM HTTP URL connection timeout configurable. Contributed by Chao Sun.

2018-08-10 Thread ehiggs
HDFS-13735. Make QJM HTTP URL connection timeout configurable. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5326a790
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5326a790
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5326a790

Branch: refs/heads/HDFS-12090
Commit: 5326a7906de7c86a236d948012cabf3a9ba82310
Parents: d352f16
Author: Chen Liang 
Authored: Thu Aug 9 10:11:47 2018 -0700
Committer: Chen Liang 
Committed: Thu Aug 9 10:11:47 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  5 +
 .../qjournal/client/QuorumJournalManager.java | 11 +--
 .../src/main/resources/hdfs-default.xml   | 18 ++
 3 files changed, 32 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5326a790/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4f21ee1..55085eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1033,6 +1034,8 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String  DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY = 
"dfs.qjournal.get-journal-state.timeout.ms";
   public static final String  DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY = 
"dfs.qjournal.new-epoch.timeout.ms";
   public static final String  DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY = 
"dfs.qjournal.write-txns.timeout.ms";
+  public static final String  DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY = 
"dfs.qjournal.http.open.timeout.ms";
+  public static final String  DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY = 
"dfs.qjournal.http.read.timeout.ms";
   public static final int DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT = 
2;
   public static final int DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT = 
12;
   public static final int DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT = 
12;
@@ -1041,6 +1044,8 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final int DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT = 
12;
   public static final int DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT = 12;
   public static final int DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT = 2;
+  public static final int DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_DEFAULT = 
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
+  public static final int DFS_QJOURNAL_HTTP_READ_TIMEOUT_DEFAULT = 
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
   
   public static final String DFS_MAX_NUM_BLOCKS_TO_LOG_KEY = 
"dfs.namenode.max-num-blocks-to-log";
   public static final long   DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT = 1000l;
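
The two new keys fall back to URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT (60 seconds). A small sketch of overriding them programmatically, with arbitrary example values; in practice they would normally be set in hdfs-site.xml on the nodes that talk to the JournalNodes:

import org.apache.hadoop.conf.Configuration;

public class QjmHttpTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY / DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY
    // in DFSConfigKeys; values are in milliseconds.
    conf.setInt("dfs.qjournal.http.open.timeout.ms", 90000);
    conf.setInt("dfs.qjournal.http.read.timeout.ms", 90000);
    // QuorumJournalManager builds its URL connection factory from these values.
    System.out.println(conf.getInt("dfs.qjournal.http.open.timeout.ms", 60000));
  }
}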

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5326a790/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 7a70a3d..4faaa98 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -124,8 +124,6 @@ public class QuorumJournalManager implements JournalManager 
{
 this.nsInfo = nsInfo;
 this.nameServiceId = nameServiceId;
 this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
-this.connectionFactory = URLConnectionFactory
-.newDefaultURLConnectionFactory(conf);
 
 // Configure timeouts.
 this.startSegmentTimeoutMs = conf.getInt(
@@ -156,6 +154,15 @@ public class QuorumJournalManager implements 
JournalManager {
 .DFS_QJM_OPERATIONS_TIMEOUT,
  
