[hadoop] branch trunk updated: HADOOP-16520. Race condition in DDB table init and waiting threads. (#1576). Contributed by Gabor Bota.

2019-10-11 Thread gabota
This is an automated email from the ASF dual-hosted git repository.

gabota pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4a700c2  HADOOP-16520. Race condition in DDB table init and waiting threads. (#1576). Contributed by Gabor Bota.
4a700c2 is described below

commit 4a700c20d553dc5336ee881719bcf189fc46bfbf
Author: Gabor Bota 
AuthorDate: Fri Oct 11 12:08:47 2019 +0200

HADOOP-16520. Race condition in DDB table init and waiting threads. (#1576). Contributed by Gabor Bota.

Fixes HADOOP-16349. DynamoDBMetadataStore.getVersionMarkerItem() to log at info/warn on retry

Change-Id: Ia83e92b9039ccb780090c99c41b4f71ef7539d35
---
 .../java/org/apache/hadoop/fs/s3a/Constants.java   |   2 +-
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java  | 450 +
 .../s3guard/DynamoDBMetadataStoreTableManager.java | 693 +
 .../s3guard/PathMetadataDynamoDBTranslation.java   |   2 +-
 .../hadoop/fs/s3a/s3guard/S3GuardTableAccess.java  |   6 +-
 .../src/site/markdown/tools/hadoop-aws/s3guard.md  |  33 +-
 .../fs/s3a/s3guard/ITestDynamoDBMetadataStore.java | 173 +++--
 .../s3guard/ITestDynamoDBMetadataStoreScale.java   |   4 +-
 .../fs/s3a/s3guard/ITestS3GuardToolDynamoDB.java   |   7 +-
 .../fs/s3a/s3guard/TestDynamoDBMiscOperations.java |   2 +-
 .../TestPathMetadataDynamoDBTranslation.java   |   6 +-
 11 files changed, 892 insertions(+), 486 deletions(-)
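
The commit message names the race but not the shape of the fix; per the diffstat above, the table create/verify logic moves out of DynamoDBMetadataStore into the new DynamoDBMetadataStoreTableManager. As a hedged illustration only (a sketch of the general single-initializer pattern, not the actual patch), letting threads wait on one thread's table init instead of racing it looks roughly like this:

// Sketch only (not the Hadoop patch): one thread wins the right to
// initialize; the rest wait on a latch and rethrow any recorded failure.
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public final class SingleInitGuard {
  private final AtomicBoolean initStarted = new AtomicBoolean(false);
  private final CountDownLatch initDone = new CountDownLatch(1);
  private volatile IOException initFailure;

  public void initOnce(InitAction action)
      throws IOException, InterruptedException {
    if (initStarted.compareAndSet(false, true)) {
      try {
        action.run();            // e.g. create/verify the DynamoDB table
      } catch (IOException e) {
        initFailure = e;         // record the failure for waiting threads
        throw e;
      } finally {
        initDone.countDown();    // release waiters whether init worked or not
      }
    } else {
      initDone.await();          // block until the initializing thread is done
      if (initFailure != null) {
        throw new IOException("init failed in another thread", initFailure);
      }
    }
  }

  @FunctionalInterface
  public interface InitAction {
    void run() throws IOException;
  }
}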

diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index fdbdf37..9f120b8 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -176,7 +176,7 @@ public final class Constants {
 
   // number of times we should retry errors
   public static final String MAX_ERROR_RETRIES = "fs.s3a.attempts.maximum";
-  public static final int DEFAULT_MAX_ERROR_RETRIES = 20;
+  public static final int DEFAULT_MAX_ERROR_RETRIES = 10;
 
   // seconds until we give up trying to establish a connection to s3
   public static final String ESTABLISH_TIMEOUT =
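
For context (not part of the patch): the constant in the hunk above is read through Hadoop's Configuration, so the halved default takes effect wherever fs.s3a.attempts.maximum is left unset. A minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.Constants;

public class RetryLimitExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to DEFAULT_MAX_ERROR_RETRIES (now 10) when
    // fs.s3a.attempts.maximum is not set explicitly.
    int retries = conf.getInt(Constants.MAX_ERROR_RETRIES,
        Constants.DEFAULT_MAX_ERROR_RETRIES);
    System.out.println("effective S3A retry limit: " + retries);
  }
}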
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 92f04bf..044f3a5 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.s3a.s3guard;
 
 import javax.annotation.Nullable;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.URI;
@@ -28,7 +27,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -43,9 +41,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
-import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
-import com.amazonaws.SdkBaseException;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
 import com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome;
@@ -62,17 +58,9 @@ import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec;
 import com.amazonaws.services.dynamodbv2.document.spec.QuerySpec;
 import com.amazonaws.services.dynamodbv2.document.utils.ValueMap;
 import com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException;
-import com.amazonaws.services.dynamodbv2.model.BillingMode;
-import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
-import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
 import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription;
-import com.amazonaws.services.dynamodbv2.model.ResourceInUseException;
-import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
 import com.amazonaws.services.dynamodbv2.model.TableDescription;
-import com.amazonaws.services.dynamodbv2.model.Tag;
-import com.amazonaws.services.dynamodbv2.model.TagResourceRequest;
 import com.amazonaws.services.dynamodbv2.model.WriteRequest;
-import com.amazonaws.waiters.WaiterTimedOutException;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -89,7 +77,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.a

[hadoop] branch trunk updated: YARN-9836. General usability improvements in showSimulationTrace.html. Contributed by Adam Antal

2019-10-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 62b5cef  YARN-9836. General usability improvements in showSimulationTrace.html. Contributed by Adam Antal
62b5cef is described below

commit 62b5cefaeaa9cccd8d2de8eaff75d0e32e87f54d
Author: Szilard Nemeth 
AuthorDate: Fri Oct 11 13:39:27 2019 +0200

YARN-9836. General usability improvements in showSimulationTrace.html. Contributed by Adam Antal
---
 .../src/main/html/showSimulationTrace.html | 507 +++--
 1 file changed, 264 insertions(+), 243 deletions(-)

diff --git a/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html b/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html
index 570c0f2..102fdcf 100644
--- a/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html
+++ b/hadoop-tools/hadoop-sls/src/main/html/showSimulationTrace.html
@@ -69,265 +69,286 @@
[HTML hunk omitted: the mail archive stripped the markup tags from these lines; judging from the JavaScript below, the change adds a dismissible #error-div banner and restructures the chart containers under a .chart-area class.]
 // select file and draw
 function draw() {
-var filepath = document.getElementById('jsonfile').value;
-if (filepath) {
-for (var i = 1; i < 9; i ++) {
-$('#area' + i).empty();
-}
-filepath = filepath.replace("C:\\fakepath\\", "");
-drawCharts(filepath);
-} else {
-alert('choose file firstly.');
-}
+  $("#error-div").css("display", "none");
+  var filepath = document.getElementById('jsonfile').value;
+  if (filepath) {
+$(".chart-area").empty();
+filepath = filepath.replace("C:\\fakepath\\", "");
+drawCharts(filepath);
+  } else {
+$("#error-message").html("Please choose file.");
+$("#error-div").css("display", "block");
+$("#data").css("display", "none");
+  }
 }
 
 function drawCharts(filepath) {
-$.getJSON(filepath, function(data) {
-var numQueues = 0;
-var queueNames = new Array();
-for (var j in data[0]) {
-if (j.substring(0, 'queue'.length) === 'queue') {
-queueNames[numQueues] = j;
-numQueues ++;
-}
-}
-numQueues /= 2;
-
-// create graph
-$.getJSON(filepath, function(data) {
-var basetime = data[0].time;
-data.forEach(function(d) {
-d.time = (d.time - basetime) / 1000;
-});
-
-var legends = ["running.applications", "running.containers"];
-drawEachChart("#area1", data, legends, "Cluster running applications & containers", "Number", 0, 0);
-legends = ["jvm.free.memory", "jvm.max.memory", "jvm.total.memory"];
-drawEachChart("#area2", data, legends, "JVM memory", "Memory (GB)", 0, 0);
-legends = ["cluster.allocated.memory", "cluster.available.memory"];
-drawEachChart("#area3", data, legends, "Cluster allocated & available memory", "Memory (GB)", 0, 0);
-legends = ["cluster.allocated.vcores", "cluster.available.vcores"];
-drawEachChart("#area4", data, legends, "Cluster allocated & available vcores", "Number", 0, 0);
-
-for (var i = 0; i < numQueues; i ++) {
-legends[i] = queueNames[i * 2];
-}
-drawEachChart("#area5", data, legends, "Queue allocated memory", "Memory (GB)", 1, 100);
-for (var i = 0; i < numQueues; i ++) {
-legends[i] = queueNames[i * 2 + 1];
-}
-drawEachChart("#area6", data, legends, "Queue allocated vcores", "VCores", 1, 90);
-
-legends = [
-"scheduler.allocate.timecost",
-"scheduler.handle-NODE_ADDED.timecost", "scheduler.handle-NODE_REMOVED.timecost",
-"scheduler.handle-NODE_UPDATE.timecost", "scheduler.handle-APP_ADDED.timecost",
-"scheduler.handle-APP_REMOVED.timecost", "scheduler.handle-CONTAINER_EXPIRED.timecost"
-];
-drawEachChart("#area7", data, legends, "Scheduler allocate & handle operations timecost", "Timecost (ms)", 0, 210);
-});
-});
+  $.getJSON(filepath, function(data) {
+var numQueues = 0;
+var queueNames = new Array();
+for (var j in data[0]) {
+  if (j.substring(0, 'queue'.length) === 'queue') {
+queueNames[numQueues] = j;
+numQueues ++;
+  }
+}
+numQueues /= 2;
+
+// create graph
+$.getJSON(filepath, function(data) {
+  var basetime = data[0].time;
+  data.forEach(function(d) {
+d.time = (d.time - basetime) / 1000;
+  });
+
+  var legends = ["running.applications", "running.containers"];
+  drawEachChart("#area1", data, legends, "Cluster running applications & containers", "Number", 0, 0);
+  legends = ["jvm.free.memory", "jvm.max.memory", "jvm.total.memory"];
+  drawEachChart("#area2", data, legends, "JVM memory", "Memory (GB)", 0, 0);
+  legends = ["cluster.allocated.memory", "cluster.available.memory"];
+  drawEachChart("#area3", data, legends, "Cluster allocated & available memory", "Memory (GB)", 0, 0);
+  legends = ["cluster.allocated.vcores", "cluster.available.vcores"];
+  drawEachChart("#area4", data, legends, "Cluster allocated & available vcores", "Number", 0, 0);

[hadoop] branch trunk updated: YARN-8453. Additional Unit tests to verify queue limit and max-limit with multiple resource types. Contributed by Adam Antal

2019-10-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ec86f42  YARN-8453. Additional Unit tests to verify queue limit and max-limit with multiple resource types. Contributed by Adam Antal
ec86f42 is described below

commit ec86f42e40ec57ea5d515c1207161fcaf2c770e1
Author: Szilard Nemeth 
AuthorDate: Fri Oct 11 14:01:19 2019 +0200

YARN-8453. Additional Unit tests to verify queue limit and max-limit with multiple resource types. Contributed by Adam Antal
---
 ...estCapacitySchedulerWithMultiResourceTypes.java | 154 +++--
 1 file changed, 142 insertions(+), 12 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
index 3a8d33a..720e787 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
@@ -19,12 +19,21 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -32,6 +41,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -41,29 +51,43 @@ import java.util.Map;
 public class TestCapacitySchedulerWithMultiResourceTypes {
   private static String RESOURCE_1 = "res1";
 
-  @Test
-  public void testMaximumAllocationRefreshWithMultipleResourceTypes() throws Exception {
+  private static final String A_QUEUE = CapacitySchedulerConfiguration.ROOT + ".a";
+  private static final String B_QUEUE = CapacitySchedulerConfiguration.ROOT + ".b";
+  private static float A_CAPACITY = 50.0f;
+  private static float B_CAPACITY = 50.0f;
 
+  private void setupResources(boolean withGpu) {
 // Initialize resource map
 Map riMap = new HashMap<>();
 
 // Initialize mandatory resources
 ResourceInformation memory = ResourceInformation.newInstance(
-ResourceInformation.MEMORY_MB.getName(),
-ResourceInformation.MEMORY_MB.getUnits(),
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+ResourceInformation.MEMORY_MB.getName(),
+ResourceInformation.MEMORY_MB.getUnits(),
+YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
 ResourceInformation vcores = ResourceInformation.newInstance(
-ResourceInformation.VCORES.getName(),
-ResourceInformation.VCORES.getUnits(),
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
-YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+ResourceInformation.VCORES.getName(),
+ResourceInformation.VCORES.getUnits(),
+YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+ 
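
The hunk is cut off by the archive, but the four-argument ResourceInformation.newInstance(name, units, min, max) calls are intact. A self-contained sketch of the same resource-map setup (the riMap keys and the made-up min/max for the custom res1 type are illustrative, not the test's exact values):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class ResourceMapSketch {

  // Build the same style of resource map the test's setupResources() uses.
  public static Map<String, ResourceInformation> buildResourceMap() {
    Map<String, ResourceInformation> riMap = new HashMap<>();

    // Mandatory resources, mirroring the hunk above.
    ResourceInformation memory = ResourceInformation.newInstance(
        ResourceInformation.MEMORY_MB.getName(),
        ResourceInformation.MEMORY_MB.getUnits(),
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
    ResourceInformation vcores = ResourceInformation.newInstance(
        ResourceInformation.VCORES.getName(),
        ResourceInformation.VCORES.getUnits(),
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
    riMap.put(memory.getName(), memory);
    riMap.put(vcores.getName(), vcores);

    // The custom resource type the test exercises; bounds are hypothetical.
    riMap.put("res1", ResourceInformation.newInstance("res1", "", 0, 10));
    return riMap;
  }
}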

[hadoop] branch trunk updated (ec86f42 -> c561a70)

2019-10-11 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ec86f42  YARN-8453. Additional Unit tests to verify queue limit and max-limit with multiple resource types. Contributed by Adam Antal
 add c561a70  HDDS-2213. Reduce key provider loading log level in OzoneFileSystem#getAdditionalTokenIssuers (#1556)

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
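
The title says the key-provider loading log level is reduced, though the before/after levels are not visible in this summary. A hypothetical sketch of the pattern, assuming an SLF4J logger and a helper shaped like the issuer lookup:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KeyProviderLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(KeyProviderLogging.class);

  // Hypothetical call-site shape: a missing key provider is normal on
  // unsecured clusters, so it is logged at debug instead of a noisier level.
  static Object keyProviderOrNull(KeyProviderLoader loader) {
    try {
      return loader.load();
    } catch (IOException ioe) {
      LOG.debug("Error retrieving KeyProvider.", ioe); // previously louder
      return null;
    }
  }

  @FunctionalInterface
  interface KeyProviderLoader {
    Object load() throws IOException;
  }
}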

