[34/50] [abbrv] hadoop git commit: YARN-5015. entire time series is returned for YARN container system metrics (CPU and memory) (Varun Saxena via sjlee)

2016-07-08 Thread sjlee
YARN-5015. entire time series is returned for YARN container system metrics 
(CPU and memory) (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f88693d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f88693d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f88693d

Branch: refs/heads/YARN-2928
Commit: 1f88693d8530a1e8c974ac28598245376966519b
Parents: d65e45d
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 31 13:09:59 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:06 2016 -0700

--
 ...stTimelineReaderWebServicesHBaseStorage.java | 211 +++--
 .../storage/TestHBaseTimelineStorage.java   | 315 ++-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  26 +-
 .../reader/TimelineDataToRetrieve.java  |  32 +-
 .../reader/TimelineEntityFilters.java   |   5 +-
 .../reader/TimelineReaderWebServices.java   | 274 
 .../reader/TimelineReaderWebServicesUtils.java  |  17 +-
 .../storage/reader/ApplicationEntityReader.java |   3 +-
 .../storage/reader/FlowRunEntityReader.java |  12 +
 .../storage/reader/GenericEntityReader.java |   4 +-
 .../storage/reader/TimelineEntityReader.java|   5 +-
 .../TestFileSystemTimelineReaderImpl.java   |  12 +-
 12 files changed, 711 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f88693d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index ca80ed5..f9f4607 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.timelineservice.reader;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -112,13 +113,14 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 TimelineMetric m1 = new TimelineMetric();
 m1.setId("MAP_SLOT_MILLIS");
 Map<Long, Number> metricValues =
-ImmutableMap.of(ts - 10, (Number)2, ts - 8, 40);
+ImmutableMap.of(ts - 10, (Number)2, ts - 9, 7, ts - 8, 40);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
 m1 = new TimelineMetric();
 m1.setId("MAP1_SLOT_MILLIS");
-metricValues = ImmutableMap.of(ts - 10, (Number)2, ts - 8, 40);
+metricValues =
+ImmutableMap.of(ts - 10, (Number)2, ts - 9, 9, ts - 8, 40);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
@@ -460,6 +462,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 assertNotNull(resp);
 assertTrue("Response from server should have been " + status,
 resp.getClientResponseStatus().equals(status));
+System.out.println("Response is: " + resp.getEntity(String.class));
   }
 
   @Test
@@ -615,12 +618,18 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 (entity.getStartTime() == 1425016501034L) &&
 (entity.getMetrics().size() == 1)));
   }
+
+  // fields as CONFIGS will lead to a HTTP 400 as it makes no sense for
+  // flow runs.
+  uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+  "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
+  "fields=CONFIGS");
+  verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 } finally {
   client.destroy();
 }
   }
 
-
   @Test
   public void testGetFlowRunsMetri

[24/50] [abbrv] hadoop git commit: YARN-5093. created time shows 0 in most REST output (Varun Saxena via sjlee)

2016-07-08 Thread sjlee
YARN-5093. created time shows 0 in most REST output (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/050846f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/050846f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/050846f5

Branch: refs/heads/YARN-2928
Commit: 050846f597fccd68b9b445abcd62fb30c3d49262
Parents: c8100fd
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 24 10:33:04 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:04 2016 -0700

--
 .../records/timelineservice/TimelineEntity.java | 16 +--
 .../storage/TestHBaseTimelineStorage.java   | 30 +---
 .../storage/FileSystemTimelineReaderImpl.java   |  2 +-
 .../storage/reader/ApplicationEntityReader.java |  5 ++--
 .../storage/reader/GenericEntityReader.java |  4 +--
 .../reader/TestTimelineReaderWebServices.java   |  4 +--
 .../TestFileSystemTimelineReaderImpl.java   | 10 +++
 7 files changed, 45 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/050846f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 7ce8279..9c0a983 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -144,7 +144,7 @@ public class TimelineEntity implements 
Comparable {
   private NavigableSet events = new TreeSet<>();
   private HashMap<String, Set> isRelatedToEntities = new HashMap<>();
   private HashMap<String, Set> relatesToEntities = new HashMap<>();
-  private long createdTime;
+  private Long createdTime;
 
   public TimelineEntity() {
 identifier = new Identifier();
@@ -490,7 +490,7 @@ public class TimelineEntity implements 
Comparable {
   }
 
   @XmlElement(name = "createdtime")
-  public long getCreatedTime() {
+  public Long getCreatedTime() {
 if (real == null) {
   return createdTime;
 } else {
@@ -499,7 +499,7 @@ public class TimelineEntity implements 
Comparable {
   }
 
   @JsonSetter("createdtime")
-  public void setCreatedTime(long createdTs) {
+  public void setCreatedTime(Long createdTs) {
 if (real == null) {
   this.createdTime = createdTs;
 } else {
@@ -547,6 +547,16 @@ public class TimelineEntity implements 
Comparable {
   public int compareTo(TimelineEntity other) {
 int comparison = getType().compareTo(other.getType());
 if (comparison == 0) {
+  if (getCreatedTime() == null) {
+if (other.getCreatedTime() == null) {
+  return getId().compareTo(other.getId());
+} else {
+  return 1;
+}
+  }
+  if (other.getCreatedTime() == null) {
+return -1;
+  }
   if (getCreatedTime() > other.getCreatedTime()) {
 // Order by created time desc
 return -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/050846f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
index 8ab54bc..aebd936 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -488,7 +488,7 @@ public class TestHBaseTimelineStorage {
 ApplicationEntity entity = new ApplicationEntity();
 String appId = "appl

[14/50] [abbrv] hadoop git commit: YARN-5045. hbase unit tests fail due to dependency issues. (Sangjin Lee via varunsaxena)

2016-07-08 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ffa5f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
deleted file mode 100644
index 58d5e61..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestPhoenixOfflineAggregationWriterImpl.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.hbase.IntegrationTestingUtility;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import 
org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.OfflineAggregationInfo;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
-public class TestPhoenixOfflineAggregationWriterImpl extends BaseTest {
-  private static PhoenixOfflineAggregationWriterImpl storage;
-  private static final int BATCH_SIZE = 3;
-
-  @BeforeClass
-  public static void setup() throws Exception {
-YarnConfiguration conf = new YarnConfiguration();
-storage = setupPhoenixClusterAndWriterForTest(conf);
-  }
-
-  @Test(timeout = 90000)
-  public void testFlowLevelAggregationStorage() throws Exception {
-testAggregator(OfflineAggregationInfo.FLOW_AGGREGATION);
-  }
-
-  @Test(timeout = 90000)
-  public void testUserLevelAggregationStorage() throws Exception {
-testAggregator(OfflineAggregationInfo.USER_AGGREGATION);
-  }
-
-  @AfterClass
-  public static void cleanup() throws Exception {
-storage.dropTable(OfflineAggregationInfo.FLOW_AGGREGATION_TABLE_NAME);
-storage.dropTable(OfflineAggregationInfo.USER_AGGREGATION_TABLE_NAME);
-tearDownMiniCluster();
-  }
-
-  private static PhoenixOfflineAggregationWriterImpl
-setupPhoenixClusterAndWriterForTest(YarnConfiguration conf)
-  throws Exception{
-Map props = new HashMap<>();
-// Must update config before starting server
-props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
-Boolean.FALSE.toString());
-props.put("java.security.krb5.realm", "");
-props.put("java.security.krb5.kdc", "");
-props.put(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER,
-Boolean.FALSE.toString());
-props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
-props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
-// Make a small batch size to test multiple calls to reserve sequences
-props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB,
-Long.toString(BATCH_SIZE));
-// Must update config before starting server
-setUpTestDriver(new 

[20/50] [abbrv] hadoop git commit: YARN-5045. hbase unit tests fail due to dependency issues. (Sangjin Lee via varunsaxena)

2016-07-08 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ffa5f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
new file mode 100644
index 0000000..ca80ed5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -0,0 +1,2008 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.text.DateFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.GenericType;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+public class TestTimelineReaderWebServicesHBaseStorage {
+  private int serverPort;
+  private TimelineReaderServer server;
+  private static HBaseTestingUtility util;
+  private static long ts = System.currentTimeMillis();
+  private static long dayTs =
+  TimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+
+  @BeforeClass
+  public static void setup() throws Exception {
+util = new HBaseTestingUtility();
+Configuration conf = util.getConfiguration();
+conf.setInt("hfile.format.version", 3);
+util.startMiniCluster();
+TimelineSchemaCreator.createAllTables(util.getConfiguration(), 

[09/50] [abbrv] hadoop git commit: YARN-4447. Provide a mechanism to represent complex filters and parse them at the REST layer (Varun Saxena via sjlee)

2016-07-08 Thread sjlee
YARN-4447. Provide a mechanism to represent complex filters and parse them at 
the REST layer (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/585c77d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/585c77d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/585c77d9

Branch: refs/heads/YARN-2928
Commit: 585c77d9bb61cd40a519dd2084edb899e29b39b0
Parents: 5aef416
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon May 2 14:06:19 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:02 2016 -0700

--
 .../reader/TimelineParseConstants.java  |  34 +
 .../reader/TimelineParseException.java  |  36 +
 .../timelineservice/reader/TimelineParser.java  |  37 +
 .../reader/TimelineParserForCompareExpr.java| 300 ++
 .../reader/TimelineParserForDataToRetrieve.java |  95 ++
 .../reader/TimelineParserForEqualityExpr.java   | 343 +++
 .../reader/TimelineParserForExistFilters.java   |  51 +
 .../reader/TimelineParserForKVFilters.java  |  78 ++
 .../reader/TimelineParserForNumericFilters.java |  72 ++
 .../TimelineParserForRelationFilters.java   |  71 ++
 .../reader/TimelineReaderWebServices.java   | 220 -
 .../reader/TimelineReaderWebServicesUtils.java  | 196 ++--
 .../reader/filter/TimelineCompareFilter.java|  73 +-
 .../reader/filter/TimelineExistsFilter.java |  49 +-
 .../reader/filter/TimelineFilterList.java   |  36 +
 .../reader/filter/TimelineKeyValueFilter.java   |  13 +
 .../reader/filter/TimelineKeyValuesFilter.java  |  61 +-
 .../reader/filter/TimelinePrefixFilter.java |  37 +
 .../reader/TestTimelineReaderWebServices.java   |  14 +-
 ...stTimelineReaderWebServicesHBaseStorage.java | 900 +-
 .../TestTimelineReaderWebServicesUtils.java | 923 +++
 21 files changed, 3442 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/585c77d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
new file mode 100644
index 000..662a102
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+/**
+ * Set of constants used while parsing filter expressions.
+ */
+final class TimelineParseConstants {
+  private TimelineParseConstants() {
+  }
+  static final String COMMA_DELIMITER = ",";
+  static final String COLON_DELIMITER = ":";
+  static final char NOT_CHAR = '!';
+  static final char SPACE_CHAR = ' ';
+  static final char OPENING_BRACKET_CHAR = '(';
+  static final char CLOSING_BRACKET_CHAR = ')';
+  static final char COMMA_CHAR = ',';
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/585c77d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseException.java
 

[23/50] [abbrv] hadoop git commit: YARN-5050. Code cleanup for TestDistributedShell (Li Lu via sjlee)

2016-07-08 Thread sjlee
YARN-5050. Code cleanup for TestDistributedShell (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8100fdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8100fdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8100fdb

Branch: refs/heads/YARN-2928
Commit: c8100fdb474bce91c1154a07d85ba99c0c6099d5
Parents: 6fc95ff
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu May 19 17:25:05 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:04 2016 -0700

--
 .../distributedshell/TestDistributedShell.java | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8100fdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index ba11e60..c02cd85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -135,6 +135,8 @@ public class TestDistributedShell {
 
 conf = new YarnConfiguration();
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
+// reduce the teardown waiting time
+conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000);
 conf.set("yarn.log.dir", "target");
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
 // mark if we need to launch the v1 timeline server
@@ -183,9 +185,6 @@ public class TestDistributedShell {
   conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
   conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + 
TIMELINE_AUX_SERVICE_NAME
 + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
-  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, 
true);
-  conf.setBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED,
-  false);
 } else {
   Assert.fail("Wrong timeline version number: " + timelineVersion);
 }
@@ -280,7 +279,7 @@ public class TestDistributedShell {
 testDSShell(true);
   }
 
-  @Test(timeout=90000)
+  @Test
   @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2() throws Exception {
 testDSShell(false);
@@ -290,12 +289,14 @@ public class TestDistributedShell {
 testDSShell(haveDomain, true);
   }
 
-  @Test(timeout=90000)
+  @Test
+  @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2DefaultFlow() throws Exception {
 testDSShell(false, true);
   }
 
-  @Test(timeout=90000)
+  @Test
+  @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2CustomizedFlow() throws Exception {
 testDSShell(false, false);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: YARN-5138. fix "no findbugs output file" error for hadoop-yarn-server-timelineservice-hbase-tests. (Vrushali C via gtcarrera9)

2016-07-08 Thread sjlee
YARN-5138. fix "no findbugs output file" error for 
hadoop-yarn-server-timelineservice-hbase-tests. (Vrushali C via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/795094e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/795094e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/795094e1

Branch: refs/heads/YARN-2928
Commit: 795094e18dbe205a6540edda8f59168b614115d5
Parents: 88fe027
Author: Li Lu 
Authored: Thu May 26 11:12:31 2016 -0700
Committer: Sangjin Lee 
Committed: Fri Jul 8 10:19:05 2016 -0700

--
 .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/795094e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index b5dc3c0..a8e5195 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -362,6 +362,13 @@
   
 
   
+org.codehaus.mojo
+findbugs-maven-plugin
+ 
+  true
+
+  
+  
 maven-jar-plugin
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: YARN-4447. Provide a mechanism to represent complex filters and parse them at the REST layer (Varun Saxena via sjlee)

2016-07-08 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/585c77d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 57d75db..2e667d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -18,29 +18,19 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
-import java.io.IOException;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 
 /**
  * Set of utility methods to be used by timeline reader web services.
  */
 final class TimelineReaderWebServicesUtils {
-  private static final String COMMA_DELIMITER = ",";
-  private static final String COLON_DELIMITER = ":";
 
   private TimelineReaderWebServicesUtils() {
   }
@@ -56,11 +46,10 @@ final class TimelineReaderWebServicesUtils {
* @param entityType Entity Type.
* @param entityId Entity Id.
* @return a {@link TimelineReaderContext} object.
-   * @throws Exception if any problem occurs during parsing.
*/
   static TimelineReaderContext createTimelineReaderContext(String clusterId,
   String userId, String flowName, String flowRunId, String appId,
-  String entityType, String entityId) throws Exception {
+  String entityType, String entityId) {
 return new TimelineReaderContext(parseStr(clusterId), parseStr(userId),
 parseStr(flowName), parseLongStr(flowRunId), parseStr(appId),
 parseStr(entityType), parseStr(entityId));
@@ -79,20 +68,17 @@ final class TimelineReaderWebServicesUtils {
* @param metricfilters Entities to return must match these metric filters.
* @param eventfilters Entities to return must match these event filters.
* @return a {@link TimelineEntityFilters} object.
-   * @throws Exception if any problem occurs during parsing.
+   * @throws TimelineParseException if any problem occurs during parsing.
*/
   static TimelineEntityFilters createTimelineEntityFilters(String limit,
   String createdTimeStart, String createdTimeEnd, String relatesTo,
   String isRelatedTo, String infofilters, String conffilters,
-  String metricfilters, String eventfilters) throws Exception {
+  String metricfilters, String eventfilters) throws TimelineParseException 
{
 return new TimelineEntityFilters(parseLongStr(limit),
 parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
-parseKeyStrValuesStr(relatesTo, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValuesStr(isRelatedTo, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValueObj(infofilters, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValueStr(conffilters, COMMA_DELIMITER, COLON_DELIMITER),
-parseMetricFilters(metricfilters, COMMA_DELIMITER),
-parseValuesStr(eventfilters, COMMA_DELIMITER));
+parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
+parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
+parseMetricFilters(metricfilters), parseEventFilters(eventfilters));
   }
 
   /**
@@ -102,12 +88,13 @@ final class TimelineReaderWebServicesUtils {
* @param metrics metrics to retrieve.
* @param fields fields to retrieve.
* @return a {@link TimelineDataToRetrieve} object.
-   * @throws Exception 

[04/50] [abbrv] hadoop git commit: YARN-3150. Documenting the timeline service v2. (Sangjin Lee and Vrushali C via gtcarrera9)

2016-07-08 Thread sjlee
ss is 
`org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity`
+whereas the v.1 class is 
`org.apache.hadoop.yarn.api.records.timeline.TimelineEntity`. The methods
+on `TimelineClient` suitable for writing to the Timeline Service v.2 are 
clearly delineated, and
+they use the v.2 types as arguments.
+
+Timeline Service v.2 `putEntities` methods come in 2 varieties: `putEntities` 
and
+`putEntitiesAsync`. The former is a blocking operation which should be used 
for writing more
+critical data (e.g. lifecycle events). The latter is a non-blocking operation. 
Note that neither
+has a return value.
+
+Creating a `TimelineClient` for v.2 involves passing in the application id to 
the factory method.
+
+For example:
+
+
+// Create and start the Timeline client v.2
+TimelineClient client = TimelineClient.createTimelineClient(appId);
+client.init(conf);
+client.start();
+
+try {
+  TimelineEntity myEntity = new TimelineEntity();
+  myEntity.setEntityType("MY_APPLICATION");
+  myEntity.setEntityId("MyApp1");
+  // Compose other entity info
+
+  // Blocking write
+  client.putEntities(myEntity);
+
+  TimelineEntity myEntity2 = new TimelineEntity();
+  // Compose other info
+
+  // Non-blocking write
+  client.putEntitiesAsync(myEntity2);
+
+} catch (IOException e) {
+  // Handle the exception
+} catch (RuntimeException e) {
+  // In Hadoop 2.6, if attempts submit information to the Timeline Server 
fail more than the retry limit,
+  // a RuntimeException will be raised. This may change in future 
releases, being
+  // replaced with an IOException that is (or wraps) that which triggered 
retry failures.
+} catch (YarnException e) {
+  // Handle the exception
+} finally {
+  // Stop the Timeline client
+  client.stop();
+}
+
+As evidenced above, you need to specify the YARN application id to be able to 
write to the Timeline
+Service v.2. Note that currently you need to be on the cluster to be able to 
write to the Timeline
+Service. For example, an application master or code in the container can write 
to the Timeline
+Service, while an off-cluster MapReduce job submitter cannot.
+
+You can create and publish your own entities, events, and metrics as with 
previous versions.
+
+Application frameworks should set the "flow context" whenever possible in 
order to take advantage
+of the flow support Timeline Service v.2 provides. The flow context consists 
of the following:
+
+* Flow name: a string that identifies the high-level flow (e.g. "distributed 
grep" or any
+identifiable name that can uniquely represent the app)
+* Flow run id: a monotonically-increasing sequence of numbers that distinguish 
different runs of
+the same flow
+* (optional) Flow version: a string identifier that denotes a version of the 
flow
+
+If the flow context is not specified, defaults are supplied for these 
attributes:
+
+* Flow name: the YARN application name (or the application id if the name is 
not set)
+* Flow run id: the application start time in Unix time (milliseconds)
+* Flow version: "1"
+
+You can provide the flow context via YARN application tags:
+
+ApplicationSubmissionContext appContext = 
app.getApplicationSubmissionContext();
+
+// set the flow context as YARN application tags
+Set<String> tags = new HashSet<>();
+tags.add(TimelineUtils.generateFlowNameTag("distributed grep"));
+
tags.add(TimelineUtils.generateFlowVersionTag("3df8b0d6100530080d2e0decf9e528e57c42a90a"));
+tags.add(TimelineUtils.generateFlowRunIdTag(System.currentTimeMillis()));
+
+appContext.setApplicationTags(tags);
+
+# Timeline Service v.2 REST API
+
+Querying the Timeline Service v.2 is currently only supported via REST API; 
there is no API
+client implemented in the YARN libraries.
+
+The v.2 REST API is implemented under the path `/ws/v2/timeline/` on the 
Timeline Service web
+service.
+
+Here is an informal description of the API.
+
+### Root path
+
+GET /ws/v2/timeline/
+
+Returns a JSON object describing the service instance and version information.
+
+{
+  "About":"Timeline Reader API",
+  "timeline-service-version":"3.0.0-SNAPSHOT",
+  "timeline-service-build-version":"3.0.0-SNAPSHOT from 
fb0acd08e6f0b030d82eeb7cbfa5404376313e60 by sjlee source checksum 
be6cba0e42417d53be16459e1685e7",
+  "timeline-service-version-built-on":"2016-04-11T23:15Z",
+  "hadoop-version":"3.0.0-SNAPSHOT",
+  "hadoop-build-version":"3.0.0-SNAPSHOT from 
fb0acd08e6f0b030d82eeb7cbfa5404376313e60 by sjlee source checksum 
ee968fd0aedcc7384230ee3ca216e790",
+  "hadoop-version-built-on":"2016-04-11T23:14Z"
+}
+
+### Request Examples
+
+The following shows some of

[12/50] [abbrv] hadoop git commit: YARN-5102. timeline service build fails with java 8. (Sangjin Lee via varunsaxena)

2016-07-08 Thread sjlee
YARN-5102. timeline service build fails with java 8. (Sangjin Lee via 
varunsaxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcd59e7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcd59e7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcd59e7f

Branch: refs/heads/YARN-2928
Commit: fcd59e7f4dc73496e215bf182e9eead8eefca273
Parents: 47ffa5f
Author: Varun Saxena 
Authored: Wed May 18 01:18:18 2016 +0530
Committer: Sangjin Lee 
Committed: Fri Jul 8 10:19:03 2016 -0700

--
 hadoop-project/pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd59e7f/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f071504..9b30570 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1058,6 +1058,12 @@
 org.apache.hbase
 hbase-common
 ${hbase.version}
+
+  
+jdk.tools
+jdk.tools
+  
+
   
   
 org.apache.hbase


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] [abbrv] hadoop git commit: YARN-5096 addendum. Turned another logging statement to debug. Contributed by Sangjin Lee.

2016-07-08 Thread sjlee
YARN-5096 addendum. Turned another logging statement to debug. Contributed by 
Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc95ffb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc95ffb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc95ffb

Branch: refs/heads/YARN-2928
Commit: 6fc95ffbc0bffcb9ea584209b904b4d05921937a
Parents: d5f1a30
Author: Sangjin Lee 
Authored: Thu May 19 15:40:15 2016 -0700
Committer: Sangjin Lee 
Committed: Fri Jul 8 10:19:03 2016 -0700

--
 .../yarn/server/timelineservice/storage/common/ColumnHelper.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc95ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
index dff677b..759bf27 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -258,7 +258,9 @@ public class ColumnHelper {
 
   String columnName = null;
   if (columnPrefixBytes == null) {
-LOG.info("null prefix was specified; returning all columns");
+if (LOG.isDebugEnabled()) {
+  LOG.debug("null prefix was specified; returning all columns");
+}
 // Decode the spaces we encoded in the column name.
 columnName = Separator.decode(columnKey, Separator.SPACE);
   } else {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN system metrics (Li Lu via sjlee)

2016-07-08 Thread sjlee
YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN system 
metrics (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74c1b597
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74c1b597
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74c1b597

Branch: refs/heads/YARN-2928
Commit: 74c1b5977c1e2af820fe3a37a0b31af86f08430c
Parents: e9cfce4
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Apr 22 10:24:40 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:01 2016 -0700

--
 .../records/timelineservice/TimelineMetric.java | 140 ++--
 .../TimelineMetricCalculator.java   | 115 ++
 .../TimelineMetricOperation.java| 167 +++
 .../timelineservice/TestTimelineMetric.java | 100 +
 .../TestTimelineServiceRecords.java |   6 +-
 .../timelineservice/NMTimelinePublisher.java|   4 +
 .../collector/AppLevelTimelineCollector.java|  72 +++
 .../collector/TimelineCollector.java| 213 ++-
 .../storage/TimelineAggregationTrack.java   |   2 +-
 .../collector/TestTimelineCollector.java| 127 +++
 .../TestFileSystemTimelineWriterImpl.java   |  43 +++-
 .../storage/TestHBaseTimelineStorage.java   |  35 ++-
 12 files changed, 998 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74c1b597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index 2f60515..f0c6849 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.api.records.timelineservice;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Comparator;
+import java.util.Collections;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -48,13 +49,13 @@ public class TimelineMetric {
 
   private Type type;
   private String id;
-  private Comparator reverseComparator = new Comparator() {
-@Override
-public int compare(Long l1, Long l2) {
-  return l2.compareTo(l1);
-}
-  };
-  private TreeMap<Long, Number> values = new TreeMap<>(reverseComparator);
+  // By default, not to do any aggregation operations. This field will NOT be
+  // persisted (like a "transient" member).
+  private TimelineMetricOperation realtimeAggregationOp
+  = TimelineMetricOperation.NOP;
+
+  private TreeMap<Long, Number> values
+  = new TreeMap<>(Collections.reverseOrder());
 
   public TimelineMetric() {
 this(Type.SINGLE_VALUE);
@@ -83,6 +84,26 @@ public class TimelineMetric {
 this.id = metricId;
   }
 
+  /**
+   * Get the real time aggregation operation of this metric.
+   *
+   * @return Real time aggregation operation
+   */
+  public TimelineMetricOperation getRealtimeAggregationOp() {
+return realtimeAggregationOp;
+  }
+
+  /**
+   * Set the real time aggregation operation of this metric.
+   *
+   * @param op A timeline metric operation that the metric should perform on
+   *   real time aggregations
+   */
+  public void setRealtimeAggregationOp(
+  final TimelineMetricOperation op) {
+this.realtimeAggregationOp = op;
+  }
+
   // required by JAXB
   @InterfaceAudience.Private
   @XmlElement(name = "values")
@@ -98,8 +119,8 @@ public class TimelineMetric {
 if (type == Type.SINGLE_VALUE) {
   overwrite(vals);
 } else {
-  if (values != null) {
-this.values = new TreeMap<Long, Number>(reverseComparator);
+  if (vals != null) {
+this.values = new TreeMap<>(Collections.reverseOrder());
 this.values.putAll(vals);
   } else {
 this.values = null;
@@ -166,11 +187,100 @@ public class TimelineMetric {
 
   @

[01/50] [abbrv] hadoop git commit: YARN-3461. Consolidate flow name/version/run defaults. (Sangjin Lee via Varun Saxena) [Forced Update!]

2016-07-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 c994596b2 -> 15962ea0c (forced update)


YARN-3461. Consolidate flow name/version/run defaults. (Sangjin Lee via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/525b30ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/525b30ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/525b30ea

Branch: refs/heads/YARN-2928
Commit: 525b30ea70866b32931ecd443763d4763623e91a
Parents: e31210d
Author: Varun Saxena 
Authored: Thu Apr 7 22:10:11 2016 +0530
Committer: Sangjin Lee 
Committed: Fri Jul 8 10:19:00 2016 -0700

--
 .../mapred/TestMRTimelineEventHandling.java | 46 +++---
 .../distributedshell/TestDistributedShell.java  | 18 --
 .../yarn/util/timeline/TimelineUtils.java   |  8 ++-
 .../resourcemanager/amlauncher/AMLauncher.java  | 67 +++-
 .../RMTimelineCollectorManager.java | 36 +--
 .../TestSystemMetricsPublisherForV2.java| 20 +++---
 .../collector/AppLevelTimelineCollector.java| 11 +---
 .../collector/NodeTimelineCollectorManager.java | 12 
 .../collector/TimelineCollectorContext.java |  5 +-
 9 files changed, 148 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/525b30ea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index f7283ae..300b4fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -20,15 +20,12 @@ package org.apache.hadoop.mapred;
 
 import java.io.File;
 import java.io.IOException;
-
 import java.util.EnumSet;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -38,9 +35,9 @@ import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
@@ -48,7 +45,6 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import 
org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
-
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -205,7 +201,7 @@ public class TestMRTimelineEventHandling {
   ApplicationReport appReport = apps.get(0);
   firstAppId = appReport.getApplicationId();
 
-  checkNewTimelineEvent(firstAppId);
+  checkNewTimelineEvent(firstAppId, appReport);
 
   LOG.info("Run 2nd job which should be failed.");
   job = UtilsForTests.runJobFail(new JobConf(conf), inDir, outDir);
@@ -214,11 +210,10 @@ public class TestMRTimelineEventHandling {
   
   apps = yarnClient.getApplications(appStates);
   Assert.assertEquals(apps.size(), 2);
-  
-  ApplicationId secAppId = null;
-  secAppId = apps.get(0).getApplicationId() == firstAppId ? 
-  apps.get(1).getApplicationId() : apps.get(0).getApplicationId();
-  checkNewTimelineEvent(firstAppId);
+
+  appReport = apps.get(0).getApplicationId().equals(firstAppId) ?
+  apps.get(0) : apps.get(1);
+  checkNewTimelineEvent(firstAppId, appReport);
 
 } finally {
   if (cluster != null) {
@@ -235,7 +230,8 @@ public 

[02/50] [abbrv] hadoop git commit: YARN-4711. NM is going down with NPE's due to single thread processing of events by Timeline client (Naganarasimha G R via sjlee)

2016-07-08 Thread sjlee
YARN-4711. NM is going down with NPE's due to single thread processing of 
events by Timeline client (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e31210dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e31210dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e31210dd

Branch: refs/heads/YARN-2928
Commit: e31210dd01dea3dc665128d5c56f5066f389a95b
Parents: efea605
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Mar 28 15:50:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 8 10:19:00 2016 -0700

--
 .../dev-support/findbugs-exclude.xml|  11 +-
 .../records/timelineservice/TimelineEntity.java |  25 ++-
 .../client/api/impl/TimelineClientImpl.java |  35 ++--
 .../api/impl/TestTimelineClientV2Impl.java  |  91 +++-
 .../metrics/ContainerMetricsConstants.java  |   8 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |  10 +-
 .../collectormanager/NMCollectorService.java|  10 +-
 .../application/Application.java|   4 -
 .../application/ApplicationImpl.java|  23 +-
 .../timelineservice/NMTimelinePublisher.java| 210 +++
 .../TestNMTimelinePublisher.java|  24 +--
 .../yarn/server/nodemanager/webapp/MockApp.java |   5 -
 12 files changed, 278 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e31210dd/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d724026..08c6ba2 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -117,8 +117,15 @@
 
   
   
-
- 
+
+
+
+  
+
+  
+
+
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e31210dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index acc132e..7ce8279 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -17,15 +17,6 @@
  */
 package org.apache.hadoop.yarn.api.records.timelineservice;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.annotate.JsonSetter;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -33,6 +24,16 @@ import java.util.NavigableSet;
 import java.util.Set;
 import java.util.TreeSet;
 
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
+import org.codehaus.jackson.annotate.JsonSetter;
+
 /**
  * The basic timeline entity data structure for timeline service v2. Timeline
  * entity objects are not thread safe and should not be accessed concurrently.
@@ -564,6 +565,10 @@ public class TimelineEntity implements 
Comparable {
   }
 
   public String toString() {
-return identifier.toString();
+if (real == null) {
+  return identifier.toString();
+} else {
+  return real.toString();
+}
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e31210dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache

hadoop git commit: YARN-5174. [documentation] several updates/corrections to timeline service documentation (Sangjin Lee, Varun Saxena, Naganarasimha G R, and Li Lu via sjlee)

2016-07-01 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 52a4cfe53 -> 27550a454


YARN-5174. [documentation] several updates/corrections to timeline service 
documentation (Sangjin Lee, Varun Saxena, Naganarasimha G R, and Li Lu via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27550a45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27550a45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27550a45

Branch: refs/heads/YARN-2928
Commit: 27550a454ea92115909f19c4449cbbb9de61c069
Parents: 52a4cfe
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Jul 1 16:23:00 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jul 1 16:23:00 2016 -0700

--
 .../src/site/markdown/TimelineServiceV2.md  | 378 +++
 .../site/resources/images/flow_hierarchy.png| Bin 0 -> 42345 bytes
 2 files changed, 219 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27550a45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 6e151c9..d1ef46b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -21,7 +21,7 @@ The YARN Timeline Service v.2
 * [Current Status](#Current_Status)
 * [Deployment](#Deployment)
 * [Configurations](#Configurations)
-* [Enabling the Timeline Service v.2](#Enabling_Timeline_Service_v2)
+* [Enabling Timeline Service v.2](#Enabling_Timeline_Service_v2)
 * [Publishing of application specific 
data](#Publishing_of_application_specific_data)
 * [Timeline Service v.2 REST API](#Timeline_Service_REST_API_v2)
 * [Query Flows](#REST_API_LIST_FLOWS)
@@ -55,11 +55,15 @@ scales well to a large size while maintaining good response 
times for reads and
  Usability improvements
 In many cases, users are interested in information at the level of "flows" or 
logical groups of
 YARN applications. It is much more common to launch a set or series of YARN 
applications to
-complete a logic application. Timeline Service v.2 supports the notion of 
flows explicitly. In
+complete a logical application. Timeline Service v.2 supports the notion of 
flows explicitly. In
 addition, it supports aggregating metrics at the flow level.
 
-Also, information such as configuration and metrics is treated and supported 
as a first-class
-citizen.
+Also, information such as configuration and metrics is treated and supported 
as first-class
+citizens.
+
+The following diagram illustrates the relationship between different YARN 
entities modelling flows.
+
+![Flow Hierarchy](./images/flow_hierarchy.png)
 
 ###Architecture
 
@@ -85,30 +89,32 @@ The following diagram illustrates the design at a high 
level.
 
 ### Current Status and Future Plans
 
-YARN Timeline Service v.2 is currently in alpha. It is very much work in 
progress, and many things
-can and will change rapidly. Users must enable Timeline Service v.2 only on a 
test or
+YARN Timeline Service v.2 is currently in alpha ("alpha 1"). It is very much 
work in progress, and
+many things can and will change rapidly. Users must enable Timeline Service 
v.2 only on a test or
 experimental cluster to test the feature.
 
-A complete end-to-end flow of writes and reads must be functional, with Apache 
HBase as the
-backend. You must be able to start generating data. When enabled, all 
YARN-generic events are
-published as well as YARN system metrics such as CPU and memory. Furthermore, 
some applications
-including Distributed Shell and MapReduce write per-framework data to YARN 
Timeline Service v.2.
+Most importantly, **security is not enabled**. Do not set up or use Timeline 
Service v.2 until
+security is implemented if security is a requirement.
 
-The REST API comes with a good number of useful and flexible query patterns 
(see below for more
-information).
+A complete end-to-end flow of writes and reads is functional, with Apache 
HBase as the backend.
+You should be able to start generating data. When enabled, all YARN-generic 
events are
+published as well as YARN system metrics such as CPU and memory. Furthermore, 
some applications
+including Distributed Shell and MapReduce can write per-framework data to YARN 
Timeline Service
+v.2.
 
-Although the basic mode of accessing data is via REST, it also comes with a 
basic web UI based on
-the proposed new YARN UI framework. Cur

hadoop git commit: HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)

2016-06-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e751bb449 -> ae90d4dd9


HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae90d4dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae90d4dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae90d4dd

Branch: refs/heads/branch-2
Commit: ae90d4dd908cf3f9e9ff26fa8e92f028057a9ca1
Parents: e751bb4
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Jun 24 14:48:36 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jun 24 14:48:36 2016 -0700

--
 .../src/main/conf/log4j.properties  | 13 +++
 .../datanode/web/webhdfs/WebHdfsHandler.java| 38 +++-
 2 files changed, 43 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae90d4dd/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index dc7e705..95afc61 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -300,6 +300,19 @@ 
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+
+# WebHdfs request log on datanodes
+# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
+# direct the log to a separate file.
+#datanode.webhdfs.logger=INFO,console
+#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
+#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
+#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
+
+
 # Appender for viewing information for errors and warnings
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae90d4dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index dffe34d..ea824c8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,6 +29,7 @@ import static io.netty.handler.codec.http.HttpMethod.POST;
 import static io.netty.handler.codec.http.HttpMethod.PUT;
 import static io.netty.handler.codec.http.HttpResponseStatus.CONTINUE;
 import static io.netty.handler.codec.http.HttpResponseStatus.CREATED;
+import static 
io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
 import static io.netty.handler.codec.http.HttpResponseStatus.OK;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HDFS_URI_SCHEME;
@@ -48,6 +49,7 @@ import io.netty.handler.stream.ChunkedStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
@@ -78,6 +80,7 @@ import com.google.common.base.Preconditions;
 
 public class WebHdfsHandler extends SimpleChannelInboundHandler {
   static final Log LOG = LogFactory.getLog(WebHdfsHandler.class);
+  static final Log REQLOG = LogFactory.getLog("datanode.webhdfs");
   public static final String WEBHDFS_PREFIX = WebHdfsFileSystem.PATH_PREFIX;
   public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
   public static final String APPLICATION_OCTET_STREAM =
@@ -94,6 +97,7 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
   private String path;
   private ParameterParser params;
   private UserGroupInformation ugi;
+  private DefaultHttpResponse resp = null;
 
   public WebHdfsHandler(Configuration conf, Configuration confForCreate)
 throws IOException {
@@ -115,1

hadoop git commit: HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)

2016-06-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 975786492 -> bf74dbf80


HDFS-7959. WebHdfs logging is missing on Datanode (Kihwal Lee via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf74dbf8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf74dbf8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf74dbf8

Branch: refs/heads/trunk
Commit: bf74dbf80dc9379d669779a598950908adffb8a7
Parents: 97578649
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Jun 24 14:44:15 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Jun 24 14:44:15 2016 -0700

--
 .../src/main/conf/log4j.properties  | 13 ++
 .../datanode/web/webhdfs/WebHdfsHandler.java| 44 +++-
 2 files changed, 46 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf74dbf8/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index dc7e705..95afc61 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -300,6 +300,19 @@ 
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+
+# WebHdfs request log on datanodes
+# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
+# direct the log to a separate file.
+#datanode.webhdfs.logger=INFO,console
+#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
+#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
+#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
+
+
 # Appender for viewing information for errors and warnings
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf74dbf8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 13f394e..0a8f40d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -34,6 +34,7 @@ import static io.netty.handler.codec.http.HttpMethod.POST;
 import static io.netty.handler.codec.http.HttpMethod.PUT;
 import static io.netty.handler.codec.http.HttpResponseStatus.CONTINUE;
 import static io.netty.handler.codec.http.HttpResponseStatus.CREATED;
+import static 
io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
 import static io.netty.handler.codec.http.HttpResponseStatus.OK;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HDFS_URI_SCHEME;
@@ -53,6 +54,7 @@ import io.netty.handler.stream.ChunkedStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
@@ -82,6 +84,7 @@ import com.google.common.base.Preconditions;
 
 public class WebHdfsHandler extends SimpleChannelInboundHandler {
   static final Log LOG = LogFactory.getLog(WebHdfsHandler.class);
+  static final Log REQLOG = LogFactory.getLog("datanode.webhdfs");
   public static final String WEBHDFS_PREFIX = WebHdfsFileSystem.PATH_PREFIX;
   public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
   public static final String APPLICATION_OCTET_STREAM =
@@ -98,6 +101,7 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
   private String path;
   private ParameterParser params;
   private UserGroupInformation ugi;
+  private DefaultHttpResponse resp = null;
 
   public WebHdfsHandler(Configuration conf, Configuration confForCreate)
 throws IOException {
@@ -119,1

hadoop git commit: MAPREDUCE-6719. The list of -libjars archives should be replaced with a wildcard in the distributed cache to reduce the application footprint in the state store (Daniel Templeton vi

2016-06-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 65b4f2682 -> 910742ad1


MAPREDUCE-6719. The list of -libjars archives should be replaced with a 
wildcard in the distributed cache to reduce the application footprint in the 
state store (Daniel Templeton via sjlee)

(cherry picked from commit 605b4b61364781fc99ed27035c793153a20d8f71)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/910742ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/910742ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/910742ad

Branch: refs/heads/branch-2
Commit: 910742ad12de4ac15c3ac1f1a204e8301ffe3a25
Parents: 65b4f26
Author: Sangjin Lee <sj...@twitter.com>
Authored: Tue Jun 21 11:25:11 2016 -0700
Committer: Sangjin Lee <sj...@twitter.com>
Committed: Tue Jun 21 11:26:55 2016 -0700

--
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  70 +++--
 .../java/org/apache/hadoop/mapreduce/Job.java   |   7 +-
 .../hadoop/mapreduce/JobResourceUploader.java   |  20 ++-
 .../hadoop/mapreduce/JobSubmissionFiles.java|   4 +-
 .../apache/hadoop/mapreduce/JobSubmitter.java   |   6 +-
 .../ClientDistributedCacheManager.java  |  31 +++-
 .../mapreduce/filecache/DistributedCache.java   |  76 --
 .../src/main/resources/mapred-default.xml   |  18 +++
 .../TestClientDistributedCacheManager.java  | 151 ---
 .../filecache/TestDistributedCache.java | 132 
 .../hadoop/mapred/TestLocalJobSubmission.java   |  34 +++--
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  48 --
 12 files changed, 510 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/910742ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 8ca1a9d..a649377 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -326,12 +326,36 @@ public class MRApps extends Apps {
 for (URI u: withLinks) {
   Path p = new Path(u);
   FileSystem remoteFS = p.getFileSystem(conf);
+  String name = p.getName();
+  String wildcard = null;
+
+  // If the path is wildcarded, resolve its parent directory instead
+  if (name.equals(DistributedCache.WILDCARD)) {
+wildcard = name;
+p = p.getParent();
+  }
+
   p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
   remoteFS.getWorkingDirectory()));
-  String name = (null == u.getFragment())
-  ? p.getName() : u.getFragment();
+
+  if ((wildcard != null) && (u.getFragment() != null)) {
+throw new IOException("Invalid path URI: " + p + " - cannot "
++ "contain both a URI fragment and a wildcard");
+  } else if (wildcard != null) {
+name = p.getName() + Path.SEPARATOR + wildcard;
+  } else if (u.getFragment() != null) {
+name = u.getFragment();
+  }
+
+  // If it's not a JAR, add it to the link lookup.
   if (!StringUtils.toLowerCase(name).endsWith(".jar")) {
-linkLookup.put(p, name);
+String old = linkLookup.put(p, name);
+
+if ((old != null) && !name.equals(old)) {
+  LOG.warn("The same path is included more than once "
+  + "with different links or wildcards: " + p + " [" +
+  name + ", " + old + "]");
+}
   }
 }
   }
@@ -598,16 +622,42 @@ public class MRApps extends Apps {
 URI u = uris[i];
 Path p = new Path(u);
 FileSystem remoteFS = p.getFileSystem(conf);
+String linkName = null;
+
+if (p.getName().equals(DistributedCache.WILDCARD)) {
+  p = p.getParent();
+  linkName = p.getName() + Path.SEPARATOR + DistributedCache.WILDCARD;
+}
+
 p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
 remoteFS.getWorkingDirectory()));
-// Add URI fragment or just

hadoop git commit: MAPREDUCE-6719. The list of -libjars archives should be replaced with a wildcard in the distributed cache to reduce the application footprint in the state store (Daniel Templeton vi

2016-06-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk e15cd4336 -> 605b4b613


MAPREDUCE-6719. The list of -libjars archives should be replaced with a 
wildcard in the distributed cache to reduce the application footprint in the 
state store (Daniel Templeton via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/605b4b61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/605b4b61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/605b4b61

Branch: refs/heads/trunk
Commit: 605b4b61364781fc99ed27035c793153a20d8f71
Parents: e15cd43
Author: Sangjin Lee <sj...@twitter.com>
Authored: Tue Jun 21 11:25:11 2016 -0700
Committer: Sangjin Lee <sj...@twitter.com>
Committed: Tue Jun 21 11:25:11 2016 -0700

--
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  70 +++--
 .../java/org/apache/hadoop/mapreduce/Job.java   |   7 +-
 .../hadoop/mapreduce/JobResourceUploader.java   |  20 ++-
 .../hadoop/mapreduce/JobSubmissionFiles.java|   4 +-
 .../apache/hadoop/mapreduce/JobSubmitter.java   |   6 +-
 .../ClientDistributedCacheManager.java  |  31 +++-
 .../mapreduce/filecache/DistributedCache.java   |  76 --
 .../src/main/resources/mapred-default.xml   |  18 +++
 .../TestClientDistributedCacheManager.java  | 151 ---
 .../filecache/TestDistributedCache.java | 132 
 .../hadoop/mapred/TestLocalJobSubmission.java   |  34 +++--
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  48 --
 12 files changed, 510 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/605b4b61/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 31e4c0f..b800d31 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -300,12 +300,36 @@ public class MRApps extends Apps {
 for (URI u: withLinks) {
   Path p = new Path(u);
   FileSystem remoteFS = p.getFileSystem(conf);
+  String name = p.getName();
+  String wildcard = null;
+
+  // If the path is wildcarded, resolve its parent directory instead
+  if (name.equals(DistributedCache.WILDCARD)) {
+wildcard = name;
+p = p.getParent();
+  }
+
   p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
   remoteFS.getWorkingDirectory()));
-  String name = (null == u.getFragment())
-  ? p.getName() : u.getFragment();
+
+  if ((wildcard != null) && (u.getFragment() != null)) {
+throw new IOException("Invalid path URI: " + p + " - cannot "
++ "contain both a URI fragment and a wildcard");
+  } else if (wildcard != null) {
+name = p.getName() + Path.SEPARATOR + wildcard;
+  } else if (u.getFragment() != null) {
+name = u.getFragment();
+  }
+
+  // If it's not a JAR, add it to the link lookup.
   if (!StringUtils.toLowerCase(name).endsWith(".jar")) {
-linkLookup.put(p, name);
+String old = linkLookup.put(p, name);
+
+if ((old != null) && !name.equals(old)) {
+  LOG.warn("The same path is included more than once "
+  + "with different links or wildcards: " + p + " [" +
+  name + ", " + old + "]");
+}
   }
 }
   }
@@ -559,16 +583,42 @@ public class MRApps extends Apps {
 URI u = uris[i];
 Path p = new Path(u);
 FileSystem remoteFS = p.getFileSystem(conf);
+String linkName = null;
+
+if (p.getName().equals(DistributedCache.WILDCARD)) {
+  p = p.getParent();
+  linkName = p.getName() + Path.SEPARATOR + DistributedCache.WILDCARD;
+}
+
 p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
 remoteFS.getWorkingDirectory()));
-// Add URI fragment or just the filename
-Path name = new Path((null == u.getF

hadoop git commit: YARN-4958. The file localization process should allow for wildcards to reduce the application footprint in the state store (Daniel Templeton via sjlee)

2016-06-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk fc6b50cc5 -> 5107a967f


YARN-4958. The file localization process should allow for wildcards to reduce 
the application footprint in the state store (Daniel Templeton via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5107a967
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5107a967
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5107a967

Branch: refs/heads/trunk
Commit: 5107a967fa2558deba11c33a326d4d2e5748f452
Parents: fc6b50c
Author: Sangjin Lee <sj...@twitter.com>
Authored: Mon Jun 20 09:56:53 2016 -0700
Committer: Sangjin Lee <sj...@twitter.com>
Committed: Mon Jun 20 09:56:53 2016 -0700

--
 .../yarn/server/nodemanager/ContainerExecutor.java   | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5107a967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 98171af..98d45f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 
 public abstract class ContainerExecutor implements Configurable {
-
+  private static final String WILDCARD = "*";
   private static final Log LOG = LogFactory.getLog(ContainerExecutor.class);
   final public static FsPermission TASK_LAUNCH_SCRIPT_PERMISSION =
 FsPermission.createImmutable((short) 0700);
@@ -281,7 +281,18 @@ public abstract class ContainerExecutor implements 
Configurable {
 if (resources != null) {
  for (Map.Entry<Path,List<String>> entry : resources.entrySet()) {
 for (String linkName : entry.getValue()) {
-  sb.symlink(entry.getKey(), new Path(linkName));
+  if (new Path(linkName).getName().equals(WILDCARD)) {
+// If this is a wildcarded path, link to everything in the
+// directory from the working directory
+File directory = new File(entry.getKey().toString());
+
+for (File wildLink : directory.listFiles()) {
+  sb.symlink(new Path(wildLink.toString()),
+  new Path(wildLink.getName()));
+}
+  } else {
+sb.symlink(entry.getKey(), new Path(linkName));
+  }
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5070. upgrade HBase version for first merge (Vrushali C via sjlee)

2016-06-15 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 8edb68aca -> b9b9068ed


YARN-5070. upgrade HBase version for first merge (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b9068e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b9068e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b9068e

Branch: refs/heads/YARN-2928
Commit: b9b9068ed913d2e154c184149fa65bf28e632356
Parents: 8edb68a
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Jun 15 11:43:36 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Jun 15 11:43:36 2016 -0700

--
 hadoop-project/pom.xml  |   4 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   | 168 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java | 159 +-
 .../storage/flow/FlowRunCoprocessor.java|  17 +-
 .../storage/flow/FlowScanner.java   | 132 ---
 5 files changed, 392 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b9068e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a1edb17..d52bf6d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -49,8 +49,8 @@
 2.11.0
 
 0.8.2.1
-1.0.1
-4.5.0-SNAPSHOT
+1.1.3
+4.7.0-HBase-1.1
 2.5.1
 
 ${project.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b9068e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index 328b25a..6c4c810 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
@@ -107,8 +107,8 @@ public class TestHBaseStorageFlowRun {
   // check in flow run table
   util.waitUntilAllRegionsAssigned(table);
   HRegionServer server = util.getRSForFirstRegionInTable(table);
-  List<HRegion> regions = server.getOnlineRegions(table);
-  for (HRegion region : regions) {
+  List<Region> regions = server.getOnlineRegions(table);
+  for (Region region : regions) {
 assertTrue(TimelineStorageUtils.isFlowRunTable(region.getRegionInfo(),
 hbaseConf));
   }
@@ -122,8 +122,8 @@ public class TestHBaseStorageFlowRun {
   // check in flow activity table
   util.waitUntilAllRegionsAssigned(table);
   HRegionServer server = util.getRSForFirstRegionInTable(table);
-  List<HRegion> regions = server.getOnlineRegions(table);
-  for (HRegion region : regions) {
+  List<Region> regions = server.getOnlineRegions(table);
+  for (Region region : regions) {
 assertFalse(TimelineStorageUtils.isFlowRunTable(region.getRegionInfo(),
 hbaseConf));
   }
@@ -137,8 +137,8 @@ public class TestHBaseStorageFlowRun {
   // check in entity run table
   util.waitUntilAllRegionsAssigned(table);
   HRegionServer server = util.getRSForFirstRegionInTable(table);
-  List<HRegion> regions = server.getOnlineRegions(table);
-  for (HRegion region : regions) {
+  List<Region> regions = server.getOnlineRegions(table);
+  for (Region region : regions) {
 assertFalse(TimelineStorageUtils.isFlowRunTable(region.getRegionInfo(),
 hbaseConf));
   }
@@ -311,6 +311,9 @@ public class TestHBaseStorageFlowRun {
 //

[2/2] hadoop git commit: YARN-5015. entire time series is returned for YARN container system metrics (CPU and memory) (Varun Saxena via sjlee)

2016-05-31 Thread sjlee
YARN-5015. entire time series is returned for YARN container system metrics 
(CPU and memory) (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646dd4da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646dd4da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646dd4da

Branch: refs/heads/YARN-2928
Commit: 646dd4da8c50a498bed0eeb6f30a4f418739063b
Parents: 2af62a0
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 31 13:09:59 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue May 31 13:09:59 2016 -0700

--
 ...stTimelineReaderWebServicesHBaseStorage.java | 211 +++--
 .../storage/TestHBaseTimelineStorage.java   | 315 ++-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  26 +-
 .../reader/TimelineDataToRetrieve.java  |  32 +-
 .../reader/TimelineEntityFilters.java   |   5 +-
 .../reader/TimelineReaderWebServices.java   | 274 
 .../reader/TimelineReaderWebServicesUtils.java  |  17 +-
 .../storage/reader/ApplicationEntityReader.java |   3 +-
 .../storage/reader/FlowRunEntityReader.java |  12 +
 .../storage/reader/GenericEntityReader.java |   4 +-
 .../storage/reader/TimelineEntityReader.java|   5 +-
 .../TestFileSystemTimelineReaderImpl.java   |  12 +-
 12 files changed, 711 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/646dd4da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index ca80ed5..f9f4607 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.timelineservice.reader;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -112,13 +113,14 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 TimelineMetric m1 = new TimelineMetric();
 m1.setId("MAP_SLOT_MILLIS");
 Map<Long, Number> metricValues =
-ImmutableMap.of(ts - 10, (Number)2, ts - 8, 40);
+ImmutableMap.of(ts - 10, (Number)2, ts - 9, 7, ts - 8, 40);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
 m1 = new TimelineMetric();
 m1.setId("MAP1_SLOT_MILLIS");
-metricValues = ImmutableMap.of(ts - 10, (Number)2, ts - 8, 40);
+metricValues =
+ImmutableMap.of(ts - 10, (Number)2, ts - 9, 9, ts - 8, 40);
 m1.setType(Type.TIME_SERIES);
 m1.setValues(metricValues);
 metrics.add(m1);
@@ -460,6 +462,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 assertNotNull(resp);
 assertTrue("Response from server should have been " + status,
 resp.getClientResponseStatus().equals(status));
+System.out.println("Response is: " + resp.getEntity(String.class));
   }
 
   @Test
@@ -615,12 +618,18 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 (entity.getStartTime() == 1425016501034L) &&
 (entity.getMetrics().size() == 1)));
   }
+
+  // fields as CONFIGS will lead to a HTTP 400 as it makes no sense for
+  // flow runs.
+  uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+  "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
+  "fields=CONFIGS");
+  verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 } finally {
   client.destroy();
 }
   }
 
-
   @Test
   public void testGetFlowRunsMetri

hadoop git commit: YARN-5111. YARN container system metrics are not aggregated to application (Naganarasimha G R via sjlee)

2016-05-27 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 e2229377b -> 2af62a08b


YARN-5111. YARN container system metrics are not aggregated to application 
(Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2af62a08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2af62a08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2af62a08

Branch: refs/heads/YARN-2928
Commit: 2af62a08bd2a55a7a902b12eeb54a69304fd07d8
Parents: e222937
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri May 27 22:31:00 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri May 27 22:31:00 2016 -0700

--
 .../api/records/timelineservice/TimelineMetric.java  | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2af62a08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index f0c6849..5c908d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -17,17 +17,18 @@
  */
 package org.apache.hadoop.yarn.api.records.timelineservice;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Collections;
-import java.util.Map;
-import java.util.TreeMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 /**
  * This class contains the information of a metric that is related to some
@@ -89,6 +90,8 @@ public class TimelineMetric {
*
* @return Real time aggregation operation
*/
+  // required by JAXB
+  @XmlElement(name = "aggregationOp")
   public TimelineMetricOperation getRealtimeAggregationOp() {
 return realtimeAggregationOp;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: YARN-5109. timestamps are stored unencoded causing parse errors (Varun Saxena via sjlee)

2016-05-26 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 10b26bb9f -> e2229377b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2229377/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyConverter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyConverter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyConverter.java
new file mode 100644
index 000..642f065
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyConverter.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+
+/**
+ * Encodes and decodes row key for flow run table.
+ * The row key is of the form : clusterId!userId!flowName!flowrunId.
+ * flowrunId is a long and rest are strings.
+ */
+public final class FlowRunRowKeyConverter implements
+KeyConverter<FlowRunRowKey> {
+  private static final FlowRunRowKeyConverter INSTANCE =
+  new FlowRunRowKeyConverter();
+
+  public static FlowRunRowKeyConverter getInstance() {
+return INSTANCE;
+  }
+
+  private FlowRunRowKeyConverter() {
+  }
+
+  // Flow run row key is of the form
+  // clusterId!userId!flowName!flowrunId with each segment separated by !.
+  // The sizes below indicate sizes of each one of these segments in sequence.
+  // clusterId, userId and flowName are strings. flowrunId is a long hence 8
+  // bytes in size. Strings are variable in size (i.e. end whenever separator 
is
+  // encountered). This is used while decoding and helps in determining where 
to
+  // split.
+  private static final int[] SEGMENT_SIZES = {
+  Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, 
Separator.VARIABLE_SIZE,
+  Bytes.SIZEOF_LONG };
+
+  /*
+   * (non-Javadoc)
+   *
+   * Encodes FlowRunRowKey object into a byte array with each component/field 
in
+   * FlowRunRowKey separated by Separator#QUALIFIERS. This leads to an
+   * flow run row key of the form clusterId!userId!flowName!flowrunId
+   * If flowRunId in passed FlowRunRowKey object is null (and the fields
+   * preceding it i.e. clusterId, userId and flowName are not null), this
+   * returns a row key prefix of the form clusterId!userName!flowName!
+   * flowRunId is inverted while encoding as it helps maintain a descending
+   * order for flow keys in flow run table.
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(FlowRunRowKey rowKey) {
+byte[] first = Separator.QUALIFIERS.join(
+Separator.encode(rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
+Separator.QUALIFIERS),
+Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
+Separator.QUALIFIERS),
+Separator.encode(rowKey.getFlowName(), Separator.SPACE, Separator.TAB,
+Separator.QUALIFIERS));
+if (rowKey.getFlowRunId() == null) {
+  return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
+} else {
+  // Note that flowRunId is a long, so we can't encode them all at the same
+  // time.
+  byte[] second = Bytes.toBytes(TimelineStorageUtils.invertLong(
+  rowKey.getFlowRunId()));
+  return Separator.QUALIFIERS.join(first, second);
+}
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Decodes an 

[3/3] hadoop git commit: YARN-5109. timestamps are stored unencoded causing parse errors (Varun Saxena via sjlee)

2016-05-26 Thread sjlee
YARN-5109. timestamps are stored unencoded causing parse errors (Varun Saxena 
via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2229377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2229377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2229377

Branch: refs/heads/YARN-2928
Commit: e2229377b0a4bcc54cff1dd4adf4e5b5c0a27bc1
Parents: 10b26bb
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu May 26 21:39:16 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu May 26 21:39:16 2016 -0700

--
 .../storage/TestHBaseTimelineStorage.java   | 145 ++---
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../reader/filter/TimelineFilterUtils.java  |  20 +-
 .../storage/HBaseTimelineWriterImpl.java|  67 +++--
 .../application/ApplicationColumnPrefix.java|  65 ++--
 .../storage/application/ApplicationRowKey.java  |  50 +---
 .../application/ApplicationRowKeyConverter.java | 130 
 .../storage/apptoflow/AppToFlowRowKey.java  |  20 +-
 .../apptoflow/AppToFlowRowKeyConverter.java |  96 ++
 .../storage/common/AppIdKeyConverter.java   | 101 +++
 .../storage/common/ColumnHelper.java| 175 +--
 .../storage/common/ColumnPrefix.java|  43 +--
 .../storage/common/EventColumnName.java |  48 +++
 .../common/EventColumnNameConverter.java| 105 +++
 .../storage/common/KeyConverter.java|  41 +++
 .../storage/common/LongKeyConverter.java|  68 +
 .../storage/common/Separator.java   | 198 -
 .../storage/common/StringKeyConverter.java  |  59 
 .../storage/common/TimelineStorageUtils.java| 199 ++---
 .../storage/entity/EntityColumnPrefix.java  |  48 +--
 .../storage/entity/EntityRowKey.java|  67 +
 .../storage/entity/EntityRowKeyConverter.java   | 143 +
 .../storage/flow/FlowActivityColumnPrefix.java  |  38 +--
 .../storage/flow/FlowActivityRowKey.java|  41 +--
 .../flow/FlowActivityRowKeyConverter.java   | 115 
 .../storage/flow/FlowRunColumnPrefix.java   |  82 ++
 .../storage/flow/FlowRunRowKey.java |  41 +--
 .../storage/flow/FlowRunRowKeyConverter.java| 120 
 .../storage/flow/FlowScanner.java   |   9 +-
 .../reader/FlowActivityEntityReader.java|  12 +-
 .../storage/reader/TimelineEntityReader.java|   4 +-
 .../storage/common/TestKeyConverters.java   | 293 +++
 .../storage/common/TestSeparator.java   |  82 +-
 .../common/TestTimelineStorageUtils.java|  56 
 34 files changed, 1988 insertions(+), 801 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2229377/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
index 68135a0..bcf2d2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
@@ -50,25 +50,

[2/3] hadoop git commit: YARN-5109. timestamps are stored unencoded causing parse errors (Varun Saxena via sjlee)

2016-05-26 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2229377/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
new file mode 100644
index 000..32ef1c3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Encodes and decodes event column names for application and entity tables.
+ * The event column name is of the form : eventId=timestamp=infokey.
+ * If info is not associated with the event, event column name is of the form :
+ * eventId=timestamp=
+ * Event timestamp is long and rest are strings.
+ * Column prefixes are not part of the eventcolumn name passed for encoding. It
+ * is added later, if required in the associated ColumnPrefix implementations.
+ */
+public final class EventColumnNameConverter
+implements KeyConverter<EventColumnName> {
+  private static final EventColumnNameConverter INSTANCE =
+  new EventColumnNameConverter();
+
+  public static EventColumnNameConverter getInstance() {
+return INSTANCE;
+  }
+
+  private EventColumnNameConverter() {
+  }
+
+  // eventId=timestamp=infokey are of types String, Long String
+  // Strings are variable in size (i.e. end whenever separator is encountered).
+  // This is used while decoding and helps in determining where to split.
+  private static final int[] SEGMENT_SIZES = {
+  Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
+
+  /*
+   * (non-Javadoc)
+   *
+   * Encodes EventColumnName into a byte array with each component/field in
+   * EventColumnName separated by Separator#VALUES. This leads to an event
+   * column name of the form eventId=timestamp=infokey.
+   * If timestamp in passed EventColumnName object is null (eventId is not 
null)
+   * this returns a column prefix of the form eventId= and if infokey in
+   * EventColumnName is null (other 2 components are not null), this returns a
+   * column name of the form eventId=timestamp=
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(EventColumnName key) {
+byte[] first = Separator.encode(key.getId(), Separator.SPACE, 
Separator.TAB,
+Separator.VALUES);
+if (key.getTimestamp() == null) {
+  return Separator.VALUES.join(first, Separator.EMPTY_BYTES);
+}
+byte[] second = Bytes.toBytes(
+TimelineStorageUtils.invertLong(key.getTimestamp()));
+if (key.getInfoKey() == null) {
+  return Separator.VALUES.join(first, second, Separator.EMPTY_BYTES);
+}
+return Separator.VALUES.join(first, second, Separator.encode(
+key.getInfoKey(), Separator.SPACE, Separator.TAB, Separator.VALUES));
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Decodes an event column name of the form eventId=timestamp= or
+   * eventId=timestamp=infoKey represented in byte format and converts it into
+   * an EventColumnName object.
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #decode(byte[])
+   */
+  @Override
+  public EventColumnName decode(byte[] bytes) {
+byte[][] components = Separator.VALUES.split(bytes, SEGMENT_SIZES);
+if (components.length != 3) {
+  throw new IllegalArgumentException("the column name is not valid");
+}
+String id = 

hadoop git commit: YARN-5095. flow activities and flow runs are populated with wrong timestamp when RM restarts w/ recovery enabled (Varun Saxena via sjlee)

2016-05-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 97b074c91 -> 506ebff5f


YARN-5095. flow activities and flow runs are populated with wrong timestamp 
when RM restarts w/ recovery enabled (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/506ebff5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/506ebff5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/506ebff5

Branch: refs/heads/YARN-2928
Commit: 506ebff5f4b6f8b69631a0b60d430bd95e12b2a2
Parents: 97b074c
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed May 25 16:56:49 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed May 25 16:56:49 2016 -0700

--
 .../server/resourcemanager/RMAppManager.java| 12 ++--
 .../server/resourcemanager/rmapp/RMAppImpl.java | 19 +-
 .../server/resourcemanager/TestRMRestart.java   | 63 
 3 files changed, 87 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/506ebff5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 30efc8e..2aa54fd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -287,8 +287,10 @@ public class RMAppManager implements 
EventHandler,
   String user) throws YarnException, AccessControlException {
 ApplicationId applicationId = submissionContext.getApplicationId();
 
-RMAppImpl application =
-createAndPopulateNewRMApp(submissionContext, submitTime, user, false);
+// Passing start time as -1. It will be eventually set in RMAppImpl
+// constructor.
+RMAppImpl application = createAndPopulateNewRMApp(
+submissionContext, submitTime, user, false, -1);
 Credentials credentials = null;
 try {
   credentials = parseCredentials(submissionContext);
@@ -326,14 +328,14 @@ public class RMAppManager implements 
EventHandler,
 // create and recover app.
 RMAppImpl application =
 createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
-appState.getUser(), true);
+appState.getUser(), true, appState.getStartTime());
 
 application.handle(new RMAppRecoverEvent(appId, rmState));
   }
 
   private RMAppImpl createAndPopulateNewRMApp(
   ApplicationSubmissionContext submissionContext, long submitTime,
-  String user, boolean isRecovery)
+  String user, boolean isRecovery, long startTime)
   throws YarnException, AccessControlException {
 // Do queue mapping
 if (!isRecovery) {
@@ -388,7 +390,7 @@ public class RMAppManager implements 
EventHandler,
 submissionContext.getQueue(),
 submissionContext, this.scheduler, this.masterService,
 submitTime, submissionContext.getApplicationType(),
-submissionContext.getApplicationTags(), amReq);
+submissionContext.getApplicationTags(), amReq, startTime);
 // Concurrent app submissions with same applicationId will fail here
 // Concurrent app submissions with different applicationIds will not
 // influence each other

http://git-wip-us.apache.org/repos/asf/hadoop/blob/506ebff5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index b0eccc4..b692a4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/had

hadoop git commit: YARN-5097. NPE in Separator.joinEncoded() (Vrushali C via sjlee)

2016-05-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 5e374b46a -> 97b074c91


YARN-5097. NPE in Separator.joinEncoded() (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97b074c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97b074c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97b074c9

Branch: refs/heads/YARN-2928
Commit: 97b074c915cbbac6aa540903ace13486f8631276
Parents: 5e374b4
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed May 25 15:49:08 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed May 25 15:49:08 2016 -0700

--
 .../storage/TestHBaseTimelineStorage.java   | 57 
 .../storage/HBaseTimelineWriterImpl.java|  9 
 2 files changed, 66 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97b074c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
index aebd936..68135a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -482,6 +482,63 @@ public class TestHBaseTimelineStorage {
 }
   }
 
+
+  @Test
+  public void testWriteNullApplicationToHBase() throws Exception {
+TimelineEntities te = new TimelineEntities();
+ApplicationEntity entity = new ApplicationEntity();
+String appId = "application_1000178881110_2002";
+entity.setId(appId);
+long cTime = 1425016501000L;
+entity.setCreatedTime(cTime);
+
+// add the info map in Timeline Entity
+Map<String, Object> infoMap = new HashMap<String, Object>();
+infoMap.put("infoMapKey1", "infoMapValue1");
+infoMap.put("infoMapKey2", 10);
+entity.addInfo(infoMap);
+
+te.addEntity(entity);
+HBaseTimelineWriterImpl hbi = null;
+try {
+  Configuration c1 = util.getConfiguration();
+  hbi = new HBaseTimelineWriterImpl(c1);
+  hbi.init(c1);
+  hbi.start();
+  String cluster = "cluster_check_null_application";
+  String user = "user1check_null_application";
+  //set the flow name to null
+  String flow = null;
+  String flowVersion = "AB7822C10F";
+  long runid = 1002345678919L;
+  hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+  hbi.stop();
+
+  // retrieve the row
+  Scan scan = new Scan();
+  scan.setStartRow(Bytes.toBytes(cluster));
+  Connection conn = ConnectionFactory.createConnection(c1);
+  ResultScanner resultScanner = new ApplicationTable()
+  .getResultScanner(c1, conn, scan);
+
+  assertTrue(resultScanner != null);
+  // try to iterate over results
+  int count = 0;
+  for (Result rr = resultScanner.next(); rr != null;
+  rr = resultScanner.next()) {
+ count++;
+  }
+  // there should be no rows written
+  // no exceptions thrown during write
+  assertEquals(0, count);
+} finally {
+  if (hbi != null) {
+hbi.stop();
+hbi.close();
+  }
+}
+  }
+
   @Test
   public void testWriteApplicationToHBase() throws Exception {
 TimelineEntities te = new TimelineEntities();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97b074c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTi

hadoop git commit: YARN-5018. Online aggregation logic should not run immediately after collectors got started (Li Lu via sjlee)

2016-05-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 290349837 -> 5e374b46a


YARN-5018. Online aggregation logic should not run immediately after collectors 
got started (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e374b46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e374b46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e374b46

Branch: refs/heads/YARN-2928
Commit: 5e374b46a132a0c148397c0538ce58d2f53d56d9
Parents: 2903498
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 24 11:02:56 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue May 24 11:02:56 2016 -0700

--
 .../RMTimelineCollectorManager.java   |  2 +-
 .../collector/AppLevelTimelineCollector.java  | 17 +++--
 .../collector/NodeTimelineCollectorManager.java   |  2 +-
 .../collector/TimelineCollector.java  | 12 +++-
 .../collector/TimelineCollectorManager.java   | 18 +-
 5 files changed, 45 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e374b46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
index a4f1084..64c3749 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
@@ -49,7 +49,7 @@ public class RMTimelineCollectorManager extends 
TimelineCollectorManager {
   }
 
   @Override
-  public void postPut(ApplicationId appId, TimelineCollector collector) {
+  protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
 RMApp app = rmContext.getRMApps().get(appId);
 if (app == null) {
   throw new YarnRuntimeException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e374b46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index eb05262..d276269 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import com.google.common.base.Preconditions;
 
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -93,7 +94,8 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
 new ThreadFactoryBuilder()
 .setNameFormat("TimelineCollector Aggregation thread #%d")
 .build());
-appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(), 0,
+appAggregationExecutor.scheduleAtFixedRate(new AppLevelAggregator(),
+AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
 TimeUnit.SECONDS);
 super.serviceStart();
@@ -126,10 +128,21 @@ public class AppLevelTimelineCollector extends 
TimelineCollector {
   if (LOG.isDebugEnabled()) {
 LOG.debug("A

hadoop git commit: YARN-5093. created time shows 0 in most REST output (Varun Saxena via sjlee)

2016-05-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 2c93006dc -> 290349837


YARN-5093. created time shows 0 in most REST output (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29034983
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29034983
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29034983

Branch: refs/heads/YARN-2928
Commit: 2903498372d2dfb46621857e85faede2e0d691b6
Parents: 2c93006
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 24 10:33:04 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue May 24 10:33:04 2016 -0700

--
 .../records/timelineservice/TimelineEntity.java | 16 +--
 .../storage/TestHBaseTimelineStorage.java   | 30 +---
 .../storage/FileSystemTimelineReaderImpl.java   |  2 +-
 .../storage/reader/ApplicationEntityReader.java |  5 ++--
 .../storage/reader/GenericEntityReader.java |  4 +--
 .../reader/TestTimelineReaderWebServices.java   |  4 +--
 .../TestFileSystemTimelineReaderImpl.java   | 10 +++
 7 files changed, 45 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29034983/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 7ce8279..9c0a983 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -144,7 +144,7 @@ public class TimelineEntity implements 
Comparable<TimelineEntity> {
   private NavigableSet events = new TreeSet<>();
   private HashMap<String, Set> isRelatedToEntities = new HashMap<>();
   private HashMap<String, Set> relatesToEntities = new HashMap<>();
-  private long createdTime;
+  private Long createdTime;
 
   public TimelineEntity() {
 identifier = new Identifier();
@@ -490,7 +490,7 @@ public class TimelineEntity implements 
Comparable<TimelineEntity> {
   }
 
   @XmlElement(name = "createdtime")
-  public long getCreatedTime() {
+  public Long getCreatedTime() {
 if (real == null) {
   return createdTime;
 } else {
@@ -499,7 +499,7 @@ public class TimelineEntity implements 
Comparable<TimelineEntity> {
   }
 
   @JsonSetter("createdtime")
-  public void setCreatedTime(long createdTs) {
+  public void setCreatedTime(Long createdTs) {
 if (real == null) {
   this.createdTime = createdTs;
 } else {
@@ -547,6 +547,16 @@ public class TimelineEntity implements 
Comparable {
   public int compareTo(TimelineEntity other) {
 int comparison = getType().compareTo(other.getType());
 if (comparison == 0) {
+  if (getCreatedTime() == null) {
+if (other.getCreatedTime() == null) {
+  return getId().compareTo(other.getId());
+} else {
+  return 1;
+}
+  }
+  if (other.getCreatedTime() == null) {
+return -1;
+  }
   if (getCreatedTime() > other.getCreatedTime()) {
 // Order by created time desc
 return -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29034983/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
index 8ab54bc..aebd936 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -488,7 +488,7 @@ public class TestHBaseTimelineStorage {

hadoop git commit: YARN-5050. Code cleanup for TestDistributedShell (Li Lu via sjlee)

2016-05-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 8d0b4267e -> 2c93006dc


YARN-5050. Code cleanup for TestDistributedShell (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c93006d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c93006d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c93006d

Branch: refs/heads/YARN-2928
Commit: 2c93006dc0de8b92018aa8023f2bfa3ce331680a
Parents: 8d0b426
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu May 19 17:25:05 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu May 19 17:25:05 2016 -0700

--
 .../distributedshell/TestDistributedShell.java | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c93006d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 4a401e7..22c16e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -135,6 +135,8 @@ public class TestDistributedShell {
 
 conf = new YarnConfiguration();
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
+// reduce the teardown waiting time
+conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000);
 conf.set("yarn.log.dir", "target");
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
 // mark if we need to launch the v1 timeline server
@@ -183,9 +185,6 @@ public class TestDistributedShell {
   conf.set(YarnConfiguration.NM_AUX_SERVICES, TIMELINE_AUX_SERVICE_NAME);
   conf.set(YarnConfiguration.NM_AUX_SERVICES + "." + 
TIMELINE_AUX_SERVICE_NAME
 + ".class", PerNodeTimelineCollectorsAuxService.class.getName());
-  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, 
true);
-  conf.setBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED,
-  false);
 } else {
   Assert.fail("Wrong timeline version number: " + timelineVersion);
 }
@@ -280,7 +279,7 @@ public class TestDistributedShell {
 testDSShell(true);
   }
 
-  @Test(timeout=9)
+  @Test
   @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2() throws Exception {
 testDSShell(false);
@@ -290,12 +289,14 @@ public class TestDistributedShell {
 testDSShell(haveDomain, true);
   }
 
-  @Test(timeout=9)
+  @Test
+  @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2DefaultFlow() throws Exception {
 testDSShell(false, true);
   }
 
-  @Test(timeout=9)
+  @Test
+  @TimelineVersion(2.0f)
   public void testDSShellWithoutDomainV2CustomizedFlow() throws Exception {
 testDSShell(false, false);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5096 addendum. Turned another logging statement to debug. Contributed by Sangjin Lee.

2016-05-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 b837b916b -> 8d0b4267e


YARN-5096 addendum. Turned another logging statement to debug. Contributed by 
Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d0b4267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d0b4267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d0b4267

Branch: refs/heads/YARN-2928
Commit: 8d0b4267e4516e7c5d39d23121f17f508234a1b2
Parents: b837b91
Author: Sangjin Lee 
Authored: Thu May 19 15:40:15 2016 -0700
Committer: Sangjin Lee 
Committed: Thu May 19 15:40:15 2016 -0700

--
 .../yarn/server/timelineservice/storage/common/ColumnHelper.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d0b4267/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
index dff677b..759bf27 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -258,7 +258,9 @@ public class ColumnHelper {
 
   String columnName = null;
   if (columnPrefixBytes == null) {
-LOG.info("null prefix was specified; returning all columns");
+if (LOG.isDebugEnabled()) {
+  LOG.debug("null prefix was specified; returning all columns");
+}
 // Decode the spaces we encoded in the column name.
 columnName = Separator.decode(columnKey, Separator.SPACE);
   } else {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HDFS-10208. Addendum for HDFS-9579: to handle the case when client machine can't resolve network path (Ming Ma via sjlee)

2016-05-16 Thread sjlee
HDFS-10208. Addendum for HDFS-9579: to handle the case when client machine 
can't resolve network path (Ming Ma via sjlee)

(cherry picked from commit 61f46be071e42f9eb49a54b1bd2e54feac59f808)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9330a7b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9330a7b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9330a7b4

Branch: refs/heads/branch-2
Commit: 9330a7b4de7b023f2242554e72c0d7c0d98cf41d
Parents: 09a613b
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon May 16 18:49:47 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon May 16 18:59:19 2016 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |  7 ++-
 .../org/apache/hadoop/net/NetworkTopology.java  | 35 
 .../java/org/apache/hadoop/net/NodeBase.java|  9 +++
 .../src/main/resources/core-default.xml | 13 +
 .../org/apache/hadoop/hdfs/ClientContext.java   | 47 ++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 31 +++---
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 59 +---
 .../apache/hadoop/net/TestNetworkTopology.java  |  9 +++
 8 files changed, 174 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9330a7b4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index ca17f8d..e16c0ba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -116,7 +116,12 @@ public class CommonConfigurationKeysPublic {
   public static final String  FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
   /** Default value for FS_TRASH_INTERVAL_KEY */
   public static final longFS_TRASH_INTERVAL_DEFAULT = 0;
-
+  /** See core-default.xml. */
+  public static final String  FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED =
+  "fs.client.resolve.topology.enabled";
+  /** Default value for FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED. */
+  public static final boolean FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT =
+  false;
   /** See core-default.xml */
   public static final String  IO_MAPFILE_BLOOM_SIZE_KEY =
 "io.mapfile.bloom.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9330a7b4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 1e23ff6..cf5b176 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -655,6 +655,41 @@ public class NetworkTopology {
 return dis+2;
   }
 
+  /** Return the distance between two nodes by comparing their network paths
+   * without checking if they belong to the same ancestor node by reference.
+   * It is assumed that the distance from one node to its parent is 1
+   * The distance between two nodes is calculated by summing up their distances
+   * to their closest common ancestor.
+   * @param node1 one node
+   * @param node2 another node
+   * @return the distance between node1 and node2
+   */
+  static public int getDistanceByPath(Node node1, Node node2) {
+if (node1 == null && node2 == null) {
+  return 0;
+}
+if (node1 == null || node2 == null) {
+  LOG.warn("One of the nodes is a null pointer");
+  return Integer.MAX_VALUE;
+}
+String[] paths1 = NodeBase.getPathComponents(node1);
+String[] paths2 = NodeBase.getPathComponents(node2);
+int dis = 0;
+int index = 0;
+int minLevel = Math.min(paths1.length, paths2.length);
+while (index < minLevel) {
+  if (!paths1[index].equals(paths2[index])) {
+// Once the path starts to diverge,  compute the distance that include
+// the rest of paths.
+dis += 2 * (minLevel - index);
+break;
+  }
+  index++;
+}
+dis += Math.abs(paths1.length - paths2.length);
+return dis;
+  }
+
   /** Check if two nodes are on the same rack
*

[1/2] hadoop git commit: HDFS-10208. Addendum for HDFS-9579: to handle the case when client machine can't resolve network path (Ming Ma via sjlee)

2016-05-16 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 09a613b02 -> 9330a7b4d
  refs/heads/trunk 730bc746f -> 61f46be07


HDFS-10208. Addendum for HDFS-9579: to handle the case when client machine 
can't resolve network path (Ming Ma via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61f46be0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61f46be0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61f46be0

Branch: refs/heads/trunk
Commit: 61f46be071e42f9eb49a54b1bd2e54feac59f808
Parents: 730bc74
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon May 16 18:49:47 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon May 16 18:49:47 2016 -0700

--
 .../fs/CommonConfigurationKeysPublic.java   |  7 ++-
 .../org/apache/hadoop/net/NetworkTopology.java  | 35 
 .../java/org/apache/hadoop/net/NodeBase.java|  9 
 .../src/main/resources/core-default.xml | 13 +
 .../org/apache/hadoop/hdfs/ClientContext.java   | 47 +---
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 31 +++
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 57 +---
 .../apache/hadoop/net/TestNetworkTopology.java  |  9 
 8 files changed, 173 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f46be0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 648ad59..f6ccc56 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -111,7 +111,12 @@ public class CommonConfigurationKeysPublic {
   public static final String  FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
   /** Default value for FS_TRASH_INTERVAL_KEY */
   public static final longFS_TRASH_INTERVAL_DEFAULT = 0;
-
+  /** See core-default.xml. */
+  public static final String  FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED =
+  "fs.client.resolve.topology.enabled";
+  /** Default value for FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED. */
+  public static final boolean FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT =
+  false;
   /** See core-default.xml */
   public static final String  IO_MAPFILE_BLOOM_SIZE_KEY =
 "io.mapfile.bloom.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61f46be0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 1e23ff6..cf5b176 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -655,6 +655,41 @@ public class NetworkTopology {
 return dis+2;
   }
 
+  /** Return the distance between two nodes by comparing their network paths
+   * without checking if they belong to the same ancestor node by reference.
+   * It is assumed that the distance from one node to its parent is 1
+   * The distance between two nodes is calculated by summing up their distances
+   * to their closest common ancestor.
+   * @param node1 one node
+   * @param node2 another node
+   * @return the distance between node1 and node2
+   */
+  static public int getDistanceByPath(Node node1, Node node2) {
+if (node1 == null && node2 == null) {
+  return 0;
+}
+if (node1 == null || node2 == null) {
+  LOG.warn("One of the nodes is a null pointer");
+  return Integer.MAX_VALUE;
+}
+String[] paths1 = NodeBase.getPathComponents(node1);
+String[] paths2 = NodeBase.getPathComponents(node2);
+int dis = 0;
+int index = 0;
+int minLevel = Math.min(paths1.length, paths2.length);
+while (index < minLevel) {
+  if (!paths1[index].equals(paths2[index])) {
+// Once the path starts to diverge,  compute the distance that include
+// the rest of paths.
+dis += 2 * (minLevel - index);
+break;
+  }
+  index++;
+}
+dis += Math.abs(paths1.length - paths2.length);
+return dis;

hadoop git commit: HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via sjlee)

2016-05-13 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9227dfc25 -> 1f2794b4f


HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f2794b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f2794b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f2794b4

Branch: refs/heads/trunk
Commit: 1f2794b4faf119a1ab66c8184ac84cec710d52a0
Parents: 9227dfc
Author: Sangjin Lee 
Authored: Fri May 13 10:01:46 2016 -0700
Committer: Sangjin Lee 
Committed: Fri May 13 10:01:46 2016 -0700

--
 .../hadoop-common/src/site/markdown/FileSystemShell.md  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f2794b4/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 14bab30..5790bb7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -27,6 +27,11 @@ Most of the commands in FS shell behave like corresponding 
Unix commands. Differ
 
 If HDFS is being used, `hdfs dfs` is a synonym.
 
+Relative paths can be used. For HDFS, the current working directory is the
HDFS home directory `/user/<username>` that often has to be created manually.
+The HDFS home directory can also be implicitly accessed, e.g., when using the
+HDFS trash folder, the `.Trash` directory in the home directory.
+
 See the [Commands Manual](./CommandsManual.html) for generic shell options.
 
 appendToFile


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via sjlee)

2016-05-13 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 08743edd7 -> 8a316676b
  refs/heads/branch-2.8 ff8caccc8 -> 29699d8e7


HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via 
sjlee)

(cherry picked from commit 1f2794b4faf119a1ab66c8184ac84cec710d52a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a316676
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a316676
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a316676

Branch: refs/heads/branch-2
Commit: 8a316676b7d626d5355f0b21b844d47803e6c8fd
Parents: 08743ed
Author: Sangjin Lee 
Authored: Fri May 13 10:01:46 2016 -0700
Committer: Sangjin Lee 
Committed: Fri May 13 10:02:47 2016 -0700

--
 .../hadoop-common/src/site/markdown/FileSystemShell.md  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a316676/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index af97f3b..4946a13 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -27,6 +27,11 @@ Most of the commands in FS shell behave like corresponding 
Unix commands. Differ
 
 If HDFS is being used, `hdfs dfs` is a synonym.
 
+Relative paths can be used. For HDFS, the current working directory is the
HDFS home directory `/user/<username>` that often has to be created manually.
+The HDFS home directory can also be implicitly accessed, e.g., when using the
+HDFS trash folder, the `.Trash` directory in the home directory.
+
 See the [Commands Manual](./CommandsManual.html) for generic shell options.
 
 appendToFile


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via sjlee)

2016-05-13 Thread sjlee
HADOOP-12971. FileSystemShell doc should explain relative path (John Zhuge via 
sjlee)

(cherry picked from commit 1f2794b4faf119a1ab66c8184ac84cec710d52a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29699d8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29699d8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29699d8e

Branch: refs/heads/branch-2.8
Commit: 29699d8e7e630c727e0d139f352ffc26fa4d
Parents: ff8cacc
Author: Sangjin Lee 
Authored: Fri May 13 10:01:46 2016 -0700
Committer: Sangjin Lee 
Committed: Fri May 13 10:03:06 2016 -0700

--
 .../hadoop-common/src/site/markdown/FileSystemShell.md  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29699d8e/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index af97f3b..4946a13 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -27,6 +27,11 @@ Most of the commands in FS shell behave like corresponding 
Unix commands. Differ
 
 If HDFS is being used, `hdfs dfs` is a synonym.
 
+Relative paths can be used. For HDFS, the current working directory is the
HDFS home directory `/user/<username>` that often has to be created manually.
+The HDFS home directory can also be implicitly accessed, e.g., when using the
+HDFS trash folder, the `.Trash` directory in the home directory.
+
 See the [Commands Manual](./CommandsManual.html) for generic shell options.
 
 appendToFile


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4577. Enable aux services to have their own custom classpath/jar file (Xuan Gong via sjlee)

2016-05-12 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 389dd91dd -> d6d13ec67


YARN-4577. Enable aux services to have their own custom classpath/jar file 
(Xuan Gong via sjlee)

(cherry picked from commit 0bbe01f8d56191edfba3b50fb9f8859a0b3f826f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d13ec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d13ec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d13ec6

Branch: refs/heads/branch-2
Commit: d6d13ec670b60db60d16fe63f7108c7e8c7fa321
Parents: 389dd91
Author: Sangjin Lee 
Authored: Thu May 12 10:10:01 2016 -0700
Committer: Sangjin Lee 
Committed: Thu May 12 10:14:24 2016 -0700

--
 .../java/org/apache/hadoop/util/JarFinder.java  |  26 ++-
 .../java/org/apache/hadoop/util/TestRunJar.java |  29 +--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +-
 .../containermanager/AuxServices.java   |  43 ++--
 .../AuxiliaryServiceWithCustomClassLoader.java  | 201 +++
 .../containermanager/TestAuxServices.java   | 119 ++-
 6 files changed, 385 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d13ec6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
index 33aa025..478a29b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
@@ -14,7 +14,7 @@
 package org.apache.hadoop.util;
 
 import com.google.common.base.Preconditions;
-
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -173,4 +173,28 @@ public class JarFinder {
 }
 return null;
   }
+
+  public static File makeClassLoaderTestJar(Class target, File rootDir,
+  String jarName, int buffSize, String... clsNames) throws IOException {
+File jarFile = new File(rootDir, jarName);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+for (String clsName: clsNames) {
+  String name = clsName.replace('.', '/') + ".class";
+  InputStream entryInputStream = target.getResourceAsStream(
+  "/" + name);
+  ZipEntry entry = new ZipEntry(name);
+  jstream.putNextEntry(entry);
+  BufferedInputStream bufInputStream = new BufferedInputStream(
+  entryInputStream, buffSize);
+  int count;
+  byte[] data = new byte[buffSize];
+  while ((count = bufInputStream.read(data, 0, buffSize)) != -1) {
+jstream.write(data, 0, count);
+  }
+  jstream.closeEntry();
+}
+jstream.close();
+return jarFile;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d13ec6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 6622389..7b61b32 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -23,11 +23,9 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
-import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -156,7 +154,8 @@ public class TestRunJar {
 when(runJar.getSystemClasses()).thenReturn(systemClasses);
 
 // create the test jar
-File testJar = makeClassLoaderTestJar(mainCls, thirdCls);
+File testJar = JarFinder.makeClassLoaderTestJar(this.getClass(),
+TEST_ROOT_DIR, TEST_JAR_2_NAME, BUFF_SIZE, mainCls, thirdCls);
 // form the args
 String[] args = new String[3];
 args[0] = testJar.getAbsolutePath();
@@ -166,28 +165,4 @@ public class TestRunJar {
 runJar.run(args);
 // it should not throw an exception
   }
-
-  private File makeClassLoaderTestJar(String... clsNames) throws IOException {
-File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_2_NAME);
-JarOutputStream jstream 

hadoop git commit: YARN-4577. Enable aux services to have their own custom classpath/jar file (Xuan Gong via sjlee)

2016-05-12 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3f2816ab5 -> 0bbe01f8d


YARN-4577. Enable aux services to have their own custom classpath/jar file 
(Xuan Gong via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bbe01f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bbe01f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bbe01f8

Branch: refs/heads/trunk
Commit: 0bbe01f8d56191edfba3b50fb9f8859a0b3f826f
Parents: 3f2816a
Author: Sangjin Lee 
Authored: Thu May 12 10:10:01 2016 -0700
Committer: Sangjin Lee 
Committed: Thu May 12 10:11:23 2016 -0700

--
 .../java/org/apache/hadoop/util/JarFinder.java  |  26 ++-
 .../java/org/apache/hadoop/util/TestRunJar.java |  29 +--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +-
 .../containermanager/AuxServices.java   |  43 ++--
 .../AuxiliaryServiceWithCustomClassLoader.java  | 201 +++
 .../containermanager/TestAuxServices.java   | 119 ++-
 6 files changed, 385 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbe01f8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
index 33aa025..478a29b 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
@@ -14,7 +14,7 @@
 package org.apache.hadoop.util;
 
 import com.google.common.base.Preconditions;
-
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -173,4 +173,28 @@ public class JarFinder {
 }
 return null;
   }
+
+  public static File makeClassLoaderTestJar(Class target, File rootDir,
+  String jarName, int buffSize, String... clsNames) throws IOException {
+File jarFile = new File(rootDir, jarName);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+for (String clsName: clsNames) {
+  String name = clsName.replace('.', '/') + ".class";
+  InputStream entryInputStream = target.getResourceAsStream(
+  "/" + name);
+  ZipEntry entry = new ZipEntry(name);
+  jstream.putNextEntry(entry);
+  BufferedInputStream bufInputStream = new BufferedInputStream(
+  entryInputStream, buffSize);
+  int count;
+  byte[] data = new byte[buffSize];
+  while ((count = bufInputStream.read(data, 0, buffSize)) != -1) {
+jstream.write(data, 0, count);
+  }
+  jstream.closeEntry();
+}
+jstream.close();
+return jarFile;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbe01f8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 6622389..7b61b32 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -23,11 +23,9 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
-import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -156,7 +154,8 @@ public class TestRunJar {
 when(runJar.getSystemClasses()).thenReturn(systemClasses);
 
 // create the test jar
-File testJar = makeClassLoaderTestJar(mainCls, thirdCls);
+File testJar = JarFinder.makeClassLoaderTestJar(this.getClass(),
+TEST_ROOT_DIR, TEST_JAR_2_NAME, BUFF_SIZE, mainCls, thirdCls);
 // form the args
 String[] args = new String[3];
 args[0] = testJar.getAbsolutePath();
@@ -166,28 +165,4 @@ public class TestRunJar {
 runJar.run(args);
 // it should not throw an exception
   }
-
-  private File makeClassLoaderTestJar(String... clsNames) throws IOException {
-File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_2_NAME);
-JarOutputStream jstream =
-new JarOutputStream(new FileOutputStream(jarFile));
-for 

hadoop git commit: MAPREDUCE-6688. Store job configurations in Timeline Service v2 (Varun Saxena via sjlee)

2016-05-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 79008c157 -> bdd5d7dfa


MAPREDUCE-6688. Store job configurations in Timeline Service v2 (Varun Saxena 
via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd5d7df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd5d7df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd5d7df

Branch: refs/heads/YARN-2928
Commit: bdd5d7dfa2ddb501374d1552827aea31ce5ff801
Parents: 79008c1
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue May 3 09:19:36 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue May 3 09:19:36 2016 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../jobhistory/JobHistoryEventHandler.java  | 57 +++-
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  2 +-
 .../mapreduce/jobhistory/JobSubmittedEvent.java | 38 +++-
 .../mapreduce/util/JobHistoryEventUtils.java|  3 +
 .../mapred/TestMRTimelineEventHandling.java | 92 +---
 .../org/apache/hadoop/mapred/UtilsForTests.java |  8 ++
 7 files changed, 184 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd5d7df/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2a09c09..69ab89d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -15,6 +15,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 MAPREDUCE-6337. Added a mode to replay MR job history files and put them
 into the timeline service v2. (Sangjin Lee via zjshen)
 
+MAPREDUCE-6688. Store job configurations in Timeline Service v2 (Varun
+Saxena via sjlee)
+
   IMPROVEMENTS
 
 MAPREDUCE-6546. reconcile the two versions of the timeline service

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd5d7df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 35f60f1..cc31622 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -1048,7 +1048,16 @@ public class JobHistoryEventHandler extends 
AbstractService
 entity.setId(jobId.toString());
 return entity;
   }
-  
+
+  private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
+  createJobEntity(JobId jobId) {
+org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
+new 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity();
+entity.setId(jobId.toString());
+entity.setType(MAPREDUCE_JOB_ENTITY_TYPE);
+return entity;
+  }
+
   // create ApplicationEntity with job finished Metrics from HistoryEvent
   private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
   createAppEntityWithJobMetrics(HistoryEvent event, JobId jobId) {
@@ -1107,6 +1116,46 @@ public class JobHistoryEventHandler extends 
AbstractService
 return entity;
   }
 
+  private void publishConfigsOnJobSubmittedEvent(JobSubmittedEvent event,
+  JobId jobId) {
+if (event.getJobConf() == null) {
+  return;
+}
+// Publish job configurations both as job and app entity.
+// Configs are split into multiple entities if they exceed 100kb in size.
+org.apache.hadoop.yarn.api.records.timelineservice.
+TimelineEntity jobEntityForConfigs = createJobEntity(jobId);
+ApplicationEntity appEntityForConfigs = new ApplicationEntity();
+String appId = jobId.getAppId().toString();
+appEntityForConfigs.setId(appId);
+try {
+  int configSize = 0;
+  for (Map.Entry<String, String> entry : event.getJobConf()) {
+int size = entry.getKey().length() + entry.getValue().length();
+configSize += size;
+if (configSize > JobHistoryEventUtils.ATS_CONFIG_PUBLISH_SIZE_BYTES) {
+  if (jobEntityForConfigs.getConfigs().size() > 0) {
+timelineClient.putE

[1/3] hadoop git commit: YARN-4447. Provide a mechanism to represent complex filters and parse them at the REST layer (Varun Saxena via sjlee)

2016-05-02 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 cdd9efb2a -> 79008c157


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79008c15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
new file mode 100644
index 000..e991d27
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
@@ -0,0 +1,923 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestTimelineReaderWebServicesUtils {
+  private static void verifyFilterList(String expr, TimelineFilterList list,
+  TimelineFilterList expectedList) throws Exception {
+assertNotNull(list);
+assertTrue("Unexpected List received after parsing expression " + expr +
+". Expected=" + expectedList + " but Actual=" + list,
+list.equals(expectedList));
+  }
+
+  @Test
+  public void testMetricFiltersParsing() throws Exception {
+String expr = "(((key11 ne 234 AND key12 gt 23) AND " +
+"(key13 lt 34 OR key14 ge 567)) OR (key21 lt 24 OR key22 le 45))";
+TimelineFilterList expectedList = new TimelineFilterList(
+Operator.OR,
+new TimelineFilterList(
+Operator.AND,
+new TimelineFilterList(
+Operator.AND,
+new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL,
+"key11", 234, false),
+new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN,
+"key12", 23, true)
+),
+new TimelineFilterList(
+Operator.OR,
+new TimelineCompareFilter(TimelineCompareOp.LESS_THAN,
+"key13", 34, true),
+new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL,
+"key14", 567, true)
+)
+),
+new TimelineFilterList(
+Operator.OR,
+new TimelineCompareFilter(TimelineCompareOp.LESS_THAN,
+"key21", 24, true),
+new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL,
+"key22", 45, true)
+)
+);
+verifyFilterList(expr,
+TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
+
+expr = "abc ene 234";
+expectedList = new TimelineFilterList(
+new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL,
+"abc", 234, true)
+);
+verifyFilterList(expr,
+

[3/3] hadoop git commit: YARN-4447. Provide a mechanism to represent complex filters and parse them at the REST layer (Varun Saxena via sjlee)

2016-05-02 Thread sjlee
YARN-4447. Provide a mechanism to represent complex filters and parse them at 
the REST layer (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79008c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79008c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79008c15

Branch: refs/heads/YARN-2928
Commit: 79008c15703ece4bcac9116c4b44c3ab872e14a2
Parents: cdd9efb
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon May 2 14:06:19 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon May 2 14:06:19 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../reader/TimelineParseConstants.java  |  34 +
 .../reader/TimelineParseException.java  |  36 +
 .../timelineservice/reader/TimelineParser.java  |  37 +
 .../reader/TimelineParserForCompareExpr.java| 300 ++
 .../reader/TimelineParserForDataToRetrieve.java |  95 ++
 .../reader/TimelineParserForEqualityExpr.java   | 343 +++
 .../reader/TimelineParserForExistFilters.java   |  51 +
 .../reader/TimelineParserForKVFilters.java  |  78 ++
 .../reader/TimelineParserForNumericFilters.java |  72 ++
 .../TimelineParserForRelationFilters.java   |  71 ++
 .../reader/TimelineReaderWebServices.java   | 220 -
 .../reader/TimelineReaderWebServicesUtils.java  | 196 ++--
 .../reader/filter/TimelineCompareFilter.java|  73 +-
 .../reader/filter/TimelineExistsFilter.java |  49 +-
 .../reader/filter/TimelineFilterList.java   |  36 +
 .../reader/filter/TimelineKeyValueFilter.java   |  13 +
 .../reader/filter/TimelineKeyValuesFilter.java  |  61 +-
 .../reader/filter/TimelinePrefixFilter.java |  37 +
 .../reader/TestTimelineReaderWebServices.java   |  14 +-
 ...stTimelineReaderWebServicesHBaseStorage.java | 900 +-
 .../TestTimelineReaderWebServicesUtils.java | 923 +++
 22 files changed, 3445 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79008c15/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 61a9bc1..2e6996c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -136,6 +136,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN
 system metrics (Li Lu via sjlee)
 
+YARN-4447. Provide a mechanism to represent complex filters and parse them
+at the REST layer (Varun Saxena via sjlee)
+
   IMPROVEMENTS
 
 YARN-4224. Support fetching entities by UID and change the REST interface 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79008c15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
new file mode 100644
index 000..662a102
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParseConstants.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+/**
+ * Set of constants used while parsing filter expressions.
+ */
+final class TimelineParseConstants {
+  private TimelineParseConstants() {
+  }
+  static final String COMMA_DELIMIT

[2/3] hadoop git commit: YARN-4447. Provide a mechanism to represent complex filters and parse them at the REST layer (Varun Saxena via sjlee)

2016-05-02 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79008c15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 57d75db..2e667d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -18,29 +18,19 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
-import java.io.IOException;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 
 /**
  * Set of utility methods to be used by timeline reader web services.
  */
 final class TimelineReaderWebServicesUtils {
-  private static final String COMMA_DELIMITER = ",";
-  private static final String COLON_DELIMITER = ":";
 
   private TimelineReaderWebServicesUtils() {
   }
@@ -56,11 +46,10 @@ final class TimelineReaderWebServicesUtils {
* @param entityType Entity Type.
* @param entityId Entity Id.
* @return a {@link TimelineReaderContext} object.
-   * @throws Exception if any problem occurs during parsing.
*/
   static TimelineReaderContext createTimelineReaderContext(String clusterId,
   String userId, String flowName, String flowRunId, String appId,
-  String entityType, String entityId) throws Exception {
+  String entityType, String entityId) {
 return new TimelineReaderContext(parseStr(clusterId), parseStr(userId),
 parseStr(flowName), parseLongStr(flowRunId), parseStr(appId),
 parseStr(entityType), parseStr(entityId));
@@ -79,20 +68,17 @@ final class TimelineReaderWebServicesUtils {
* @param metricfilters Entities to return must match these metric filters.
* @param eventfilters Entities to return must match these event filters.
* @return a {@link TimelineEntityFilters} object.
-   * @throws Exception if any problem occurs during parsing.
+   * @throws TimelineParseException if any problem occurs during parsing.
*/
   static TimelineEntityFilters createTimelineEntityFilters(String limit,
   String createdTimeStart, String createdTimeEnd, String relatesTo,
   String isRelatedTo, String infofilters, String conffilters,
-  String metricfilters, String eventfilters) throws Exception {
+  String metricfilters, String eventfilters) throws TimelineParseException 
{
 return new TimelineEntityFilters(parseLongStr(limit),
 parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
-parseKeyStrValuesStr(relatesTo, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValuesStr(isRelatedTo, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValueObj(infofilters, COMMA_DELIMITER, COLON_DELIMITER),
-parseKeyStrValueStr(conffilters, COMMA_DELIMITER, COLON_DELIMITER),
-parseMetricFilters(metricfilters, COMMA_DELIMITER),
-parseValuesStr(eventfilters, COMMA_DELIMITER));
+parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
+parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
+parseMetricFilters(metricfilters), parseEventFilters(eventfilters));
   }
 
   /**
@@ -102,12 +88,13 @@ final class TimelineReaderWebServicesUtils {
* @param metrics metrics to retrieve.
* @param fields fields to retrieve.
* @return a {@link TimelineDataToRetrieve} object.
-   * @throws Exception 

hadoop git commit: YARN-4986. Add a check in the coprocessor for table to operated on (Vrushali C via sjlee)

2016-04-29 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 466ea0d1a -> c6e70a6cf


YARN-4986. Add a check in the coprocessor for table to operated on (Vrushali C 
via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6e70a6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6e70a6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6e70a6c

Branch: refs/heads/YARN-2928
Commit: c6e70a6cf5a3af19c8a96afcf505f5329c6ebeaa
Parents: 466ea0d
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Apr 29 17:13:32 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Apr 29 17:13:32 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../storage/common/TimelineStorageUtils.java| 20 +++
 .../storage/entity/EntityTable.java |  2 +-
 .../storage/flow/FlowRunCoprocessor.java| 39 +++--
 .../storage/flow/FlowScanner.java   | 13 +++--
 .../storage/flow/TestHBaseStorageFlowRun.java   | 61 
 .../flow/TestHBaseStorageFlowRunCompaction.java | 36 
 7 files changed, 163 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6e70a6c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index db7ed2b..61a9bc1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -255,6 +255,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4711. NM is going down with NPE's due to single thread processing of
 events by Timeline client (Naganarasimha G R via sjlee)
 
+YARN-4986. Add a check in the coprocessor for table to operated on
+(Vrushali C via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6e70a6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
index 2d85bab..18f975a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
@@ -32,8 +32,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Result;
@@ -56,6 +58,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Fiel
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 
 /**
@@ -887,4 +890,21 @@ public final class TimelineStorageUtils {
 Set eventsSet = new HashSet<>(eventsMap.values());
 entity.addEvents(eventsSet);
   }
+
+  public static boolean isFlowRunTable(HRegionInfo hRegionInfo,
+  Configuration conf) {
+String regionTableName = hRegionInfo.getTable().getNameAsString();
+String flowRunTableName = conf.get(FlowRunTable.TABLE_NAME_CONF_NAME,
+FlowRunTable.DEFAULT_TABLE_NAME);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("regionTableName=" + regionTableName);
+}
+if (flowRunTableName.equalsIgnoreCase(regionTableName)) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug(" table is the flow run table!! " + flowRunTableName);
+ 

hadoop git commit: YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN system metrics (Li Lu via sjlee)

2016-04-22 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 3df8b0d61 -> 466ea0d1a


YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN system 
metrics (Li Lu via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/466ea0d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/466ea0d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/466ea0d1

Branch: refs/heads/YARN-2928
Commit: 466ea0d1a1e86d8dafcd9c3b90803fa8a454401c
Parents: 3df8b0d
Author: Sangjin Lee <sj...@apache.org>
Authored: Fri Apr 22 10:24:40 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Fri Apr 22 10:24:40 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineMetric.java | 140 ++--
 .../TimelineMetricCalculator.java   | 115 ++
 .../TimelineMetricOperation.java| 167 +++
 .../timelineservice/TestTimelineMetric.java | 100 +
 .../TestTimelineServiceRecords.java |   6 +-
 .../timelineservice/NMTimelinePublisher.java|   4 +
 .../collector/AppLevelTimelineCollector.java|  72 +++
 .../collector/TimelineCollector.java| 213 ++-
 .../storage/TimelineAggregationTrack.java   |   2 +-
 .../collector/TestTimelineCollector.java| 127 +++
 .../TestFileSystemTimelineWriterImpl.java   |  43 +++-
 .../storage/TestHBaseTimelineStorage.java   |  35 ++-
 13 files changed, 1001 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/466ea0d1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dbc61fa..db7ed2b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -133,6 +133,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3863. Support complex filters in TimelineReader (Varun Saxena via
 sjlee)
 
+YARN-3816. [Aggregation] App-level aggregation and accumulation for YARN
+system metrics (Li Lu via sjlee)
+
   IMPROVEMENTS
 
 YARN-4224. Support fetching entities by UID and change the REST interface 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/466ea0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index 2f60515..f0c6849 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.api.records.timelineservice;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
-import java.util.Comparator;
+import java.util.Collections;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -48,13 +49,13 @@ public class TimelineMetric {
 
   private Type type;
   private String id;
-  private Comparator reverseComparator = new Comparator() {
-@Override
-public int compare(Long l1, Long l2) {
-  return l2.compareTo(l1);
-}
-  };
-  private TreeMap<Long, Number> values = new TreeMap<>(reverseComparator);
+  // By default, not to do any aggregation operations. This field will NOT be
+  // persisted (like a "transient" member).
+  private TimelineMetricOperation realtimeAggregationOp
+  = TimelineMetricOperation.NOP;
+
+  private TreeMap<Long, Number> values
+  = new TreeMap<>(Collections.reverseOrder());
 
   public TimelineMetric() {
 this(Type.SINGLE_VALUE);
@@ -83,6 +84,26 @@ public class TimelineMetric {
 this.id = metricId;
   }
 
+  /**
+   * Get the real time aggregation operation of this metric.
+   *
+   * @return Real time aggregation operation
+   */
+  public TimelineMetricOperation getRealtimeAggregationOp() {
+return realtimeAggregationOp;
+  }
+
+  

svn commit: r1739010 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2016-04-13 Thread sjlee
Author: sjlee
Date: Wed Apr 13 21:34:57 2016
New Revision: 1739010

URL: http://svn.apache.org/viewvc?rev=1739010=rev
Log:
Updated who.*ml files.

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1739010&r1=1739009&r2=1739010&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Wed 
Apr 13 21:34:57 2016
@@ -367,6 +367,14 @@
 -8
 
 
+
+  sjlee
+  http://people.apache.org/~sjlee;>Sangjin Lee
+  Twitter
+  
+  -8
+
+

  sradia
  http://people.apache.org/~sradia;>Sanjay Radia

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/version_control.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/version_control.pdf?rev=1739010&r1=1739009&r2=1739010&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/who.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1739010&r1=1739009&r2=1739010&view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Wed Apr 13 21:34:57 2016
@@ -732,6 +732,17 @@ document.write("Last Published: " + docu
 
 
 
+
+
+  
+sjlee
+  http://people.apache.org/~sjlee;>Sangjin Lee
+  Twitter
+  
+  -8
+
+
+

 
  
@@ -979,7 +990,7 @@ document.write("Last Published: " + docu
 
 
 
-
+
 Emeritus Hadoop PMC Members
 
 
@@ -994,7 +1005,7 @@ document.write("Last Published: " + docu
 
 

-
+
 Hadoop Committers
 
 Hadoop's active committers include:
@@ -2138,7 +2149,7 @@ document.write("Last Published: " + docu
 
 

-
+
 Emeritus Hadoop Committers
 
 Hadoop committers who are no longer active include:

Modified: hadoop/common/site/main/publish/who.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.pdf?rev=1739010=1739009=1739010=diff
==
Binary files - no diff available.




hadoop git commit: Addendum to YARN-3863. Deleted files that were added incorrectly.

2016-04-12 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 f8e8a03bd -> 3df8b0d61


Addendum to YARN-3863. Deleted files that were added incorrectly.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3df8b0d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3df8b0d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3df8b0d6

Branch: refs/heads/YARN-2928
Commit: 3df8b0d6100530080d2e0decf9e528e57c42a90a
Parents: f8e8a03
Author: Sangjin Lee 
Authored: Tue Apr 12 12:32:43 2016 -0700
Committer: Sangjin Lee 
Committed: Tue Apr 12 12:32:43 2016 -0700

--
 .../reader/filter/TimelineExistsFilter.java | 62 -
 .../reader/filter/TimelineKeyValueFilter.java   | 48 -
 .../reader/filter/TimelineKeyValuesFilter.java  | 71 
 .../common/TimelineEntityFiltersType.java   | 71 
 4 files changed, 252 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df8b0d6/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineExistsFilter.java
--
diff --git 
a/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineExistsFilter.java
 
b/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineExistsFilter.java
deleted file mode 100644
index 36d0d7b..000
--- 
a/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineExistsFilter.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-
-/**
- * Filter class which represents filter to be applied based on existence of a
- * value.
- */
-@Private
-@Unstable
-public class TimelineExistsFilter extends TimelineFilter {
-
-  private final TimelineCompareOp compareOp;
-  private final String value;
-
-  public TimelineExistsFilter(TimelineCompareOp op, String value) {
-this.value = value;
-if (op != TimelineCompareOp.EQUAL && op != TimelineCompareOp.NOT_EQUAL) {
-  throw new IllegalArgumentException("CompareOp for exists filter should " 
+
-  "be EQUAL or NOT_EQUAL");
-}
-this.compareOp = op;
-  }
-
-  @Override
-  public TimelineFilterType getFilterType() {
-return TimelineFilterType.EXISTS;
-  }
-
-  public String getValue() {
-return value;
-  }
-
-  public TimelineCompareOp getCompareOp() {
-return compareOp;
-  }
-
-  @Override
-  public String toString() {
-return String.format("%s (%s %s)",
-this.getClass().getSimpleName(), this.compareOp.name(), this.value);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df8b0d6/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineKeyValueFilter.java
--
diff --git 
a/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineKeyValueFilter.java
 
b/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineKeyValueFilter.java
deleted file mode 100644
index 58f0ee9..000
--- 
a/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineKeyValueFilter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- 

[3/5] hadoop git commit: YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)

2016-04-11 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index d8f73d4..6696ac5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -19,13 +19,8 @@ package 
org.apache.hadoop.yarn.server.timelineservice.storage.reader;
 
 import java.io.IOException;
 import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
@@ -33,28 +28,22 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
 import org.apache.hadoop.hbase.filter.FamilyFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
@@ -71,7 +60,6 @@ import com.google.common.base.Preconditions;
  */
 class GenericEntityReader extends TimelineEntityReader {
   private static final EntityTable ENTITY_TABLE = new EntityTable();
-  private static final Log LOG = LogFactory.getLog(GenericEntityReader.class);
 
   /**
* Used to look up the flow context.
@@ -97,92 +85,322 @@ class GenericEntityReader extends TimelineEntityReader {
   }
 
   @Override
-  protected FilterList constructFilterListBasedOnFields() {
-FilterList list = new FilterList(Operator.MUST_PASS_ONE);
-TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-// Fetch all the columns.
-if (dataToRetrieve.getFieldsToRetrieve().contains(Field.ALL) &&
-(dataToRetrieve.getConfsToRetrieve() == null ||
-dataToRetrieve.getConfsToRetrieve().getFilterList().isEmpty()) &&
-(dataToRetrieve.getMetricsToRetrieve() == null ||
-dataToRetrieve.getMetricsToRetrieve().getFilterList().isEmpty())) {
-  return list;
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+// Filters here cannot be null for multiple entity reads 

[1/5] hadoop git commit: YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)

2016-04-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 fb0acd08e -> f8e8a03bd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
index 9793ce6..3b8036d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
@@ -154,6 +154,14 @@ class TestFlowDataGenerator {
 metrics.add(m2);
 
 entity.addMetrics(metrics);
+TimelineEvent event = new TimelineEvent();
+event.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+long endTs = 1439379885000L;
+event.setTimestamp(endTs);
+String expKey = "foo_event_greater";
+String expVal = "test_app_greater";
+event.addInfo(expKey, expVal);
+entity.addEvent(event);
 return entity;
   }
 
@@ -178,6 +186,14 @@ class TestFlowDataGenerator {
 m1.setValues(metricValues);
 metrics.add(m1);
 entity.addMetrics(metrics);
+TimelineEvent event = new TimelineEvent();
+event.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+long endTs = 1439379885000L;
+event.setTimestamp(endTs);
+String expKey = "foo_event_greater";
+String expVal = "test_app_greater";
+event.addInfo(expKey, expVal);
+entity.addEvent(event);
 return entity;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index f04dd48..a724db2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -47,8 +47,10 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
@@ -307,7 +309,7 @@ public class TestHBaseStorageFlowRun {
   assertEquals(141L, Bytes.toLong(values.get(q)));
 
   // check metric2
-  assertEquals(2, values.size());
+  assertEquals(3, values.size());
   q = ColumnHelper.getColumnQualifier(
   FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(), metric2);
   assertTrue(values.containsKey(q));
@@ -318,11 +320,10 @@ public class TestHBaseStorageFlowRun {
 
   @Test
   public void testWriteFlowRunMetricsPrefix() throws Exception {
-String cluster = "testWriteFlowRunMetricsOneFlow_cluster1";
-String user = "testWriteFlowRunMetricsOneFlow_user1";
-String flow = "testing_flowRun_metrics_flow_name";
+String cluster 

[4/5] hadoop git commit: YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)

2016-04-11 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
index b5fc214..2d85bab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
@@ -17,21 +17,26 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
-import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
@@ -39,6 +44,15 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilter.TimelineFilterType;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
@@ -53,6 +67,8 @@ public final class TimelineStorageUtils {
   private TimelineStorageUtils() {
   }
 
+  private static final Log LOG = LogFactory.getLog(TimelineStorageUtils.class);
+
   /** empty bytes. */
   public static final byte[] EMPTY_BYTES = new byte[0];
 
@@ -312,6 +328,21 @@ public final class TimelineStorageUtils {
   }
 
   /**
+   * Check if we have a certain field amongst fields to retrieve. This method
+   * checks against {@link Field#ALL} as well because that would mean field
+   * passed needs to be matched.
+   *
+   * @param fieldsToRetrieve fields to be retrieved.
+   * @param requiredField fields to be checked in fieldsToRetrieve.
+   * @return true if has the required field, false otherwise.
+   */
+  public static boolean hasField(EnumSet fieldsToRetrieve,
+  Field requiredField) {
+return fieldsToRetrieve.contains(Field.ALL) ||
+fieldsToRetrieve.contains(requiredField);
+  }
+
+  /**
* Checks if the input TimelineEntity object is an ApplicationEntity.
*
* @param te TimelineEntity object.
@@ -385,87 +416,317 @@ public final class TimelineStorageUtils {
   }
 
   /**
+   * Matches key-values filter. Used for relatesTo/isRelatedTo filters.
*
-   * @param entityRelations the relations of an entity
-   * @param relationFilters the relations for filtering
-   * @return a boolean flag to indicate if both match
+   * @param entity entity which holds relatesTo/isRelatedTo relations 

[5/5] hadoop git commit: YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)

2016-04-11 Thread sjlee
YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8e8a03b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8e8a03b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8e8a03b

Branch: refs/heads/YARN-2928
Commit: f8e8a03bd51e97cac46ca7fbbec7c94b5259157a
Parents: fb0acd0
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Apr 11 21:07:32 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Apr 11 21:07:32 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../reader/TimelineEntityFilters.java   |  170 +-
 .../reader/TimelineReaderWebServicesUtils.java  |   88 +-
 .../reader/filter/TimelineCompareFilter.java|   35 +-
 .../reader/filter/TimelineExistsFilter.java |   62 +
 .../reader/filter/TimelineFilter.java   |   16 +-
 .../reader/filter/TimelineFilterList.java   |   14 +
 .../reader/filter/TimelineFilterUtils.java  |  206 +-
 .../reader/filter/TimelineKeyValueFilter.java   |   48 +
 .../reader/filter/TimelineKeyValuesFilter.java  |   71 +
 .../reader/filter/TimelinePrefixFilter.java |6 +
 .../storage/FileSystemTimelineReaderImpl.java   |   36 +-
 .../storage/HBaseTimelineWriterImpl.java|   31 +-
 .../storage/application/ApplicationColumn.java  |   28 +-
 .../application/ApplicationColumnPrefix.java|   37 +-
 .../storage/apptoflow/AppToFlowColumn.java  |   16 +
 .../timelineservice/storage/common/Column.java  |   17 +
 .../storage/common/ColumnHelper.java|   16 +
 .../storage/common/ColumnPrefix.java|   35 +
 .../common/TimelineEntityFiltersType.java   |   71 +
 .../storage/common/TimelineStorageUtils.java|  461 +++-
 .../storage/entity/EntityColumn.java|   28 +-
 .../storage/entity/EntityColumnPrefix.java  |   38 +-
 .../storage/flow/FlowActivityColumnPrefix.java  |   35 +
 .../storage/flow/FlowRunColumn.java |3 +
 .../storage/flow/FlowRunColumnPrefix.java   |   26 +
 .../storage/flow/FlowScanner.java   |1 +
 .../storage/reader/ApplicationEntityReader.java |  426 ++--
 .../reader/FlowActivityEntityReader.java|7 +
 .../storage/reader/FlowRunEntityReader.java |   97 +-
 .../storage/reader/GenericEntityReader.java |  623 ++---
 .../storage/reader/TimelineEntityReader.java|   71 +-
 .../reader/TestTimelineReaderWebServices.java   |   10 +-
 .../TestFileSystemTimelineReaderImpl.java   |  332 ++-
 .../storage/TestHBaseTimelineStorage.java   | 2172 +-
 .../storage/flow/TestFlowDataGenerator.java |   16 +
 .../storage/flow/TestHBaseStorageFlowRun.java   |  267 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |2 +-
 .../reader/filter/TimelineExistsFilter.java |   62 +
 .../reader/filter/TimelineKeyValueFilter.java   |   48 +
 .../reader/filter/TimelineKeyValuesFilter.java  |   71 +
 .../common/TimelineEntityFiltersType.java   |   71 +
 42 files changed, 5057 insertions(+), 816 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c66a851..dbc61fa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -130,6 +130,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4062. Add the flush and compaction functionality via coprocessors and
 scanners for flow run table (Vrushali C via sjlee)
 
+YARN-3863. Support complex filters in TimelineReader (Varun Saxena via
+sjlee)
+
   IMPROVEMENTS
 
 YARN-4224. Support fetching entities by UID and change the REST interface 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
index 5b2c300..4821d31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
+++ 
b/hadoop-yarn-project/hadoop-yarn

[2/5] hadoop git commit: YARN-3863. Support complex filters in TimelineReader (Varun Saxena via sjlee)

2016-04-11 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e8a03b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
index 4e07ecf..6b57ec4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorage.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -52,10 +53,14 @@ import 
org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValueFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineExistsFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
@@ -75,9 +80,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
 /**
  * Various tests to test writing entities to HBase and reading them back from
  * it.
@@ -113,30 +115,29 @@ public class TestHBaseTimelineStorage {
 String id = "application_11_";
 entity.setId(id);
 entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
-Long cTime = 1425016501000L;
+Long cTime = 1425016502000L;
 entity.setCreatedTime(cTime);
 // add the info map in Timeline Entity
 Map infoMap = new HashMap();
-infoMap.put("infoMapKey1", "infoMapValue1");
-infoMap.put("infoMapKey2", 10);
+infoMap.put("infoMapKey1", "infoMapValue2");
+infoMap.put("infoMapKey2", 20);
+infoMap.put("infoMapKey3", 85.85);
 entity.addInfo(infoMap);
 // add the isRelatedToEntity info
-String key = "task";
-String value = "is_related_to_entity_id_here";
 Set isRelatedToSet = new HashSet();
-isRelatedToSet.add(value);
+isRelatedToSet.add("relatedto1");
 Map isRelatedTo = new HashMap();
-isRelatedTo.put(key, isRelatedToSet);
+isRelatedTo.put("task", isRelatedToSet);
 entity.setIsRelatedToEntities(isRelatedTo);
 // add the relatesTo info
-key = "container";
-value = "relates_to_entity_id_here";
 Set relatesToSet = new HashSet();
-relatesToSet.add(value);
-value = "relates_to_entity_id_here_Second";
-relatesToSet.add(value);
+relatesToSet.add("relatesto1");
+relatesToSet.add("relatesto3");
 Map relatesTo = new HashMap();
-relatesTo.put(key, relatesToSet);
+relatesTo.put("container", relatesToSet);
+Set relatesToSet11 = new HashSet();
+relatesToSet11.add("relatesto4");
+relatesTo.put("container1", relatesToSet11);
 entity.setRelatesToEntities(relatesTo);
 // add some config entries
 Map conf = new HashMap();
@@ -166,8 +167,8 @@ public class TestHBaseTimelineStorage {
 metrics.add(m12);
 entity.addMetrics(metrics);
 TimelineEvent event = new 

hadoop git commit: YARN-4183. Clarify the behavior of timeline service config properties (Naganarasimha G R via sjlee)

2016-03-31 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6db3c9d39 -> b91928cd2


YARN-4183. Clarify the behavior of timeline service config properties 
(Naganarasimha G R via sjlee)

(cherry picked from commit 6d67420dbc5c6097216fa40fcec8ed626b2bae14)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b91928cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b91928cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b91928cd

Branch: refs/heads/branch-2
Commit: b91928cd26f19b079dc643bf9bedd13d3573ca54
Parents: 6db3c9d
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu Mar 31 10:49:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu Mar 31 10:49:45 2016 -0700

--
 .../src/main/resources/yarn-default.xml| 13 ++---
 .../src/site/markdown/TimelineServer.md|  6 +++---
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b91928cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 33cd919..cb3c73a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1874,8 +1874,12 @@
   
 
   
-Indicate to clients whether timeline service is enabled or 
not.
-If enabled, clients will put entities and events to the timeline server.
+
+In the server side it indicates whether timeline service is enabled or not.
+And in the client side, users can enable it to indicate whether client 
wants
+to use timeline service. If it's enabled in the client side along with
+security, then yarn client tries to fetch the delegation tokens for the
+timeline server.
 
 yarn.timeline-service.enabled
 false
@@ -2027,7 +2031,10 @@
   
 
   
-Client policy for whether timeline operations are 
non-fatal
+Client policy for whether timeline operations are non-fatal.
+Should the failure to obtain a delegation token be considered an 
application
+failure (option = false),  or should the client attempt to continue to
+publish information without it (option=true)
 yarn.timeline-service.client.best-effort
 false
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b91928cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 8ef7d9a..9283e58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -141,7 +141,7 @@ and cluster operators.
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.timeline-service.enabled` | Indicate to clients whether Timeline 
service is enabled or not. If enabled, the `TimelineClient` library used by 
applications will post entities and events to the Timeline server. Defaults to 
`false`. |
+| `yarn.timeline-service.enabled` | In the server side it indicates whether 
timeline service is enabled or not. And in the client side, users can enable it 
to indicate whether client wants to use timeline service. If it's enabled in 
the client side along with security, then yarn client tries to fetch the 
delegation tokens for the timeline server. Defaults to `false`. |
 | `yarn.resourcemanager.system-metrics-publisher.enabled` | The setting that 
controls whether or not YARN system metrics are published on the timeline 
server by RM. Defaults to `false`. |
 | `yarn.timeline-service.generic-application-history.enabled` | Indicate to 
clients whether to query generic application data from timeline history-service 
or not. If not enabled then application data is queried only from Resource 
Manager. Defaults to `false`. |
 
@@ -150,7 +150,7 @@ and cluster operators.
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.store-class` | Store class name for timeline store. 
Defaults to `org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore`. |
-| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb timeline store. Defaults to `${hadoop.tmp.dir}/yarn/timeline`. |
+|

hadoop git commit: YARN-4183. Clarify the behavior of timeline service config properties (Naganarasimha G R via sjlee)

2016-03-31 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 480b01ba9 -> 856a131c6


YARN-4183. Clarify the behavior of timeline service config properties 
(Naganarasimha G R via sjlee)

(cherry picked from commit 6d67420dbc5c6097216fa40fcec8ed626b2bae14)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/856a131c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/856a131c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/856a131c

Branch: refs/heads/branch-2.7
Commit: 856a131c6585dff6be7db2c41d5f8b765a31a4a8
Parents: 480b01b
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu Mar 31 10:49:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu Mar 31 10:52:39 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../src/main/resources/yarn-default.xml| 13 ++---
 .../src/site/markdown/TimelineServer.md|  6 +++---
 3 files changed, 16 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/856a131c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 33b07ad..74bcdda 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -110,6 +110,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4785. inconsistent value type of the "type" field for LeafQueueInfo 
 in response of RM REST API. (Varun Vasudev via junping_du)
 
+YARN-4183. Clarify the behavior of timeline service config properties
+    (Naganarasimha G R via sjlee)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/856a131c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index efd6a90..55abd12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1319,8 +1319,12 @@
   
 
   
-Indicate to clients whether timeline service is enabled or 
not.
-If enabled, clients will put entities and events to the timeline server.
+
+In the server side it indicates whether timeline service is enabled or not.
+And in the client side, users can enable it to indicate whether client 
wants
+to use timeline service. If it's enabled in the client side along with
+security, then yarn client tries to fetch the delegation tokens for the
+timeline server.
 
 yarn.timeline-service.enabled
 false
@@ -1472,7 +1476,10 @@
   
 
   
-Client policy for whether timeline operations are 
non-fatal
+Client policy for whether timeline operations are non-fatal.
+Should the failure to obtain a delegation token be considered an 
application
+failure (option = false),  or should the client attempt to continue to
+publish information without it (option=true)
 yarn.timeline-service.client.best-effort
 false
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/856a131c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index e31622b..f3473f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -137,7 +137,7 @@ and cluster operators.
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.timeline-service.enabled` | Indicate to clients whether Timeline 
service is enabled or not. If enabled, the `TimelineClient` library used by 
applications will post entities and events to the Timeline server. Defaults to 
`false`. |
+| `yarn.timeline-service.enabled` | In the server side it indicates whether 
timeline service is enabled or not. And in the client side, users can enable it 
to indicate whether client wants to use timeline service. If it's enabled in 
the client side along with security, then yarn client tries to fetch the 
delegation tokens for the timeline server. Defaults to `false`. |
 | `yarn.resourcemanager.system-metrics-publisher.enabled` | The setting that 
controls w

hadoop git commit: YARN-4183. Clarify the behavior of timeline service config properties (Naganarasimha G R via sjlee)

2016-03-31 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a0c001c0b -> 5e94e539e


YARN-4183. Clarify the behavior of timeline service config properties 
(Naganarasimha G R via sjlee)

(cherry picked from commit 6d67420dbc5c6097216fa40fcec8ed626b2bae14)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e94e539
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e94e539
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e94e539

Branch: refs/heads/branch-2.8
Commit: 5e94e539e0285bd74da8c3e74bcedfbf602f4964
Parents: a0c001c
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu Mar 31 10:49:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu Mar 31 10:50:18 2016 -0700

--
 .../src/main/resources/yarn-default.xml| 13 ++---
 .../src/site/markdown/TimelineServer.md|  6 +++---
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e94e539/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c93fb36..a588893 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1798,8 +1798,12 @@
   
 
   
-Indicate to clients whether timeline service is enabled or 
not.
-If enabled, clients will put entities and events to the timeline server.
+
+In the server side it indicates whether timeline service is enabled or not.
+And in the client side, users can enable it to indicate whether client 
wants
+to use timeline service. If it's enabled in the client side along with
+security, then yarn client tries to fetch the delegation tokens for the
+timeline server.
 
 yarn.timeline-service.enabled
 false
@@ -1951,7 +1955,10 @@
   
 
   
-Client policy for whether timeline operations are 
non-fatal
+Client policy for whether timeline operations are non-fatal.
+Should the failure to obtain a delegation token be considered an 
application
+failure (option = false),  or should the client attempt to continue to
+publish information without it (option=true)
 yarn.timeline-service.client.best-effort
 false
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e94e539/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index fb41e40..d8e534e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -137,7 +137,7 @@ and cluster operators.
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.timeline-service.enabled` | Indicate to clients whether Timeline 
service is enabled or not. If enabled, the `TimelineClient` library used by 
applications will post entities and events to the Timeline server. Defaults to 
`false`. |
+| `yarn.timeline-service.enabled` | In the server side it indicates whether 
timeline service is enabled or not. And in the client side, users can enable it 
to indicate whether client wants to use timeline service. If it's enabled in 
the client side along with security, then yarn client tries to fetch the 
delegation tokens for the timeline server. Defaults to `false`. |
 | `yarn.resourcemanager.system-metrics-publisher.enabled` | The setting that 
controls whether or not YARN system metrics are published on the timeline 
server by RM. Defaults to `false`. |
 | `yarn.timeline-service.generic-application-history.enabled` | Indicate to 
clients whether to query generic application data from timeline history-service 
or not. If not enabled then application data is queried only from Resource 
Manager. Defaults to `false`. |
 
@@ -146,7 +146,7 @@ and cluster operators.
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.store-class` | Store class name for timeline store. 
Defaults to `org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore`. |
-| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb timeline store. Defaults to `${hadoop.tmp.dir}/yarn/timeline`. |
+|

hadoop git commit: YARN-4183. Clarify the behavior of timeline service config properties (Naganarasimha G R via sjlee)

2016-03-31 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk d95c6eb32 -> 6d67420db


YARN-4183. Clarify the behavior of timeline service config properties 
(Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d67420d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d67420d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d67420d

Branch: refs/heads/trunk
Commit: 6d67420dbc5c6097216fa40fcec8ed626b2bae14
Parents: d95c6eb
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu Mar 31 10:49:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu Mar 31 10:49:03 2016 -0700

--
 .../src/main/resources/yarn-default.xml| 13 ++---
 .../src/site/markdown/TimelineServer.md|  6 +++---
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d67420d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 33cd919..cb3c73a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1874,8 +1874,12 @@
   
 
   
-Indicate to clients whether timeline service is enabled or 
not.
-If enabled, clients will put entities and events to the timeline server.
+
+In the server side it indicates whether timeline service is enabled or not.
+And in the client side, users can enable it to indicate whether client 
wants
+to use timeline service. If it's enabled in the client side along with
+security, then yarn client tries to fetch the delegation tokens for the
+timeline server.
 
 yarn.timeline-service.enabled
 false
@@ -2027,7 +2031,10 @@
   
 
   
-Client policy for whether timeline operations are 
non-fatal
+Client policy for whether timeline operations are non-fatal.
+Should the failure to obtain a delegation token be considered an 
application
+failure (option = false),  or should the client attempt to continue to
+publish information without it (option=true)
 yarn.timeline-service.client.best-effort
 false
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d67420d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 8ef7d9a..9283e58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -141,7 +141,7 @@ and cluster operators.
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.timeline-service.enabled` | Indicate to clients whether Timeline 
service is enabled or not. If enabled, the `TimelineClient` library used by 
applications will post entities and events to the Timeline server. Defaults to 
`false`. |
+| `yarn.timeline-service.enabled` | In the server side it indicates whether 
timeline service is enabled or not. And in the client side, users can enable it 
to indicate whether client wants to use timeline service. If it's enabled in 
the client side along with security, then yarn client tries to fetch the 
delegation tokens for the timeline server. Defaults to `false`. |
 | `yarn.resourcemanager.system-metrics-publisher.enabled` | The setting that 
controls whether or not YARN system metrics are published on the timeline 
server by RM. Defaults to `false`. |
 | `yarn.timeline-service.generic-application-history.enabled` | Indicate to 
clients whether to query generic application data from timeline history-service 
or not. If not enabled then application data is queried only from Resource 
Manager. Defaults to `false`. |
 
@@ -150,7 +150,7 @@ and cluster operators.
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.store-class` | Store class name for timeline store. 
Defaults to `org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore`. |
-| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb timeline store. Defaults to `${hadoop.tmp.dir}/yarn/timeline`. |
+| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb

hadoop git commit: YARN-4711. NM is going down with NPE's due to single thread processing of events by Timeline client (Naganarasimha G R via sjlee)

2016-03-28 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 bcbb52d8a -> f746c80b3


YARN-4711. NM is going down with NPE's due to single thread processing of 
events by Timeline client (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f746c80b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f746c80b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f746c80b

Branch: refs/heads/YARN-2928
Commit: f746c80b34e9acf2390c90eab2e8a4eb993e52b9
Parents: bcbb52d
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Mar 28 15:50:03 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Mar 28 15:50:03 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../dev-support/findbugs-exclude.xml|  11 +-
 .../records/timelineservice/TimelineEntity.java |  25 ++-
 .../client/api/impl/TimelineClientImpl.java |  35 ++--
 .../api/impl/TestTimelineClientV2Impl.java  |  91 +++-
 .../metrics/ContainerMetricsConstants.java  |   8 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |  10 +-
 .../collectormanager/NMCollectorService.java|  10 +-
 .../application/Application.java|   4 -
 .../application/ApplicationImpl.java|  24 +--
 .../timelineservice/NMTimelinePublisher.java| 210 +++
 .../TestNMTimelinePublisher.java|  24 +--
 .../yarn/server/nodemanager/webapp/MockApp.java |   5 -
 13 files changed, 281 insertions(+), 179 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f746c80b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ab4c706..d71f7fd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -244,6 +244,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4712. CPU Usage Metric is not captured properly in YARN-2928.
 (Naganarasimha G R via varunsaxena)
 
+YARN-4711. NM is going down with NPE's due to single thread processing of
+events by Timeline client (Naganarasimha G R via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f746c80b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index bd6cca5..dbc5506 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -117,8 +117,15 @@
 
   
   
-
- 
+
+
+
+  
+
+  
+
+
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f746c80b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index acc132e..7ce8279 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -17,15 +17,6 @@
  */
 package org.apache.hadoop.yarn.api.records.timelineservice;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.annotate.JsonSetter;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -33,6 +24,16 @@ import java.util.NavigableSet;
 import java.util.Set;
 import java.util.TreeSet;
 
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Timeli

hadoop git commit: HDFS-9579. Provide bytes-read-by-network-distance metrics at FileSystem.Statistics level (Ming Ma via sjlee)

2016-03-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 478a25b92 -> d956e0a0b


HDFS-9579. Provide bytes-read-by-network-distance metrics at 
FileSystem.Statistics level (Ming Ma via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d956e0a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d956e0a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d956e0a0

Branch: refs/heads/branch-2
Commit: d956e0a0bbb9ba19b359680488d7167eb09681c5
Parents: 478a25b
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Mar 21 10:56:32 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Mar 21 10:56:32 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 118 ++-
 .../java/org/apache/hadoop/net/NetUtils.java|  16 ++-
 .../org/apache/hadoop/net/NetworkTopology.java  |  17 ++-
 .../java/org/apache/hadoop/net/NodeBase.java|  18 ++-
 .../org/apache/hadoop/hdfs/BlockReader.java |  10 +-
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   7 +-
 .../apache/hadoop/hdfs/BlockReaderLocal.java|  10 +-
 .../hadoop/hdfs/BlockReaderLocalLegacy.java |  10 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |  56 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  14 +--
 .../apache/hadoop/hdfs/ExternalBlockReader.java |  10 +-
 .../apache/hadoop/hdfs/RemoteBlockReader.java   |  29 ++---
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |  29 ++---
 .../org/apache/hadoop/hdfs/ReplicaAccessor.java |   7 ++
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |   4 +-
 .../hadoop/hdfs/TestBlockReaderLocal.java   |   4 +-
 .../org/apache/hadoop/hdfs/TestConnCache.java   |   2 -
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  62 ++
 .../hadoop/hdfs/TestExternalBlockReader.java|   8 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |   7 ++
 21 files changed, 366 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d956e0a0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 59907bf..ca5f3a3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3009,11 +3009,15 @@ public abstract class FileSystem extends Configured 
implements Closeable {
  * need.
  */
 public static class StatisticsData {
-  volatile long bytesRead;
-  volatile long bytesWritten;
-  volatile int readOps;
-  volatile int largeReadOps;
-  volatile int writeOps;
+  private volatile long bytesRead;
+  private volatile long bytesWritten;
+  private volatile int readOps;
+  private volatile int largeReadOps;
+  private volatile int writeOps;
+  private volatile long bytesReadLocalHost;
+  private volatile long bytesReadDistanceOfOneOrTwo;
+  private volatile long bytesReadDistanceOfThreeOrFour;
+  private volatile long bytesReadDistanceOfFiveOrLarger;
 
   /**
* Add another StatisticsData object to this one.
@@ -3024,6 +3028,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 this.readOps += other.readOps;
 this.largeReadOps += other.largeReadOps;
 this.writeOps += other.writeOps;
+this.bytesReadLocalHost += other.bytesReadLocalHost;
+this.bytesReadDistanceOfOneOrTwo += other.bytesReadDistanceOfOneOrTwo;
+this.bytesReadDistanceOfThreeOrFour +=
+other.bytesReadDistanceOfThreeOrFour;
+this.bytesReadDistanceOfFiveOrLarger +=
+other.bytesReadDistanceOfFiveOrLarger;
   }
 
   /**
@@ -3035,6 +3045,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 this.readOps = -this.readOps;
 this.largeReadOps = -this.largeReadOps;
 this.writeOps = -this.writeOps;
+this.bytesReadLocalHost = -this.bytesReadLocalHost;
+this.bytesReadDistanceOfOneOrTwo = -this.bytesReadDistanceOfOneOrTwo;
+this.bytesReadDistanceOfThreeOrFour =
+-this.bytesReadDistanceOfThreeOrFour;
+this.bytesReadDistanceOfFiveOrLarger =
+-this.bytesReadDistanceOfFiveOrLarger;
   }
 
   @Override
@@ -3063,6 +3079,22 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   public int getWriteOps() {
 

hadoop git commit: HDFS-9579. Provide bytes-read-by-network-distance metrics at FileSystem.Statistics level (Ming Ma via sjlee)

2016-03-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 33239c992 -> cd8b6889a


HDFS-9579. Provide bytes-read-by-network-distance metrics at 
FileSystem.Statistics level (Ming Ma via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd8b6889
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd8b6889
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd8b6889

Branch: refs/heads/trunk
Commit: cd8b6889a74a949e37f4b2eb664cdf3b59bfb93b
Parents: 33239c9
Author: Sangjin Lee <sj...@apache.org>
Authored: Sat Mar 19 14:02:04 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Sat Mar 19 14:02:04 2016 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 118 ++-
 .../java/org/apache/hadoop/net/NetUtils.java|  16 ++-
 .../org/apache/hadoop/net/NetworkTopology.java  |  17 ++-
 .../java/org/apache/hadoop/net/NodeBase.java|  18 ++-
 .../org/apache/hadoop/hdfs/BlockReader.java |  10 +-
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |   7 +-
 .../apache/hadoop/hdfs/BlockReaderLocal.java|  10 +-
 .../hadoop/hdfs/BlockReaderLocalLegacy.java |  10 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |  56 -
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  11 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  14 +--
 .../hadoop/hdfs/DFSStripedInputStream.java  |   3 -
 .../apache/hadoop/hdfs/ExternalBlockReader.java |  10 +-
 .../apache/hadoop/hdfs/RemoteBlockReader.java   |  29 ++---
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  |  29 ++---
 .../org/apache/hadoop/hdfs/ReplicaAccessor.java |   7 ++
 .../erasurecode/ErasureCodingWorker.java|   3 +-
 .../hadoop/fs/TestEnhancedByteBufferAccess.java |   4 +-
 .../hadoop/hdfs/TestBlockReaderLocal.java   |   4 +-
 .../org/apache/hadoop/hdfs/TestConnCache.java   |   2 -
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  62 ++
 .../hadoop/hdfs/TestExternalBlockReader.java|   8 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |   7 ++
 23 files changed, 368 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8b6889/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index a96ea40..a8a5c6d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3023,11 +3023,15 @@ public abstract class FileSystem extends Configured 
implements Closeable {
  * need.
  */
 public static class StatisticsData {
-  volatile long bytesRead;
-  volatile long bytesWritten;
-  volatile int readOps;
-  volatile int largeReadOps;
-  volatile int writeOps;
+  private volatile long bytesRead;
+  private volatile long bytesWritten;
+  private volatile int readOps;
+  private volatile int largeReadOps;
+  private volatile int writeOps;
+  private volatile long bytesReadLocalHost;
+  private volatile long bytesReadDistanceOfOneOrTwo;
+  private volatile long bytesReadDistanceOfThreeOrFour;
+  private volatile long bytesReadDistanceOfFiveOrLarger;
 
   /**
* Add another StatisticsData object to this one.
@@ -3038,6 +3042,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 this.readOps += other.readOps;
 this.largeReadOps += other.largeReadOps;
 this.writeOps += other.writeOps;
+this.bytesReadLocalHost += other.bytesReadLocalHost;
+this.bytesReadDistanceOfOneOrTwo += other.bytesReadDistanceOfOneOrTwo;
+this.bytesReadDistanceOfThreeOrFour +=
+other.bytesReadDistanceOfThreeOrFour;
+this.bytesReadDistanceOfFiveOrLarger +=
+other.bytesReadDistanceOfFiveOrLarger;
   }
 
   /**
@@ -3049,6 +3059,12 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 this.readOps = -this.readOps;
 this.largeReadOps = -this.largeReadOps;
 this.writeOps = -this.writeOps;
+this.bytesReadLocalHost = -this.bytesReadLocalHost;
+this.bytesReadDistanceOfOneOrTwo = -this.bytesReadDistanceOfOneOrTwo;
+this.bytesReadDistanceOfThreeOrFour =
+-this.bytesReadDistanceOfThreeOrFour;
+this.bytesReadDistanceOfFiveOrLarger =
+-this.bytesReadDistanceOfFiveOrLarger;
   }
 
   @Override
@@ -3077,6 +3093,22 @@ public abstract class Fil

[2/2] hadoop git commit: YARN-4062. Add the flush and compaction functionality via coprocessors and scanners for flow run table (Vrushali C via sjlee)

2016-03-19 Thread sjlee
YARN-4062. Add the flush and compaction functionality via coprocessors and 
scanners for flow run table (Vrushali C via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc698197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc698197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc698197

Branch: refs/heads/YARN-2928
Commit: bc698197cde0f40e6e85a9fb1a11f1f92952e91e
Parents: c6f4c51
Author: Sangjin Lee <sj...@apache.org>
Authored: Thu Mar 17 18:22:04 2016 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Thu Mar 17 18:22:04 2016 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  16 +
 .../src/main/resources/yarn-default.xml |  10 +
 .../storage/HBaseTimelineWriterImpl.java|   5 +-
 .../storage/common/TimelineStorageUtils.java|  55 ++
 .../storage/common/TimestampGenerator.java  |  13 +-
 .../storage/flow/AggregationOperation.java  |  17 +-
 .../storage/flow/FlowRunColumn.java |   4 +-
 .../storage/flow/FlowRunColumnPrefix.java   |   2 +-
 .../storage/flow/FlowRunCoprocessor.java|  70 +-
 .../storage/flow/FlowRunRowKey.java |  16 +
 .../storage/flow/FlowScanner.java   | 269 ++--
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/TestFlowDataGenerator.java | 178 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   | 112 +++-
 .../flow/TestHBaseStorageFlowRunCompaction.java | 635 +++
 16 files changed, 1365 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc698197/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4b7fd2c..762e43c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -127,6 +127,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4179. [reader implementation] support flow activity queries based on
 time (Varun Saxena via sjlee)
 
+YARN-4062. Add the flush and compaction functionality via coprocessors and
+scanners for flow run table (Vrushali C via sjlee)
+
   IMPROVEMENTS
 
 YARN-4224. Support fetching entities by UID and change the REST interface 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc698197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6ac6fb9..863b5a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1757,6 +1757,22 @@ public class YarnConfiguration extends Configuration {
   public static final int
   DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS = 60;
 
+  /**
+   * The name for setting that controls how long the final value of
+   * a metric of a completed app is retained before merging
+   * into the flow sum.
+   */
+  public static final String APP_FINAL_VALUE_RETENTION_THRESHOLD =
+  TIMELINE_SERVICE_PREFIX
+  + "coprocessor.app-final-value-retention-milliseconds";
+
+  /**
+   * The setting that controls how long the final value of a metric
+   * of a completed app is retained before merging into the flow sum.
+   */
+  public static final long DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD = 3 * 24
+  * 60 * 60 * 1000L;
+
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
   TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc698197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2cbc836..31b897b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2067,6 +2067,7 @@
 604800
   
 
+  
   
 The setting that controls 

[1/2] hadoop git commit: YARN-4062. Add the flush and compaction functionality via coprocessors and scanners for flow run table (Vrushali C via sjlee)

2016-03-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 c6f4c5136 -> bc698197c


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc698197/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
new file mode 100644
index 000..ace218b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
@@ -0,0 +1,635 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotEquals;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+
+/**
+ * Tests the FlowRun and FlowActivity Tables
+ */
+public class TestHBaseStorageFlowRunCompaction {
+
+  private static HBaseTestingUtility util;
+
+  private final String metric1 = "MAP_SLOT_MILLIS";
+  private final String metric2 = "HDFS_BYTES_READ";
+
+  private final byte[] aRowKey = Bytes.toBytes("a");
+  private final byte[] aFamily = Bytes.toBytes("family");
+  private final byte[] aQualifier = Bytes.toBytes("qualifier");
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+util = new HBaseTestingUtility();
+Configuration conf = util.getConfiguration();
+conf.setInt("hfile.format.version", 3);
+util.startMiniCluster();
+createSchema();
+  }
+
+  private static void createSchema() throws IOException {
+TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+  }
+
+  @Test
+  public void testWriteFlowRunCompaction() throws Exception {
+String cluster = "kompaction_cluster1";
+String user = 

hadoop git commit: YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via sjlee)

2016-02-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 9e10dd607 -> 84233f013


YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via 
sjlee)

(cherry picked from commit 553b591ba06bbf0b18dca674d25a48218fed0a26)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84233f01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84233f01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84233f01

Branch: refs/heads/branch-2.6
Commit: 84233f013df195484a817064291515cdad3bf833
Parents: 9e10dd6
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 24 09:29:41 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 24 09:37:28 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 5 -
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java | 5 -
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84233f01/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1470db5..3b95bfa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -18,6 +18,9 @@ Release 2.6.5 - UNRELEASED
 YARN-2046. Out of band heartbeats are sent only on container kill and
 possibly too early (Ming Ma via jlowe)
 
+YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via
+sjlee)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84233f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index b7be255..9b126af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -50,6 +50,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
 
   private final BlockingQueue eventQueue;
+  private volatile int lastEventQueueSizeLogged = 0;
   private volatile boolean stopped = false;
 
   // Configuration flag for enabling/disabling draining dispatcher's events on
@@ -238,7 +239,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 
   /* all this method does is enqueue all the events onto the queue */
   int qSize = eventQueue.size();
-  if (qSize !=0 && qSize %1000 == 0) {
+  if (qSize != 0 && qSize % 1000 == 0
+  && lastEventQueueSizeLogged != qSize) {
+lastEventQueueSizeLogged = qSize;
 LOG.info("Size of event-queue is " + qSize);
   }
   int remCapacity = eventQueue.remainingCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84233f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 34d7e47..7159e64 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -645,6 +645,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 private final ResourceScheduler scheduler;
 private final BlockingQueue eventQueue =
   new LinkedBlockingQueue();
+private volatile int lastEventQueueSizeLogged = 0;
 private final Thread eventProcessor;
 private volatile boolean stopped = false;
private boolean shouldExitOnError = false;

hadoop git commit: YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via sjlee)

2016-02-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b406dcaff -> 8ceb9f38a


YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via 
sjlee)

(cherry picked from commit 553b591ba06bbf0b18dca674d25a48218fed0a26)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ceb9f38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ceb9f38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ceb9f38

Branch: refs/heads/branch-2.7
Commit: 8ceb9f38af5cbce631e2928ecd7728bf1a358369
Parents: b406dca
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 24 09:29:41 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 24 09:35:23 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 5 -
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java | 5 -
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb9f38/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f0de550..f0e44e5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.6.5 - UNRELEASED
 YARN-2046. Out of band heartbeats are sent only on container kill and
 possibly too early (Ming Ma via jlowe)
 
+YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via
+sjlee)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb9f38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 403381b..0f1f05e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -50,6 +50,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
 
   private final BlockingQueue eventQueue;
+  private volatile int lastEventQueueSizeLogged = 0;
   private volatile boolean stopped = false;
 
   // Configuration flag for enabling/disabling draining dispatcher's events on
@@ -236,7 +237,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 
   /* all this method does is enqueue all the events onto the queue */
   int qSize = eventQueue.size();
-  if (qSize !=0 && qSize %1000 == 0) {
+  if (qSize != 0 && qSize % 1000 == 0
+  && lastEventQueueSizeLogged != qSize) {
+lastEventQueueSizeLogged = qSize;
 LOG.info("Size of event-queue is " + qSize);
   }
   int remCapacity = eventQueue.remainingCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb9f38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 711b69c..5faaab2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -633,6 +633,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 private final ResourceScheduler scheduler;
 private final BlockingQueue eventQueue =
   new LinkedBlockingQueue();
+private volatile int lastEventQueueSizeLogged = 0;
 private final Thread eventProcessor;
 private volatile boolean stopped = false;
private boolean shouldExitOnError = false;

hadoop git commit: YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via sjlee)

2016-02-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8ad907f06 -> 6b59e5986


YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via 
sjlee)

(cherry picked from commit 553b591ba06bbf0b18dca674d25a48218fed0a26)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b59e598
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b59e598
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b59e598

Branch: refs/heads/branch-2.8
Commit: 6b59e59865893ee8412f4b3205a67c4a805e61ef
Parents: 8ad907f
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 24 09:29:41 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 24 09:32:54 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 5 -
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java | 5 -
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b59e598/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aa02fc2..904cbcd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2173,6 +2173,9 @@ Release 2.6.5 - UNRELEASED
 YARN-2046. Out of band heartbeats are sent only on container kill and
 possibly too early (Ming Ma via jlowe)
 
+YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via
+sjlee)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b59e598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index ee6a637..f5361c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -50,6 +50,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
 
   private final BlockingQueue eventQueue;
+  private volatile int lastEventQueueSizeLogged = 0;
   private volatile boolean stopped = false;
 
   // Configuration flag for enabling/disabling draining dispatcher's events on
@@ -236,7 +237,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 
   /* all this method does is enqueue all the events onto the queue */
   int qSize = eventQueue.size();
-  if (qSize !=0 && qSize %1000 == 0) {
+  if (qSize != 0 && qSize % 1000 == 0
+  && lastEventQueueSizeLogged != qSize) {
+lastEventQueueSizeLogged = qSize;
 LOG.info("Size of event-queue is " + qSize);
   }
   int remCapacity = eventQueue.remainingCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b59e598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index bd8a968..ee4419d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -658,6 +658,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 private final ResourceScheduler scheduler;
 private final BlockingQueue eventQueue =
   new LinkedBlockingQueue();
+private volatile int lastEventQueueSizeLogged = 0;
 private final Thread eventProcessor;
 private volatile boolean stopped = false;
private boolean shouldExitOnError = false;

hadoop git commit: YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via sjlee)

2016-02-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9ea19a861 -> 432a2367c


YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via 
sjlee)

(cherry picked from commit 553b591ba06bbf0b18dca674d25a48218fed0a26)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/432a2367
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/432a2367
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/432a2367

Branch: refs/heads/branch-2
Commit: 432a2367ce33e4684f74bb04d88e86a6a5aaabbd
Parents: 9ea19a8
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 24 09:29:41 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 24 09:30:37 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 5 -
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java | 5 -
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/432a2367/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6f64bd1..a470ff1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2346,6 +2346,9 @@ Release 2.6.5 - UNRELEASED
 YARN-2046. Out of band heartbeats are sent only on container kill and
 possibly too early (Ming Ma via jlowe)
 
+YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via
+sjlee)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/432a2367/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index ee6a637..f5361c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -50,6 +50,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
 
   private final BlockingQueue eventQueue;
+  private volatile int lastEventQueueSizeLogged = 0;
   private volatile boolean stopped = false;
 
   // Configuration flag for enabling/disabling draining dispatcher's events on
@@ -236,7 +237,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 
   /* all this method does is enqueue all the events onto the queue */
   int qSize = eventQueue.size();
-  if (qSize !=0 && qSize %1000 == 0) {
+  if (qSize != 0 && qSize % 1000 == 0
+  && lastEventQueueSizeLogged != qSize) {
+lastEventQueueSizeLogged = qSize;
 LOG.info("Size of event-queue is " + qSize);
   }
   int remCapacity = eventQueue.remainingCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/432a2367/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index b2950bb..80b33a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -727,6 +727,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 private final ResourceScheduler scheduler;
 private final BlockingQueue eventQueue =
   new LinkedBlockingQueue();
+private volatile int lastEventQueueSizeLogged = 0;
 private final Thread eventProcessor;
 private volatile boolean stopped = false;
private boolean shouldExitOnError = false;

hadoop git commit: YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via sjlee)

2016-02-24 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3369a4f69 -> 553b591ba


YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via 
sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/553b591b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/553b591b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/553b591b

Branch: refs/heads/trunk
Commit: 553b591ba06bbf0b18dca674d25a48218fed0a26
Parents: 3369a4f
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 24 09:29:41 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 24 09:29:41 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java | 5 -
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java | 5 -
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/553b591b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 482fb21..94bbcbe 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2396,6 +2396,9 @@ Release 2.6.5 - UNRELEASED
 YARN-2046. Out of band heartbeats are sent only on container kill and
 possibly too early (Ming Ma via jlowe)
 
+YARN-4722. AsyncDispatcher logs redundant event queue sizes (Jason Lowe via
+sjlee)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/553b591b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index ee6a637..f5361c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -50,6 +50,7 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);
 
   private final BlockingQueue eventQueue;
+  private volatile int lastEventQueueSizeLogged = 0;
   private volatile boolean stopped = false;
 
   // Configuration flag for enabling/disabling draining dispatcher's events on
@@ -236,7 +237,9 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
 
   /* all this method does is enqueue all the events onto the queue */
   int qSize = eventQueue.size();
-  if (qSize !=0 && qSize %1000 == 0) {
+  if (qSize != 0 && qSize % 1000 == 0
+  && lastEventQueueSizeLogged != qSize) {
+lastEventQueueSizeLogged = qSize;
 LOG.info("Size of event-queue is " + qSize);
   }
   int remCapacity = eventQueue.remainingCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/553b591b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index b2950bb..80b33a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -727,6 +727,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 private final ResourceScheduler scheduler;
 private final BlockingQueue eventQueue =
   new LinkedBlockingQueue();
+private volatile int lastEventQueueSizeLogged = 0;
 private final Thread eventProcessor;
 private volatile boolean stopped = false;
 private boolean shouldExitOnError = false;
@@ -804,7 +805,9 @@ public class Res

hadoop git commit: YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when possible (Ming Ma via sjlee)

2016-02-17 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 25c2597f9 -> 23b5c7172


YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when 
possible (Ming Ma via sjlee)

(cherry picked from commit 7de70680fe44967e2afc92ba4c92f8e7afa7b151)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b5c717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b5c717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b5c717

Branch: refs/heads/branch-2
Commit: 23b5c71729c26c2879e6cc41ce5862af0215d1fa
Parents: 25c2597
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 17 20:55:21 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 17 21:05:10 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java   | 7 ++-
 2 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b5c717/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 50c913a..38a672f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2312,6 +2312,9 @@ Release 2.6.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when
+possible (Ming Ma via sjlee)
+
   BUG FIXES
 
 Release 2.6.4 - 2016-02-11

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b5c717/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 488f34e..7e0a693 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -890,7 +890,12 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   public Resource getResourceUsage() {
 // Here the getPreemptedResources() always return zero, except in
 // a preemption round
-return Resources.subtract(getCurrentConsumption(), 
getPreemptedResources());
+// In the common case where preempted resource is zero, return the
+// current consumption Resource object directly without calling
+// Resources.subtract which creates a new Resource object for each call.
+return getPreemptedResources().equals(Resources.none()) ?
+getCurrentConsumption() :
+Resources.subtract(getCurrentConsumption(), getPreemptedResources());
   }
 
   @Override



hadoop git commit: YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when possible (Ming Ma via sjlee)

2016-02-17 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5ff2012f6 -> 3a8c7ffeb


YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when 
possible (Ming Ma via sjlee)

(cherry picked from commit 7de70680fe44967e2afc92ba4c92f8e7afa7b151)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a8c7ffe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a8c7ffe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a8c7ffe

Branch: refs/heads/branch-2.8
Commit: 3a8c7ffeb4a3f9f335debb93feb77acb3e6aaefe
Parents: 5ff2012
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 17 20:55:21 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 17 21:03:07 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java   | 7 ++-
 2 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a8c7ffe/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c9c02e2..78f8a71 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2147,6 +2147,9 @@ Release 2.6.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when
+possible (Ming Ma via sjlee)
+
   BUG FIXES
 
 Release 2.6.4 - 2016-02-11

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a8c7ffe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 5f753dd..1daea0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -865,7 +865,12 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   public Resource getResourceUsage() {
 // Here the getPreemptedResources() always return zero, except in
 // a preemption round
-return Resources.subtract(getCurrentConsumption(), 
getPreemptedResources());
+// In the common case where preempted resource is zero, return the
+// current consumption Resource object directly without calling
+// Resources.subtract which creates a new Resource object for each call.
+return getPreemptedResources().equals(Resources.none()) ?
+getCurrentConsumption() :
+Resources.subtract(getCurrentConsumption(), getPreemptedResources());
   }
 
   @Override



hadoop git commit: YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when possible (Ming Ma via sjlee)

2016-02-17 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 89e90becf -> 319790432


YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when 
possible (Ming Ma via sjlee)

(cherry picked from commit 7de70680fe44967e2afc92ba4c92f8e7afa7b151)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31979043
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31979043
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31979043

Branch: refs/heads/branch-2.6
Commit: 319790432d01f7a86bb7235d406a32ac4230b840
Parents: 89e90be
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 17 20:55:21 2016 -0800
Committer: Sangjin Lee <sj...@twitter.com>
Committed: Wed Feb 17 20:57:09 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java   | 7 ++-
 2 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31979043/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ad6898c..4327e51 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -10,6 +10,9 @@ Release 2.6.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when
+possible (Ming Ma via sjlee)
+
   BUG FIXES
 
 Release 2.6.4 - 2016-02-11

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31979043/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index b23ec3e..a30a2c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -722,7 +722,12 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   public Resource getResourceUsage() {
 // Here the getPreemptedResources() always return zero, except in
 // a preemption round
-return Resources.subtract(getCurrentConsumption(), 
getPreemptedResources());
+// In the common case where preempted resource is zero, return the
+// current consumption Resource object directly without calling
+// Resources.subtract which creates a new Resource object for each call.
+return getPreemptedResources().equals(Resources.none()) ?
+getCurrentConsumption() :
+Resources.subtract(getCurrentConsumption(), getPreemptedResources());
   }
 
   @Override



hadoop git commit: YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when possible (Ming Ma via sjlee)

2016-02-17 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk a0c95b5fc -> 7de70680f


YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when 
possible (Ming Ma via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7de70680
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7de70680
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7de70680

Branch: refs/heads/trunk
Commit: 7de70680fe44967e2afc92ba4c92f8e7afa7b151
Parents: a0c95b5
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 17 20:55:21 2016 -0800
Committer: Sangjin Lee <sj...@twitter.com>
Committed: Wed Feb 17 20:55:21 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../server/resourcemanager/scheduler/fair/FSAppAttempt.java   | 7 ++-
 2 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7de70680/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c172054..fa0ba44 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -2362,6 +2362,9 @@ Release 2.6.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+YARN-4690. Skip object allocation in FSAppAttempt#getResourceUsage when
+possible (Ming Ma via sjlee)
+
   BUG FIXES
 
 Release 2.6.4 - 2016-02-11

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7de70680/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 488f34e..7e0a693 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -890,7 +890,12 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   public Resource getResourceUsage() {
 // Here the getPreemptedResources() always return zero, except in
 // a preemption round
-return Resources.subtract(getCurrentConsumption(), 
getPreemptedResources());
+// In the common case where preempted resource is zero, return the
+// current consumption Resource object directly without calling
+// Resources.subtract which creates a new Resource object for each call.
+return getPreemptedResources().equals(Resources.none()) ?
+getCurrentConsumption() :
+Resources.subtract(getCurrentConsumption(), getPreemptedResources());
   }
 
   @Override



hadoop git commit: YARN-3367. Replace starting a separate thread for post entity with event loop in TimelineClient (Naganarasimha G R via sjlee)

2016-02-09 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 db76a3ad0 -> d491ef080


YARN-3367. Replace starting a separate thread for post entity with event loop 
in TimelineClient (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d491ef08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d491ef08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d491ef08

Branch: refs/heads/YARN-2928
Commit: d491ef080096c62964b8327555bf47ceae6e9292
Parents: db76a3a
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Feb 9 09:07:37 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue Feb 9 09:07:37 2016 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  61 +---
 .../mapred/JobHistoryFileReplayMapper.java  |   8 +-
 .../hadoop/mapred/TimelineEntityConverter.java  |  12 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../timelineservice/TimelineEntities.java   |  17 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../distributedshell/ApplicationMaster.java |  78 +
 .../api/async/impl/AMRMClientAsyncImpl.java |  26 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |   8 +-
 .../client/api/impl/TimelineClientImpl.java | 286 ++---
 .../src/main/resources/yarn-default.xml |   7 +
 .../api/impl/TestTimelineClientV2Impl.java  | 304 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   4 +-
 13 files changed, 623 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d491ef08/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 6e5afb1..1c5446f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -27,10 +27,7 @@ import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -75,7 +72,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
 /**
  * The job history events get routed to this class. This class writes the Job
  * history events to the DFS directly into a staging dir and then moved to a
@@ -129,10 +125,6 @@ public class JobHistoryEventHandler extends AbstractService
   
   private boolean timelineServiceV2Enabled = false;
 
-  // For posting entities in new timeline service in a non-blocking way
-  // TODO YARN-3367 replace with event loop in TimelineClient.
-  private ExecutorService threadPool;
-
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
   private static final String MAPREDUCE_TASK_ATTEMPT_ENTITY_TYPE =
@@ -272,10 +264,6 @@ public class JobHistoryEventHandler extends AbstractService
 YarnConfiguration.timelineServiceV2Enabled(conf);
 LOG.info("Timeline service is enabled; version: " +
 YarnConfiguration.getTimelineServiceVersion(conf));
-if (timelineServiceV2Enabled) {
-  // initialize the thread pool for v.2 timeline service
-  threadPool = createThreadPool();
-}
   } else {
 LOG.info("Timeline service is not enabled");
   }
@@ -449,35 +437,9 @@ public class JobHistoryEventHandler extends AbstractService
 if (timelineClient != null) {
   timelineClient.stop();
 }
-if (threadPool != null) {
-  shutdownAndAwaitTermination();
-}
 LOG.info("Stopped JobHistoryEventHandler. super.stop()");
 super.serviceStop();
   }
-  
-  // TODO remove threa

[1/4] hadoop git commit: YARN-4409. Fix javadoc and checkstyle issues in timelineservice code (Varun Saxena via sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 10a4f8ae6 -> db76a3ad0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
index d991b42..f9eb5b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
@@ -24,7 +24,8 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 /**
  * Represents the flow run table column families.
  */
-public enum FlowActivityColumnFamily implements 
ColumnFamily {
+public enum FlowActivityColumnFamily
+implements ColumnFamily {
 
   /**
* Info column family houses known columns, specifically ones included in

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 21ddcc2..a5933da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -31,12 +31,13 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStor
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
 
 /**
- * Identifies partially qualified columns for the {@link FlowActivityTable}
+ * Identifies partially qualified columns for the {@link FlowActivityTable}.
  */
-public enum FlowActivityColumnPrefix implements 
ColumnPrefix {
+public enum FlowActivityColumnPrefix
+implements ColumnPrefix {
 
   /**
-   * To store run ids of the flows
+   * To store run ids of the flows.
*/
   RUN_ID(FlowActivityColumnFamily.INFO, "r", null);
 
@@ -162,8 +163,8 @@ public enum FlowActivityColumnPrefix implements 
ColumnPrefix
* org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
* #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result)
*/
-  public  NavigableMap> 
readResultsWithTimestamps(
-  Result result) throws IOException {
+  public  NavigableMap>
+  readResultsWithTimestamps(Result result) throws IOException {
 return column.readResultsWithTimestamps(result, columnPrefixBytes);
   }
 
@@ -179,8 +180,8 @@ public enum FlowActivityColumnPrefix implements 
ColumnPrefix
   public static final FlowActivityColumnPrefix columnFor(String columnPrefix) {
 
 // Match column based on value, assume column family matches.
-for (FlowActivityColumnPrefix flowActivityColPrefix : 
FlowActivityColumnPrefix
-.values()) {
+for (FlowActivityColumnPrefix flowActivityColPrefix :
+FlowActivityColumnPrefix.values()) {
   // Find a match based only on name.
   if (flowActivityColPrefix.getColumnPrefix().equals(columnPrefix)) {
 return flowActivityColPrefix;
@@ -209,8 +210,8 @@ public enum FlowActivityColumnPrefix implements 
ColumnPrefix
 // TODO: needs unit test to confirm and need to update javadoc to explain
 // null prefix case.
 
-for (FlowActivityColumnPrefix flowActivityColumnPrefix : 
FlowActivityColumnPrefix
-.values()) {
+for (FlowActivityColumnPrefix flowActivityColumnPrefix :
+FlowActivityColumnPrefix.values()) {
   // Find a match based column 

[3/4] hadoop git commit: YARN-4409. Fix javadoc and checkstyle issues in timelineservice code (Varun Saxena via sjlee)

2016-02-08 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index fc05310..12daa95 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -388,15 +388,15 @@ public class RMActiveServiceContext {
   @Private
   @Unstable
   public void setRMTimelineCollectorManager(
-  RMTimelineCollectorManager timelineCollectorManager) {
-this.timelineCollectorManager = timelineCollectorManager;
+  RMTimelineCollectorManager collectorManager) {
+this.timelineCollectorManager = collectorManager;
   }
 
   @Private
   @Unstable
   public void setSystemMetricsPublisher(
-  SystemMetricsPublisher systemMetricsPublisher) {
-this.systemMetricsPublisher = systemMetricsPublisher;
+  SystemMetricsPublisher metricsPublisher) {
+this.systemMetricsPublisher = metricsPublisher;
   }
 
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index e122ab4..4c72912 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -379,8 +379,8 @@ public class RMContextImpl implements RMContext {
   
   @Override
   public void setSystemMetricsPublisher(
-  SystemMetricsPublisher systemMetricsPublisher) {
-this.systemMetricsPublisher = systemMetricsPublisher;
+  SystemMetricsPublisher metricsPublisher) {
+this.systemMetricsPublisher = metricsPublisher;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
index a8c00a4..d4a4fc3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
@@ -30,6 +30,10 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 
+/**
+ * Abstract implementation of SystemMetricsPublisher which is then extended by
+ * metrics publisher implementations depending on timeline service version.
+ */
 public abstract class AbstractSystemMetricsPublisher extends CompositeService
 implements SystemMetricsPublisher {
   private MultiThreadedDispatcher dispatcher;
@@ -46,13 +50,18 @@ public abstract class AbstractSystemMetricsPublisher 
extends CompositeService
   protected void serviceInit(Configuration conf) throws Exception {
 dispatcher =
 new 

[4/4] hadoop git commit: YARN-4409. Fix javadoc and checkstyle issues in timelineservice code (Varun Saxena via sjlee)

2016-02-08 Thread sjlee
YARN-4409. Fix javadoc and checkstyle issues in timelineservice code (Varun 
Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db76a3ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db76a3ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db76a3ad

Branch: refs/heads/YARN-2928
Commit: db76a3ad03682fdafcbec5276eef11ecdbd2719d
Parents: 10a4f8a
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 12:17:43 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 12:17:43 2016 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  | 170 ++-
 .../hadoop/mapreduce/jobhistory/TestEvents.java |   2 +-
 .../mapreduce/util/JobHistoryEventUtils.java|   7 +-
 .../hadoop/mapred/TimelineEntityConverter.java  |   6 +-
 .../hadoop/mapreduce/JobHistoryFileParser.java  |   3 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ApplicationAttemptEntity.java   |   9 +-
 .../timelineservice/ApplicationEntity.java  |   9 +-
 .../records/timelineservice/ClusterEntity.java  |   6 +-
 .../timelineservice/ContainerEntity.java|   9 +-
 .../records/timelineservice/FlowRunEntity.java  |   9 +-
 .../HierarchicalTimelineEntity.java |   8 +-
 .../records/timelineservice/QueueEntity.java|   6 +-
 .../timelineservice/TimelineEntities.java   |  11 +-
 .../records/timelineservice/TimelineEntity.java | 106 ++--
 .../timelineservice/TimelineEntityType.java |  71 +---
 .../records/timelineservice/TimelineEvent.java  |  30 ++--
 .../records/timelineservice/TimelineMetric.java |  39 +++--
 .../timelineservice/TimelineWriteResponse.java  |  59 +++
 .../api/records/timelineservice/UserEntity.java |   6 +-
 .../records/timelineservice/package-info.java   |   8 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  13 +-
 .../client/api/impl/TimelineClientImpl.java |  16 +-
 .../yarn/util/timeline/TimelineUtils.java   |  21 ++-
 .../yarn/server/nodemanager/NodeManager.java|   2 +-
 .../collectormanager/NMCollectorService.java|   9 +-
 .../collectormanager/package-info.java  |  28 +++
 .../timelineservice/NMTimelineEvent.java|   4 +
 .../timelineservice/NMTimelineEventType.java|   3 +
 .../timelineservice/NMTimelinePublisher.java|  14 +-
 .../timelineservice/package-info.java   |  29 
 .../resourcemanager/RMActiveServiceContext.java |   8 +-
 .../server/resourcemanager/RMContextImpl.java   |   4 +-
 .../metrics/AbstractSystemMetricsPublisher.java |  20 ++-
 .../metrics/NoOpSystemMetricPublisher.java  |   2 +-
 .../metrics/SystemMetricsPublisher.java |   3 +
 .../metrics/TimelineServiceV1Publisher.java |   8 +-
 .../metrics/TimelineServiceV2Publisher.java |   7 +-
 .../resourcemanager/metrics/package-info.java   |  28 +++
 .../rmapp/RMAppCollectorUpdateEvent.java|   3 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |   5 +-
 .../RMTimelineCollectorManager.java |  33 ++--
 .../timelineservice/package-info.java   |  28 +++
 .../collector/AppLevelTimelineCollector.java|   3 +-
 .../collector/NodeTimelineCollectorManager.java |   8 +-
 .../PerNodeTimelineCollectorsAuxService.java|   2 +
 .../collector/TimelineCollector.java|   2 +
 .../collector/TimelineCollectorManager.java |  12 +-
 .../collector/TimelineCollectorWebService.java  |  70 +---
 .../timelineservice/collector/package-info.java |  29 
 .../reader/TimelineReaderManager.java   |  32 +++-
 .../reader/TimelineReaderServer.java|   2 +-
 .../reader/TimelineReaderWebServices.java   |  28 +--
 .../reader/TimelineReaderWebServicesUtils.java  |  50 +++---
 .../reader/TimelineUIDConverter.java|  10 +-
 .../reader/filter/TimelineFilterUtils.java  |   8 +-
 .../timelineservice/reader/package-info.java|   6 +
 .../storage/FileSystemTimelineReaderImpl.java   |  70 
 .../storage/FileSystemTimelineWriterImpl.java   |   7 +-
 .../storage/HBaseTimelineReaderImpl.java|   3 +
 .../storage/HBaseTimelineWriterImpl.java|  29 ++--
 .../storage/OfflineAggregationWriter.java   |  13 +-
 .../PhoenixOfflineAggregationWriterImpl.java|  27 +--
 .../storage/TimelineAggregationTrack.java   |   2 +-
 .../timelineservice/storage/TimelineReader.java |   6 +-
 .../storage/TimelineSchemaCreator.java  |   4 +-
 .../timelineservice/storage/TimelineWriter.java |  15 +-
 .../storage/application/ApplicationColumn.java  |   4 +-
 .../application/ApplicationColumnPrefix.java|   8 +-
 .../storage/application/ApplicationRowKey.java  |  33 ++--
 .../storage/application/ApplicationTable.java   |  16 +-
 .../storage/application/package-info.java

[2/4] hadoop git commit: YARN-4409. Fix javadoc and checkstyle issues in timelineservice code (Varun Saxena via sjlee)

2016-02-08 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
index 50136de..663a18a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
@@ -42,12 +42,13 @@ public interface TimelineWriter extends Service {
* @param userId context user ID
* @param flowName context flow name
* @param flowVersion context flow version
-   * @param flowRunId
-   * @param appId context app ID
+   * @param flowRunId run id for the flow.
+   * @param appId context app ID.
* @param data
*  a {@link TimelineEntities} object.
* @return a {@link TimelineWriteResponse} object.
-   * @throws IOException
+   * @throws IOException if there is any exception encountered while storing
+   * or writing entities to the backend storage.
*/
   TimelineWriteResponse write(String clusterId, String userId,
   String flowName, String flowVersion, long flowRunId, String appId,
@@ -65,8 +66,11 @@ public interface TimelineWriter extends Service {
*  a {@link TimelineEntity} object
*  a {@link TimelineAggregationTrack} enum
*  value.
+   * @param track Specifies the track or dimension along which aggregation 
would
+   * occur. Includes USER, FLOW, QUEUE, etc.
* @return a {@link TimelineWriteResponse} object.
-   * @throws IOException
+   * @throws IOException if there is any exception encountered while 
aggregating
+   * entities to the backend storage.
*/
   TimelineWriteResponse aggregate(TimelineEntity data,
   TimelineAggregationTrack track) throws IOException;
@@ -76,7 +80,8 @@ public interface TimelineWriter extends Service {
* written to the storage when the method returns. This may be a potentially
* time-consuming operation, and should be used judiciously.
*
-   * @throws IOException
+   * @throws IOException if there is any exception encountered while flushing
+   * entities to the backend storage.
*/
   void flush() throws IOException;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db76a3ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
index c03c9b6..5734389 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
@@ -34,7 +34,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
 public enum ApplicationColumn implements Column {
 
   /**
-   * App id
+   * App id.
*/
   ID(ApplicationColumnFamily.INFO, "id"),
 
@@ -84,7 +84,7 @@ public enum ApplicationColumn implements 
Column {
   /**
* Retrieve an {@link ApplicationColumn} given a name, or null if there is no
* match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
+   * and only if {@code x.equals(y)} or {@code (x == y == null)}.
*
* @param columnQualifier Name of the column to retrieve
* @return the corresponding {@link ApplicationColumn} or null


hadoop git commit: HADOOP-12773. HBase classes fail to load with client/job classloader enabled (sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 494cfd1dc -> 463195dbe


HADOOP-12773. HBase classes fail to load with client/job classloader enabled 
(sjlee)

(cherry picked from commit 58acbf940a92ef8a761208a7a743175ee7b3377d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/463195db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/463195db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/463195db

Branch: refs/heads/branch-2
Commit: 463195dbe66c1e111499787912f79c94b3b3d4e8
Parents: 494cfd1
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 13:55:30 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 13:57:44 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../org.apache.hadoop.application-classloader.properties  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/463195db/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ddd5710..38413d398 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1918,6 +1918,9 @@ Release 2.6.5 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12773. HBase classes fail to load with client/job classloader
+    enabled (sjlee)
+
 Release 2.6.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463195db/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index 2264920..cbbb887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -50,6 +50,7 @@ system.classes.default=java.,\
   org.xml.sax.,\
   org.apache.commons.logging.,\
   org.apache.log4j.,\
+  -org.apache.hadoop.hbase.,\
   org.apache.hadoop.,\
   core-default.xml,\
   hdfs-default.xml,\



hadoop git commit: HADOOP-12773. HBase classes fail to load with client/job classloader enabled (sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 aae0f183a -> 13bb016ba


HADOOP-12773. HBase classes fail to load with client/job classloader enabled 
(sjlee)

(cherry picked from commit 58acbf940a92ef8a761208a7a743175ee7b3377d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13bb016b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13bb016b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13bb016b

Branch: refs/heads/branch-2.8
Commit: 13bb016ba90511a8e2568262865925e8a1fcd4f0
Parents: aae0f18
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 13:55:30 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 13:59:10 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../org.apache.hadoop.application-classloader.properties  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13bb016b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1f4f96e..3fb94af 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1856,6 +1856,9 @@ Release 2.6.5 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12773. HBase classes fail to load with client/job classloader
+    enabled (sjlee)
+
 Release 2.6.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13bb016b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index 2264920..cbbb887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -50,6 +50,7 @@ system.classes.default=java.,\
   org.xml.sax.,\
   org.apache.commons.logging.,\
   org.apache.log4j.,\
+  -org.apache.hadoop.hbase.,\
   org.apache.hadoop.,\
   core-default.xml,\
   hdfs-default.xml,\



hadoop git commit: HADOOP-12773. HBase classes fail to load with client/job classloader enabled (sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk bffaa38a9 -> 58acbf940


HADOOP-12773. HBase classes fail to load with client/job classloader enabled 
(sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58acbf94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58acbf94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58acbf94

Branch: refs/heads/trunk
Commit: 58acbf940a92ef8a761208a7a743175ee7b3377d
Parents: bffaa38
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 13:55:30 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 13:55:30 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../org.apache.hadoop.application-classloader.properties  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58acbf94/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9130a8d..f8535e1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2530,6 +2530,9 @@ Release 2.6.5 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12773. HBase classes fail to load with client/job classloader
+    enabled (sjlee)
+
 Release 2.6.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58acbf94/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index 2264920..cbbb887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -50,6 +50,7 @@ system.classes.default=java.,\
   org.xml.sax.,\
   org.apache.commons.logging.,\
   org.apache.log4j.,\
+  -org.apache.hadoop.hbase.,\
   org.apache.hadoop.,\
   core-default.xml,\
   hdfs-default.xml,\



hadoop git commit: HADOOP-12773. HBase classes fail to load with client/job classloader enabled (sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 dfea9c711 -> 20e013cac


HADOOP-12773. HBase classes fail to load with client/job classloader enabled 
(sjlee)

(cherry picked from commit 58acbf940a92ef8a761208a7a743175ee7b3377d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20e013ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20e013ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20e013ca

Branch: refs/heads/branch-2.7
Commit: 20e013cac816aa82082c85805070b7e827e0830f
Parents: dfea9c7
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 13:55:30 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 14:00:19 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../org.apache.hadoop.application-classloader.properties  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e013ca/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a84cee5..30a5721 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -868,6 +868,9 @@ Release 2.6.5 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12773. HBase classes fail to load with client/job classloader
+    enabled (sjlee)
+
 Release 2.6.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20e013ca/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index 2264920..cbbb887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -50,6 +50,7 @@ system.classes.default=java.,\
   org.xml.sax.,\
   org.apache.commons.logging.,\
   org.apache.log4j.,\
+  -org.apache.hadoop.hbase.,\
   org.apache.hadoop.,\
   core-default.xml,\
   hdfs-default.xml,\



hadoop git commit: HADOOP-12773. HBase classes fail to load with client/job classloader enabled (sjlee)

2016-02-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 ea517acfb -> 7f8c89c33


HADOOP-12773. HBase classes fail to load with client/job classloader enabled 
(sjlee)

(cherry picked from commit 58acbf940a92ef8a761208a7a743175ee7b3377d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f8c89c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f8c89c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f8c89c3

Branch: refs/heads/branch-2.6
Commit: 7f8c89c33a216e31c4f7ad04d86db2fa46550b2f
Parents: ea517ac
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Feb 8 13:55:30 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Feb 8 14:00:55 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../org.apache.hadoop.application-classloader.properties  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8c89c3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9f37a0a..a50fafb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.6.5 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12773. HBase classes fail to load with client/job classloader
+    enabled (sjlee)
+
 Release 2.6.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8c89c3/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
index 2264920..cbbb887 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
+++ 
b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties
@@ -50,6 +50,7 @@ system.classes.default=java.,\
   org.xml.sax.,\
   org.apache.commons.logging.,\
   org.apache.log4j.,\
+  -org.apache.hadoop.hbase.,\
   org.apache.hadoop.,\
   core-default.xml,\
   hdfs-default.xml,\



[2/3] hadoop git commit: YARN-4446. Refactor reader API for better extensibility (Varun Saxena via sjlee)

2016-02-03 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a4f8ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
index 0eeb195..ccb33b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
@@ -20,17 +20,14 @@ package 
org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import java.io.IOException;
 
-import java.util.EnumSet;
-import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.service.Service;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
 
 /** ATSv2 reader interface. */
 @Private
@@ -38,11 +35,6 @@ import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefi
 public interface TimelineReader extends Service {
 
   /**
-   * Default limit for {@link #getEntities}.
-   */
-  long DEFAULT_LIMIT = 100;
-
-  /**
* Possible fields to retrieve for {@link #getEntities} and
* {@link #getEntity}.
*/
@@ -57,55 +49,61 @@ public interface TimelineReader extends Service {
   }
 
   /**
-   * The API to fetch the single entity given the entity identifier in the
-   * scope of the given context.
-   *
-   * @param userId
-   *Context user Id(optional).
-   * @param clusterId
-   *Context cluster Id(mandatory).
-   * @param flowName
-   *Context flow Id (optional).
-   * @param flowRunId
-   *Context flow run Id (optional).
-   * @param appId
-   *Context app Id (mandatory)
-   * @param entityType
-   *Entity type (mandatory)
-   * @param entityId
-   *Entity Id (mandatory)
-   * @param confsToRetrieve
-   *Used for deciding which configs to return in response. This is
-   *represented as a {@link TimelineFilterList} object containing
-   *{@link TimelinePrefixFilter} objects. These can either be exact config
-   *keys' or prefixes which are then compared against config keys' to 
decide
-   *configs to return in response.
-   * @param metricsToRetrieve
-   *Used for deciding which metrics to return in response. This is
-   *represented as a {@link TimelineFilterList} object containing
-   *{@link TimelinePrefixFilter} objects. These can either be exact metric
-   *ids' or prefixes which are then compared against metric ids' to decide
-   *metrics to return in response.
-   * @param fieldsToRetrieve
-   *Specifies which fields of the entity object to retrieve(optional), see
-   *{@link Field}. If null, retrieves 4 fields namely entity id,
-   *entity type and entity created time. All fields will be returned if
-   *{@link Field#ALL} is specified.
-   * @return a {@link TimelineEntity} instance or null. The entity will
-   *contain the metadata plus the given fields to retrieve.
+   * The API to fetch the single entity given the identifier (depending on
+   * the entity type) in the scope of the given context.
+   * @param context Context which defines the scope in which query has to be
+   *made. Use getters of {@link TimelineReaderContext} to fetch context
+   *fields. Context contains the following :
+   *
+   *entityType - Entity type (mandatory).
+   *clusterId - Identifies the cluster (mandatory).
+   *userId - Identifies the user.
+   *flowName - Context flow name.
+   *flowRunId - Context flow run id.
+   *appId - Context app id.
+   *entityId - Entity id.
+   *
+   *Fields in context which are mandatory depends on entity type. 

[3/3] hadoop git commit: YARN-4446. Refactor reader API for better extensibility (Varun Saxena via sjlee)

2016-02-03 Thread sjlee
YARN-4446. Refactor reader API for better extensibility (Varun Saxena via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10a4f8ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10a4f8ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10a4f8ae

Branch: refs/heads/YARN-2928
Commit: 10a4f8ae63db4e256404f81a79ecd17f7eafc054
Parents: e2e5a9a
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 3 16:03:55 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 3 16:03:55 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../reader/TimelineDataToRetrieve.java  | 119 ++
 .../reader/TimelineEntityFilters.java   | 187 +
 .../reader/TimelineReaderContext.java   |  10 +
 .../reader/TimelineReaderManager.java   |  44 +-
 .../reader/TimelineReaderUtils.java |   4 +-
 .../reader/TimelineReaderWebServices.java   | 417 ---
 .../reader/TimelineReaderWebServicesUtils.java  |  68 +++
 .../storage/FileSystemTimelineReaderImpl.java   | 116 +++---
 .../storage/HBaseTimelineReaderImpl.java|  36 +-
 .../timelineservice/storage/TimelineReader.java | 234 +--
 .../storage/reader/ApplicationEntityReader.java | 204 -
 .../reader/FlowActivityEntityReader.java|  59 +--
 .../storage/reader/FlowRunEntityReader.java | 101 ++---
 .../storage/reader/GenericEntityReader.java | 192 -
 .../storage/reader/TimelineEntityReader.java| 101 ++---
 .../reader/TimelineEntityReaderFactory.java |  74 ++--
 .../TestFileSystemTimelineReaderImpl.java   | 156 ---
 .../storage/TestHBaseTimelineStorage.java   | 252 ++-
 .../flow/TestHBaseStorageFlowActivity.java  |  33 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  48 ++-
 21 files changed, 1370 insertions(+), 1088 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a4f8ae/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 03dc60f..ae5d907 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -164,6 +164,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3586. RM to only get back addresses of Collectors that NM needs to 
know.
 (Junping Du via varunsaxena)
 
+YARN-4446. Refactor reader API for better extensibility (Varun Saxena via
+sjlee)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a4f8ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
new file mode 100644
index 000..0cc83d7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filt

[1/3] hadoop git commit: YARN-4446. Refactor reader API for better extensibility (Varun Saxena via sjlee)

2016-02-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 e2e5a9aed -> 10a4f8ae6


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a4f8ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
index b7804e7..a8a2ff8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
@@ -37,6 +37,9 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.AfterClass;
@@ -258,9 +261,10 @@ public class TestFileSystemTimelineReaderImpl {
   public void testGetEntityDefaultView() throws Exception {
 // If no fields are specified, entity is returned with default view i.e.
 // only the id, type and created time.
-TimelineEntity result =
-reader.getEntity("user1", "cluster1", "flow1", 1L, "app1",
-"app", "id_1", null, null, null);
+TimelineEntity result = reader.getEntity(
+new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
+"app", "id_1"),
+new TimelineDataToRetrieve(null, null, null));
 Assert.assertEquals(
 (new TimelineEntity.Identifier("app", "id_1")).toString(),
 result.getIdentifier().toString());
@@ -272,9 +276,10 @@ public class TestFileSystemTimelineReaderImpl {
   @Test
   public void testGetEntityByClusterAndApp() throws Exception {
 // Cluster and AppId should be enough to get an entity.
-TimelineEntity result =
-reader.getEntity(null, "cluster1", null, null, "app1",
-"app", "id_1", null, null, null);
+TimelineEntity result = reader.getEntity(
+new TimelineReaderContext("cluster1", null, null, null, "app1", "app",
+"id_1"),
+new TimelineDataToRetrieve(null, null, null));
 Assert.assertEquals(
 (new TimelineEntity.Identifier("app", "id_1")).toString(),
 result.getIdentifier().toString());
@@ -288,9 +293,10 @@ public class TestFileSystemTimelineReaderImpl {
   public void testAppFlowMappingCsv() throws Exception {
 // Test getting an entity by cluster and app where flow entry
 // in app flow mapping csv has commas.
-TimelineEntity result =
-reader.getEntity(null, "cluster1", null, null, "app2",
-"app", "id_5", null, null, null);
+TimelineEntity result = reader.getEntity(
+new TimelineReaderContext("cluster1", null, null, null, "app2",
+"app", "id_5"),
+new TimelineDataToRetrieve(null, null, null));
 Assert.assertEquals(
 (new TimelineEntity.Identifier("app", "id_5")).toString(),
 result.getIdentifier().toString());
@@ -300,10 +306,11 @@ public class TestFileSystemTimelineReaderImpl {
   @Test
   public void testGetEntityCustomFields() throws Exception {
 // Specified fields in addition to default view will be returned.
-TimelineEntity result =
-reader.getEntity("user1", "cluster1", "flow1", 1L,
-"app1", "app", "id_1", null, null,
-EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS));
+TimelineEntity result = reader.getEntity(
+new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
+"app", "id_1"),
+new TimelineDataToRetrieve(null, null,
+EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS)));
 Assert.assertEquals(
 (new TimelineEntity.Identifier("app", "id_1")).toString(),
 result.getIdentifier().toString());
@@ -318,9 +325,10 @@ public class TestFileSystemTimelineReaderImpl {
   

hadoop git commit: HADOOP-12761. incremental maven build is not really incremental (sjlee)

2016-02-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 af9de19cf -> 5131ee56a


HADOOP-12761. incremental maven build is not really incremental (sjlee)

(cherry picked from commit 4dc0a3949386ce2961356143a5a843dd537829dc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5131ee56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5131ee56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5131ee56

Branch: refs/heads/branch-2.7
Commit: 5131ee56a63561abe0c8547b7e96e3591fa2999c
Parents: af9de19
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 3 08:58:49 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 3 09:03:03 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml  | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5131ee56/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 10ae05a..0324137 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -45,6 +45,8 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
+HADOOP-12761. incremental maven build is not really incremental (sjlee)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5131ee56/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3beab18..ce51d61 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -961,6 +961,7 @@
   
 ${javac.version}
 ${javac.version}
+false
   
 
 



hadoop git commit: HADOOP-12761. incremental maven build is not really incremental (sjlee)

2016-02-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c2ec24198 -> b49ac969b


HADOOP-12761. incremental maven build is not really incremental (sjlee)

(cherry picked from commit 4dc0a3949386ce2961356143a5a843dd537829dc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b49ac969
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b49ac969
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b49ac969

Branch: refs/heads/branch-2.8
Commit: b49ac969bcabc9e0d05ddd8d4f5b476c6230b62d
Parents: c2ec241
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 3 08:58:49 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 3 09:02:02 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml  | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49ac969/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 165fc6f..9320032 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1014,6 +1014,8 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
+HADOOP-12761. incremental maven build is not really incremental (sjlee)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49ac969/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f9a0ee4..af0ef02 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -994,6 +994,7 @@
   
 ${javac.version}
 ${javac.version}
+false
   
 
 



hadoop git commit: HADOOP-12761. incremental maven build is not really incremental (sjlee)

2016-02-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1adb64e09 -> 4dc0a3949


HADOOP-12761. incremental maven build is not really incremental (sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4dc0a394
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4dc0a394
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4dc0a394

Branch: refs/heads/trunk
Commit: 4dc0a3949386ce2961356143a5a843dd537829dc
Parents: 1adb64e
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 3 08:58:49 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 3 08:58:49 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml  | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dc0a394/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 385e63d..21d8202 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1706,6 +1706,8 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
+HADOOP-12761. incremental maven build is not really incremental (sjlee)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dc0a394/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3f4e93f..1f391db 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1005,6 +1005,7 @@
   
 ${javac.version}
 ${javac.version}
+false
   
 
 



hadoop git commit: HADOOP-12761. incremental maven build is not really incremental (sjlee)

2016-02-03 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e211ab777 -> b0ab6df07


HADOOP-12761. incremental maven build is not really incremental (sjlee)

(cherry picked from commit 4dc0a3949386ce2961356143a5a843dd537829dc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0ab6df0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0ab6df0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0ab6df0

Branch: refs/heads/branch-2
Commit: b0ab6df0795aa3e89887e8fc81d59e5d571760d7
Parents: e211ab7
Author: Sangjin Lee <sj...@apache.org>
Authored: Wed Feb 3 08:58:49 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Feb 3 09:00:41 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml  | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ab6df0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c79c460..4271c36 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1070,6 +1070,8 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
+HADOOP-12761. incremental maven build is not really incremental (sjlee)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ab6df0/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c641ef9..50e6d2c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -994,6 +994,7 @@
   
 ${javac.version}
 ${javac.version}
+false
   
 
 



[hadoop] Git Push Summary

2016-01-20 Thread sjlee
Repository: hadoop
Updated Tags:  refs/tags/feature_YARN-2928-2016-01-20 [created] cab8a1d1e


[hadoop] Git Push Summary

2016-01-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 [created] cab8a1d1e


[hadoop] Git Push Summary

2016-01-15 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 [deleted] 3c4e424ed


[hadoop] Git Push Summary

2016-01-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928-rebase [deleted] 28fc7b140


[hadoop] Git Push Summary

2016-01-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928-new [deleted] 0db9ecb3d


[hadoop] Git Push Summary

2016-01-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928-old [deleted] 5a3db963b


[hadoop] Git Push Summary

2016-01-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928-old-2015-11-09 [deleted] 3c4e424ed


hadoop git commit: YARN-3995. Some of the NM events are not getting published due race condition when AM container finishes in NM (Naganarasimha G R via sjlee)

2016-01-11 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/feature-YARN-2928 00452d3c7 -> 36d74ec41


YARN-3995. Some of the NM events are not getting published due race condition 
when AM container finishes in NM (Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36d74ec4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36d74ec4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36d74ec4

Branch: refs/heads/feature-YARN-2928
Commit: 36d74ec41f1aac97ff3138e8e893cd6ac5bab608
Parents: 00452d3
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon Jan 11 10:09:34 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon Jan 11 10:09:34 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../hadoop/yarn/conf/YarnConfiguration.java |  5 
 .../src/main/resources/yarn-default.xml |  7 ++
 .../PerNodeTimelineCollectorsAuxService.java| 25 +---
 ...TestPerNodeTimelineCollectorsAuxService.java | 11 +
 5 files changed, 38 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36d74ec4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5ff425c..cf4522a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -208,6 +208,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-4350. TestDistributedShell fails for V2 scenarios. (Naganarasimha G R
 via varunsaxena)
 
+YARN-3995. Some of the NM events are not getting published due race
+condition when AM container finishes in NM (Naganarasimha G R via sjlee)
+
 Trunk - Unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36d74ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6a3854a..da9acb1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1610,6 +1610,11 @@ public class YarnConfiguration extends Configuration {
   public static final int
   DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS = 60;
 
+  public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
+  TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
+
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36d74ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a9adbbf..13b952e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1984,6 +1984,13 @@
 60
   
 
+  
+Time period till which the application collector will be alive
+ in NM, after the  application master container finishes.
+yarn.timeline-service.app-collector.linger-period.ms
+1000
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36d74ec4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 0319e34..b738530 100644
--- 
a/h

hadoop git commit: MAPREDUCE-6577. MR AM unable to load native library without MR_AM_ADMIN_USER_ENV set (sjlee)

2016-01-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 49ba43e05 -> ae535ec93


MAPREDUCE-6577. MR AM unable to load native library without 
MR_AM_ADMIN_USER_ENV set (sjlee)

(cherry picked from commit f6f16118d38fcfe3b724f05fad752cb223f441ec)
(cherry picked from commit 28fd4c70cab27c2872dc6a63ddee223ccf53f21d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae535ec9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae535ec9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae535ec9

Branch: refs/heads/branch-2.8
Commit: ae535ec93e304c1e5832334861aba01532e49092
Parents: 49ba43e
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Jan 5 15:22:50 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Wed Jan 6 08:44:37 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../apache/hadoop/mapreduce/MRJobConfig.java| 21 --
 .../org/apache/hadoop/mapred/YARNRunner.java|  5 +-
 .../hadoop/mapred/TestMiniMRChildTask.java  | 68 ++--
 .../apache/hadoop/mapred/TestYARNRunner.java| 47 +++---
 hadoop-project/pom.xml  |  1 +
 6 files changed, 95 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae535ec9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f19ff50..664909d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -689,6 +689,9 @@ Release 2.6.4 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6577. MR AM unable to load native library without
+MR_AM_ADMIN_USER_ENV set (sjlee)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae535ec9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 0ed2f29..45033ff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -731,6 +731,16 @@ public interface MRJobConfig {
   public static final String MR_AM_ADMIN_USER_ENV =
   MR_AM_PREFIX + "admin.user.env";
 
+  // although the AM admin user env default should be the same as the task user
+  // env default, there are problems in making it work on Windows currently
+  // MAPREDUCE-6588 should address the issue and set it to a proper non-empty
+  // value
+  public static final String DEFAULT_MR_AM_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
+
   public static final String MR_AM_PROFILE = MR_AM_PREFIX + "profile";
   public static final boolean DEFAULT_MR_AM_PROFILE = false;
   public static final String MR_AM_PROFILE_PARAMS = MR_AM_PREFIX
@@ -754,10 +764,13 @@ public interface MRJobConfig {
   public static final String MAPRED_ADMIN_USER_ENV =
   "mapreduce.admin.user.env";
 
-  public final String DEFAULT_MAPRED_ADMIN_USER_ENV = 
-  Shell.WINDOWS ? 
-  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin":
-  "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
+  // the "%...%" macros can be expanded prematurely and are probably not OK
+  // this should be addressed by MAPREDUCE-6588
+  public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
 
   public static final String WORKDIR = "work";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae535ec9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main

hadoop git commit: MAPREDUCE-6577. MR AM unable to load native library without MR_AM_ADMIN_USER_ENV set (sjlee)

2016-01-05 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk dec8fedb6 -> f6f16118d


MAPREDUCE-6577. MR AM unable to load native library without 
MR_AM_ADMIN_USER_ENV set (sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6f16118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6f16118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6f16118

Branch: refs/heads/trunk
Commit: f6f16118d38fcfe3b724f05fad752cb223f441ec
Parents: dec8fed
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Jan 5 15:22:50 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue Jan 5 15:22:50 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../apache/hadoop/mapreduce/MRJobConfig.java| 21 --
 .../org/apache/hadoop/mapred/YARNRunner.java|  5 +-
 .../hadoop/mapred/TestMiniMRChildTask.java  | 68 ++--
 .../apache/hadoop/mapred/TestYARNRunner.java| 47 +++---
 hadoop-project/pom.xml  |  1 +
 6 files changed, 95 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f16118/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d04ce5d..ce22824 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -315,6 +315,9 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6577. MR AM unable to load native library without
+MR_AM_ADMIN_USER_ENV set (sjlee)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f16118/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 3d1e841..c98746a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -740,6 +740,16 @@ public interface MRJobConfig {
   public static final String MR_AM_ADMIN_USER_ENV =
   MR_AM_PREFIX + "admin.user.env";
 
+  // although the AM admin user env default should be the same as the task user
+  // env default, there are problems in making it work on Windows currently
+  // MAPREDUCE-6588 should address the issue and set it to a proper non-empty
+  // value
+  public static final String DEFAULT_MR_AM_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
+
   public static final String MR_AM_PROFILE = MR_AM_PREFIX + "profile";
   public static final boolean DEFAULT_MR_AM_PROFILE = false;
   public static final String MR_AM_PROFILE_PARAMS = MR_AM_PREFIX
@@ -763,10 +773,13 @@ public interface MRJobConfig {
   public static final String MAPRED_ADMIN_USER_ENV =
   "mapreduce.admin.user.env";
 
-  public final String DEFAULT_MAPRED_ADMIN_USER_ENV = 
-  Shell.WINDOWS ? 
-  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin":
-  "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
+  // the "%...%" macros can be expanded prematurely and are probably not OK
+  // this should be addressed by MAPREDUCE-6588
+  public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
 
   public static final String WORKDIR = "work";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f16118/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java

hadoop git commit: MAPREDUCE-6577. MR AM unable to load native library without MR_AM_ADMIN_USER_ENV set (sjlee)

2016-01-05 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 150c9a80e -> 2dfa0613a


MAPREDUCE-6577. MR AM unable to load native library without 
MR_AM_ADMIN_USER_ENV set (sjlee)

(cherry picked from commit f6f16118d38fcfe3b724f05fad752cb223f441ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2dfa0613
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2dfa0613
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2dfa0613

Branch: refs/heads/branch-2
Commit: 2dfa0613a1588b8def83255d33143b9e571d77d7
Parents: 150c9a8
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Jan 5 15:22:50 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue Jan 5 15:25:47 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../apache/hadoop/mapreduce/MRJobConfig.java| 21 --
 .../org/apache/hadoop/mapred/YARNRunner.java|  5 +-
 .../hadoop/mapred/TestMiniMRChildTask.java  | 68 ++--
 .../apache/hadoop/mapred/TestYARNRunner.java| 47 +++---
 hadoop-project/pom.xml  |  1 +
 6 files changed, 95 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dfa0613/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 74686c1..99444a1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6577. MR AM unable to load native library without
+MR_AM_ADMIN_USER_ENV set (sjlee)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dfa0613/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 0ed2f29..45033ff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -731,6 +731,16 @@ public interface MRJobConfig {
   public static final String MR_AM_ADMIN_USER_ENV =
   MR_AM_PREFIX + "admin.user.env";
 
+  // although the AM admin user env default should be the same as the task user
+  // env default, there are problems in making it work on Windows currently
+  // MAPREDUCE-6588 should address the issue and set it to a proper non-empty
+  // value
+  public static final String DEFAULT_MR_AM_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
+
   public static final String MR_AM_PROFILE = MR_AM_PREFIX + "profile";
   public static final boolean DEFAULT_MR_AM_PROFILE = false;
   public static final String MR_AM_PROFILE_PARAMS = MR_AM_PREFIX
@@ -754,10 +764,13 @@ public interface MRJobConfig {
   public static final String MAPRED_ADMIN_USER_ENV =
   "mapreduce.admin.user.env";
 
-  public final String DEFAULT_MAPRED_ADMIN_USER_ENV = 
-  Shell.WINDOWS ? 
-  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin":
-  "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
+  // the "%...%" macros can be expanded prematurely and are probably not OK
+  // this should be addressed by MAPREDUCE-6588
+  public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
 
   public static final String WORKDIR = "work";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dfa0613/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
 
b/hadoop-mapreduce-project/h

hadoop git commit: MAPREDUCE-6577 addendum. Moved the changelog to 2.6.4 which is the earliest target.

2016-01-05 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2dfa0613a -> 745885488


MAPREDUCE-6577 addendum. Moved the changelog to 2.6.4 which is the earliest 
target.

(cherry picked from commit 355c0ce7237c5208557bb32ec0423819bb9b47b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74588548
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74588548
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74588548

Branch: refs/heads/branch-2
Commit: 7458854888fa17d15d9719bd5b9d2f922b8f
Parents: 2dfa061
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Jan 5 15:43:10 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue Jan 5 15:45:01 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74588548/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 99444a1..f8a734c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -12,9 +12,6 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
-MAPREDUCE-6577. MR AM unable to load native library without
-MR_AM_ADMIN_USER_ENV set (sjlee)
-
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -701,12 +698,15 @@ Release 2.6.4 - UNRELEASED
 
   IMPROVEMENTS
 
-  MAPREDUCE-6436. JobHistory cache issue. (Kai Sasaki via zxu)
+MAPREDUCE-6436. JobHistory cache issue. (Kai Sasaki via zxu)
 
   OPTIMIZATIONS
 
   BUG FIXES
 
+MAPREDUCE-6577. MR AM unable to load native library without
+MR_AM_ADMIN_USER_ENV set (sjlee)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES



hadoop git commit: MAPREDUCE-6577. MR AM unable to load native library without MR_AM_ADMIN_USER_ENV set (sjlee)

2016-01-05 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 68c68e187 -> 28fd4c70c


MAPREDUCE-6577. MR AM unable to load native library without 
MR_AM_ADMIN_USER_ENV set (sjlee)

(cherry picked from commit f6f16118d38fcfe3b724f05fad752cb223f441ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28fd4c70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28fd4c70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28fd4c70

Branch: refs/heads/branch-2.7
Commit: 28fd4c70cab27c2872dc6a63ddee223ccf53f21d
Parents: 68c68e1
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Jan 5 15:22:50 2016 -0800
Committer: Sangjin Lee <sj...@apache.org>
Committed: Tue Jan 5 16:01:09 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../apache/hadoop/mapreduce/MRJobConfig.java| 21 --
 .../org/apache/hadoop/mapred/YARNRunner.java|  5 +-
 .../hadoop/mapred/TestMiniMRChildTask.java  | 68 ++--
 .../apache/hadoop/mapred/TestYARNRunner.java| 47 +++---
 hadoop-project/pom.xml  |  1 +
 6 files changed, 95 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fd4c70/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ae5c239..d8a6f29 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -315,6 +315,9 @@ Release 2.6.4 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6577. MR AM unable to load native library without
+MR_AM_ADMIN_USER_ENV set (sjlee)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fd4c70/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index fbe0a06..fd29a2b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -677,6 +677,16 @@ public interface MRJobConfig {
   public static final String MR_AM_ADMIN_USER_ENV =
   MR_AM_PREFIX + "admin.user.env";
 
+  // although the AM admin user env default should be the same as the task user
+  // env default, there are problems in making it work on Windows currently
+  // MAPREDUCE-6588 should address the issue and set it to a proper non-empty
+  // value
+  public static final String DEFAULT_MR_AM_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
+
   public static final String MR_AM_PROFILE = MR_AM_PREFIX + "profile";
   public static final boolean DEFAULT_MR_AM_PROFILE = false;
   public static final String MR_AM_PROFILE_PARAMS = MR_AM_PREFIX
@@ -700,10 +710,13 @@ public interface MRJobConfig {
   public static final String MAPRED_ADMIN_USER_ENV =
   "mapreduce.admin.user.env";
 
-  public final String DEFAULT_MAPRED_ADMIN_USER_ENV = 
-  Shell.WINDOWS ? 
-  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin":
-  "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native";
+  // the "%...%" macros can be expanded prematurely and are probably not OK
+  // this should be addressed by MAPREDUCE-6588
+  public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
+  Shell.WINDOWS ?
+  "PATH=%PATH%;%HADOOP_COMMON_HOME%\\bin" :
+  "LD_LIBRARY_PATH=" + Apps.crossPlatformify("HADOOP_COMMON_HOME") +
+  "/lib/native";
 
   public static final String WORKDIR = "work";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fd4c70/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
 
b/hadoop-mapreduce

<    1   2   3   4   5   6   7   8   9   10   >