http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/ams_query.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/ams_query.py b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/ams_query.py
new file mode 100644
index 0000000..d51357a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/ams_query.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import urllib2
+import signal
+import sys
+import optparse
+import time
+
+# http://162.216.148.45:8188/ws/v1/timeline/metrics?
+# metricNames=rpc.rpc.RpcAuthenticationSuccesses
+# &appId=nodemanager&hostname=local.0&startTime=1414152029&endTime=1414155629
+
+AMS_URL = "http://{0}:8188/ws/v1/timeline/metrics?metricNames={1}&appid={"; \
+          "2}&hostname={3}"
+
+# in fact this list could be generated automatically from Ambari
+# UI queries
+host_metrics = {
+  'cpu': ['cpu_user', 'cpu_wio', 'cpu_nice', 'cpu_aidle', 'cpu_system', 'cpu_idle'],
+  'disk': ['disk_total', 'disk_free'],
+  'load': ['load_one', 'load_fifteen', 'load_five'],
+  'mem': ['swap_free', 'mem_shared', 'mem_free', 'mem_cached', 'mem_buffers'],
+  'network': ['bytes_in', 'bytes_out', 'pkts_in', 'pkts_out'],
+  'process': ['proc_total', 'proc_run']
+}
+
+# HDFS_SERVICE
+namenode_metrics = {
+  'dfs.Capacity': ['dfs.FSNamesystem.CapacityRemainingGB',
+                   'dfs.FSNamesystem.CapacityUsedGB',
+                   'dfs.FSNamesystem.CapacityTotalGB'],
+  'dfs.Replication': ['dfs.FSNamesystem.PendingReplicationBlocks',
+                      'dfs.FSNamesystem.UnderReplicatedBlocks'],
+  'dfs.File': ['dfs.namenode.FileInfoOps', 'dfs.namenode.CreateFileOps'],
+  'jvm.gc': ['jvm.JvmMetrics.GcTimeMillis'],
+  'jvm.mem': ['jvm.JvmMetrics.MemNonHeapUsedM',
+              'jvm.JvmMetrics.MemNonHeapCommittedM',
+              'jvm.JvmMetrics.MemHeapUsedM',
+              'jvm.JvmMetrics.MemHeapCommittedM'],
+  'jvm.thread': ['jvm.JvmMetrics.ThreadsRunnable',
+                 'jvm.JvmMetrics.ThreadsBlocked',
+                 'jvm.JvmMetrics.ThreadsWaiting',
+                 'jvm.JvmMetrics.ThreadsTimedWaiting'],
+  'rpc': ['rpc.rpc.RpcQueueTimeAvgTime']
+}
+
+all_metrics = {
+  'HOST': host_metrics,
+  'namenode': namenode_metrics
+}
+
+all_metrics_times = {}
+
+
+# hostnames = ['EPPLKRAW0101.0']  # 'local.0'
+# metrics_test_host = '162.216.150.247' # metricstest-100
+# metrics_test_host = '162.216.148.45'    # br-3
+# start_time = int(time.time())           # 1414425208
+
+
+def main(argv=None):
+  # Allow Ctrl-C
+  signal.signal(signal.SIGINT, signal_handler)
+
+  parser = optparse.OptionParser()
+
+  parser.add_option("-H", "--host", dest="host",
+                    help="AMS host")
+  parser.add_option("-t", "--starttime", dest="start_time_secs",
+                    default=int(time.time()),
+                    help="start time in seconds, default value is current 
time")
+  parser.add_option("-n", "--nodes", dest="node_names",
+                    help="nodes from cluster, used as a param to query for")
+  (options, args) = parser.parse_args()
+
+  if options.host is None:
+    print "AMS host name is required (--host or -h)"
+    exit(-1)
+
+  if options.node_names is None:
+    print "cluster nodes are required (--nodes or -n)"
+    exit(3)
+
+  global start_time_secs, metrics_test_host, hostnames
+
+  metrics_test_host = options.host
+  start_time_secs = int(options.start_time_secs)
+  hostnames = [options.node_names]
+
+  while True:
+    run()
+    time.sleep(15)
+    start_time_secs += 15
+
+
+def signal_handler(signal, frame):
+  print('Exiting, Ctrl+C press detected!')
+  print_all_metrics(all_metrics_times)
+  sys.exit(0)
+
+
+def run():
+  hostname = ','.join(hostnames)
+  qs = QuerySender(metrics_test_host, True)
+  for metric_name in all_metrics:
+    print
+    print 'Querying for ' + metric_name + ' metrics'
+    current_time_secs = start_time_secs
+    qs.query_all_app_metrics(hostname, metric_name,
+                             all_metrics[metric_name],
+                             current_time_secs)
+
+
+def add_query_metrics_for_app_id(app_id, metric_timing):
+  if app_id not in all_metrics_times:
+    all_metrics_times[app_id] = []
+  all_metrics_times[app_id].append(metric_timing)
+
+
+def print_all_metrics(metrics):
+  print 'Metrics Summary'
+  for app_id in sorted(metrics):
+    first = True
+    for single_query_metrics in metrics[app_id]:
+      print_app_metrics(app_id, single_query_metrics, first)
+      first = False
+
+
+def print_app_metrics(app_id, metric_timing, header=False):
+  #header
+  if header:
+    print app_id + ': ' + ','.join(sorted(metric_timing.keys()))
+  #vals
+  print app_id + ':',
+  for key in sorted(metric_timing):
+    print '%.3f,' % metric_timing[key],
+  print
+
+
+class QuerySender:
+  def __init__(self, metrics_address, print_responses=False):
+    self.metrics_address = metrics_address
+    self.print_responses = print_responses
+
+  def query_all_app_metrics(self, hostname, app_id, metrics, current_time_secs):
+    metric_timing = {}
+    for key in metrics:
+      print 'Getting metrics for', key
+      query_time = time.time()
+
+      metric_names = ','.join(metrics[key])
+      self.query(hostname, app_id, metric_names, current_time_secs)
+      query_time_elapsed = time.time() - query_time
+
+      print 'Query for "%s" took %s' % (key, query_time_elapsed)
+      metric_timing[key] = query_time_elapsed
+
+    add_query_metrics_for_app_id(app_id, metric_timing)
+    if self.print_responses:
+      print_app_metrics(app_id, metric_timing)
+
+  def query(self, hostname, app_id, metric_names, current_time_secs):
+    url = self.create_url(hostname, metric_names, app_id, current_time_secs)
+    print url
+    response = self.send(url)
+    if self.print_responses:
+      print response
+    pass
+
+  def send(self, url):
+    request = urllib2.Request(url)
+    try:
+      response = urllib2.urlopen(request, timeout=int(30))
+      response = response.read()
+      return response
+
+    except urllib2.URLError as e:
+      print e.reason
+
+  def create_url(self, hostname, metric_names, app_id, current_time_secs):
+    server = AMS_URL.format(self.metrics_address,
+                            metric_names,
+                            app_id,
+                            hostname)
+    t = current_time_secs
+    server += '&startTime=%s&endTime=%s' % (t, t + 3600)
+    return server
+
+
+if __name__ == '__main__':
+  main()
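
A hypothetical invocation of the script above (host names are placeholders;
the script polls every 15 seconds until interrupted with Ctrl-C, then prints
a timing summary):

    python ams_query.py -H ams.example.com -n node1.example.com -t 1414152029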

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start.sh b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start.sh
new file mode 100644
index 0000000..1d6fff5
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# set -x
+cd "$(dirname "$0")"
+
+METRICS_HOST=$1
+HOST_COUNT=$2
+
+echo $$ > sim_pid
+cat sim_pid
+#HOMEDIR
+exec java -jar ../lib/ambari-metrics/ambari-metrics-timelineservice-simulator*.jar -h `hostname -f` -n ${HOST_COUNT} -m ${METRICS_HOST} -c 15000 -s 60000
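
A hypothetical invocation, assuming the simulator jar has already been
deployed under ../lib/ambari-metrics/ relative to this script:

    ./start.sh metrics.example.com 20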

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start_slaves.sh b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start_slaves.sh
new file mode 100644
index 0000000..e1e51c8
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/start_slaves.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+## set -x
+
+SLAVES=$1
+METRICS_HOST=$2
+HOST_COUNT=$3
+LOADSIM_HOMEDIR="LoadSimulator-1.0-SNAPSHOT"
+
+pdsh -w ${SLAVES} "$LOADSIM_HOMEDIR/start.sh ${METRICS_HOST} ${HOST_COUNT}"
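
A hypothetical invocation, assuming pdsh is installed and start.sh is
deployed under LoadSimulator-1.0-SNAPSHOT on every slave:

    ./start_slaves.sh "slave[01-05].example.com" metrics.example.com 20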

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/status_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/status_slaves.sh b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/status_slaves.sh
new file mode 100644
index 0000000..79787fd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/status_slaves.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+## set -x
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop.sh b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop.sh
new file mode 100644
index 0000000..2220861
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# set -x
+cd "$(dirname "$0")"
+read PID <./sim_pid
+
+FOUND_PID=`ps aux | grep simulator | grep $PID`
+if [ -z "${FOUND_PID}" ]
+then
+   echo "process simulator not fund under pid $PID"
+else
+   echo "process simulator running as $PID, killing"
+   kill ${PID}
+fi
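
The script reads the pid written by start.sh, so it is expected to run from
the same directory, e.g.:

    ./stop.sh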

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop_slaves.sh
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop_slaves.sh b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop_slaves.sh
new file mode 100644
index 0000000..7cb567a
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/resources/scripts/stop_slaves.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# set -x
+
+SLAVES=$1
+LOADSIM_HOMEDIR="LoadSimulator-1.0-SNAPSHOT"
+
+pdsh -w ${SLAVES} "$LOADSIM_HOMEDIR/stop.sh"
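
A hypothetical invocation, mirroring start_slaves.sh:

    ./stop_slaves.sh "slave[01-05].example.com"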

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/ams-site.xml b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/ams-site.xml
new file mode 100644
index 0000000..b08609d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/ams-site.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<!-- Empty site, all defaults should be initialized in the code -->
+<configuration>
+  <property>
+    <name>test</name>
+    <value>testReady</value>
+  </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hadoop-policy.xml b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hadoop-policy.xml
new file mode 100644
index 0000000..41bde16
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hbase-site.xml b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hbase-site.xml
new file mode 100644
index 0000000..c453900
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/conf/hbase-site.xml
@@ -0,0 +1,230 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>file:///var/lib/hbase</value>
+    <description>
+      AMS service uses HBase as default storage backend. Set the rootdir for
+      HBase to either local filesystem path if using AMS in embedded mode or
+      to a HDFS dir, example: hdfs://namenode.example.org:9000/hbase.  By
+      default HBase writes into /tmp. Change this configuration else all data
+      will be lost on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/tmp</value>
+    <description>
+      Temporary directory on the local filesystem.
+      Change this setting to point to a location more permanent
+      than '/tmp' (The '/tmp' directory is often cleared on
+      machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as a local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>
+      The mode the cluster will be in. Possible values are false for
+      standalone mode and true for distributed mode. If false, startup will run
+      all HBase and ZooKeeper daemons together in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.mintostart</name>
+    <value>1</value>
+    <description>
+      Ensure that HBase Master waits for this many region servers to start.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+      By default this is set to localhost for local and pseudo-distributed modes
+      of operation. For a fully-distributed setup, this should be set to a full
+      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+      this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI</description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>90010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>90030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>0</value>
+    <description>
+      The time (in milliseconds) between 'major' compactions of all
+      HStoreFiles in a region.
+      0 to disable automated major compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>12582912</value>
+    <description>
+      Threshold size in bytes after which results of parallelly executed
+      queries are spooled to disk. Default is 20 mb.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>${hbase.tmp.dir}/zookeeper</value>
+    <description>
+      Property from ZooKeeper's config zoo.cfg.
+      The directory where the snapshot is stored.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>10000</value>
+    <description>
+      Number of rows that will be fetched when calling next on a scanner
+      if it is not served from (local, client) memory.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.3</value>
+    <description>
+      Percentage of maximum heap (-Xmx setting) to allocate to block cache
+      used by a StoreFile. Default of 0.4 means allocate 40%.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.5</value>
+    <description>
+      Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.4</value>
+    <description>
+      When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.groupby.maxCacheSize</name>
+    <value>307200000</value>
+    <description>
+      Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+      Block updates if memstore has hbase.hregion.memstore.block.multiplier
+      times hbase.hregion.memstore.flush.size bytes. Useful preventing runaway
+      memstore during spikes in update traffic.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flusher.count</name>
+    <value>2</value>
+    <description>
+      The number of flush threads. With fewer threads, the MemStore flushes
+      will be queued. With more threads, the flushes will be executed in parallel,
+      increasing the load on HDFS, and potentially causing more compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>1200000</value>
+    <description>
+      Number of milliseconds after which a query will timeout on the client.
+      Default is 10 min.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>900000</value>
+    <description>
+      Client scanner lease period in milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.large</name>
+    <value>2</value>
+    <description>
+      Configuration key for the large compaction threads.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.small</name>
+    <value>3</value>
+    <description>
+      Configuration key for the small compaction threads.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>200</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store
+      (one StoreFile is written per flush of MemStore), updates are blocked for
+      this region until a compaction is completed, or until
+      hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      Memstore will be flushed to disk if size of the memstore exceeds this
+      number of bytes. Value is checked by a thread that runs every
+      hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+</configuration>
\ No newline at end of file
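
As the hbase.rootdir description above notes, a non-embedded AMS deployment
would point HBase at HDFS instead of the local filesystem; a hypothetical
override (the namenode address is the placeholder from that description):

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://namenode.example.org:9000/hbase</value>
  </property>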

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
new file mode 100644
index 0000000..c41b8a7
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
+
+public class ApplicationHistoryStoreTestUtils {
+
+  protected ApplicationHistoryStore store;
+
+  protected void writeApplicationStartData(ApplicationId appId)
+      throws IOException {
+    store.applicationStarted(ApplicationStartData.newInstance(appId,
+      appId.toString(), "test type", "test queue", "test user", 0, 0));
+  }
+
+  protected void writeApplicationFinishData(ApplicationId appId)
+      throws IOException {
+    store.applicationFinished(ApplicationFinishData.newInstance(appId, 0,
+      appId.toString(), FinalApplicationStatus.UNDEFINED,
+      YarnApplicationState.FINISHED));
+  }
+
+  protected void writeApplicationAttemptStartData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
+      appAttemptId, appAttemptId.toString(), 0,
+      ContainerId.newInstance(appAttemptId, 1)));
+  }
+
+  protected void writeApplicationAttemptFinishData(
+      ApplicationAttemptId appAttemptId) throws IOException {
+    store.applicationAttemptFinished(ApplicationAttemptFinishData.newInstance(
+      appAttemptId, appAttemptId.toString(), "test tracking url",
+      FinalApplicationStatus.UNDEFINED, YarnApplicationAttemptState.FINISHED));
+  }
+
+  protected void writeContainerStartData(ContainerId containerId)
+      throws IOException {
+    store.containerStarted(ContainerStartData.newInstance(containerId,
+      Resource.newInstance(0, 0), NodeId.newInstance("localhost", 0),
+      Priority.newInstance(containerId.getId()), 0));
+  }
+
+  protected void writeContainerFinishData(ContainerId containerId)
+      throws IOException {
+    store.containerFinished(ContainerFinishData.newInstance(containerId, 0,
+      containerId.toString(), 0, ContainerState.COMPLETE));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
new file mode 100644
index 0000000..6b06918
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+// Timeline service client support is not enabled for AMS
+@Ignore
+public class TestApplicationHistoryClientService extends
+    ApplicationHistoryStoreTestUtils {
+
+  ApplicationHistoryServer historyServer = null;
+  String expectedLogUrl = null;
+
+  @Before
+  public void setup() {
+    historyServer = new ApplicationHistoryServer();
+    Configuration config = new YarnConfiguration();
+    expectedLogUrl = WebAppUtils.getHttpSchemePrefix(config) +
+        WebAppUtils.getAHSWebAppURLWithoutScheme(config) +
+        "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/" +
+        "container_0_0001_01_000001/test user";
+    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
+      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
+    historyServer.init(config);
+    historyServer.start();
+    store =
+        ((ApplicationHistoryManagerImpl) historyServer.getApplicationHistory())
+          .getHistoryStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    historyServer.stop();
+  }
+
+  @Test
+  public void testApplicationReport() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    GetApplicationReportRequest request =
+        GetApplicationReportRequest.newInstance(appId);
+    GetApplicationReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationReport(request);
+    ApplicationReport appReport = response.getApplicationReport();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals("application_0_0001", appReport.getApplicationId()
+      .toString());
+    Assert.assertEquals("test type", 
appReport.getApplicationType().toString());
+    Assert.assertEquals("test queue", appReport.getQueue().toString());
+  }
+
+  @Test
+  public void testApplications() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    ApplicationId appId1 = ApplicationId.newInstance(0, 2);
+    writeApplicationStartData(appId1);
+    writeApplicationFinishData(appId1);
+    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+    GetApplicationsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplications(request);
+    List<ApplicationReport> appReport = response.getApplicationList();
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals(appId, appReport.get(0).getApplicationId());
+    Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
+  }
+
+  @Test
+  public void testApplicationAttemptReport() throws IOException, YarnException 
{
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    GetApplicationAttemptReportRequest request =
+        GetApplicationAttemptReportRequest.newInstance(appAttemptId);
+    GetApplicationAttemptReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttemptReport(request);
+    ApplicationAttemptReport attemptReport =
+        response.getApplicationAttemptReport();
+    Assert.assertNotNull(attemptReport);
+    Assert.assertEquals("appattempt_0_0001_000001", attemptReport
+      .getApplicationAttemptId().toString());
+  }
+
+  @Test
+  public void testApplicationAttempts() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ApplicationAttemptId appAttemptId1 =
+        ApplicationAttemptId.newInstance(appId, 2);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    writeApplicationAttemptStartData(appAttemptId1);
+    writeApplicationAttemptFinishData(appAttemptId1);
+    GetApplicationAttemptsRequest request =
+        GetApplicationAttemptsRequest.newInstance(appId);
+    GetApplicationAttemptsResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getApplicationAttempts(request);
+    List<ApplicationAttemptReport> attemptReports =
+        response.getApplicationAttemptList();
+    Assert.assertNotNull(attemptReports);
+    Assert.assertEquals(appAttemptId, attemptReports.get(0)
+      .getApplicationAttemptId());
+    Assert.assertEquals(appAttemptId1, attemptReports.get(1)
+      .getApplicationAttemptId());
+  }
+
+  @Test
+  public void testContainerReport() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    writeApplicationFinishData(appId);
+    GetContainerReportRequest request =
+        GetContainerReportRequest.newInstance(containerId);
+    GetContainerReportResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainerReport(request);
+    ContainerReport container = response.getContainerReport();
+    Assert.assertNotNull(container);
+    Assert.assertEquals(containerId, container.getContainerId());
+    Assert.assertEquals(expectedLogUrl, container.getLogUrl());
+  }
+
+  @Test
+  public void testContainers() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
+    writeContainerStartData(containerId);
+    writeContainerFinishData(containerId);
+    writeContainerStartData(containerId1);
+    writeContainerFinishData(containerId1);
+    writeApplicationFinishData(appId);
+    GetContainersRequest request =
+        GetContainersRequest.newInstance(appAttemptId);
+    GetContainersResponse response =
+        historyServer.getClientService().getClientHandler()
+          .getContainers(request);
+    List<ContainerReport> containers = response.getContainerList();
+    Assert.assertNotNull(containers);
+    Assert.assertEquals(containerId, containers.get(1).getContainerId());
+    Assert.assertEquals(containerId1, containers.get(0).getContainerId());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
new file mode 100644
index 0000000..aad23d9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class TestApplicationHistoryManagerImpl extends
+    ApplicationHistoryStoreTestUtils {
+  ApplicationHistoryManagerImpl applicationHistoryManagerImpl = null;
+
+  @Before
+  public void setup() throws Exception {
+    Configuration config = new Configuration();
+    config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
+      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
+    applicationHistoryManagerImpl = new ApplicationHistoryManagerImpl();
+    applicationHistoryManagerImpl.init(config);
+    applicationHistoryManagerImpl.start();
+    store = applicationHistoryManagerImpl.getHistoryStore();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    applicationHistoryManagerImpl.stop();
+  }
+
+  @Test
+  @Ignore
+  public void testApplicationReport() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    writeApplicationAttemptStartData(appAttemptId);
+    writeApplicationAttemptFinishData(appAttemptId);
+    ApplicationReport appReport =
+        applicationHistoryManagerImpl.getApplication(appId);
+    Assert.assertNotNull(appReport);
+    Assert.assertEquals(appId, appReport.getApplicationId());
+    Assert.assertEquals(appAttemptId,
+      appReport.getCurrentApplicationAttemptId());
+    Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
+    Assert.assertEquals("test type", 
appReport.getApplicationType().toString());
+    Assert.assertEquals("test queue", appReport.getQueue().toString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
new file mode 100644
index 0000000..3720852
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
+  .timeline.DefaultPhoenixDataSource;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
+  .timeline.PhoenixHBaseAccessor;
+import org.apache.zookeeper.ClientCnxn;
+import org.easymock.EasyMock;
+import org.junit.*;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
+  .timeline.TimelineMetricConfiguration.METRICS_SITE_CONFIGURATION_FILE;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.*;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.junit.Assert.*;
+import static org.powermock.api.easymock.PowerMock.*;
+import static org.powermock.api.support.membermodification.MemberMatcher.method;
+import static org.powermock.api.support.membermodification.MemberModifier
+  .suppress;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ PhoenixHBaseAccessor.class, UserGroupInformation.class,
+  ClientCnxn.class, DefaultPhoenixDataSource.class})
+@PowerMockIgnore( {"javax.management.*"})
+public class TestApplicationHistoryServer {
+
+  ApplicationHistoryServer historyServer = null;
+  Configuration metricsConf = null;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  @Before
+  @SuppressWarnings("all")
+  public void setup() throws URISyntaxException, IOException {
+    folder.create();
+    File hbaseSite = folder.newFile("hbase-site.xml");
+    File amsSite = folder.newFile("ams-site.xml");
+
+    FileUtils.writeStringToFile(hbaseSite, "<configuration>\n" +
+      "  <property>\n" +
+      "    <name>hbase.defaults.for.version.skip</name>\n" +
+      "    <value>true</value>\n" +
+      "  </property>" +
+      "  <property> " +
+      "    <name>hbase.zookeeper.quorum</name>\n" +
+      "    <value>localhost</value>\n" +
+      "  </property>" +
+      "</configuration>");
+
+    FileUtils.writeStringToFile(amsSite, "<configuration>\n" +
+      "  <property>\n" +
+      "    <name>test</name>\n" +
+      "    <value>testReady</value>\n" +
+      "  </property>\n" +
+      "  <property>\n" +
+      "    <name>timeline.metrics.host.aggregator.hourly.disabled</name>\n" +
+      "    <value>true</value>\n" +
+      "    <description>\n" +
+      "      Disable host based hourly aggregations.\n" +
+      "    </description>\n" +
+      "  </property>\n" +
+      "  <property>\n" +
+      "    <name>timeline.metrics.host.aggregator.minute.disabled</name>\n" +
+      "    <value>true</value>\n" +
+      "    <description>\n" +
+      "      Disable host based minute aggregations.\n" +
+      "    </description>\n" +
+      "  </property>\n" +
+      "  <property>\n" +
+      "    <name>timeline.metrics.cluster.aggregator.hourly.disabled</name>\n" 
+
+      "    <value>true</value>\n" +
+      "    <description>\n" +
+      "      Disable cluster based hourly aggregations.\n" +
+      "    </description>\n" +
+      "  </property>\n" +
+      "  <property>\n" +
+      "    <name>timeline.metrics.cluster.aggregator.minute.disabled</name>\n" 
+
+      "    <value>true</value>\n" +
+      "    <description>\n" +
+      "      Disable cluster based minute aggregations.\n" +
+      "    </description>\n" +
+      "  </property>" +
+      "</configuration>");
+
+    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
+
+    // Put the generated conf dir on the classpath by chaining a URLClassLoader
+    // onto the current thread's context classloader
+    URLClassLoader urlClassLoader = null;
+    try {
+      urlClassLoader = new URLClassLoader(new URL[] {
+        folder.getRoot().toURI().toURL() }, currentClassLoader);
+    } catch (MalformedURLException e) {
+      e.printStackTrace();
+    }
+
+    Thread.currentThread().setContextClassLoader(urlClassLoader);
+    metricsConf = new Configuration(false);
+    metricsConf.addResource(Thread.currentThread().getContextClassLoader()
+      .getResource(METRICS_SITE_CONFIGURATION_FILE).toURI().toURL());
+    assertNotNull(metricsConf.get("test"));
+  }
+
+  // Simple init/start/stop test for ApplicationHistoryServer; the service
+  // state should change at each step.
+  @Test(timeout = 50000)
+  public void testStartStopServer() throws Exception {
+    Configuration config = new YarnConfiguration();
+    UserGroupInformation ugi =
+      UserGroupInformation.createUserForTesting("ambari", new String[] {"ambari"});
+
+    mockStatic(UserGroupInformation.class);
+    expect(UserGroupInformation.getCurrentUser()).andReturn(ugi).anyTimes();
+    expect(UserGroupInformation.isSecurityEnabled()).andReturn(false).anyTimes();
+    config.set(YarnConfiguration.APPLICATION_HISTORY_STORE,
+      "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore");
+
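+    // Mock the Phoenix JDBC layer so that the metrics schema initialization
+    // triggered by ApplicationHistoryServer.init() does not need a live
+    // HBase/ZooKeeper instance.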
+    Connection connection = createNiceMock(Connection.class);
+    Statement stmt = createNiceMock(Statement.class);
+    mockStatic(DriverManager.class);
+    expect(DriverManager.getConnection("jdbc:phoenix:localhost:2181:/hbase"))
+      .andReturn(connection).anyTimes();
+    expect(connection.createStatement()).andReturn(stmt).anyTimes();
+    suppress(method(Statement.class, "executeUpdate", String.class));
+    connection.close();
+    expectLastCall();
+
+    EasyMock.replay(connection, stmt);
+    replayAll();
+
+    historyServer = new ApplicationHistoryServer();
+    historyServer.init(config);
+
+    verifyAll();
+
+    assertEquals(STATE.INITED, historyServer.getServiceState());
+    assertEquals(4, historyServer.getServices().size());
+    ApplicationHistoryClientService historyService =
+      historyServer.getClientService();
+    assertNotNull(historyServer.getClientService());
+    assertEquals(STATE.INITED, historyService.getServiceState());
+
+    historyServer.start();
+    assertEquals(STATE.STARTED, historyServer.getServiceState());
+    assertEquals(STATE.STARTED, historyService.getServiceState());
+    historyServer.stop();
+    assertEquals(STATE.STOPPED, historyServer.getServiceState());
+  }
+
+  // test launch method
+  @Ignore
+  @Test(timeout = 60000)
+  public void testLaunch() throws Exception {
+
+    UserGroupInformation ugi =
+      UserGroupInformation.createUserForTesting("ambari", new String[]{"ambari"});
+    mockStatic(UserGroupInformation.class);
+    expect(UserGroupInformation.getCurrentUser()).andReturn(ugi).anyTimes();
+    expect(UserGroupInformation.isSecurityEnabled()).andReturn(false).anyTimes();
+
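+    // ExitUtil turns System.exit() into a catchable ExitException. A normal
+    // launch should not exit at all, so reaching the catch block fails the
+    // test after asserting the exit status that was seen.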
+    ExitUtil.disableSystemExit();
+    try {
+      historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
+    } catch (ExitUtil.ExitException e) {
+      assertEquals(0, e.status);
+      ExitUtil.resetFirstExitException();
+      fail();
+    }
+  }
+
+  @After
+  public void stop() {
+    if (historyServer != null) {
+      historyServer.stop();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
new file mode 100644
index 0000000..bc16d36
--- /dev/null
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+import java.net.URI;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFileSystemApplicationHistoryStore extends
+    ApplicationHistoryStoreTestUtils {
+
+  private FileSystem fs;
+  private Path fsWorkingPath;
+
+  @Before
+  public void setup() throws Exception {
+    fs = new RawLocalFileSystem();
+    Configuration conf = new Configuration();
+    fs.initialize(new URI("/"), conf);
+    fsWorkingPath = new Path("Test");
+    fs.delete(fsWorkingPath, true);
+    conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI, fsWorkingPath.toString());
+    store = new FileSystemApplicationHistoryStore();
+    store.init(conf);
+    store.start();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    store.stop();
+    fs.delete(fsWorkingPath, true);
+    fs.close();
+  }
+
+  @Test
+  public void testReadWriteHistoryData() throws IOException {
+    testWriteHistoryData(5);
+    testReadHistoryData(5);
+  }
+
+  private void testWriteHistoryData(int num) throws IOException {
+    testWriteHistoryData(num, false, false);
+  }
+  
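+  // Writes num applications, each with num attempts, each with num containers.
+  // With missingContainer, the last container's finish record is skipped; with
+  // missingApplicationAttempt, the last attempt is left with no containers and
+  // no finish record, simulating incomplete history data.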
+  private void testWriteHistoryData(
+      int num, boolean missingContainer, boolean missingApplicationAttempt)
+          throws IOException {
+    // write application history data
+    for (int i = 1; i <= num; ++i) {
+      ApplicationId appId = ApplicationId.newInstance(0, i);
+      writeApplicationStartData(appId);
+
+      // write application attempt history data
+      for (int j = 1; j <= num; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        writeApplicationAttemptStartData(appAttemptId);
+
+        if (missingApplicationAttempt && j == num) {
+          continue;
+        }
+        // write container history data
+        for (int k = 1; k <= num; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          writeContainerStartData(containerId);
+          if (missingContainer && k == num) {
+            continue;
+          }
+          writeContainerFinishData(containerId);
+        }
+        writeApplicationAttemptFinishData(appAttemptId);
+      }
+      writeApplicationFinishData(appId);
+    }
+  }
+
+  private void testReadHistoryData(int num) throws IOException {
+    testReadHistoryData(num, false, false);
+  }
+  
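+  // Mirror of testWriteHistoryData: verifies every stored record and expects
+  // null diagnostics wherever a finish record was intentionally skipped.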
+  private void testReadHistoryData(
+      int num, boolean missingContainer, boolean missingApplicationAttempt)
+          throws IOException {
+    // read application history data
+    Assert.assertEquals(num, store.getAllApplications().size());
+    for (int i = 1; i <= num; ++i) {
+      ApplicationId appId = ApplicationId.newInstance(0, i);
+      ApplicationHistoryData appData = store.getApplication(appId);
+      Assert.assertNotNull(appData);
+      Assert.assertEquals(appId.toString(), appData.getApplicationName());
+      Assert.assertEquals(appId.toString(), appData.getDiagnosticsInfo());
+
+      // read application attempt history data
+      Assert.assertEquals(num, store.getApplicationAttempts(appId).size());
+      for (int j = 1; j <= num; ++j) {
+        ApplicationAttemptId appAttemptId =
+            ApplicationAttemptId.newInstance(appId, j);
+        ApplicationAttemptHistoryData attemptData =
+            store.getApplicationAttempt(appAttemptId);
+        Assert.assertNotNull(attemptData);
+        Assert.assertEquals(appAttemptId.toString(), attemptData.getHost());
+        
+        if (missingApplicationAttempt && j == num) {
+          Assert.assertNull(attemptData.getDiagnosticsInfo());
+          continue;
+        } else {
+          Assert.assertEquals(appAttemptId.toString(),
+              attemptData.getDiagnosticsInfo());
+        }
+
+        // read container history data
+        Assert.assertEquals(num, store.getContainers(appAttemptId).size());
+        for (int k = 1; k <= num; ++k) {
+          ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+          ContainerHistoryData containerData = store.getContainer(containerId);
+          Assert.assertNotNull(containerData);
+          Assert.assertEquals(Priority.newInstance(containerId.getId()),
+            containerData.getPriority());
+          if (missingContainer && k == num) {
+            Assert.assertNull(containerData.getDiagnosticsInfo());
+          } else {
+            Assert.assertEquals(containerId.toString(),
+                containerData.getDiagnosticsInfo());
+          }
+        }
+        ContainerHistoryData masterContainer =
+            store.getAMContainer(appAttemptId);
+        Assert.assertNotNull(masterContainer);
+        Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+          masterContainer.getContainerId());
+      }
+    }
+  }
+
+  @Test
+  public void testWriteAfterApplicationFinish() throws IOException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    writeApplicationFinishData(appId);
+    // write application attempt history data
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptStartData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    // write container history data
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerStartData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is not opened"));
+    }
+  }
+
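+  // Rough disk-usage bound: 100,000 container records should add less than
+  // 20 MB to the store's working directory.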
+  @Test
+  public void testMassiveWriteContainerHistoryData() throws IOException {
+    long mb = 1024 * 1024;
+    long usedDiskBefore = fs.getContentSummary(fsWorkingPath).getLength() / mb;
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    writeApplicationStartData(appId);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    for (int i = 1; i <= 100000; ++i) {
+      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    writeApplicationFinishData(appId);
+    long usedDiskAfter = fs.getContentSummary(fsWorkingPath).getLength() / mb;
+    Assert.assertTrue((usedDiskAfter - usedDiskBefore) < 20);
+  }
+
+  @Test
+  public void testMissingContainerHistoryData() throws IOException {
+    testWriteHistoryData(3, true, false);
+    testReadHistoryData(3, true, false);
+  }
+  
+  @Test
+  public void testMissingApplicationAttemptHistoryData() throws IOException {
+    testWriteHistoryData(3, false, true);
+    testReadHistoryData(3, false, true);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
new file mode 100644
index 0000000..7a45405
--- /dev/null
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice;
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestMemoryApplicationHistoryStore extends
+    ApplicationHistoryStoreTestUtils {
+
+  @Before
+  public void setup() {
+    store = new MemoryApplicationHistoryStore();
+  }
+
+  @Test
+  public void testReadWriteApplicationHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    try {
+      writeApplicationFinishData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    int numApps = 5;
+    for (int i = 1; i <= numApps; ++i) {
+      appId = ApplicationId.newInstance(0, i);
+      writeApplicationStartData(appId);
+      writeApplicationFinishData(appId);
+    }
+    Assert.assertEquals(numApps, store.getAllApplications().size());
+    for (int i = 1; i <= numApps; ++i) {
+      appId = ApplicationId.newInstance(0, i);
+      ApplicationHistoryData data = store.getApplication(appId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(appId.toString(), data.getApplicationName());
+      Assert.assertEquals(appId.toString(), data.getDiagnosticsInfo());
+    }
+    // Write again
+    appId = ApplicationId.newInstance(0, 1);
+    try {
+      writeApplicationStartData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeApplicationFinishData(appId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
+  @Test
+  public void testReadWriteApplicationAttemptHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    int numAppAttempts = 5;
+    writeApplicationStartData(appId);
+    for (int i = 1; i <= numAppAttempts; ++i) {
+      appAttemptId = ApplicationAttemptId.newInstance(appId, i);
+      writeApplicationAttemptStartData(appAttemptId);
+      writeApplicationAttemptFinishData(appAttemptId);
+    }
+    Assert.assertEquals(numAppAttempts, store.getApplicationAttempts(appId)
+      .size());
+    for (int i = 1; i <= numAppAttempts; ++i) {
+      appAttemptId = ApplicationAttemptId.newInstance(appId, i);
+      ApplicationAttemptHistoryData data =
+          store.getApplicationAttempt(appAttemptId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(appAttemptId.toString(), data.getHost());
+      Assert.assertEquals(appAttemptId.toString(), data.getDiagnosticsInfo());
+    }
+    writeApplicationFinishData(appId);
+    // Write again
+    appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
+    try {
+      writeApplicationAttemptStartData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeApplicationAttemptFinishData(appAttemptId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
+  @Test
+  public void testReadWriteContainerHistory() throws Exception {
+    // Out of order
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+        "is stored before the start information"));
+    }
+    // Normal
+    writeApplicationAttemptStartData(appAttemptId);
+    int numContainers = 5;
+    for (int i = 1; i <= numContainers; ++i) {
+      containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    Assert
+      .assertEquals(numContainers, store.getContainers(appAttemptId).size());
+    for (int i = 1; i <= numContainers; ++i) {
+      containerId = ContainerId.newInstance(appAttemptId, i);
+      ContainerHistoryData data = store.getContainer(containerId);
+      Assert.assertNotNull(data);
+      Assert.assertEquals(Priority.newInstance(containerId.getId()),
+        data.getPriority());
+      Assert.assertEquals(containerId.toString(), data.getDiagnosticsInfo());
+    }
+    ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId);
+    Assert.assertNotNull(masterContainer);
+    Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+      masterContainer.getContainerId());
+    writeApplicationAttemptFinishData(appAttemptId);
+    // Write again
+    containerId = ContainerId.newInstance(appAttemptId, 1);
+    try {
+      writeContainerStartData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+    try {
+      writeContainerFinishData(containerId);
+      Assert.fail();
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains("is already stored"));
+    }
+  }
+
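+  // Rough heap bound: 100,000 in-memory container records should add well
+  // under 200 MB. Runtime memory readings are noisy, so the limit is generous.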
+  @Test
+  public void testMassiveWriteContainerHistory() throws IOException {
+    long mb = 1024 * 1024;
+    Runtime runtime = Runtime.getRuntime();
+    long usedMemoryBefore = (runtime.totalMemory() - runtime.freeMemory()) / mb;
+    int numContainers = 100000;
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    for (int i = 1; i <= numContainers; ++i) {
+      ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+      writeContainerStartData(containerId);
+      writeContainerFinishData(containerId);
+    }
+    long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
+    Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestAppMetrics.java
----------------------------------------------------------------------
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestAppMetrics.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestAppMetrics.java
new file mode 100644
index 0000000..499dab6
--- /dev/null
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestAppMetrics.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.data;
+
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.util.Json;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestAppMetrics {
+  private static final String SAMPLE_SINGLE_METRIC_HOST_JSON = "{\n" +
+    "  \"metrics\" : [ {\n" +
+    "    \"instanceid\" : \"\",\n" +
+    "    \"hostname\" : \"localhost\",\n" +
+    "    \"metrics\" : {\n" +
+    "      \"0\" : \"5.35\",\n" +
+    "      \"5000\" : \"5.35\",\n" +
+    "      \"10000\" : \"5.35\",\n" +
+    "      \"15000\" : \"5.35\"\n" +
+    "    },\n" +
+    "    \"starttime\" : \"1411663170112\",\n" +
+    "    \"appid\" : \"HOST\",\n" +
+    "    \"metricname\" : \"disk_free\"\n" +
+    "  } ]\n" +
+    "}";
+
+  private static final String SAMPLE_TWO_METRIC_HOST_JSON = "{\n" +
+    "  \"metrics\" : [ {\n" +
+    "    \"instanceid\" : \"\",\n" +
+    "    \"hostname\" : \"localhost\",\n" +
+    "    \"metrics\" : {\n" +
+    "      \"0\" : \"5.35\",\n" +
+    "      \"5000\" : \"5.35\",\n" +
+    "      \"10000\" : \"5.35\",\n" +
+    "      \"15000\" : \"5.35\"\n" +
+    "    },\n" +
+    "    \"starttime\" : \"0\",\n" +
+    "    \"appid\" : \"HOST\",\n" +
+    "    \"metricname\" : \"disk_free\"\n" +
+    "  }, {\n" +
+    "    \"instanceid\" : \"\",\n" +
+    "    \"hostname\" : \"localhost\",\n" +
+    "    \"metrics\" : {\n" +
+    "      \"0\" : \"94.0\",\n" +
+    "      \"5000\" : \"94.0\",\n" +
+    "      \"10000\" : \"94.0\",\n" +
+    "      \"15000\" : \"94.0\"\n" +
+    "    },\n" +
+    "    \"starttime\" : \"0\",\n" +
+    "    \"appid\" : \"HOST\",\n" +
+    "    \"metricname\" : \"mem_cached\"\n" +
+    "  } ]\n" +
+    "}";
+
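+  // Four timestamps at 5-second intervals, matching the "0", "5000", "10000"
+  // and "15000" keys in the JSON fixtures above.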
+  private long[] timestamps;
+
+  @Before
+  public void setUp() throws Exception {
+    timestamps = new long[4];
+    timestamps[0] = 0;
+    timestamps[1] = timestamps[0] + 5000;
+    timestamps[2] = timestamps[1] + 5000;
+    timestamps[3] = timestamps[2] + 5000;
+
+  }
+
+  @Test
+  public void testHostDiskMetricsSerialization() throws IOException {
+    long timestamp = 1411663170112L;
+    AppMetrics appMetrics = new AppMetrics(new ApplicationInstance("localhost", AppID.HOST, ""), timestamp);
+
+    Metric diskFree = appMetrics.createMetric("disk_free");
+    double value = 5.35;
+
+    diskFree.putMetric(timestamps[0], Double.toString(value));
+    diskFree.putMetric(timestamps[1], Double.toString(value));
+    diskFree.putMetric(timestamps[2], Double.toString(value));
+    diskFree.putMetric(timestamps[3], Double.toString(value));
+
+    appMetrics.addMetric(diskFree);
+
+    String expected = SAMPLE_SINGLE_METRIC_HOST_JSON;
+    String s = new Json(true).serialize(appMetrics);
+
+    assertEquals("Serialized Host Metrics", expected, s);
+  }
+
+
+  @Test
+  public void testSingleHostManyMetricsSerialization() throws IOException {
+    AppMetrics appMetrics = new AppMetrics(new ApplicationInstance("localhost", AppID.HOST, ""), timestamps[0]);
+
+    Metric diskFree = appMetrics.createMetric("disk_free");
+    double value = 5.35;
+    diskFree.putMetric(timestamps[0], Double.toString(value));
+    diskFree.putMetric(timestamps[1], Double.toString(value));
+    diskFree.putMetric(timestamps[2], Double.toString(value));
+    diskFree.putMetric(timestamps[3], Double.toString(value));
+
+    appMetrics.addMetric(diskFree);
+
+    Metric memCache = appMetrics.createMetric("mem_cached");
+    double memVal = 94;
+    memCache.putMetric(timestamps[0], Double.toString(memVal));
+    memCache.putMetric(timestamps[1], Double.toString(memVal));
+    memCache.putMetric(timestamps[2], Double.toString(memVal));
+    memCache.putMetric(timestamps[3], Double.toString(memVal));
+
+    appMetrics.addMetric(memCache);
+
+    String expected = SAMPLE_TWO_METRIC_HOST_JSON;
+    String s = new Json(true).serialize(appMetrics);
+
+    assertEquals("Serialized Host Metrics", expected, s);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a52f8a55/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestMetric.java
----------------------------------------------------------------------
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestMetric.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestMetric.java
new file mode 100644
index 0000000..a0572a2
--- /dev/null
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/data/TestMetric.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.data;
+
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.loadsimulator.util.Json;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.entry;
+import static org.junit.Assert.assertEquals;
+
+public class TestMetric {
+  private static final String SAMPLE_METRIC_IN_JSON = "{\n" +
+    "  \"instanceid\" : \"\",\n" +
+    "  \"hostname\" : \"localhost\",\n" +
+    "  \"metrics\" : {\n" +
+    "    \"0\" : \"5.35\",\n" +
+    "    \"5000\" : \"5.35\",\n" +
+    "    \"10000\" : \"5.35\",\n" +
+    "    \"15000\" : \"5.35\"\n" +
+    "  },\n" +
+    "  \"starttime\" : \"0\",\n" +
+    "  \"appid\" : \"HOST\",\n" +
+    "  \"metricname\" : \"disk_free\"\n" +
+    "}";
+
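+  // Round-trip coverage: serialization must reproduce the pretty-printed
+  // fixture exactly, and deserialization must restore every field and value.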
+  @Test
+  public void testSerializeToJson() throws IOException {
+    Metric diskOnHostMetric = new Metric(new ApplicationInstance("localhost", AppID.HOST, ""), "disk_free", 0);
+
+    long timestamp = 0;
+    double value = 5.35;
+
+    diskOnHostMetric.putMetric(timestamp, Double.toString(value));
+    diskOnHostMetric.putMetric(timestamp + 5000, Double.toString(value));
+    diskOnHostMetric.putMetric(timestamp + 10000, Double.toString(value));
+    diskOnHostMetric.putMetric(timestamp + 15000, Double.toString(value));
+
+    String expected = SAMPLE_METRIC_IN_JSON;
+    String s = new Json(true).serialize(diskOnHostMetric);
+
+    assertEquals("Json should match", expected, s);
+  }
+
+  @Test
+  public void testDeserializeObjectFromString() throws IOException {
+    String source = SAMPLE_METRIC_IN_JSON;
+
+    Metric m = new Json().deserialize(source, Metric.class);
+
+    assertEquals("localhost", m.getHostname());
+    assertEquals("HOST", m.getAppid());
+    assertEquals("", m.getInstanceid());
+    assertEquals("disk_free", m.getMetricname());
+    assertEquals("0", m.getStarttime());
+
+    assertThat(m.getMetrics()).isNotEmpty().hasSize(4).contains(
+      entry("0", "5.35"),
+      entry("5000", "5.35"),
+      entry("10000", "5.35"),
+      entry("15000", "5.35"));
+  }
+}
\ No newline at end of file
