http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index ea08470,4d92dbe..efd00bf
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@@ -48,11 -50,13 +50,13 @@@ architecture = get_architecture(
  dfs_type = default("/commandParams/dfs_type", "")
  
  artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 -jdk_name = default("/hostLevelParams/jdk_name", None)
 -java_home = config['hostLevelParams']['java_home']
 -java_version = expect("/hostLevelParams/java_version", int)
 -jdk_location = config['hostLevelParams']['jdk_location']
 +jdk_name = default("/ambariLevelParams/jdk_name", None)
 +java_home = config['ambariLevelParams']['java_home']
 +java_version = expect("/ambariLevelParams/java_version", int)
 +jdk_location = config['ambariLevelParams']['jdk_location']
  
+ hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+ 
  sudo = AMBARI_SUDO_BINARY
  
  ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
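
The hunk above moves the JDK parameters from /hostLevelParams to /ambariLevelParams and shows the three lookup idioms these hook scripts rely on: direct indexing for required keys, default() for optional keys, and expect() for values that must coerce to a type. A minimal sketch of how the idioms differ, assuming a stripped-down command JSON in config (illustrative shape only, not the full command layout):

    # Sketch only: approximate behaviour of the three lookup styles above.
    config = {"ambariLevelParams": {"java_home": "/usr/jdk64/jdk1.8.0_112",
                                    "java_version": "8"}}

    java_home = config['ambariLevelParams']['java_home']    # raises KeyError if absent
    jdk_name = config['ambariLevelParams'].get('jdk_name')  # default(..., None) analogue
    java_version = int(config['ambariLevelParams']['java_version'])  # expect(..., int) analogue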

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index a2b119d,50c5a40..811cc11
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@@ -109,5 -109,7 +109,7 @@@ smoke_user_dirs = format("/tmp/hadoop-{
  if has_hbase_masters:
    hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
  #repo params
 -repo_info = config['hostLevelParams']['repo_info']
 +repo_info = config['hostLevelParams']['repoInfo']
  service_repo_info = default("/hostLevelParams/service_repo_info",None)
+ 
+ repo_file = default("/repositoryFile", None)
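
repo_file is read from the new top-level /repositoryFile key, which older command JSONs do not carry, so it goes through default() instead of direct indexing. Roughly, default() walks a /-separated path through the nested command dict and returns the fallback when any segment is missing; a simplified reconstruction under that assumption (not the library code, which reads the command JSON internally):

    # Simplified reconstruction of default()'s slash-path resolution.
    def default(path, fallback, config):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cmd = {"repositoryFile": {"repoVersion": "2.6.3.0"}}
    assert default("/repositoryFile", None, cmd) == {"repoVersion": "2.6.3.0"}
    assert default("/hostLevelParams/service_repo_info", None, cmd) is None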

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
index 28f5df4,9f2b344..d0456ce
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
@@@ -19,17 -19,18 +19,16 @@@ limitations under the License
  
  from ambari_commons.os_check import OSCheck
  from resource_management.libraries.resources.repository import Repository
+ from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository, UBUNTU_REPO_COMPONENTS_POSTFIX
  from resource_management.core.logger import Logger
- import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+ import ambari_simplejson as json
  
- # components_lits = repoName + postfix
- _UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
  
 -def _alter_repo(action, repo_string, repo_template):
 +def _alter_repo(action, repo_dicts, repo_template):
    """
    @param action: "delete" or "create"
 -  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]";
 +  @param repo_dicts: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]";
    """
 -  repo_dicts = json.loads(repo_string)
 -
    if not isinstance(repo_dicts, list):
      repo_dicts = [repo_dicts]
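
With this change _alter_repo no longer parses JSON itself: the caller hands it already-parsed data, and the function only normalizes a bare dict into a one-element list before iterating. That normalization in isolation, as a hypothetical standalone helper:

    # Hypothetical helper mirroring the normalization _alter_repo now performs.
    def normalize_repos(repo_dicts):
        """Accept one repo dict or a list of them; always return a list."""
        if not isinstance(repo_dicts, list):
            repo_dicts = [repo_dicts]
        return repo_dicts

    assert normalize_repos({"repoId": "HDP-2.6"}) == [{"repoId": "HDP-2.6"}]
    assert normalize_repos([{"repoId": "HDP-2.6"}]) == [{"repoId": "HDP-2.6"}]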
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 1f13e03,c8880ae..466837c
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@@ -35,8 -39,13 +39,13 @@@ config = Script.get_config(
  tmp_dir = Script.get_tmp_dir()
  artifact_dir = tmp_dir + "/AMBARI-artifacts"
  
+ version_for_stack_feature_checks = get_stack_feature_version(config)
+ stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks)
+ 
+ sudo = AMBARI_SUDO_BINARY
+ 
  # Global flag enabling or disabling the sysprep feature
 -host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 +host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
  
  # Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
  # This is required if tarballs are going to be copied to HDFS, so set to False
@@@ -45,8 -54,9 +54,9 @@@ sysprep_skip_copy_fast_jar_hdfs = host_
  # Whether to skip setting up the unlimited key JCE policy
  sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
  
 -stack_version_unformatted = config['hostLevelParams']['stack_version']
 +stack_version_unformatted = config['clusterLevelParams']['stack_version']
  stack_version_formatted = format_stack_version(stack_version_unformatted)
+ major_stack_version = get_major_version(stack_version_formatted)
  
  dfs_type = default("/commandParams/dfs_type", "")
  hadoop_conf_dir = "/etc/hadoop/conf"
@@@ -106,12 -116,19 +116,19 @@@ oozie_servers = default("/clusterHostIn
  hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
  hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
  hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 -hs_host = default("/clusterHostInfo/hs_host", [])
 +hs_host = default("/clusterHostInfo/historyserver_hosts", [])
  jtnode_host = default("/clusterHostInfo/jtnode_host", [])
 -namenode_host = default("/clusterHostInfo/namenode_host", [])
 +namenode_host = default("/clusterHostInfo/namenode_hosts", [])
  zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
  ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
- ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+ cluster_name = config["clusterName"]
+ set_instanceId = "false"
+ if 'cluster-env' in config['configurations'] and \
+     'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+   ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+   set_instanceId = "true"
+ else:
+   ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
  
  has_namenode = not len(namenode_host) == 0
  has_resourcemanager = not len(rm_host) == 0
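
The branch added here (and identically in the HDP 3.0 hook below) prefers an externally managed Metrics Collector declared in cluster-env over the cluster-managed collector hosts, and set_instanceId records that metrics must then be tagged with an instance id. The same decision as a free-standing sketch, with config and managed_hosts standing in for the real command JSON accessors:

    # Illustrative sketch of the collector-host selection logic above.
    def pick_ams_collectors(config, managed_hosts):
        cluster_env = config.get('configurations', {}).get('cluster-env', {})
        if 'metrics_collector_external_hosts' in cluster_env:
            return cluster_env['metrics_collector_external_hosts'], "true"
        return ",".join(managed_hosts), "false"

    hosts, set_instance_id = pick_ams_collectors({"configurations": {}},
                                                 ["c6401.ambari.apache.org"])
    assert (hosts, set_instance_id) == ("c6401.ambari.apache.org", "false")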

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
index d2979bb,ddc6100..e553e18
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
@@@ -40,11 -41,21 +41,21 @@@ host_sys_prepped = default("/ambariLeve
  
  sudo = AMBARI_SUDO_BINARY
  
 -stack_version_unformatted = config['hostLevelParams']['stack_version']
 +stack_version_unformatted = config['clusterLevelParams']['stack_version']
  stack_version_formatted = format_stack_version(stack_version_unformatted)
  
- # current host stack version
- current_version = default("/hostLevelParams/current_version", None)
+ # service name
+ service_name = config['serviceName']
+ 
+ # logsearch configuration
+ logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
+ 
+ agent_cache_dir = config['hostLevelParams']['agentCacheDir']
+ service_package_folder = config['commandParams']['service_package_folder']
+ logsearch_service_name = service_name.lower().replace("_", "-")
+ logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
+ logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
+ logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
  
  # default hadoop params
  mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
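
The new block derives the Logfeeder input configuration for a service from its name: a service called FOO_BAR maps to input.config-foo-bar.json, rendered from a .j2 template shipped inside the agent's cached copy of the service package. The same derivation as a small helper (sketch; the example path values are assumptions, not taken from this commit):

    # Sketch: deriving the Logsearch input-config template path for a service.
    import os

    def logsearch_template_path(agent_cache_dir, service_package_folder, service_name):
        file_name = 'input.config-' + service_name.lower().replace("_", "-") + ".json"
        return os.path.join(agent_cache_dir, service_package_folder,
                            "templates", file_name + ".j2")

    # Example values only:
    path = logsearch_template_path("/var/lib/ambari-agent/cache",
                                   "stacks/HDP/3.0/services/HDFS/package", "HDFS")
    exists = os.path.isfile(path)  # mirrors logsearch_config_file_exists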

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
index f9a17d6,50c5a40..6d375db
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
@@@ -109,5 -109,7 +109,7 @@@ smoke_user_dirs = format("/tmp/hadoop-{
  if has_hbase_masters:
    hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
  #repo params
 -repo_info = config['hostLevelParams']['repo_info']
 +repo_info = config['hostLevelParams']['repoInfo']
  service_repo_info = default("/hostLevelParams/service_repo_info",None)
+ 
+ repo_file = default("/repositoryFile", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index e2735d9,d70030d..136939e
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@@ -100,12 -100,20 +100,20 @@@ oozie_servers = default("/clusterHostIn
  hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
  hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
  hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 -hs_host = default("/clusterHostInfo/hs_host", [])
 +hs_host = default("/clusterHostInfo/historyserver_hosts", [])
  jtnode_host = default("/clusterHostInfo/jtnode_host", [])
 -namenode_host = default("/clusterHostInfo/namenode_host", [])
 +namenode_host = default("/clusterHostInfo/namenode_hosts", [])
  zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
  ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
- ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+ 
+ cluster_name = config["clusterName"]
+ set_instanceId = "false"
+ if 'cluster-env' in config['configurations'] and \
+         'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+   ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+   set_instanceId = "true"
+ else:
+   ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
  
  has_namenode = not len(namenode_host) == 0
  has_resourcemanager = not len(rm_host) == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
index 3a1fbe1,e0e78b9..35a39cd
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
@@@ -33,12 -33,15 +33,15 @@@ user_group = config['configurations']['
  user_to_gid_dict = collections.defaultdict(lambda:user_group)
  user_to_groups_dict = collections.defaultdict(lambda:[user_group])
  
 -jdk_name = default("/hostLevelParams/jdk_name", None)
 -java_home = config['hostLevelParams']['java_home']
 +jdk_name = default("/ambariLevelParams/jdk_name", None)
 +java_home = config['ambariLevelParams']['java_home']
  artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 -jdk_location = config['hostLevelParams']['jdk_location']
 -java_version = expect("/hostLevelParams/java_version", int)
 +jdk_location = config['ambariLevelParams']['jdk_location']
 +java_version = expect("/ambariLevelParams/java_version", int)
  
+ ambari_java_home = default("/commandParams/ambari_java_home", None)
+ ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ 
  service_name = config["serviceName"]
  component_name = config["role"]
  sudo = AMBARI_SUDO_BINARY
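
ambari_java_home and ambari_jdk_name describe the JDK of the Ambari server itself, as opposed to the cluster JDK, and both may legitimately be absent. A plausible consumption pattern, assuming a prefer-server-JDK fallback (an assumption for illustration, not code from this commit):

    # Hedged sketch: fall back to the cluster JDK when the Ambari JDK is unset.
    import os

    ambari_java_home = None                # default("/commandParams/ambari_java_home", None)
    java_home = "/usr/jdk64/jdk1.8.0_112"  # example value, normally from config

    effective_java_home = ambari_java_home or java_home
    java_exec = os.path.join(effective_java_home, "bin", "java")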

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2
index af89b90,af89b90..0000000
deleted file mode 100644,100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/FAKEHDFS/package/templates/input.config-hdfs.json.j2
+++ /dev/null
@@@ -1,216 -1,216 +1,0 @@@
--{#
-- # Licensed to the Apache Software Foundation (ASF) under one
-- # or more contributor license agreements.  See the NOTICE file
-- # distributed with this work for additional information
-- # regarding copyright ownership.  The ASF licenses this file
-- # to you under the Apache License, Version 2.0 (the
-- # "License"); you may not use this file except in compliance
-- # with the License.  You may obtain a copy of the License at
-- #
-- #   http://www.apache.org/licenses/LICENSE-2.0
-- #
-- # Unless required by applicable law or agreed to in writing, software
-- # distributed under the License is distributed on an "AS IS" BASIS,
-- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- # See the License for the specific language governing permissions and
-- # limitations under the License.
-- #}
--{
--  "input":[
--    {
--      "type":"hdfs_datanode",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
--    },
--    {
--      "type":"hdfs_namenode",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
--    },
--    {
--      "type":"hdfs_journalnode",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
--    },
--    {
--      "type":"hdfs_secondarynamenode",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
--    },
--    {
--      "type":"hdfs_zkfc",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
--    },
--    {
--      "type":"hdfs_nfs3",
--      "rowtype":"service",
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
--    },
--    {
--      "type":"hdfs_audit",
--      "rowtype":"audit",
--      "is_enabled":"true",
--      "add_fields":{
--        "logType":"FAKEHDFSAudit",
--        "enforcer":"hadoop-acl",
--        "repoType":"1",
--        "repo":"hdfs"
--      },
--      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
--    }
--  ],
--  "filter":[
--    {
--      "filter":"grok",
--      "conditions":{
--        "fields":{
--          "type":[
--            "hdfs_datanode",
--            "hdfs_journalnode",
--            "hdfs_secondarynamenode",
--            "hdfs_namenode",
--            "hdfs_zkfc",
--            "hdfs_nfs3"
--          ]
--        }
--      },
--      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
--      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
--      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
--      "post_map_values":{
--        "logtime":{
--          "map_date":{
--            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
--          }
--        }
--      }
--    },
--    {
--      "filter":"grok",
--      "conditions":{
--        "fields":{
--          "type":[
--            "hdfs_audit"
--          ]
--        }
--      },
--      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
--      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
--      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
--      "post_map_values":{
--        "evtTime":{
--          "map_date":{
--            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
--          }
--        }
--      }
--    },
--    {
--      "filter":"keyvalue",
--      "sort_order":1,
--      "conditions":{
--        "fields":{
--          "type":[
--            "hdfs_audit"
--          ]
--        }
--      },
--      "source_field":"log_message",
--      "value_split":"=",
--      "field_split":"\t",
--      "post_map_values":{
--        "src":{
--          "map_fieldname":{
--            "new_fieldname":"resource"
--          }
--        },
--        "ip":{
--          "map_fieldname":{
--            "new_fieldname":"cliIP"
--          }
--        },
--        "allowed":[
--          {
--            "map_fieldvalue":{
--              "pre_value":"true",
--              "post_value":"1"
--            }
--          },
--          {
--            "map_fieldvalue":{
--              "pre_value":"false",
--              "post_value":"0"
--            }
--          },
--          {
--            "map_fieldname":{
--              "new_fieldname":"result"
--            }
--          }
--        ],
--        "cmd":{
--          "map_fieldname":{
--            "new_fieldname":"action"
--          }
--        },
--        "proto":{
--          "map_fieldname":{
--            "new_fieldname":"cliType"
--          }
--        },
--        "callerContext":{
--          "map_fieldname":{
--            "new_fieldname":"req_caller_id"
--          }
--        }
--      }
--    },
--    {
--      "filter":"grok",
--      "sort_order":2,
--      "source_field":"ugi",
--      "remove_source_field":"false",
--      "conditions":{
--        "fields":{
--          "type":[
--            "hdfs_audit"
--          ]
--        }
--      },
--      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
--      "post_map_values":{
--        "user":{
--          "map_fieldname":{
--            "new_fieldname":"reqUser"
--          }
--        },
--        "x_user":{
--          "map_fieldname":{
--            "new_fieldname":"reqUser"
--          }
--        },
--        "p_user":{
--          "map_fieldname":{
--            "new_fieldname":"reqUser"
--          }
--        },
--        "k_user":{
--          "map_fieldname":{
--            "new_fieldname":"proxyUsers"
--          }
--        },
--        "p_authType":{
--          "map_fieldname":{
--            "new_fieldname":"authType"
--          }
--        },
--        "k_authType":{
--          "map_fieldname":{
--            "new_fieldname":"proxyAuthType"
--          }
--        }
--      }
--    }
--  ]
--}

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/properties/krb5_conf.j2
----------------------------------------------------------------------
diff --cc ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/properties/krb5_conf.j2
index 612751b,612751b..0000000
deleted file mode 100644,100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/KERBEROS/properties/krb5_conf.j2
+++ /dev/null
@@@ -1,60 -1,60 +1,0 @@@
--{#
--# Licensed to the Apache Software Foundation (ASF) under one
--# or more contributor license agreements.  See the NOTICE file
--# distributed with this work for additional information
--# regarding copyright ownership.  The ASF licenses this file
--# to you under the Apache License, Version 2.0 (the
--# "License"); you may not use this file except in compliance
--# with the License.  You may obtain a copy of the License at
--#
--#   http://www.apache.org/licenses/LICENSE-2.0
--#
--# Unless required by applicable law or agreed to in writing, software
--# distributed under the License is distributed on an "AS IS" BASIS,
--# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--# See the License for the specific language governing permissions and
--# limitations under the License.
--#}
--[libdefaults]
--  renew_lifetime = 7d
--  forwardable = true
--  default_realm = {{realm}}
--  ticket_lifetime = 24h
--  dns_lookup_realm = false
--  dns_lookup_kdc = false
--  default_ccache_name = /tmp/krb5cc_%{uid}
--  #default_tgs_enctypes = {{encryption_types}}
--  #default_tkt_enctypes = {{encryption_types}}
--{% if domains %}
--[domain_realm]
--{%- for domain in domains.split(',') %}
--  {{domain|trim()}} = {{realm}}
--{%- endfor %}
--{% endif %}
--[logging]
--  default = FILE:/var/log/krb5kdc.log
--  admin_server = FILE:/var/log/kadmind.log
--  kdc = FILE:/var/log/krb5kdc.log
--
--[realms]
--  {{realm}} = {
--{%- if master_kdc %}
--    master_kdc = {{master_kdc|trim()}}
--{%- endif -%}
--{%- if kdc_hosts > 0 -%}
--{%- set kdc_host_list = kdc_hosts.split(',')  -%}
--{%- if kdc_host_list and kdc_host_list|length > 0 %}
--    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
--{%- if kdc_host_list -%}
--{%- if master_kdc and (master_kdc not in kdc_host_list) %}
--    kdc = {{master_kdc|trim()}}
--{%- endif -%}
--{% for kdc_host in kdc_host_list %}
--    kdc = {{kdc_host|trim()}}
--{%- endfor -%}
--{% endif %}
--{%- endif %}
--{%- endif %}
--  }
--
--{# Append additional realm declarations below #}

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
index c1056dd,94799cc..c449aae
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
@@@ -191,9 -191,9 +191,9 @@@ public class TestActionDBAccessorImpl 
    @Test
    public void testGetStagesInProgress() throws AmbariException {
      List<Stage> stages = new ArrayList<>();
 -    stages.add(createStubStage(hostName, requestId, stageId, false));
 -    stages.add(createStubStage(hostName, requestId, stageId + 1, false));
 +    stages.add(createStubStage(hostName, requestId, stageId));
 +    stages.add(createStubStage(hostName, requestId, stageId + 1));
-     Request request = new Request(stages, clusters);
+     Request request = new Request(stages, "", clusters);
      db.persistActions(request);
      assertEquals(2, stages.size());
    }
@@@ -705,11 -730,11 +707,11 @@@
    }
  
    private void populateActionDB(ActionDBAccessor db, String hostname,
 -      long requestId, long stageId, boolean retryAllowed) throws AmbariException {
 -    Stage s = createStubStage(hostname, requestId, stageId, retryAllowed);
 +      long requestId, long stageId) throws AmbariException {
 +    Stage s = createStubStage(hostname, requestId, stageId);
      List<Stage> stages = new ArrayList<>();
      stages.add(s);
-     Request request = new Request(stages, clusters);
+     Request request = new Request(stages, "", clusters);
      db.persistActions(request);
    }
  
@@@ -730,10 -755,10 +732,10 @@@
  private void populateActionDBWithCompletedRequest(ActionDBAccessor db, String hostname,
        long requestId, long stageId) throws AmbariException {
  
 -    Stage s = createStubStage(hostname, requestId, stageId, false);
 +    Stage s = createStubStage(hostname, requestId, stageId);
      List<Stage> stages = new ArrayList<>();
      stages.add(s);
-     Request request = new Request(stages, clusters);
+     Request request = new Request(stages, "", clusters);
  
    s.setHostRoleStatus(hostname, Role.HBASE_REGIONSERVER.name(), HostRoleStatus.COMPLETED);
    s.setHostRoleStatus(hostname, Role.HBASE_MASTER.name(), HostRoleStatus.COMPLETED);
@@@ -754,9 -779,9 +756,9 @@@
      db.persistActions(request);
    }
  
 -  private Stage createStubStage(String hostname, long requestId, long stageId, boolean retryAllowed) {
 +  private Stage createStubStage(String hostname, long requestId, long stageId) {
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action db accessor test",
-       "clusterHostInfo", "commandParamsStage", "hostParamsStage");
+       "commandParamsStage", "hostParamsStage");
      s.setStageId(stageId);
      s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
          RoleCommand.START,

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index 3b485b0,38b77da..8bfa6fd
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@@ -77,10 -78,10 +78,10 @@@ import org.apache.ambari.server.topolog
  import org.codehaus.jettison.json.JSONException;
  import org.codehaus.jettison.json.JSONObject;
  import org.easymock.EasyMock;
 -import org.eclipse.jetty.server.SessionManager;
 +import org.eclipse.jetty.server.session.SessionHandler;
  import org.junit.Test;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
  
  import com.google.gson.Gson;
  import com.google.gson.GsonBuilder;

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index b2546e0,b3c4e26..f55db08
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@@ -471,10 -476,10 +459,10 @@@ public class HeartbeatProcessorTest 
      hb.setResponseId(0);
      hb.setHostname(DummyHostname1);
    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
-     hb.setReports(new ArrayList<CommandReport>());
+     hb.setReports(new ArrayList<>());
      ArrayList<ComponentStatus> componentStatuses = new ArrayList<>();
      ComponentStatus componentStatus1 = new ComponentStatus();
 -    componentStatus1.setClusterName(DummyCluster);
 +    //componentStatus1.setClusterName(DummyCluster);
      componentStatus1.setServiceName(HDFS);
      componentStatus1.setMessage(DummyHostStatus);
      componentStatus1.setStatus(State.STARTED.name());
@@@ -840,10 -845,10 +823,10 @@@
      hb.setResponseId(1);
      hb.setHostname(DummyHostname1);
    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
-     hb.setReports(new ArrayList<CommandReport>());
+     hb.setReports(new ArrayList<>());
  
      componentStatus1 = new ComponentStatus();
 -    componentStatus1.setClusterName(DummyCluster);
 +    //componentStatus1.setClusterName(DummyCluster);
      componentStatus1.setServiceName(HDFS);
      componentStatus1.setMessage(DummyHostStatus);
      componentStatus1.setStatus(State.STARTED.name());

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/agent/TestActionQueue.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 5581c5b,20ff949..4b4da49
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@@ -1386,10 -1362,10 +1361,10 @@@ public class TestHeartbeatHandler 
  
  
  private ComponentStatus createComponentStatus(String clusterName, String serviceName, String message,
-                                                 State state, SecurityState securityState,
+                                                 State state,
                                                  String componentName, String stackVersion) {
      ComponentStatus componentStatus1 = new ComponentStatus();
 -    componentStatus1.setClusterName(clusterName);
 +    //componentStatus1.setClusterName(clusterName);
      componentStatus1.setServiceName(serviceName);
      componentStatus1.setMessage(message);
      componentStatus1.setStatus(state.name());

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
index 7b7d817,8fcb1b2..5193245
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
@@@ -121,12 -158,16 +158,16 @@@ public class HostsMasterMaintenanceChec
      Mockito.when(upgradePack.getName()).thenReturn(upgradePackName);
      upgradePacks.put(upgradePack.getName(), upgradePack);
    Mockito.when(ambariMetaInfo.getUpgradePacks(Mockito.anyString(), Mockito.anyString())).thenReturn(upgradePacks);
-     Mockito.when(upgradePack.getTasks()).thenReturn(new HashMap<String, Map<String,ProcessingComponent>>());
-     Mockito.when(cluster.getServices()).thenReturn(new HashMap<String, Service>());
-     Mockito.when(clusters.getHostsForCluster(Mockito.anyString())).thenReturn(new HashMap<String, Host>());
+     Mockito.when(upgradePack.getTasks()).thenReturn(new HashMap<>());
+     Mockito.when(cluster.getServices()).thenReturn(new HashMap<>());
+     Mockito.when(clusters.getHostsForCluster(Mockito.anyString())).thenReturn(new HashMap<>());
  
      check = new PrerequisiteCheck(null, null);
-     hostsMasterMaintenanceCheck.perform(check, new PrereqCheckRequest("cluster"));
+     checkRequest = new PrereqCheckRequest("cluster");
+     checkRequest.setSourceStackId(new StackId("HDP-1.0"));
+     checkRequest.setTargetRepositoryVersion(m_repositoryVersion);
+ 
+     hostsMasterMaintenanceCheck.perform(check, checkRequest);
      Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus());
    }
 -}
 +}

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
index 996f349,7485257..55eeb4e
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServiceCheckValidityCheckTest.java
@@@ -96,8 -94,13 +95,7 @@@ public class ServiceCheckValidityCheckT
          return clusters;
        }
      };
 -    serviceCheckValidityCheck.actionMetadataProvider = new Provider<ActionMetadata>() {
 -      @Override
 -      public ActionMetadata get() {
 -        return actionMetadata;
 -      }
 -    };
  
- 
      Cluster cluster = mock(Cluster.class);
      when(clusters.getCluster(CLUSTER_NAME)).thenReturn(cluster);
      when(cluster.getClusterId()).thenReturn(CLUSTER_ID);
@@@ -118,6 -122,35 +117,42 @@@
    }
  
    @Test
+   public void testWithNullCommandDetailAtCommand() throws AmbariException {
+     ServiceComponent serviceComponent = mock(ServiceComponent.class);
+     when(serviceComponent.isVersionAdvertised()).thenReturn(true);
+ 
+     when(service.getMaintenanceState()).thenReturn(MaintenanceState.OFF);
+     when(service.getServiceComponents()).thenReturn(ImmutableMap.of(SERVICE_COMPONENT_NAME, serviceComponent));
+ 
+     ServiceConfigEntity serviceConfigEntity = new ServiceConfigEntity();
+     serviceConfigEntity.setServiceName(SERVICE_NAME);
+     serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
+ 
 -    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
 -    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
++    HostRoleCommandEntity hostRoleCommandEntity1 = new HostRoleCommandEntity();
++    hostRoleCommandEntity1.setRoleCommand(RoleCommand.SERVICE_CHECK);
++    hostRoleCommandEntity1.setCommandDetail(null);
++    hostRoleCommandEntity1.setStartTime(SERVICE_CHECK_START_TIME);
++    hostRoleCommandEntity1.setRole(Role.ZOOKEEPER_SERVER);
++
++    HostRoleCommandEntity hostRoleCommandEntity2 = new HostRoleCommandEntity();
++    hostRoleCommandEntity2.setRoleCommand(RoleCommand.SERVICE_CHECK);
++    hostRoleCommandEntity2.setCommandDetail(COMMAND_DETAIL);
++    hostRoleCommandEntity2.setStartTime(SERVICE_CHECK_START_TIME);
++    hostRoleCommandEntity2.setRole(Role.HDFS_SERVICE_CHECK);
+ 
+     when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
 -    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
++    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(asList(hostRoleCommandEntity1, hostRoleCommandEntity2));
+ 
+     PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
+     try {
+       serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
+     } catch (NullPointerException ex){
+       Assert.fail("serviceCheckValidityCheck failed due to null at start_time were not handled");
+     }
+     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
+   }
+ 
+   @Test
    public void testFailWhenServiceWithOutdatedServiceCheckExists() throws AmbariException {
      ServiceComponent serviceComponent = mock(ServiceComponent.class);
      when(serviceComponent.isVersionAdvertised()).thenReturn(true);
@@@ -157,7 -186,7 +192,7 @@@
      serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
  
      when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-     when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(Collections.<HostRoleCommandEntity>emptyList());
 -    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(Collections.<LastServiceCheckDTO>emptyList());
++    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(Collections.emptyList());
  
      PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
      serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
@@@ -176,23 -205,49 +211,23 @@@
      serviceConfigEntity.setServiceName(SERVICE_NAME);
      serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
  
 -    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), SERVICE_CHECK_START_TIME);
 -    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(Role.HDFS_SERVICE_CHECK.name(), CONFIG_CREATE_TIMESTAMP - 1L);
 +    HostRoleCommandEntity hostRoleCommandEntity1 = new HostRoleCommandEntity();
 +    hostRoleCommandEntity1.setRoleCommand(RoleCommand.SERVICE_CHECK);
 +    hostRoleCommandEntity1.setCommandDetail(COMMAND_DETAIL);
 +    hostRoleCommandEntity1.setStartTime(SERVICE_CHECK_START_TIME);
 +    hostRoleCommandEntity1.setRole(Role.HDFS_SERVICE_CHECK);
 +
 +    HostRoleCommandEntity hostRoleCommandEntity2 = new HostRoleCommandEntity();
 +    hostRoleCommandEntity2.setRoleCommand(RoleCommand.SERVICE_CHECK);
 +    hostRoleCommandEntity2.setCommandDetail(COMMAND_DETAIL);
 +    hostRoleCommandEntity2.setStartTime(CONFIG_CREATE_TIMESTAMP - 1L);
 +    hostRoleCommandEntity2.setRole(Role.HDFS_SERVICE_CHECK);
  
      when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
-     when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(Arrays.asList(hostRoleCommandEntity1, hostRoleCommandEntity2));
 -    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
++    when(hostRoleCommandDAO.findAll(any(Request.class), any(Predicate.class))).thenReturn(asList(hostRoleCommandEntity1, hostRoleCommandEntity2));
  
      PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
      serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
      Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
    }
 -
 -  /**
 -   * Tests that old, oudated service checks for the FOO2 service doesn't cause
 -   * problems when checking values for the FOO service.
 -   * <p/>
 -   * The specific test case here is that the FOO2 service was added a long time
 -   * ago and then removed. We don't want old service checks for FOO2 to match
 -   * when querying for FOO.
 -   *
 -   * @throws AmbariException
 -   */
 -  @Test
 -  public void testPassWhenSimilarlyNamedServiceIsOutdated() throws AmbariException {
 -    ServiceComponent serviceComponent = mock(ServiceComponent.class);
 -    when(serviceComponent.isVersionAdvertised()).thenReturn(true);
 -
 -    when(service.getMaintenanceState()).thenReturn(MaintenanceState.OFF);
 -    when(service.getServiceComponents()).thenReturn(ImmutableMap.of(SERVICE_COMPONENT_NAME, serviceComponent));
 -
 -    ServiceConfigEntity serviceConfigEntity = new ServiceConfigEntity();
 -    serviceConfigEntity.setServiceName(SERVICE_NAME);
 -    serviceConfigEntity.setCreateTimestamp(CONFIG_CREATE_TIMESTAMP);
 -
 -    String hdfsRole = Role.HDFS_SERVICE_CHECK.name();
 -    String hdfs2Role = hdfsRole.replace("HDFS", "HDFS2");
 -
 -    LastServiceCheckDTO lastServiceCheckDTO1 = new LastServiceCheckDTO(hdfsRole, SERVICE_CHECK_START_TIME);
 -    LastServiceCheckDTO lastServiceCheckDTO2 = new LastServiceCheckDTO(hdfs2Role, CONFIG_CREATE_TIMESTAMP - 1L);
 -
 -    when(serviceConfigDAO.getLastServiceConfig(eq(CLUSTER_ID), eq(SERVICE_NAME))).thenReturn(serviceConfigEntity);
 -    when(hostRoleCommandDAO.getLatestServiceChecksByRole(any(Long.class))).thenReturn(asList(lastServiceCheckDTO1, lastServiceCheckDTO2));
 -
 -    PrerequisiteCheck check = new PrerequisiteCheck(null, CLUSTER_NAME);
 -    serviceCheckValidityCheck.perform(check, new PrereqCheckRequest(CLUSTER_NAME));
 -    Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());  }
--}
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index a35957a,7b8bd8b..1b37435
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@@ -36,10 -36,11 +36,13 @@@ import org.apache.ambari.server.H2Datab
  import org.apache.ambari.server.agent.HeartbeatTestHelper;
  import org.apache.ambari.server.agent.RecoveryConfig;
  import org.apache.ambari.server.agent.RecoveryConfigHelper;
 +import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData;
 +import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
  import org.apache.ambari.server.orm.GuiceJpaInitializer;
  import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+ import org.apache.ambari.server.orm.OrmTestHelper;
+ import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
  import org.apache.ambari.server.state.Cluster;
  import org.apache.ambari.server.state.Config;
  import org.apache.ambari.server.state.MaintenanceState;

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 554e089,b370829..9309abe
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@@ -7310,7 -6977,7 +6977,7 @@@ public class AmbariManagementController
      Assert.assertEquals(1, responsesWithParams.size());
      StackVersionResponse resp = responsesWithParams.iterator().next();
      assertNotNull(resp.getUpgradePacks());
-     assertEquals(13, resp.getUpgradePacks().size());
 -    assertTrue(resp.getUpgradePacks().size() > 0);
++    assertEquals(15, resp.getUpgradePacks().size());
      assertTrue(resp.getUpgradePacks().contains("upgrade_test"));
    }
  
@@@ -10159,6 -9327,80 +9327,79 @@@
    }
  
    @Test
+   public void testRestartIndicatorsAndSlaveFilesUpdateAtComponentsDelete() throws Exception {
+     String cluster1 = getUniqueName();
+     createCluster(cluster1);
+     Cluster cluster = clusters.getCluster(cluster1);
+     StackId stackId = new StackId("HDP-2.0.7");
+     cluster.setDesiredStackVersion(stackId);
+     cluster.setCurrentStackVersion(stackId);
+ 
+     String hdfsService = "HDFS";
+     String zookeeperService = "ZOOKEEPER";
+     createService(cluster1, hdfsService, null);
+     createService(cluster1, zookeeperService, null);
+ 
+     String namenode = "NAMENODE";
+     String datanode = "DATANODE";
+     String zookeeperServer = "ZOOKEEPER_SERVER";
+     String zookeeperClient = "ZOOKEEPER_CLIENT";
+ 
+     createServiceComponent(cluster1, hdfsService, namenode,
+         State.INIT);
+     createServiceComponent(cluster1, hdfsService, datanode,
+         State.INIT);
+     createServiceComponent(cluster1, zookeeperService, zookeeperServer,
+         State.INIT);
+     createServiceComponent(cluster1, zookeeperService, zookeeperClient,
+         State.INIT);
+ 
+     String host1 = getUniqueName();
+     String host2 = getUniqueName();
+ 
+     addHostToCluster(host1, cluster1);
+     createServiceComponentHost(cluster1, hdfsService, namenode, host1, null);
+     createServiceComponentHost(cluster1, hdfsService, datanode, host1, null);
+     createServiceComponentHost(cluster1, zookeeperService, zookeeperServer, host1,
+         null);
+     createServiceComponentHost(cluster1, zookeeperService, zookeeperClient, host1,
+         null);
+ 
+     ServiceComponentHost nameNodeSch = null;
+     for (ServiceComponentHost sch : cluster.getServiceComponentHosts(host1)) {
+       if (sch.getServiceComponentName().equals(namenode)) {
+         nameNodeSch = sch;
+       }
+     }
+ 
+     assertFalse(nameNodeSch.isRestartRequired());
+ 
+     addHostToCluster(host2, cluster1);
+ 
+     createServiceComponentHost(cluster1, hdfsService, datanode, host2, null);
+     assertFalse(nameNodeSch.isRestartRequired());  //No restart required if adding host
+ 
+     deleteServiceComponentHost(cluster1, hdfsService, datanode, host2, null);
+     deleteHost(host2);
+ 
+     assertFalse(nameNodeSch.isRestartRequired());   //NameNode doesn't need to be restarted!
+ 
+     List<Long> requestIDs = actionDB.getRequestsByStatus(null, 1, false);
+     Request request = actionDB.getRequest(requestIDs.get(0));
+     assertEquals("Update Include/Exclude Files for [HDFS]", request.getRequestContext());
 -    assertEquals(false, request.isExclusive());
+     Type type = new TypeToken<Map<String, String>>(){}.getType();
+     Map<String, String> requestParams = StageUtils.getGson().fromJson(request.getInputs(), type);
+     assertEquals(2, requestParams.size());
+     assertEquals("true", requestParams.get("is_add_or_delete_slave_request"));
+     assertEquals("true", requestParams.get("update_files_only"));
+     assertEquals(1, request.getResourceFilters().size());
+     RequestResourceFilter resourceFilter = request.getResourceFilters().get(0);
+     assertEquals(resourceFilter.getServiceName(), hdfsService);
+     assertEquals(resourceFilter.getComponentName(), namenode);
+     assertEquals(resourceFilter.getHostNames(), new ArrayList<String>());
+   }
+ 
+   @Test
    public void testMaintenanceState() throws Exception {
      String cluster1 = getUniqueName();
      createCluster(cluster1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariServerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariSessionManagerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/be73d167/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 5c1836a,68d6349..734dd7e
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@@ -4939,12 -5014,11 +5013,12 @@@ public class BlueprintConfigurationProc
      atlasProperties.put("atlas.audit.hbase.zookeeper.quorum", "localhost");
  
  
-     Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+     Configuration clusterConfig = new Configuration(properties, Collections.emptyMap());
  
 -    Collection<String> hg1Components = new HashSet<>();
 -    hg1Components.add("KAFKA_BROKER");
 -    hg1Components.add("HBASE_MASTER");
 +    Collection<String> hgComponents = new HashSet<>();
 +    hgComponents.add("KAFKA_BROKER");
 +    hgComponents.add("ZOOKEEPER_SERVER");
 +    hgComponents.add("HBASE_MASTER");
      List<String> hosts = new ArrayList<>();
      hosts.add(host1);
      hosts.add(host2);
