[ambari] branch trunk updated: [AMBARI-23057] Upgrade fails because of Stale alert definitions (ap… (#2884)

2019-04-03 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 604c82e  [AMBARI-23057] Upgrade fails because of Stale alert definitions (ap… (#2884)
604c82e is described below

commit 604c82e3cd49e71643a870dd9ca77576cc2268c9
Author: amarnathreddy pappu 
AuthorDate: Wed Apr 3 10:51:08 2019 -0700

[AMBARI-23057] Upgrade fails because of Stale alert definitions (ap… (#2884)
---
 .../checks/DatabaseConsistencyCheckHelper.java | 105 -
 .../checks/DatabaseConsistencyCheckHelperTest.java |  49 ++
 2 files changed, 153 insertions(+), 1 deletion(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 7f52b66..3d8b4e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
@@ -55,6 +56,7 @@ import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StageDAO;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
@@ -91,6 +93,7 @@ public class DatabaseConsistencyCheckHelper {
   private static Injector injector;
 
   private static MetainfoDAO metainfoDAO;
+  private static AlertDefinitionDAO alertDefinitionDAO;
   private static Connection connection;
   private static AmbariMetaInfo ambariMetaInfo;
   private static DBAccessor dbAccessor;
@@ -158,6 +161,7 @@ public class DatabaseConsistencyCheckHelper {
 closeConnection();
 connection = null;
 metainfoDAO = null;
+alertDefinitionDAO = null;
 ambariMetaInfo = null;
 dbAccessor = null;
   }
@@ -189,6 +193,7 @@ public class DatabaseConsistencyCheckHelper {
 fixConfigGroupHostMappings();
 fixConfigGroupsForDeletedServices();
 fixConfigsSelectedMoreThanOnce();
+fixAlertsForDeletedServices();
   }
   checkSchemaName();
   checkMySQLEngine();
@@ -201,6 +206,7 @@ public class DatabaseConsistencyCheckHelper {
   checkConfigGroupsHasServiceName();
   checkConfigGroupHostMapping(true);
   checkConfigGroupsForDeletedServices(true);
+  checkForStalealertdefs();
   LOG.info("*** Check database completed 
***");
   return checkResult;
 }
@@ -216,7 +222,9 @@ public class DatabaseConsistencyCheckHelper {
 if (metainfoDAO == null) {
   metainfoDAO = injector.getInstance(MetainfoDAO.class);
 }
-
+if (alertDefinitionDAO == null) {
+  alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
+}
 MetainfoEntity schemaVersionEntity = metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY);
 String schemaVersion = null;
 
@@ -807,6 +815,52 @@ public class DatabaseConsistencyCheckHelper {
   }
 
   /**
+   * This method checks for stale alert definitions.
+   */
+  static Map<String, String> checkForStalealertdefs() {
+    Configuration conf = injector.getInstance(Configuration.class);
+    Map<String, String> alertInfo = new HashMap<>();
+    LOG.info("Checking to ensure there are no stale alert definitions");
+
+    ensureConnection();
+
+    String STALE_ALERT_DEFINITIONS = "select definition_name, service_name from alert_definition where service_name not in " +
+        "(select service_name from clusterservices) and service_name not in ('AMBARI')";
+
+    ResultSet rs = null;
+    Statement statement;
+
+    try {
+      statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+      rs = statement.executeQuery(STALE_ALERT_DEFINITIONS);
+      if (rs != null) {
+        while (rs.next()) {
+          alertInfo.put(rs.getString("definition_name"), rs.getString("service_name"));
+        }
+if
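
The hunk above is truncated by the archive. For orientation, here is a minimal, self-contained sketch of the check this commit introduces; the table and column names come from the query shown, while the connection handling and return type are simplified assumptions:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.HashMap;
    import java.util.Map;

    public class StaleAlertDefinitionCheck {
      // Alert definitions whose service no longer exists in the cluster
      // ('AMBARI' is the server's own pseudo-service and is skipped).
      private static final String STALE_ALERT_DEFINITIONS =
          "select definition_name, service_name from alert_definition where service_name not in " +
          "(select service_name from clusterservices) and service_name not in ('AMBARI')";

      static Map<String, String> checkForStaleAlertDefs(Connection connection) throws Exception {
        Map<String, String> alertInfo = new HashMap<>();
        try (Statement statement = connection.createStatement();
             ResultSet rs = statement.executeQuery(STALE_ALERT_DEFINITIONS)) {
          while (rs.next()) {
            alertInfo.put(rs.getString("definition_name"), rs.getString("service_name"));
          }
        }
        return alertInfo; // a non-empty map means stale definitions were found
      }
    }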

[ambari] branch branch-2.7 updated: [AMBARI-23057] Upgrade fails because of Stale alert definitions (apappu) (#2885)

2019-04-03 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
 new ef51fd9  [AMBARI-23057] Upgrade fails because of Stale alert definitions (apappu) (#2885)
ef51fd9 is described below

commit ef51fd9e8ec8f7bb90687ff7abb04d233504e191
Author: amarnathreddy pappu 
AuthorDate: Wed Apr 3 10:47:25 2019 -0700

[AMBARI-23057] Upgrade fails because of Stale alert definitions (apappu) (#2885)
---
 .../checks/DatabaseConsistencyCheckHelper.java | 108 -
 .../checks/DatabaseConsistencyCheckHelperTest.java |  49 ++
 2 files changed, 155 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 375bcd9..e2e8b0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
@@ -55,6 +56,7 @@ import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.dao.StageDAO;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
@@ -93,6 +95,7 @@ public class DatabaseConsistencyCheckHelper {
   private static Injector injector;
 
   private static MetainfoDAO metainfoDAO;
+  private static AlertDefinitionDAO alertDefinitionDAO;
   private static Connection connection;
   private static AmbariMetaInfo ambariMetaInfo;
   private static DBAccessor dbAccessor;
@@ -160,6 +163,7 @@ public class DatabaseConsistencyCheckHelper {
 closeConnection();
 connection = null;
 metainfoDAO = null;
+alertDefinitionDAO = null;
 ambariMetaInfo = null;
 dbAccessor = null;
   }
@@ -191,6 +195,7 @@ public class DatabaseConsistencyCheckHelper {
 fixConfigGroupHostMappings();
 fixConfigGroupsForDeletedServices();
 fixConfigsSelectedMoreThanOnce();
+fixAlertsForDeletedServices();
   }
   checkSchemaName();
   checkMySQLEngine();
@@ -203,6 +208,7 @@ public class DatabaseConsistencyCheckHelper {
   checkConfigGroupsHasServiceName();
   checkConfigGroupHostMapping(true);
   checkConfigGroupsForDeletedServices(true);
+  checkForStalealertdefs();
   LOG.info("*** Check database completed 
***");
   return checkResult;
 }
@@ -218,7 +224,9 @@ public class DatabaseConsistencyCheckHelper {
 if (metainfoDAO == null) {
   metainfoDAO = injector.getInstance(MetainfoDAO.class);
 }
-
+if (alertDefinitionDAO == null) {
+  alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
+}
 MetainfoEntity schemaVersionEntity = metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY);
 String schemaVersion = null;
 
@@ -810,7 +818,54 @@ public class DatabaseConsistencyCheckHelper {
 }
   }
 
-  /**
+
+  /**
+   * This method checks for stale alert definitions.
+   */
+  static Map<String, String> checkForStalealertdefs() {
+    Configuration conf = injector.getInstance(Configuration.class);
+    Map<String, String> alertInfo = new HashMap<>();
+    LOG.info("Checking to ensure there are no stale alert definitions");
+
+    ensureConnection();
+
+    String STALE_ALERT_DEFINITIONS = "select definition_name, service_name from alert_definition where service_name not in " +
+        "(select service_name from clusterservices) and service_name not in ('AMBARI')";
+
+    ResultSet rs = null;
+    Statement statement;
+
+    try {
+      statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+      rs = statement.executeQuery(STALE_ALERT_DEFINITIONS);
+      if (rs != null) {
+        while (rs.next()) {
+          alertInfo.put(rs.getString("definition_name"), rs.getS

[ambari] branch branch-2.7 updated: AMBARI-24874 : Storm Service Check fail with 'The TGT found is not renewable' (#2638)

2018-11-26 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
 new d0360c8  AMBARI-24874 : Storm Service Check fail with 'The TGT found is not renewable' (#2638)
d0360c8 is described below

commit d0360c81448f562cd104e4e79910779809c95be6
Author: Rajkumar Singh 
AuthorDate: Mon Nov 26 14:37:26 2018 -0800

AMBARI-24874 : Storm Service Check fail with 'The TGT found is not renewable' (#2638)
---
 .../STORM/0.9.1/package/scripts/service_check.py  | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/service_check.py
index 80ea0f5..52f73ed 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/service_check.py
@@ -49,6 +49,12 @@ class ServiceCheckDefault(ServiceCheck):
 import params
 env.set_params(params)
 
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {storm_keytab_path} {storm_jaas_principal}; ")
+    else:
+      kinit_cmd = ""
+
 unique = get_unique_id_and_date()
 
 File("/tmp/wordCount.jar",
@@ -64,6 +70,11 @@ class ServiceCheckDefault(ServiceCheck):
 elif params.nimbus_host is not None:
   cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
 
+    # use client jaas for service check
+    if params.security_enabled:
+      storm_client_jaas_file = format("{conf_dir}/client_jaas.conf")
+      cmd = format("{kinit_cmd}{cmd} -c java.security.auth.login.config={storm_client_jaas_file}")
+
     Execute(cmd,
             logoutput=True,
             path=params.storm_bin_dir,
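
The shape of the final command is easy to lose in the resource-management templating; a rough Java rendering of the same assembly, where every name (kinit path, keytab, principal, conf dir) is an assumed illustration rather than the project's API:

    public class StormServiceCheckCommand {
      // Sketch: prefix the check with kinit and point it at the client JAAS file
      // when security is enabled, mirroring the Python hunk above.
      static String build(boolean securityEnabled, String kinitPathLocal, String stormKeytabPath,
                          String stormJaasPrincipal, String confDir, String unique) {
        String kinitCmd = securityEnabled
            ? String.format("%s -kt %s %s; ", kinitPathLocal, stormKeytabPath, stormJaasPrincipal)
            : "";
        String cmd = "storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount" + unique;
        if (securityEnabled) {
          cmd = kinitCmd + cmd + " -c java.security.auth.login.config=" + confDir + "/client_jaas.conf";
        }
        return cmd;
      }
    }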



[ambari-metrics] branch master updated (ffa5c96 -> d26b90b)

2018-09-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/ambari-metrics.git.


from ffa5c96  Update README.md
 add 061a18f  Updated README.md
 new d26b90b  Merge pull request #1 from apache/trunk

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 README.md | 1 -
 1 file changed, 1 deletion(-)



[ambari-metrics] 01/01: Merge pull request #1 from apache/trunk

2018-09-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ambari-metrics.git

commit d26b90b336e6a6cdb0f07bdd8a370d34195c2952
Merge: ffa5c96 061a18f
Author: Siddharth 
AuthorDate: Tue Sep 25 13:48:26 2018 -0700

Merge pull request #1 from apache/trunk

Updated README.md

 README.md | 1 -
 1 file changed, 1 deletion(-)



[ambari-metrics] branch trunk updated: Updated README.md

2018-09-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari-metrics.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 061a18f  Updated README.md
061a18f is described below

commit 061a18fc33f4ab200befb28ac60c0ed8122368e0
Author: Siddharth 
AuthorDate: Tue Sep 25 13:47:23 2018 -0700

Updated README.md
---
 README.md | 19 +++
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index 7192fc7..f8158ca 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,17 @@
 # ambari-metrics
 Apache Ambari subproject - Ambari Metrics
 
-Ambari Metrics System ("AMS") is a system for collecting, aggregating, serving and visualizing daemon and system metrics in Ambari-managed clusters.
+**Ambari Metrics System** ("AMS") is a system for collecting, aggregating, serving and visualizing daemon and system metrics in Ambari-managed clusters.
 
 The original JIRA Epic for Ambari Metrics System can be found here: https://issues.apache.org/jira/browse/AMBARI-5707
 First official release of AMS was with Ambari 2.0.0. With the metrics repo split, the aim is to be able to release the sub-project with a separate cadence from Ambari.
 
- ----------------------------------------------------------------------------------------------------------
-| Term                  | Definition                                                                       |
- ----------------------------------------------------------------------------------------------------------
-| Metrics Collector     | The standalone server that collects metrics, aggregates metrics, serves metrics |
-|                       | from the Hadoop service sinks and the Metrics Monitor.                          |
-| Metrics Monitor       | Installed on each host in the cluster to collect system-level metrics and       |
-|                       | forward to the Metrics Collector.                                               |
-| Metrics Hadoop Sinks  | Plug-ins into the various Hadoop components sinks to send Hadoop metrics to the |
-|                       | Metrics Collector.                                                              |

+
+| Term | Definition |
+-- | ---
+Metrics Collector | The standalone server that collects metrics, aggregates metrics, serves metrics from the Hadoop service sinks and the Metrics Monitor.
+Metrics Monitor | Installed on each host in the cluster to collect system-level metrics and forward to the Metrics Collector.
+Metrics Hadoop Sinks | Plug-ins into the various Hadoop components sinks to send Hadoop metrics to the Metrics Collector.
 
 The Metrics Collector is a daemon that receives data from registered publishers (the Monitors and Sinks).
 The Collector itself is built using Hadoop technologies such as HBase, Phoenix and ATS.
@@ -23,4 +19,3 @@ The Collector can store data on the local filesystem (referred to as "embedded m
 It is a fully distributed collection and aggregation system starting from 2.7.0.
 
 Please refer to the wiki for more detailed info: https://cwiki.apache.org/confluence/display/AMBARI/Metrics
-



[ambari-metrics] branch master updated: Update README.md

2018-09-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ambari-metrics.git


The following commit(s) were added to refs/heads/master by this push:
 new ffa5c96  Update README.md
ffa5c96 is described below

commit ffa5c966dfa865fae2c93f1f3497a4626bb8097b
Author: Siddharth 
AuthorDate: Tue Sep 25 13:46:17 2018 -0700

Update README.md
---
 README.md | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 7192fc7..fe2dd6e 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,17 @@
 # ambari-metrics
 Apache Ambari subproject - Ambari Metrics
 
-Ambari Metrics System ("AMS") is a system for collecting, aggregating, serving and visualizing daemon and system metrics in Ambari-managed clusters.
+**Ambari Metrics System** ("AMS") is a system for collecting, aggregating, serving and visualizing daemon and system metrics in Ambari-managed clusters.
 
 The original JIRA Epic for Ambari Metrics System can be found here: https://issues.apache.org/jira/browse/AMBARI-5707
 First official release of AMS was with Ambari 2.0.0. With the metrics repo split, the aim is to be able to release the sub-project with a separate cadence from Ambari.
 
- ----------------------------------------------------------------------------------------------------------
-| Term                  | Definition                                                                       |
- ----------------------------------------------------------------------------------------------------------
-| Metrics Collector     | The standalone server that collects metrics, aggregates metrics, serves metrics |
-|                       | from the Hadoop service sinks and the Metrics Monitor.                          |
-| Metrics Monitor       | Installed on each host in the cluster to collect system-level metrics and       |
-|                       | forward to the Metrics Collector.                                               |
-| Metrics Hadoop Sinks  | Plug-ins into the various Hadoop components sinks to send Hadoop metrics to the |
-|                       | Metrics Collector.                                                              |

+
+| Term | Definition |
+-- | ---
+Metrics Collector | The standalone server that collects metrics, aggregates metrics, serves metrics from the Hadoop service sinks and the Metrics Monitor.
+Metrics Monitor | Installed on each host in the cluster to collect system-level metrics and forward to the Metrics Collector.
+Metrics Hadoop Sinks | Plug-ins into the various Hadoop components sinks to send Hadoop metrics to the Metrics Collector.
 
 The Metrics Collector is a daemon that receives data from registered publishers (the Monitors and Sinks).
 The Collector itself is built using Hadoop technologies such as HBase, Phoenix and ATS.



[ambari-metrics] branch master updated: Create README.md

2018-09-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ambari-metrics.git


The following commit(s) were added to refs/heads/master by this push:
 new 40966c1  Create README.md
40966c1 is described below

commit 40966c1bda2d020db4db21f8f882556a624f4175
Author: Siddharth 
AuthorDate: Tue Sep 25 13:02:04 2018 -0700

Create README.md
---
 README.md | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/README.md b/README.md
new file mode 100644
index 000..7192fc7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,26 @@
+# ambari-metrics
+Apache Ambari subproject - Ambari Metrics
+
+Ambari Metrics System ("AMS") is a system for collecting, aggregating, serving and visualizing daemon and system metrics in Ambari-managed clusters.
+
+The original JIRA Epic for Ambari Metrics System can be found here: https://issues.apache.org/jira/browse/AMBARI-5707
+First official release of AMS was with Ambari 2.0.0. With the metrics repo split, the aim is to be able to release the sub-project with a separate cadence from Ambari.
+
+ ----------------------------------------------------------------------------------------------------------
+| Term                  | Definition                                                                       |
+ ----------------------------------------------------------------------------------------------------------
+| Metrics Collector     | The standalone server that collects metrics, aggregates metrics, serves metrics |
+|                       | from the Hadoop service sinks and the Metrics Monitor.                          |
+| Metrics Monitor       | Installed on each host in the cluster to collect system-level metrics and       |
+|                       | forward to the Metrics Collector.                                               |
+| Metrics Hadoop Sinks  | Plug-ins into the various Hadoop components sinks to send Hadoop metrics to the |
+|                       | Metrics Collector.                                                              |
+ ----------------------------------------------------------------------------------------------------------
+
+The Metrics Collector is a daemon that receives data from registered publishers (the Monitors and Sinks).
+The Collector itself is built using Hadoop technologies such as HBase, Phoenix and ATS.
+The Collector can store data on the local filesystem (referred to as "embedded mode") or use an external HDFS (referred to as "distributed mode").
+It is a fully distributed collection and aggregation system starting from 2.7.0.
+
+Please refer to the wiki for more detailed info: https://cwiki.apache.org/confluence/display/AMBARI/Metrics
+



[ambari] branch branch-2.7 updated: [AMBARI-24503] LLAP application on Yarn fails with CNF exception on sysprepped cluster. (#2114)

2018-08-19 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
 new 06a061a  [AMBARI-24503] LLAP application on Yarn fails with CNF exception on sysprepped cluster. (#2114)
06a061a is described below

commit 06a061a82bd6d0d6518b5514665acdfe6d29d773
Author: avijayanhwx 
AuthorDate: Sun Aug 19 19:42:06 2018 -0700

[AMBARI-24503] LLAP application on Yarn fails with CNF exception on sysprepped cluster. (#2114)
---
 ambari-server/src/main/resources/scripts/Ambaripreupload.py | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index 5e9688c..dca9bb5 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -152,7 +152,8 @@ with Environment() as env:
     hdfs_lib_dir = hdfs_home_dir + '/lib'
     hadoop_home_dir = '/usr/hdp/' + stack_version + '/hadoop'
     hadoop_lib_dir = hadoop_home_dir + '/lib'
-
+    hadoop_mapreduce_dir = '/usr/hdp/' + stack_version + '/hadoop-mapreduce'
+
     oozie_secure = ''
     oozie_home="/usr/hdp/" + stack_version + "/oozie"
     oozie_setup_sh=format("/usr/hdp/" + stack_version + "/oozie/bin/oozie-setup.sh")
@@ -313,7 +314,8 @@ with Environment() as env:
     Logger.info(format("Creating {yarn_service_tarball}"))
     folders = [params.yarn_home_dir, params.yarn_lib_dir, params.hdfs_home_dir, params.hdfs_lib_dir, params.hadoop_home_dir,
-               params.hadoop_lib_dir]
+               params.hadoop_lib_dir,
+               params.hadoop_mapreduce_dir]
     with closing(tarfile.open(params.yarn_service_tarball, "w:gz")) as tar:
       for folder in folders:
         for filepath in glob.glob(format("{folder}/*.jar")):
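
The fix simply adds the hadoop-mapreduce folder to the set of jar globs rolled into the YARN service tarball, so the LLAP classes are present on the classpath. A hedged Java sketch of the same tar-the-jar-globs step, assuming Apache commons-compress as the tar library and an illustrative output path:

    import java.io.OutputStream;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
    import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
    import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;

    public class YarnServiceTarball {
      public static void main(String[] args) throws Exception {
        String stackVersion = args[0];
        String[] folders = {
            "/usr/hdp/" + stackVersion + "/hadoop",
            "/usr/hdp/" + stackVersion + "/hadoop/lib",
            "/usr/hdp/" + stackVersion + "/hadoop-mapreduce"  // the folder AMBARI-24503 adds
        };
        try (OutputStream out = Files.newOutputStream(Paths.get("/tmp/service-dep.tar.gz"));
             TarArchiveOutputStream tar = new TarArchiveOutputStream(new GzipCompressorOutputStream(out))) {
          for (String folder : folders) {
            // mirror glob.glob("{folder}/*.jar") from the script above
            try (DirectoryStream<Path> jars = Files.newDirectoryStream(Paths.get(folder), "*.jar")) {
              for (Path jar : jars) {
                tar.putArchiveEntry(new TarArchiveEntry(jar.toFile(), jar.getFileName().toString()));
                Files.copy(jar, tar);
                tar.closeArchiveEntry();
              }
            }
          }
        }
      }
    }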



[ambari] branch trunk updated: [AMBARI-24088] Log Feeder dint start during cluster install because of java.net.ConnectException (#1525)

2018-06-13 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3906975  [AMBARI-24088] Log Feeder dint start during cluster install because of java.net.ConnectException (#1525)
3906975 is described below

commit 390697530ddd2995e9bcaa06e8fcfeecbc191e15
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Thu Jun 14 02:27:59 2018 +0200

[AMBARI-24088] Log Feeder dint start during cluster install because of java.net.ConnectException (#1525)
---
 .../config/zookeeper/LogSearchConfigZK.java| 54 +++---
 1 file changed, 48 insertions(+), 6 deletions(-)

diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
index 64a6777..382d8fa 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
@@ -28,10 +28,12 @@ import org.apache.ambari.logsearch.config.api.LogSearchPropertyDescription;
 import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.framework.recipes.cache.TreeCache;
-import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.retry.RetryForever;
+import org.apache.curator.retry.RetryUntilElapsed;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
@@ -46,8 +48,9 @@ import com.google.gson.GsonBuilder;
 public class LogSearchConfigZK implements LogSearchConfig {
   private static final Logger LOG = LoggerFactory.getLogger(LogSearchConfigZK.class);
 
-  private static final int SESSION_TIMEOUT = 60000;
-  private static final int CONNECTION_TIMEOUT = 30000;
+  private static final int DEFAULT_SESSION_TIMEOUT = 60000;
+  private static final int DEFAULT_CONNECTION_TIMEOUT = 30000;
+  private static final int RETRY_INTERVAL_MS = 10000;
   private static final String DEFAULT_ZK_ROOT = "/logsearch";
   private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
 
@@ -76,6 +79,30 @@ public class LogSearchConfigZK implements LogSearchConfig {
   )
   private static final String ZK_ROOT_NODE_PROPERTY = "logsearch.config.zk_root";
 
+  @LogSearchPropertyDescription(
+    name = "logsearch.config.zk_session_time_out_ms",
+    description = "ZooKeeper session timeout in milliseconds",
+    examples = {"60000"},
+    sources = {"logsearch.properties", "logfeeder.properties"}
+  )
+  private static final String ZK_SESSION_TIMEOUT_PROPERTY = "logsearch.config.zk_session_time_out_ms";
+
+  @LogSearchPropertyDescription(
+    name = "logsearch.config.zk_connection_time_out_ms",
+    description = "ZooKeeper connection timeout in milliseconds",
+    examples = {"30000"},
+    sources = {"logsearch.properties", "logfeeder.properties"}
+  )
+  private static final String ZK_CONNECTION_TIMEOUT_PROPERTY = "logsearch.config.zk_connection_time_out_ms";
+
+  @LogSearchPropertyDescription(
+    name = "logsearch.config.zk_connection_retry_time_out_ms",
+    description = "The maximum elapsed time for connecting to ZooKeeper in milliseconds. 0 means retrying forever.",
+    examples = {"1200000"},
+    sources = {"logsearch.properties", "logfeeder.properties"}
+  )
+  private static final String ZK_CONNECTION_RETRY_TIMEOUT_PROPERTY = "logsearch.config.zk_connection_retry_time_out_ms";
+
   protected Map<String, String> properties;
   protected CuratorFramework client;
   protected TreeCache outputCache;
@@ -88,9 +115,9 @@ public class LogSearchConfigZK implements LogSearchConfig {
     LOG.info("Connecting to ZooKeeper at " + properties.get(ZK_CONNECT_STRING_PROPERTY) + root);
     client = CuratorFrameworkFactory.builder()
         .connectString(properties.get(ZK_CONNECT_STRING_PROPERTY) + root)
-        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
-        .connectionTimeoutMs(CONNECTION_TIMEOUT)
-        .sessionTimeoutMs(SESSION_TIMEOUT)
+        .retryPolicy(getRetryPo
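
The hunk is cut off at the new retryPolicy(...) call, but the imports added above (RetryForever, RetryUntilElapsed) suggest the selection logic. A sketch under that assumption; the method name and the property parsing are invented for illustration:

    import org.apache.curator.RetryPolicy;
    import org.apache.curator.retry.RetryForever;
    import org.apache.curator.retry.RetryUntilElapsed;

    class RetryPolicySelection {
      private static final int RETRY_INTERVAL_MS = 10000; // the constant added in this commit

      // 0 (or unset) means retry forever; otherwise retry until the configured time has elapsed.
      static RetryPolicy getRetryPolicy(String retryTimeoutMsProperty) {
        int timeoutMs = retryTimeoutMsProperty == null ? 0 : Integer.parseInt(retryTimeoutMsProperty);
        return timeoutMs > 0
            ? new RetryUntilElapsed(timeoutMs, RETRY_INTERVAL_MS)
            : new RetryForever(RETRY_INTERVAL_MS);
      }
    }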

[ambari] branch trunk updated: [AMBARI-24022] AutoStart Is not working for some of the components in the cluster (aonishuk) (#1472)

2018-06-05 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f7d398f  [AMBARI-24022] AutoStart Is not working for some of the components in the cluster  (aonishuk) (#1472)
f7d398f is described below

commit f7d398f594535ad6e90af93610f94f40eb205a1c
Author: aonishuk 
AuthorDate: Wed Jun 6 00:30:45 2018 +0300

[AMBARI-24022] AutoStart Is not working for some of the components in the cluster  (aonishuk) (#1472)
---
 .../AmbariCustomCommandExecutionHelper.java| 29 --
 .../controller/AmbariManagementControllerImpl.java | 12 -
 2 files changed, 41 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 4c69991..30db434 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -758,19 +758,6 @@ public class AmbariCustomCommandExecutionHelper {
     }
 
     Map<String, String> commandParams = new TreeMap<>();
-
-    //Propagate HCFS service type info
-    Map<String, ServiceInfo> serviceInfos = ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion());
-    for (ServiceInfo serviceInfoInstance : serviceInfos.values()) {
-      if (serviceInfoInstance.getServiceType() != null) {
-        LOG.debug("Adding {} to command parameters for {}", serviceInfoInstance.getServiceType(), serviceInfoInstance.getName());
-
-        commandParams.put("dfs_type", serviceInfoInstance.getServiceType());
-        break;
-      }
-    }
-
     String commandTimeout = getStatusCommandTimeout(serviceInfo);
 
     if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
@@ -1260,22 +1247,6 @@ public class AmbariCustomCommandExecutionHelper {
       if (null == stackId && null != cluster) {
         stackId = cluster.getDesiredStackVersion();
       }
-
-      //Propogate HCFS service type info to command params
-      if (null != stackId) {
-        Map<String, ServiceInfo> serviceInfos = ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion());
-
-        for (ServiceInfo serviceInfoInstance : serviceInfos.values()) {
-          if (serviceInfoInstance.getServiceType() != null) {
-            LOG.debug("Adding {} to command parameters for {}", serviceInfoInstance.getServiceType(), serviceInfoInstance.getName());
-
-            commandParamsStage.put("dfs_type", serviceInfoInstance.getServiceType());
-            break;
-          }
-        }
-      }
     }
 
     String hostParamsStageJson = StageUtils.getGson().toJson(hostParamsStage);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5a738e1..ef63ad4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2422,18 +2422,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   commandParams.putAll(commandParamsInp);
 }
 
-    // Propagate HCFS service type info
-    for (Service service : cluster.getServices().values()) {
-      ServiceInfo serviceInfoInstance = servicesMap.get(service.getName());
-      LOG.debug("Iterating service type Instance in createHostAction: {}", serviceInfoInstance.getName());
-      String serviceType = serviceInfoInstance.getServiceType();
-      if (serviceType != null) {
-        LOG.info("Adding service type info in createHostAction: {}", serviceType);
-        commandParams.put("dfs_type", serviceType);
-        break;
-      }
-    }
-
     boolean isInstallCommand = roleCommand.equals(RoleCommand.INSTALL);
     String agentDefaultCommandTimeout = configs.getDefaultAgentTaskTimeout(isInstallCommand);
     String scriptCommandTimeout = "";



[ambari] branch trunk updated: [AMBARI-24035] - Autostart is not obeying Maintenance Mode (#1465)

2018-06-05 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cc7fb90  [AMBARI-24035] - Autostart is not obeying Maintenance Mode (#1465)
cc7fb90 is described below

commit cc7fb90dfc8485afdf71fa95df453247979698b9
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Tue Jun 5 20:24:18 2018 +0200

[AMBARI-24035] - Autostart is not obeying Maintenance Mode (#1465)

* AMBARI-24035 - Autostart is not obeying Maintenance Mode

* AMBARI-24035 - Autostart is not obeying Maintenance Mode
- fix method name
---
 .../server/agent/stomp/HostLevelParamsHolder.java  | 41 --
 1 file changed, 31 insertions(+), 10 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/HostLevelParamsHolder.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/HostLevelParamsHolder.java
index 8190130..3c44f57 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/HostLevelParamsHolder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/HostLevelParamsHolder.java
@@ -26,11 +26,13 @@ import org.apache.ambari.server.agent.stomp.dto.HostLevelParamsCluster;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.events.ClusterComponentsRepoChangedEvent;
 import org.apache.ambari.server.events.HostLevelParamsUpdateEvent;
+import org.apache.ambari.server.events.MaintenanceModeEvent;
 import org.apache.ambari.server.events.ServiceComponentRecoveryChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.commons.collections.MapUtils;
 
 import com.google.common.eventbus.Subscribe;
@@ -119,23 +121,42 @@ public class HostLevelParamsHolder extends AgentHostDataHolder
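
The body of this hunk is truncated by the archive, but the added imports (MaintenanceModeEvent, ServiceComponentHost) point at the pattern: a Guava @Subscribe handler that recomputes host-level parameters when maintenance mode changes. A generic sketch of that event-bus pattern, with the payload reduced to a host name and all behavior assumed:

    import com.google.common.eventbus.EventBus;
    import com.google.common.eventbus.Subscribe;

    public class MaintenanceModeListener {
      // Sketch only: react to a maintenance-mode change by recomputing
      // the recovery/auto-start state pushed to the affected host.
      @Subscribe
      public void onMaintenanceModeEvent(String hostName) {
        System.out.println("recomputing host-level params for " + hostName);
      }

      public static void main(String[] args) {
        EventBus bus = new EventBus();
        bus.register(new MaintenanceModeListener());
        bus.post("host-1"); // dispatched to onMaintenanceModeEvent
      }
    }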

[ambari] branch trunk updated: [AMBARI-23975] Logsearch: do not save downloaded configuration to tmp file when comparing (#1441)

2018-06-01 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c9da7e2  [AMBARI-23975] Logsearch: do not save downloaded configuration to tmp file when comparing (#1441)
c9da7e2 is described below

commit c9da7e2886ac9388d3d8a2e217643136388b9c9b
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Fri Jun 1 18:09:51 2018 +0200

[AMBARI-23975] Logsearch: do not save downloaded configuration to tmp file when comparing (#1441)
---
 .../handler/AbstractSolrConfigHandler.java | 60 +-
 .../handler/UploadConfigurationHandler.java| 37 ++---
 ambari-logsearch/pom.xml   |  1 +
 3 files changed, 44 insertions(+), 54 deletions(-)

diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
index 28e8ad7..f58b29d 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/handler/AbstractSolrConfigHandler.java
@@ -18,20 +18,20 @@
  */
 package org.apache.ambari.logsearch.handler;
 
+import static org.apache.solr.common.cloud.ZkConfigManager.CONFIGS_ZKNODE;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
-import org.apache.commons.io.FileUtils;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.FileSystems;
-import java.nio.file.Paths;
-import java.util.UUID;
-
 public abstract class AbstractSolrConfigHandler implements SolrZkRequestHandler<Boolean> {
 
   private static final Logger LOG = LoggerFactory.getLogger(AbstractSolrConfigHandler.class);
@@ -46,17 +46,14 @@ public abstract class AbstractSolrConfigHandler implements SolrZkRequestHandler<
   public Boolean handle(CloudSolrClient solrClient, SolrPropsConfig solrPropsConfig) throws Exception {
 boolean reloadCollectionNeeded = false;
 String separator = FileSystems.getDefault().getSeparator();
-    String downloadFolderLocation = String.format("%s%s%s%s%s", System.getProperty("java.io.tmpdir"), separator,
-      UUID.randomUUID().toString(), separator, solrPropsConfig.getConfigName());
 solrClient.connect();
 SolrZkClient zkClient = solrClient.getZkStateReader().getZkClient();
-File tmpDir = new File(downloadFolderLocation);
 try {
   ZkConfigManager zkConfigManager = new ZkConfigManager(zkClient);
       boolean configExists = zkConfigManager.configExists(solrPropsConfig.getConfigName());
       if (configExists) {
         uploadMissingConfigFiles(zkClient, zkConfigManager, solrPropsConfig.getConfigName());
-        reloadCollectionNeeded = doIfConfigExists(solrPropsConfig, zkClient, separator, downloadFolderLocation, tmpDir);
+        reloadCollectionNeeded = doIfConfigExists(solrPropsConfig, zkClient, separator);
   } else {
         doIfConfigNotExist(solrPropsConfig, zkConfigManager);
         uploadMissingConfigFiles(zkClient, zkConfigManager, solrPropsConfig.getConfigName());
@@ -64,24 +61,15 @@ public abstract class AbstractSolrConfigHandler implements SolrZkRequestHandler<
     } catch (Exception e) {
       throw new RuntimeException(String.format("Cannot upload configurations to zk. (collection: %s, config set folder: %s)",
         solrPropsConfig.getCollection(), solrPropsConfig.getConfigSetFolder()), e);
-    } finally {
-      if (tmpDir.exists()) {
-        try {
-          FileUtils.deleteDirectory(tmpDir);
-        } catch (IOException e){
-          LOG.error("Cannot delete temp directory.", e);
-        }
-      }
     }
     return reloadCollectionNeeded;
   }
 
   /**
-   * Update config file (like solrconfig.xml) to zookeeper znode of solr, contains a download location as well which can be
-   * used to determine that you need to update the configuration or not
+   * Update config file (like solrconfig.xml) to zookeeper znode of solr
    */
   public abstract boolean updateConfigIfNeeded(SolrPropsConfig solrPropsConfig, SolrZkClient zkClient, File file,
-                                               String separator, String downloadFolderLocation) throws IOException;
+                                               S
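
The point of the change is to compare the local config file against the bytes already stored in ZooKeeper instead of round-tripping through a temp directory. A sketch of that in-memory comparison, with the znode layout inferred from the CONFIGS_ZKNODE import above and the helper itself assumed:

    import java.io.File;
    import java.nio.file.Files;
    import java.util.Arrays;
    import org.apache.solr.common.cloud.SolrZkClient;

    class ConfigComparison {
      // Sketch: true means the file differs from the copy under /configs/<configName>,
      // so an upload (and a collection reload) is needed.
      static boolean configFileDiffers(SolrZkClient zkClient, String configName, File file) throws Exception {
        String znode = "/configs/" + configName + "/" + file.getName();
        byte[] remote = zkClient.getData(znode, null, null, true);
        byte[] local = Files.readAllBytes(file.toPath());
        return !Arrays.equals(remote, local);
      }
    }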

[ambari] branch trunk updated: AMBARI-23922 - PERF 1.0 package not installed in the cluster (#1377)

2018-05-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new dd2b807  AMBARI-23922 - PERF 1.0 package not installed in the cluster (#1377)
dd2b807 is described below

commit dd2b807314298bd0915267e9fbf7bcc346ed6f13
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Fri May 25 17:23:47 2018 +0200

AMBARI-23922 - PERF 1.0 package not installed in the cluster (#1377)
---
 .../src/main/resources/stacks/PERF/install_packages.sed | 17 +++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/PERF/install_packages.sed b/ambari-server/src/main/resources/stacks/PERF/install_packages.sed
index 013d8df..a62f571 100644
--- a/ambari-server/src/main/resources/stacks/PERF/install_packages.sed
+++ b/ambari-server/src/main/resources/stacks/PERF/install_packages.sed
@@ -14,12 +14,25 @@
 # limitations under the License.
 /actionexecute/{i\
   def actionexecute(self, env):\
+    from resource_management.core.resources.system import Execute\
     # Parse parameters\
     config = Script.get_config()\
-    repository_version = config['roleParams']['repository_version']\
+    try:\
+      command_repository = CommandRepository(config['repositoryFile'])\
+    except KeyError:\
+      raise Fail("The command repository indicated by 'repositoryFile' was not found")\
+    self.repository_version = command_repository.version_string\
+    if self.repository_version is None:\
+      raise Fail("Cannot determine the repository version to install")\
+    self.repository_version = self.repository_version.strip()\
     (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)\
-    command = 'ambari-python-wrap {0} install {1}'.format(stack_selector_path, repository_version)\
+    command = 'ambari-python-wrap {0} install {1}'.format(stack_selector_path, self.repository_version)\
     Execute(command)\
+    self.structured_output = {\
+      'package_installation_result': 'SUCCESS',\
+      'repository_version_id': command_repository.version_id\
+    }\
+    self.put_structured_out(self.structured_output)\
   def actionexecute_old(self, env):
 d
 }
\ No newline at end of file



[ambari] branch trunk updated: AMBARI-23832 : Support Atlas HBase hook from Ambari (#1259)

2018-05-14 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9d7df89  AMBARI-23832 : Support Atlas HBase hook from Ambari (#1259)
9d7df89 is described below

commit 9d7df89092366d9885efd1f10e92d29887ea9e1e
Author: Vishal Suvagia 
AuthorDate: Mon May 14 23:05:45 2018 +0530

AMBARI-23832 : Support Atlas HBase hook from Ambari (#1259)
---
 ambari-common/src/main/python/ambari_commons/constants.py   | 1 +
 .../python/resource_management/libraries/functions/setup_atlas_hook.py  | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ambari-common/src/main/python/ambari_commons/constants.py b/ambari-common/src/main/python/ambari_commons/constants.py
index 31133fc..f1ab4a2 100644
--- a/ambari-common/src/main/python/ambari_commons/constants.py
+++ b/ambari-common/src/main/python/ambari_commons/constants.py
@@ -52,3 +52,4 @@ class SERVICE:
   YARN = "YARN"
   ZEPPELIN = "ZEPPELIN"
   ZOOKEEPER = "ZOOKEEPER"
+  HBASE = "HBASE"
\ No newline at end of file
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
index 985eb37..4f915a4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
@@ -98,7 +98,7 @@ SHARED_ATLAS_HOOK_SECURITY_CONFIGS_FOR_NON_CLIENT_SERVICE = set(
   ]
 )
 
-NON_CLIENT_SERVICES = [SERVICE.HIVE, SERVICE.STORM, SERVICE.FALCON]
+NON_CLIENT_SERVICES = [SERVICE.HIVE, SERVICE.STORM, SERVICE.FALCON, SERVICE.HBASE]
 
 def has_atlas_in_cluster():
   """



[ambari] branch trunk updated: AMBARI-23778 v3 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1249)

2018-05-11 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d15e9e0  AMBARI-23778 v3 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1249)
d15e9e0 is described below

commit d15e9e0964dd7e025be116e361a1c25308192cfc
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Fri May 11 19:17:49 2018 +0200

AMBARI-23778 v3 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1249)
---
 ambari-agent/src/main/python/ambari_agent/Hardware.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index f922043..0742d70 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -40,7 +40,7 @@ class Hardware:
   CHECK_REMOTE_MOUNTS_KEY = 'agent.check.remote.mounts'
   CHECK_REMOTE_MOUNTS_TIMEOUT_KEY = 'agent.check.mounts.timeout'
   CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT = '10'
-  IGNORE_ROOT_MOUNTS = ["proc", "dev", "sys", "boot"]
+  IGNORE_ROOT_MOUNTS = ["proc", "dev", "sys", "boot", "home"]
   IGNORE_DEVICES = ["proc", "tmpfs", "cgroup", "mqueue", "shm"]
   LINUX_PATH_SEP = "/"
 
@@ -174,7 +174,7 @@ class Hardware:
        - mount path or a part of mount path is not in the blacklist
     """
     if mount["device"] not in self.IGNORE_DEVICES and\
-       mount["mountpoint"].split("/")[0] not in self.IGNORE_ROOT_MOUNTS and\
+       mount["mountpoint"].strip()[1:].split("/")[0] not in self.IGNORE_ROOT_MOUNTS and\
        self._chk_writable_mount(mount['mountpoint']) and\
        not path_isfile(mount["mountpoint"]) and\
        not self._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
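
Why the strip()[1:] matters: for an absolute path, the first element of split("/") is the empty string, so "home" could never match IGNORE_ROOT_MOUNTS. A two-line demonstration (the split semantics are the same in Java):

    public class MountSplitDemo {
      public static void main(String[] args) {
        String mountpoint = "/home/data";
        System.out.println(mountpoint.split("/")[0]);              // "" - the old check never saw "home"
        System.out.println(mountpoint.substring(1).split("/")[0]); // "home" - now matches the blacklist
      }
    }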



[ambari] branch trunk updated: [AMBARI-23787] Log rotation for ambari metrics monitor log (#1204)

2018-05-11 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ef1103d  [AMBARI-23787] Log rotation for ambari metrics monitor log (#1204)
ef1103d is described below

commit ef1103dae2782f1e61121cd2d15d715d659d88ec
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Fri May 11 17:30:34 2018 +0200

[AMBARI-23787] Log rotation for ambari metrics monitor log (#1204)

* AMBARI-23787 Log rotation for ambari metrics monitor log

* AMBARI-23787 log and out file config improvement
---
 .../src/main/python/amhm_service.py   |  6 +++---
 .../src/main/python/core/config_reader.py | 16 
 .../src/main/python/main.py   | 19 ++-
 3 files changed, 25 insertions(+), 16 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/amhm_service.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/amhm_service.py
index a23226f..91ec81b 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/amhm_service.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/amhm_service.py
@@ -26,10 +26,10 @@ from ambari_commons.ambari_service import AmbariService
 from ambari_commons.exceptions import FatalException, NonFatalException
 from ambari_commons.logging_utils import print_warning_msg, print_error_msg
 from ambari_commons.os_windows import SvcStatusCallback
-from core.config_reader import SERVER_OUT_FILE, SERVICE_USERNAME_KEY, SERVICE_PASSWORD_KEY, \
+from core.config_reader import SERVICE_USERNAME_KEY, SERVICE_PASSWORD_KEY, \
   SETUP_ACTION, START_ACTION, STOP_ACTION, RESTART_ACTION, STATUS_ACTION
 from core.stop_handler import bind_signal_handlers, StopHandler
-from main import server_process_main
+from main import server_process_main, main_config
 
 
 #
@@ -64,7 +64,7 @@ class AMHostMonitoringService(AmbariService):
     return init_options_parser()
 
   def redirect_output_streams(self):
-    self._RedirectOutputStreamsToFile(SERVER_OUT_FILE)
+    self._RedirectOutputStreamsToFile(main_config.ams_monitor_out_file())
     pass
 
 
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 7cc9fb8..940d11c 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -83,10 +83,6 @@ CONFIG_FILE_PATH = configDefaults.get_config_file_path()
 METRIC_FILE_PATH = configDefaults.get_metric_file_path()
 CA_CERTS_FILE_PATH = configDefaults.get_ca_certs_file_path()
 
-OUT_DIR = os.path.join(os.sep, "var", "log", "ambari-metrics-host-monitoring")
-SERVER_OUT_FILE = OUT_DIR + os.sep + "ambari-metrics-host-monitoring.out"
-SERVER_LOG_FILE = OUT_DIR + os.sep + "ambari-metrics-host-monitoring.log"
-
 PID_DIR = os.path.join(os.sep, "var", "run", "ambari-metrics-host-monitoring")
 PID_OUT_FILE = PID_DIR + os.sep + "ambari-metrics-host-monitoring.pid"
 EXITCODE_OUT_FILE = PID_DIR + os.sep + "ambari-metrics-host-monitoring.exitcode"
@@ -281,6 +277,18 @@ class Configuration:
     hosts = self.get("aggregation", "ams_monitor_log_dir", "/var/log/ambari-metrics-monitor")
     return hosts
 
+  def ams_monitor_log_file(self):
+    """
+    :returns the log file
+    """
+    return self.ams_monitor_log_dir() + os.sep + "ambari-metrics-monitor.log"
+
+  def ams_monitor_out_file(self):
+    """
+    :returns the out file
+    """
+    return self.ams_monitor_log_dir() + os.sep + "ambari-metrics-monitor.out"
+
   def is_set_instanceid(self):
 return "true" == str(self.get("default", "set.instanceId", 
'false')).lower()
 
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
index 53d27f8..cab6a04 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
@@ -19,18 +19,20 @@ limitations under the License.
 '''
 
 import logging
+import logging.handlers
 import os
 import sys
 import signal
 from ambari_commons.os_utils import remove_file
 
 from core.controller import Controller
-from core.config_reader import Configuration, PID_OUT_FILE, SERVER_LOG_FILE, SERVER_OUT_FILE
+from core.config_reader import Configuration, P
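
The hunk is cut off before the handler wiring, but the new logging.handlers import indicates a size-based rotating file handler for the monitor log. The same idea in Java's java.util.logging, with the limits assumed for illustration:

    import java.util.logging.FileHandler;
    import java.util.logging.Logger;
    import java.util.logging.SimpleFormatter;

    public class RotatingLogDemo {
      public static void main(String[] args) throws Exception {
        // Rotate at ~10 MB, keep 5 generations, append across restarts.
        FileHandler handler = new FileHandler("/tmp/ambari-metrics-monitor.log", 10 * 1024 * 1024, 5, true);
        handler.setFormatter(new SimpleFormatter());
        Logger logger = Logger.getLogger("ams-monitor");
        logger.addHandler(handler);
        logger.info("monitor started");
      }
    }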

[ambari] branch trunk updated: [AMBARI-23805] HBase deployment failed in Namenode Federated Environment (#1232)

2018-05-10 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c1552d0  [AMBARI-23805] HBase deployment failed in Namenode Federated Environment (#1232)
c1552d0 is described below

commit c1552d0e159041b6edc8f6e642a1866ad5ccecd1
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Thu May 10 20:43:39 2018 +0200

[AMBARI-23805] HBase deployment failed in Namenode Federated Environment (#1232)
---
 .../libraries/providers/hdfs_resource.py   | 35 +-
 1 file changed, 14 insertions(+), 21 deletions(-)

diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index a298f39..23bfbc5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -19,30 +19,26 @@ limitations under the License.
 Ambari Agent
 
 """
-import re
-import os
+import \
+  ambari_simplejson as json  # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import grp
+import os
 import pwd
+import re
 import time
-from resource_management.core.environment import Environment
+from resource_management.core import shell
+from resource_management.core import sudo
 from resource_management.core.base import Fail
+from resource_management.core.environment import Environment
+from resource_management.core.logger import Logger
+from resource_management.core.providers import Provider
 from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import File
-from resource_management.core.providers import Provider
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from resource_management.core import sudo
-from resource_management.libraries.script import Script
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions import namenode_ha_utils
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-from ambari_commons import subprocess32
-
 JSON_PATH = '/var/lib/ambari-agent/tmp/hdfs_resources_{timestamp}.json'
 JAR_PATH = '/var/lib/ambari-agent/lib/fast-hdfs-resource.jar'
 
@@ -151,15 +147,12 @@ class WebHDFSCallException(Fail):
 
 class WebHDFSUtil:
   def __init__(self, hdfs_site, nameservice, run_user, security_enabled, logoutput=None):
-    https_nn_address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, nameservice, 'dfs.namenode.https-address',
-                                                                          security_enabled, run_user)
-    http_nn_address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, nameservice, 'dfs.namenode.http-address',
-                                                                         security_enabled, run_user)
     self.is_https_enabled = is_https_enabled_in_hdfs(hdfs_site['dfs.http.policy'], hdfs_site['dfs.https.enable'])
-
-    address = https_nn_address if self.is_https_enabled else http_nn_address
+    address_property = 'dfs.namenode.https-address' if self.is_https_enabled else 'dfs.namenode.http-address'
+    address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, nameservice, address_property,
+                                                                 security_enabled, run_user)
     protocol = "https" if self.is_https_enabled else "http"
-
+
     self.address = format("{protocol}://{address}")
     self.run_user = run_user
     self.security_enabled = security_enabled



[ambari] branch trunk updated: Revert "Revert "AMBARI-23778 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1201)" (#1215)" (#1224)

2018-05-09 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fef6634  Revert "Revert "AMBARI-23778 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1201)" (#1215)" (#1224)
fef6634 is described below

commit fef6634106429d1715c2e137cbd5e5ab9b0f8262
Author: Siddharth 
AuthorDate: Wed May 9 12:37:16 2018 -0700

Revert "Revert "AMBARI-23778 Ambari assigns /home for NameNode, DataNode 
and NodeManager directories (#1201)" (#1215)" (#1224)

This reverts commit bc38be0fae82e444152c63d6f466c4e81a3dd64c.
---
 .../src/main/resources/stacks/stack_advisor.py  | 21 +
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index b199f5f..23db162 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2280,7 +2280,7 @@ class DefaultStackAdvisor(StackAdvisor):
 
 return sorted(mounts)
 
-  def getMountPathVariations(self, initial_value, component_name, services, hosts):
+  def getMountPathVariations(self, initial_value, component_name, services, hosts, banned_mounts=[]):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2291,6 +2291,7 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
+:type banned_mounts list
 :rtype list
 """
 available_mounts = []
@@ -2299,6 +2300,8 @@ class DefaultStackAdvisor(StackAdvisor):
   return available_mounts
 
 mounts = self.__getSameHostMounts(hosts)
+for banned in banned_mounts:
+  mounts.remove(banned)
 sep = "/"
 
 if not mounts:
@@ -2312,7 +2315,7 @@ class DefaultStackAdvisor(StackAdvisor):
     # no list transformations after filling the list, because this will cause item order change
     return available_mounts
 
-  def getMountPathVariation(self, initial_value, component_name, services, hosts):
+  def getMountPathVariation(self, initial_value, component_name, services, hosts, banned_mounts=[]):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2323,14 +2326,15 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
+:type banned_mounts list
 :rtype str
 """
 try:
-  return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+  return [self.getMountPathVariations(initial_value, component_name, services, hosts, banned_mounts)[0]]
 except IndexError:
   return []
 
-  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
+  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts, banned_mounts=[]):
 """
 Update properties according to recommendations for available mount-points
 
@@ -2349,6 +2353,7 @@ class DefaultStackAdvisor(StackAdvisor):
 :type configurations dict
 :type services dict
 :type hosts dict
+:type banned_mounts list
 """
 
 props = self.getServicesSiteProperties(services, siteConfig)
@@ -2360,14 +2365,14 @@ class DefaultStackAdvisor(StackAdvisor):
 
   if props is None or name not in props:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
   elif props and name in props and props[name] == default_value:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
 
   if recommendation:
 put_f(name, ",".join(recommendation))
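The effect of the banned_mounts parameter is easiest to see in a small sketch. MountAdvisor and the sample mount list below are illustrative assumptions; only the parameter semantics mirror the diff above:

# Illustrative sketch, not Ambari source: banned mounts are dropped before
# path variations are built, keeping e.g. /home out of recommendations.
class MountAdvisor(object):
  def getSameHostMounts(self, hosts):
    return ["/", "/grid/0", "/home"]  # pretend every host has these mounts

  def getMountPathVariations(self, initial_value, component_name, services, hosts, banned_mounts=[]):
    mounts = self.getSameHostMounts(hosts)
    for banned in banned_mounts:
      if banned in mounts:  # guarded here, unlike the diff, to keep the sketch safe
        mounts.remove(banned)
    return [m.rstrip("/") + initial_value for m in mounts]

advisor = MountAdvisor()
print(advisor.getMountPathVariations("/hadoop/hdfs/data", "DATANODE", {}, {}, banned_mounts=["/home"]))
# ['/hadoop/hdfs/data', '/grid/0/hadoop/hdfs/data']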



[ambari] branch trunk updated: [AMBARI-23798] After adding Namespace, Some HDFS Quicklinks do not wo… (#1220)

2018-05-09 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a83a4df  [AMBARI-23798] After adding Namespace, Some HDFS Quicklinks 
do not wo… (#1220)
a83a4df is described below

commit a83a4df65207cbb3af93c789e81c15937a0feffd
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Wed May 9 20:15:55 2018 +0200

[AMBARI-23798] After adding Namespace, Some HDFS Quicklinks do not wo… 
(#1220)

* AMBARI-23798 - After adding Namespace, Some HDFS Quicklinks do not work 
intermittently

* Update quick_view_link_view.js
---
 ambari-web/app/views/common/quick_view_link_view.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index 816fca3..4a64c20 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -487,8 +487,8 @@ App.QuickLinksView = Em.View.extend({
 var configPropertiesObject = configProperties.findProperty('type', 'hdfs-site');
 if (configPropertiesObject && configPropertiesObject.properties) {
   var properties = configPropertiesObject.properties;
-  var nameServiceId = properties['dfs.nameservices'];
-  var nnProperties = ['dfs.namenode.{0}-address.{1}.nn1', 'dfs.namenode.{0}-address.{1}.nn2'].invoke('format', protocol, nameServiceId);
+  var nnKeyRegex = new RegExp('^dfs\.namenode\.' + protocol + '-address\.');
+  var nnProperties = Object.keys(properties).filter(key => nnKeyRegex.test(key));
   var nnPropertiesLength = nnProperties.length;
   for (var i = nnPropertiesLength; i--;) {
 var propertyName = nnProperties[i];
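The fix replaces a hard-coded nn1/nn2 pair with a prefix match over all hdfs-site keys, so any number of NameNodes across namespaces is picked up. The same matching idea, sketched in Python purely for illustration (the property values are invented):

import re

# Sample hdfs-site properties for a federated cluster; values are made up.
properties = {
    'dfs.namenode.http-address.ns1.nn1': 'nn-host-1:50070',
    'dfs.namenode.http-address.ns1.nn2': 'nn-host-2:50070',
    'dfs.namenode.http-address.ns2.nn3': 'nn-host-3:50070',
    'dfs.namenode.rpc-address.ns1.nn1': 'nn-host-1:8020',
}

protocol = 'http'
nn_key_regex = re.compile(r'^dfs\.namenode\.' + protocol + r'-address\.')
nn_properties = [key for key in properties if nn_key_regex.match(key)]
print(sorted(nn_properties))
# ['dfs.namenode.http-address.ns1.nn1', 'dfs.namenode.http-address.ns1.nn2',
#  'dfs.namenode.http-address.ns2.nn3']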



[ambari] branch trunk updated: Revert "AMBARI-23778 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1201)" (#1215)

2018-05-08 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bc38be0  Revert "AMBARI-23778 Ambari assigns /home for NameNode, 
DataNode and NodeManager directories (#1201)" (#1215)
bc38be0 is described below

commit bc38be0fae82e444152c63d6f466c4e81a3dd64c
Author: Siddharth 
AuthorDate: Tue May 8 15:15:58 2018 -0700

Revert "AMBARI-23778 Ambari assigns /home for NameNode, DataNode and 
NodeManager directories (#1201)" (#1215)

This reverts commit 88e1f324f9c7ddf771e74fb38f7f6605cad42674.
---
 .../src/main/resources/stacks/stack_advisor.py  | 21 -
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 23db162..b199f5f 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2280,7 +2280,7 @@ class DefaultStackAdvisor(StackAdvisor):
 
 return sorted(mounts)
 
-  def getMountPathVariations(self, initial_value, component_name, services, hosts, banned_mounts=[]):
+  def getMountPathVariations(self, initial_value, component_name, services, hosts):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2291,7 +2291,6 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
-:type banned_mounts list
 :rtype list
 """
 available_mounts = []
@@ -2300,8 +2299,6 @@ class DefaultStackAdvisor(StackAdvisor):
   return available_mounts
 
 mounts = self.__getSameHostMounts(hosts)
-for banned in banned_mounts:
-  mounts.remove(banned)
 sep = "/"
 
 if not mounts:
@@ -2315,7 +2312,7 @@ class DefaultStackAdvisor(StackAdvisor):
 # no list transformations after filling the list, because this will cause item order change
 return available_mounts
 
-  def getMountPathVariation(self, initial_value, component_name, services, hosts, banned_mounts=[]):
+  def getMountPathVariation(self, initial_value, component_name, services, hosts):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2326,15 +2323,14 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
-:type banned_mounts list
 :rtype str
 """
 try:
-  return [self.getMountPathVariations(initial_value, component_name, services, hosts, banned_mounts)[0]]
+  return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
 except IndexError:
   return []
 
-  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts, banned_mounts=[]):
+  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
 """
 Update properties according to recommendations for available mount-points
 
@@ -2353,7 +2349,6 @@ class DefaultStackAdvisor(StackAdvisor):
 :type configurations dict
 :type services dict
 :type hosts dict
-:type banned_mounts list
 """
 
 props = self.getServicesSiteProperties(services, siteConfig)
@@ -2365,14 +2360,14 @@ class DefaultStackAdvisor(StackAdvisor):
 
   if props is None or name not in props:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
   elif props and name in props and props[name] == default_value:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
 
   if recommendation:
 put_f(name, ",".join(recommendation))



[ambari] branch trunk updated: [AMBARI-23795] Server Error while Bulk Deleting Hosts (#1214)

2018-05-08 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a1b30c9  [AMBARI-23795] Server Error while Bulk Deleting Hosts (#1214)
a1b30c9 is described below

commit a1b30c9340e084bbebfd9ec9e09b1a1808aa6097
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Tue May 8 21:13:21 2018 +0200

[AMBARI-23795] Server Error while Bulk Deleting Hosts (#1214)
---
 ambari-project/pom.xml | 2 +-
 ambari-server/pom.xml  | 2 +-
 pom.xml| 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -218,7 +218,7 @@
       <dependency>
         <groupId>org.eclipse.persistence</groupId>
         <artifactId>eclipselink</artifactId>
-        <version>2.6.2</version>
+        <version>${eclipselink.version}</version>
       </dependency>
       <dependency>
         <groupId>org.postgresql</groupId>
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index cc82a6b..b52baf4 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -497,7 +497,7 @@
       <dependency>
         <groupId>org.eclipse.persistence</groupId>
         <artifactId>eclipselink</artifactId>
-        <version>2.6.2</version>
+        <version>${eclipselink.version}</version>
       </dependency>
 
   
diff --git a/pom.xml b/pom.xml
index b2469c4..1da7be0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -61,6 +61,7 @@
 Apache Release Distribution Repository
 https://repository.apache.org/service/local/staging/deploy/maven2
 package
+    <eclipselink.version>2.6.2</eclipselink.version>
   
   
 



[ambari] branch trunk updated: AMBARI-23778 Ambari assigns /home for NameNode, DataNode and NodeManager directories (#1201)

2018-05-08 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 88e1f32  AMBARI-23778 Ambari assigns /home for NameNode, DataNode and 
NodeManager directories (#1201)
88e1f32 is described below

commit 88e1f324f9c7ddf771e74fb38f7f6605cad42674
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Tue May 8 17:38:33 2018 +0200

AMBARI-23778 Ambari assigns /home for NameNode, DataNode and NodeManager 
directories (#1201)
---
 .../src/main/resources/stacks/stack_advisor.py  | 21 +
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index b199f5f..23db162 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2280,7 +2280,7 @@ class DefaultStackAdvisor(StackAdvisor):
 
 return sorted(mounts)
 
-  def getMountPathVariations(self, initial_value, component_name, services, hosts):
+  def getMountPathVariations(self, initial_value, component_name, services, hosts, banned_mounts=[]):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2291,6 +2291,7 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
+:type banned_mounts list
 :rtype list
 """
 available_mounts = []
@@ -2299,6 +2300,8 @@ class DefaultStackAdvisor(StackAdvisor):
   return available_mounts
 
 mounts = self.__getSameHostMounts(hosts)
+for banned in banned_mounts:
+  mounts.remove(banned)
 sep = "/"
 
 if not mounts:
@@ -2312,7 +2315,7 @@ class DefaultStackAdvisor(StackAdvisor):
 # no list transformations after filling the list, because this will cause item order change
 return available_mounts
 
-  def getMountPathVariation(self, initial_value, component_name, services, hosts):
+  def getMountPathVariation(self, initial_value, component_name, services, hosts, banned_mounts=[]):
 """
 Recommends best fitted mount by prefixing path with it.
 
@@ -2323,14 +2326,15 @@ class DefaultStackAdvisor(StackAdvisor):
 :type component_name str
 :type services dict
 :type hosts dict
+:type banned_mounts list
 :rtype str
 """
 try:
-  return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+  return [self.getMountPathVariations(initial_value, component_name, services, hosts, banned_mounts)[0]]
 except IndexError:
   return []
 
-  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts):
+  def updateMountProperties(self, siteConfig, propertyDefinitions, configurations,  services, hosts, banned_mounts=[]):
 """
 Update properties according to recommendations for available mount-points
 
@@ -2349,6 +2353,7 @@ class DefaultStackAdvisor(StackAdvisor):
 :type configurations dict
 :type services dict
 :type hosts dict
+:type banned_mounts list
 """
 
 props = self.getServicesSiteProperties(services, siteConfig)
@@ -2360,14 +2365,14 @@ class DefaultStackAdvisor(StackAdvisor):
 
   if props is None or name not in props:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
   elif props and name in props and props[name] == default_value:
 if rc_type == "multi":
-  recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariations(default_value, component, services, hosts, banned_mounts)
 else:
-  recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+  recommendation = self.getMountPathVariation(default_value, component, services, hosts, banned_mounts)
 
   if recommendation:
 put_f(name, ",".join(recommendation))



[ambari] branch trunk updated: [AMBARI-23727] Predicate evaluation does not work as expected for RequestResourceFilters. (swagle) (#1139)

2018-04-30 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 154b361  [AMBARI-23727] Predicate evaluation does not work as expected 
for RequestResourceFilters. (swagle) (#1139)
154b361 is described below

commit 154b3610f75013350990ff4543900a1a19977854
Author: Siddharth 
AuthorDate: Mon Apr 30 12:18:18 2018 -0700

[AMBARI-23727] Predicate evaluation does not work as expected for 
RequestResourceFilters. (swagle) (#1139)
---
 .../internal/RequestResourceProvider.java  | 73 ++
 .../internal/RequestResourceProviderTest.java  | 19 +-
 2 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index c4ef23b..48a9432 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -43,17 +43,18 @@ import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.ExecuteActionRequest;
 import org.apache.ambari.server.controller.RequestRequest;
 import org.apache.ambari.server.controller.RequestStatusResponse;
+import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
 import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.QueryResponse;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.customactions.ActionDefinition;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
@@ -536,52 +537,46 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
 throw new SystemException(msg, e);
   }
 
-  ResourceProvider resourceProvider = getResourceProvider(Resource.Type.HostComponent);
-
   Set<String> propertyIds = new HashSet<>();
   propertyIds.add(CLUSTER_NAME);
   propertyIds.add(SERVICE_NAME);
   propertyIds.add(COMPONENT_NAME);
 
   Request request = PropertyHelper.getReadRequest(propertyIds);
-
-  Predicate finalPredicate = new PredicateBuilder(filterPredicate)
-.property(CLUSTER_NAME).equals(clusterName).and()
-.property(SERVICE_NAME).equals(serviceName).and()
-.property(COMPONENT_NAME).equals(componentName)
-.toPredicate();
-
+  
   try {
-Set<Resource> resources = resourceProvider.getResources(request, finalPredicate);
-
-if (resources != null && !resources.isEmpty()) {
-  // Allow request to span services / components using just the predicate
-  Map<ServiceComponentTuple, List<String>> dupleListMap = new HashMap<>();
-  for (Resource resource : resources) {
-String hostnameStr = (String) resource.getPropertyValue(HOST_NAME);
-if (hostnameStr != null) {
-  String computedServiceName = (String) resource.getPropertyValue(SERVICE_NAME);
-  String computedComponentName = (String) resource.getPropertyValue(COMPONENT_NAME);
-  ServiceComponentTuple duple =
-new ServiceComponentTuple(computedServiceName, computedComponentName);
-
-  if (!dupleListMap.containsKey(duple)) {
-hostList = new ArrayList<>();
-hostList.add(hostnameStr);
-dupleListMap.put(duple, hostList);
-  } else {
-dupleListMap.get(duple).add(hostnameStr);
-  }
+ClusterController clusterController = ClusterControllerHelper.getClusterController();
+QueryResponse queryResponse = clusterController.getResources(
+  Resource.Type.HostComponent, request, filterPredicate);
+Iterable<Resource> resourceIterable = clusterController.getIterable(
+  Resource.Type.HostComponent, queryResponse, request,
+

[ambari] branch trunk updated: [AMBARI-23703] While adding HDFS Namespace from UI, Timeline service fails to start (#1108)

2018-04-26 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f269232  [AMBARI-23703] While adding HDFS Namespace from UI, Timeline 
service fails to start (#1108)
f269232 is described below

commit f269232151bccfe4a25db879a83cb3a8533fc5f1
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Thu Apr 26 20:12:24 2018 +0200

[AMBARI-23703] While adding HDFS Namespace from UI, Timeline service fails 
to start (#1108)
---
 .../main/python/resource_management/libraries/providers/hdfs_resource.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index acd469f..a298f39 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -64,6 +64,7 @@ RESOURCE_TO_JSON_FIELDS = {
 EXCEPTIONS_TO_RETRY = {
   # "ExceptionName": (try_count, try_sleep_seconds)
   "LeaseExpiredException": (20, 6),
+  "RetriableException": (20, 6),
 }
 
 class HdfsResourceJar:
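EXCEPTIONS_TO_RETRY maps an exception name appearing in command output to a (try_count, try_sleep_seconds) pair, so a NameNode that momentarily answers with RetriableException during namespace setup no longer fails the start. A minimal sketch of how such a table can drive a retry loop; the run_command stub and control flow below are assumptions, not the Ambari implementation:

import time

EXCEPTIONS_TO_RETRY = {
  # "ExceptionName": (try_count, try_sleep_seconds)
  "LeaseExpiredException": (20, 6),
  "RetriableException": (20, 6),
}

def run_with_retries(run_command):
  """Retry run_command while its failure output names a retriable exception."""
  attempts_left = dict((name, cfg[0]) for name, cfg in EXCEPTIONS_TO_RETRY.items())
  while True:
    ok, output = run_command()
    if ok:
      return output
    matched = next((n for n in EXCEPTIONS_TO_RETRY if n in output), None)
    if matched is None or attempts_left[matched] == 0:
      raise RuntimeError("command failed: " + output)
    attempts_left[matched] -= 1
    time.sleep(EXCEPTIONS_TO_RETRY[matched][1])

# Usage with a stub that fails once with a retriable error, then succeeds:
state = {"calls": 0}
def flaky_mkdir():
  state["calls"] += 1
  if state["calls"] == 1:
    return False, "org.apache.hadoop.ipc.RetriableException: NameNode still starting"
  return True, "done"

print(run_with_retries(flaky_mkdir))  # prints "done" after one 6-second sleep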



[ambari] branch trunk updated: [AMBARI-23691] Fix CVE security issues in AMS dependencies. (#1097)

2018-04-25 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5cc76a8  [AMBARI-23691] Fix CVE security issues in AMS dependencies. 
(#1097)
5cc76a8 is described below

commit 5cc76a8604c6ef8edb1c2c5f42b139d65913a0e7
Author: avijayanhwx 
AuthorDate: Wed Apr 25 22:35:03 2018 -0700

[AMBARI-23691] Fix CVE security issues in AMS dependencies. (#1097)
---
 ambari-metrics/ambari-metrics-common/pom.xml  | 2 +-
 ambari-metrics/ambari-metrics-hadoop-sink/pom.xml | 5 ++---
 ambari-metrics/ambari-metrics-host-aggregator/pom.xml | 2 +-
 ambari-metrics/ambari-metrics-kafka-sink/pom.xml  | 2 +-
 ambari-metrics/ambari-metrics-timelineservice/pom.xml | 9 +
 5 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-common/pom.xml b/ambari-metrics/ambari-metrics-common/pom.xml
index 872e2b4..99b4331 100644
--- a/ambari-metrics/ambari-metrics-common/pom.xml
+++ b/ambari-metrics/ambari-metrics-common/pom.xml
@@ -155,7 +155,7 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-framework</artifactId>
-      <version>2.12.0</version>
+      <version>4.0.0</version>
     </dependency>
     <dependency>
       <groupId>org.codehaus.jackson</groupId>
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
index 0b291cb..97d573a0 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
@@ -31,7 +31,6 @@ limitations under the License.
   jar
   
 ${project.artifactId}-with-common-${project.version}.jar
-    <hadoopVersion>3.0.0-beta1</hadoopVersion>
   
 
 
@@ -142,7 +141,7 @@ limitations under the License.
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <version>${hadoopVersion}</version>
+      <version>3.0.0</version>
       <scope>compile</scope>
     </dependency>
 
@@ -171,7 +170,7 @@ limitations under the License.
     <dependency>
       <groupId>commons-configuration</groupId>
      <artifactId>commons-configuration</artifactId>
-      <version>1.6</version>
+      <version>1.10</version>
       <scope>compile</scope>
     </dependency>
 
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
index d126be5..41081d0 100644
--- a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
+++ b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
@@ -75,7 +75,7 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
-      <version>2.7.1.2.3.4.0-3347</version>
+      <version>3.0.0</version>
     </dependency>
     <dependency>
       <groupId>com.sun.jersey.jersey-test-framework</groupId>
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/pom.xml b/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
index 91f8fe7..46afed3 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
@@ -144,7 +144,7 @@ limitations under the License.
     <dependency>
       <groupId>org.apache.kafka</groupId>
       <artifactId>kafka_2.10</artifactId>
-      <version>0.10.1.0</version>
+      <version>0.10.2.1</version>
   
 
   com.sun.jdmk
diff --git a/ambari-metrics/ambari-metrics-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
index 98744a1..fcb8186 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/pom.xml
+++ b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
@@ -311,6 +311,10 @@
         <artifactId>zkclient</artifactId>
         <groupId>com.101tec</groupId>
       </exclusion>
+      <exclusion>
+        <artifactId>zookeeper</artifactId>
+        <groupId>org.apache.zookeeper</groupId>
+      </exclusion>
     </exclusions>
 
 
@@ -320,6 +324,11 @@
       <version>0.9</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.5.1.3.0.0-107</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
       <version>${phoenix.version}</version>



[ambari] branch trunk updated: [AMBARI-23515] : Rearranging configuration file creation for Ranger Plu… (#1038)

2018-04-19 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 391de63  [AMBARI-23515] : Rearranging configuration file creation for 
Ranger Plu… (#1038)
391de63 is described below

commit 391de63db4e9e35f857bee77de0a17d4a7c3d898
Author: vishalsuvagia 
AuthorDate: Fri Apr 20 12:22:38 2018 +0530

[AMBARI-23515] : Rearranging configuration file creation for Ranger Plu… 
(#1038)

* AMBARI-23515 : Rearranging configuration file creation for Ranger Plugins.

* AMBARI-23515 : Rearranging configuration file creation for Ranger Plugins.
---
 .../libraries/functions/setup_ranger_plugin_xml.py  | 16 
 .../0.8.1/package/scripts/setup_ranger_kafka.py | 10 +-
 .../0.5.0.2.2/package/scripts/setup_ranger_knox.py  | 10 +-
 .../0.9.1/package/scripts/setup_ranger_storm.py | 21 -
 4 files changed, 26 insertions(+), 31 deletions(-)

diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index e7289a2..d3d12f4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -45,7 +45,7 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
 cache_service_list, plugin_audit_properties, plugin_audit_attributes,
 plugin_security_properties, plugin_security_attributes,
 plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
-component_list, audit_db_is_enabled, credential_file, 
+component_list, audit_db_is_enabled, credential_file,
 xa_audit_db_password, ssl_truststore_password,
 ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True,
 is_security_enabled = False, is_stack_supports_ranger_kerberos = False,
@@ -111,7 +111,7 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
   policy_user)
 
 current_datetime = datetime.now()
-
+
 File(format('{component_conf_dir}/ranger-security.xml'),
   owner = component_user,
   group = component_group,
@@ -174,7 +174,7 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
 configuration_attributes=plugin_policymgr_ssl_attributes,
 owner = component_user,
 group = component_group,
-mode=0744) 
+mode=0744)
 else:
   XmlConfig("ranger-policymgr-ssl.xml",
 conf_dir=component_conf_dir,
@@ -182,7 +182,7 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
 configuration_attributes=plugin_policymgr_ssl_attributes,
 owner = component_user,
 group = component_group,
-mode=0744) 
+mode=0744)
 
 # creating symblink should be done by rpm package
 # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
@@ -193,8 +193,8 @@ def setup_ranger_plugin(component_select_name, 
service_name, previous_jdbc_jar,
 
   else:
 File(format('{component_conf_dir}/ranger-security.xml'),
-  action="delete"  
-)
+  action="delete"
+)
 
 def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
 
@@ -240,8 +240,8 @@ def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_versio
 mode = 0640
   )
 
-def setup_core_site_for_required_plugins(component_user, component_group, create_core_site_path, configurations = {}, configuration_attributes = {}):
-  XmlConfig('core-site.xml',
+def setup_configuration_file_for_required_plugins(component_user, component_group, create_core_site_path, configurations = {}, configuration_attributes = {}, file_name='core-site.xml'):
+  XmlConfig(file_name,
 conf_dir = create_core_site_path,
 configurations = configurations,
 configuration_attributes = configuration_attributes,
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
index 16eff94..bf1fb28 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
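The email is truncated here, but the gist of the rename is that a helper formerly hard-wired to core-site.xml now takes the target file name as a parameter. A hedged sketch of the resulting call pattern; the print-based stand-in and both call sites are illustrative assumptions:

# Illustrative stand-in for the generalized helper; the real function renders
# an XmlConfig resource into create_core_site_path rather than printing.
def setup_configuration_file_for_required_plugins(component_user, component_group,
                                                  create_core_site_path, configurations={},
                                                  configuration_attributes={},
                                                  file_name='core-site.xml'):
  print("would render %s into %s (owner=%s:%s, %d properties)" % (
      file_name, create_core_site_path, component_user, component_group, len(configurations)))

# Hypothetical call sites: one helper now covers core-site.xml and other plugin files.
setup_configuration_file_for_required_plugins('kafka', 'hadoop', '/etc/kafka/conf',
                                              {'hadoop.security.authentication': 'kerberos'})
setup_configuration_file_for_required_plugins('kafka', 'hadoop', '/etc/kafka/conf',
                                              {'xasecure.audit.destination.hdfs': 'true'},
                                              file_name='ranger-kafka-audit.xml')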

[ambari] branch trunk updated: [AMBARI-23510] FluentPropertyBeanIntrospector from CLI operation log output, when running hdfs commands (#933)

2018-04-18 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a105455  [AMBARI-23510] FluentPropertyBeanIntrospector from CLI 
operation log output, when running hdfs commands (#933)
a105455 is described below

commit a105455bef20098f6d6e0eb5a7ee84ed4ce1d3a8
Author: Bharat Viswanadham 
AuthorDate: Wed Apr 18 14:04:35 2018 -0700

[AMBARI-23510] FluentPropertyBeanIntrospector from CLI operation log 
output, when running hdfs commands (#933)
---
 .../common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml| 3 +++
 1 file changed, 3 insertions(+)

diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
index b1db232..41d7b31 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
@@ -236,6 +236,9 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
 # Removes "deprecated" messages
 log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+# Adding logging for 3rd party library
+log4j.logger.org.apache.commons.beanutils=WARN
 
 
   content



[ambari] branch trunk updated: AMBARI-23529 : Utility function to get namenode logical hostname and namespace for a given hostname. (#954)

2018-04-12 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 70c42f5  AMBARI-23529 : Utility function to get namenode logical 
hostname and namespace for a given hostname. (#954)
70c42f5 is described below

commit 70c42f560032bf9f7f03c3ee74194c4b1f99a5c5
Author: vishalsuvagia 
AuthorDate: Fri Apr 13 04:14:58 2018 +0530

AMBARI-23529 : Utility function to get namenode logical hostname and 
namespace for a given hostname. (#954)
---
 .../libraries/functions/namenode_ha_utils.py   | 28 ++
 1 file changed, 28 insertions(+)

diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index 0d2cd3f..c68d713 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -383,3 +383,31 @@ def get_name_service_by_hostname(hdfs_site, host_name):
   nn_rpc = nn_rpc_port.split(':')[0]
   if nn_rpc == host_name:
 return ns
+
+def get_namespace_mapping_for_hostname(hostname, hdfs_site, security_enabled, run_user):
+  namenode_address_map_list = get_namenode_states(hdfs_site, security_enabled, run_user)
+  namenode_logical_name = None
+  for each_namenode_address_map in namenode_address_map_list:
+if len(each_namenode_address_map) != 0:
+  for namenode_hostname_logical_map_tuple in each_namenode_address_map:
+namenode_hostname = namenode_hostname_logical_map_tuple[1].split(":")[0]
+if hostname == namenode_hostname:
+  namenode_logical_name = namenode_hostname_logical_map_tuple[0]
+  break
+if namenode_logical_name is not None:
+  break
+
+
+  namenode_nameservices = get_nameservices(hdfs_site)
+  namespace_nameservice = None
+  if namenode_nameservices and len(namenode_nameservices) > 0:
+for name_service in namenode_nameservices:
+  namenode_logical_names_list = hdfs_site.get('dfs.ha.namenodes.' + str(name_service), None)
+  if namenode_logical_names_list and ',' in namenode_logical_names_list:
+for each_namenode_logical_name in namenode_logical_names_list.split(','):
+  if namenode_logical_name == each_namenode_logical_name:
+namespace_nameservice = name_service
+break
+  if namespace_nameservice is not None:
+break
+  return namespace_nameservice,namenode_logical_name
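A hedged usage sketch of the new utility; the import path follows the file touched above, the hostname and expected output are invented, and a real call needs reachable NameNodes because get_namenode_states consults JMX (with an "hdfs haadmin" fallback):

# Hypothetical invocation inside a service script; hdfs_site would normally
# come from the cluster configurations rather than a literal dict.
from resource_management.libraries.functions.namenode_ha_utils import \
    get_namespace_mapping_for_hostname

hdfs_site = {
    'dfs.nameservices': 'ns1,ns2',
    'dfs.ha.namenodes.ns2': 'nn3,nn4',
    # ...plus the rpc/http address keys the helper inspects
}
namespace, nn_logical_name = get_namespace_mapping_for_hostname(
    'nn-host-3.example.com', hdfs_site, security_enabled=False, run_user='hdfs')
print(namespace, nn_logical_name)  # e.g. ('ns2', 'nn3') on a federated cluster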



[ambari] branch trunk updated: AMBARI-23142 - ADDENDUM Add AMS Metrics publisher to Infra Solr (#969)

2018-04-12 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ad6c7b0  AMBARI-23142 - ADDENDUM Add AMS Metrics publisher to Infra 
Solr (#969)
ad6c7b0 is described below

commit ad6c7b0ee632d8d5d78150aaea8bb738065aba12
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Thu Apr 12 18:03:45 2018 +0200

AMBARI-23142 - ADDENDUM Add AMS Metrics publisher to Infra Solr (#969)
---
 .../ambari-metrics/datasource.js   | 158 ---
 ...es.json => grafana-infra-solr-collections.json} | 505 ++---
 .../HDP/grafana-infra-solr-cores.json  | 250 +-
 .../HDP/grafana-infra-solr-hosts.json  | 335 +-
 4 files changed, 705 insertions(+), 543 deletions(-)

diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
index 4a6a77c..65b834b 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
@@ -343,18 +343,31 @@ define([
   };
 
 // Infra Solr Calls
-var getSolrCoreData = function(target) {
-var instanceId = typeof target.templatedCluster == 'undefined'  ? '' : '&instanceId=' + target.templatedCluster;
-var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision='
-+ target.precision;
-var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
-var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
-var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
-return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sCoreMetric + metricTransform + instanceId
-+ metricAggregator + '&appId=ambari-infra-solr&startTime=' + from + '&endTime=' + to + precision + seriesAggregator }).then(
-allHostMetricsData(target)
-);
-};
+  var getSolrCoreData = function(target) {
+  var instanceId = typeof target.templatedCluster == 'undefined'  ? '' : '&instanceId=' + target.templatedCluster;
+  var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision='
+  + target.precision;
+  var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
+  var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
+  var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
+  return self.doAmbariRequest({ url: '/ws/v1/timeline/metrics?metricNames=' + target.sCoreMetric + metricTransform + instanceId
+  + metricAggregator + '&appId=ambari-infra-solr&startTime=' + from + '&endTime=' + to + precision + seriesAggregator }).then(
+  allHostMetricsData(target)
+  );
+  };
+
+  var getSolrCollectionData = function(target) {
+  var instanceId = typeof target.templatedCluster == 'undefined'  ? '' : '&instanceId=' + target.templatedCluster;
+  var precision = target.precision === 'default' || typeof target.precision == 'undefined'  ? '' : '&precision='
+  + target.precision;
+  var metricAggregator = target.aggregator === "none" ? '' : '._' + target.aggregator;
+  var metricTransform = !target.transform || target.transform === "none" ? '' : '._' + target.transform;
+  var seriesAggregator = !target.seriesAggregator || target.seriesAggregator === "none" ? '' : '&seriesAggregateFunction=' + target.seriesAggregator;
+  return self.doAmbariRequest({ url: '/ws/v1/timeli

[ambari] branch trunk updated: [AMBARI-23494] Incorrect servicerpc address configs being generated by NN Federation wizard. Second patch. (akovalenko) (#961)

2018-04-10 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 85e7122  [AMBARI-23494] Incorrect servicerpc address configs being 
generated by NN Federation wizard. Second patch. (akovalenko) (#961)
85e7122 is described below

commit 85e71227e5fa6043a0477c7a96998a21e7355860
Author: Aleksandr Kovalenko 
AuthorDate: Tue Apr 10 23:42:23 2018 +0300

[AMBARI-23494] Incorrect servicerpc address configs being generated by NN 
Federation wizard. Second patch. (akovalenko) (#961)
---
 .../main/admin/federation/step3_controller.js  | 32 --
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/ambari-web/app/controllers/main/admin/federation/step3_controller.js b/ambari-web/app/controllers/main/admin/federation/step3_controller.js
index 1f05844..748c19f 100644
--- a/ambari-web/app/controllers/main/admin/federation/step3_controller.js
+++ b/ambari-web/app/controllers/main/admin/federation/step3_controller.js
@@ -85,9 +85,10 @@ App.NameNodeFederationWizardStep3Controller = Em.Controller.extend(App.Blueprint
 
   onLoad: function () {
 if (this.get('isConfigsLoaded') && App.router.get('clusterController.isHDFSNameSpacesLoaded')) {
-  this.tweakServiceConfigs(this.get('federationConfig.configs'));
+  var federationConfig = this.get('federationConfig');
+  federationConfig.configs = this.tweakServiceConfigs(federationConfig.configs);
   this.removeConfigs(this.get('configsToRemove'), this.get('serverConfigData'));
-  this.renderServiceConfigs(this.get('federationConfig'));
+  this.renderServiceConfigs(federationConfig);
   this.set('isLoaded', true);
 }
   }.observes('isConfigsLoaded', 'App.router.clusterController.isHDFSNameSpacesLoaded'),
@@ -137,16 +138,31 @@ App.NameNodeFederationWizardStep3Controller = Em.Controller.extend(App.Blueprint
 
   tweakServiceConfigs: function (configs) {
 var dependencies = this.prepareDependencies();
+var result = [];
+var configsToRemove = [];
+var hdfsSiteConfigs = this.get('serverConfigData').items.findProperty('type', 'hdfs-site').properties;
+
+if (!hdfsSiteConfigs['dfs.namenode.servicerpc-address.' + dependencies.nameservice1 + '.nn1'] && !hdfsSiteConfigs['dfs.namenode.servicerpc-address.' + dependencies.nameservice1 + '.nn2']) {
+  configsToRemove = configsToRemove.concat([
+'dfs.namenode.servicerpc-address.{{nameservice1}}.nn1',
+'dfs.namenode.servicerpc-address.{{nameservice1}}.nn2',
+'dfs.namenode.servicerpc-address.{{nameservice2}}.nn3',
+'dfs.namenode.servicerpc-address.{{nameservice2}}.nn4'
+  ]);
+}
 
 configs.forEach(function (config) {
-  config.isOverridable = false;
-  config.name = this.replaceDependencies(config.name, dependencies);
-  config.displayName = this.replaceDependencies(config.displayName, dependencies);
-  config.value = this.replaceDependencies(config.value, dependencies);
-  config.recommendedValue = this.replaceDependencies(config.recommendedValue, dependencies);
+  if (!configsToRemove.contains(config.name)) {
+config.isOverridable = false;
+config.name = this.replaceDependencies(config.name, dependencies);
+config.displayName = this.replaceDependencies(config.displayName, dependencies);
+config.value = this.replaceDependencies(config.value, dependencies);
+config.recommendedValue = this.replaceDependencies(config.recommendedValue, dependencies);
+result.push(config);
+  }
 }, this);
 
-return configs;
+return result;
   },
 
   replaceDependencies: function (value, dependencies) {



[ambari] branch trunk updated: [AMBARI-23370] Download client configs fails due to clusterLevelParam m… (#914)

2018-04-09 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e3e5621  [AMBARI-23370] Download client configs fails due to 
clusterLevelParam m… (#914)
e3e5621 is described below

commit e3e56219bb5d79e6b59e436f8e623d54037e45b8
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Mon Apr 9 20:29:13 2018 +0200

[AMBARI-23370] Download client configs fails due to clusterLevelParam m… 
(#914)

* AMBARI-23370 Download client configs fails due to clusterLevelParam 
missing

* Removed agentCache param
---
 .../server/controller/KerberosHelperImpl.java  |  10 +-
 .../internal/ClientConfigResourceProvider.java |  75 ++---
 .../topology/ClusterConfigurationRequest.java  |   5 +-
 .../org/apache/ambari/server/utils/StageUtils.java |  61 ++-
 .../stacks/HDP/2.6/services/YARN/kerberos.json |   2 +-
 .../controller/AmbariManagementControllerTest.java |   2 +-
 .../internal/ClientConfigResourceProviderTest.java | 184 +++--
 .../apache/ambari/server/utils/StageUtilsTest.java |  24 +--
 8 files changed, 106 insertions(+), 257 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d2323c4..3f0ea84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -588,8 +588,6 @@ public class KerberosHelperImpl implements KerberosHelper {
   configurations.put("clusterHostInfo", clusterHostInfoMap);
 }
 
-Map<String, String> componentToClusterInfoMap = StageUtils.getComponentToClusterInfoKeyMap();
-
 // Iterate through the recommendations to find the recommended host assignments
 for (RecommendationResponse.HostGroup hostGroup : hostGroups) {
   Set> components = hostGroup.getComponents();
@@ -607,13 +605,7 @@ public class KerberosHelperImpl implements KerberosHelper {
   // If the component filter is null or the current component is found in the filter,
   // include it in the map
   if ((componentFilter == null) || componentFilter.contains(componentName)) {
-String key = componentToClusterInfoMap.get(componentName);
-
-if (StringUtils.isEmpty(key)) {
-  // If not found in the componentToClusterInfoMap, then keys are assumed to be
-  // in the form of _hosts (lowercase)
-  key = componentName.toLowerCase() + "_hosts";
-}
+String key = StageUtils.getClusterHostInfoKey(componentName);
 
 Set<String> fqdns = new TreeSet<>();
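Both KerberosHelperImpl and ClientConfigResourceProvider previously carried this lookup-table-plus-fallback logic; the patch centralizes it as StageUtils.getClusterHostInfoKey. The derivation, sketched in Python for brevity (the override map below is a made-up sample, not the real table):

# Rough sketch of the key derivation centralized in StageUtils.getClusterHostInfoKey.
COMPONENT_TO_CLUSTER_INFO_KEY = {
    "AMBARI_SERVER": "ambari_server_host",  # hypothetical override entry
}

def get_cluster_host_info_key(component_name):
    key = COMPONENT_TO_CLUSTER_INFO_KEY.get(component_name)
    # Fallback convention: lowercase component name plus "_hosts".
    return key if key else component_name.lower() + "_hosts"

print(get_cluster_host_info_key("NAMENODE"))       # namenode_hosts
print(get_cluster_host_info_key("AMBARI_SERVER"))  # ambari_server_host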
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index fda3817..0dfb5dc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -18,22 +18,7 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GPL_LICENSE_ACCEPTED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.K

[ambari] branch trunk updated: [AMBARI-23497] NN Federation Wizard state not restored after closing session (akovalenko) (#919)

2018-04-06 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e9bb069  [AMBARI-23497] NN Federation Wizard state not restored after 
closing session (akovalenko) (#919)
e9bb069 is described below

commit e9bb069e29b9c6f3ccc59a3c4526fbc7c0c11634
Author: Aleksandr Kovalenko 
AuthorDate: Fri Apr 6 22:24:49 2018 +0300

[AMBARI-23497] NN Federation Wizard state not restored after closing 
session (akovalenko) (#919)

Merging this critical changes since all tests have passed.
---
 ambari-web/app/data/controller_route.js | 4 
 1 file changed, 4 insertions(+)

diff --git a/ambari-web/app/data/controller_route.js b/ambari-web/app/data/controller_route.js
index 13d0380..eb3b1ff 100644
--- a/ambari-web/app/data/controller_route.js
+++ b/ambari-web/app/data/controller_route.js
@@ -80,5 +80,9 @@ module.exports = [
   {
 wizardControllerName: App.router.get('widgetEditController.name'),
 route: 'main.editWidget'
+  },
+  {
+wizardControllerName: App.router.get('nameNodeFederationWizardController.name'),
+route: 'main.services.enableNameNodeFederation'
   }
 ];



[ambari] branch trunk updated: [AMBARI-23502] Format Namenode step in NN Fed wizard should pass in HDFS ClusterId as argument. (aonishuk) (#924)

2018-04-06 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 70d0c08  [AMBARI-23502] Format Namenode step in NN Fed wizard should 
pass in HDFS ClusterId as argument. (aonishuk) (#924)
70d0c08 is described below

commit 70d0c088a4f0dbe232b926e9a48393d2a9cf059d
Author: aonishuk 
AuthorDate: Fri Apr 6 22:15:38 2018 +0300

[AMBARI-23502] Format Namenode step in NN Fed wizard should pass in HDFS 
ClusterId as argument. (aonishuk) (#924)
---
 .../libraries/functions/namenode_ha_utils.py   | 65 +++---
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py |  5 +-
 2 files changed, 48 insertions(+), 22 deletions(-)

diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index ae1a681..4d51e69 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -40,7 +40,9 @@ NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
 NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
 NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
 NAMENODE_RPC_NON_HA = 'dfs.namenode.rpc-address'
-JMX_URI_FRAGMENT = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem"
+JMX_URI_DEFAULT = "{0}://{1}/jmx?qry={{0}}"
+JMX_BEAN_FS = "Hadoop:service=NameNode,name=FSNamesystem"
+JMX_BEAN_NN_INFO = "Hadoop:service=NameNode,name=NameNodeInfo"
 INADDR_ANY = '0.0.0.0'
 
 class NoActiveNamenodeException(Fail):
@@ -93,6 +95,30 @@ def _get_namenode_states_noretries_single_ns(hdfs_site, name_service, security_e
   standby_namenodes = []
   unknown_namenodes = []
 
+  for nn_unique_id, address, jmx_uri in all_jmx_namenode_addresses(hdfs_site, name_service):
+is_https_enabled = is_https_enabled_in_hdfs(hdfs_site['dfs.http.policy'], hdfs_site['dfs.https.enable'])
+jmx_uri = jmx_uri.format(JMX_BEAN_FS)
+state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
+# If JMX parsing failed
+if not state:
+  check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(name_service, nn_unique_id)
+  code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
+  if code == 0 and out:
+if HDFS_NN_STATE_STANDBY in out:
+  state = HDFS_NN_STATE_STANDBY
+elif HDFS_NN_STATE_ACTIVE in out:
+  state = HDFS_NN_STATE_ACTIVE
+
+if state == HDFS_NN_STATE_ACTIVE:
+  active_namenodes.append((nn_unique_id, address))
+elif state == HDFS_NN_STATE_STANDBY:
+  standby_namenodes.append((nn_unique_id, address))
+else:
+  unknown_namenodes.append((nn_unique_id, address))
+
+  return active_namenodes, standby_namenodes, unknown_namenodes
+
+def all_jmx_namenode_addresses(hdfs_site, name_service):
   nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
 
   # now we have something like 'nn1,nn2,nn3,nn4'
@@ -119,28 +145,25 @@ def _get_namenode_states_noretries_single_ns(hdfs_site, name_service, security_e
   rpc_host = rpc_value.split(":")[0]
   value = value.replace(INADDR_ANY, rpc_host)
 
-  jmx_uri = JMX_URI_FRAGMENT.format(protocol, value)
-
-  state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
-  # If JMX parsing failed
-  if not state:
-check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(name_service, nn_unique_id)
-code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
-if code == 0 and out:
-  if HDFS_NN_STATE_STANDBY in out:
-state = HDFS_NN_STATE_STANDBY
-  elif HDFS_NN_STATE_ACTIVE in out:
-state = HDFS_NN_STATE_ACTIVE
-
-  if state == HDFS_NN_STATE_ACTIVE:
-active_namenodes.append((nn_unique_id, value))
-  elif state == HDFS_NN_STATE_STANDBY:
-standby_namenodes.append((nn_unique_id, value))
-  else:
-unknown_namenodes.append((nn_unique_id, value))
+jmx_uri = JMX_URI_DEFAULT.format(protocol, value)
 
-  return active_namenodes, standby_namenodes, unknown_namenodes
+yield nn_unique_id, value, jmx_uri
+
+
+def get_hdfs_cluster_id_from_jmx(hdfs_site, security_enabled, run_user):
+  name_services = get_nameservices(hdfs_site)
+  for name_service in name_services:
+for nn_unique_id, address, jmx_uri in all_jmx_namenode_addresses(hdfs_site, name_service):
+  jmx_uri = 
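The message is cut off above, but the new JMX_URI_DEFAULT template plus the NameNodeInfo bean is what lets the wizard fetch the HDFS ClusterId. A hedged sketch of that lookup; the URI template and bean name come from the diff, while the host/port and use of urllib2 are illustrative assumptions (Ambari goes through get_value_from_jmx with Kerberos-aware plumbing instead):

# Illustrative sketch: querying the NameNodeInfo JMX bean for the ClusterId.
import json
import urllib2  # this era of Ambari agent code is Python 2

JMX_URI_DEFAULT = "{0}://{1}/jmx?qry={{0}}"
JMX_BEAN_NN_INFO = "Hadoop:service=NameNode,name=NameNodeInfo"

jmx_uri = JMX_URI_DEFAULT.format("http", "nn-host-1:50070").format(JMX_BEAN_NN_INFO)
data = json.load(urllib2.urlopen(jmx_uri))
print(data["beans"][0]["ClusterId"])  # e.g. CID-8b9473a0-...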

[ambari] branch trunk updated: [AMBARI-23240] Add "tag" property/column to widget (#664)

2018-04-04 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d000a59  [AMBARI-23240] Add "tag" property/column to widget (#664)
d000a59 is described below

commit d000a5933dd88d9ee719029281ce6eb0b19e0623
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Wed Apr 4 16:54:29 2018 +0200

[AMBARI-23240] Add "tag" property/column to widget (#664)

* AMBARI-23240 added "tag" property/column to widget

* AMBARI-23240 added widget tag handling to WidgetResponse
---
 .../apache/ambari/server/controller/WidgetResponse.java | 10 ++
 .../controller/internal/WidgetResourceProvider.java | 11 +++
 .../apache/ambari/server/orm/entities/WidgetEntity.java | 17 +
 .../apache/ambari/server/upgrade/UpgradeCatalog270.java |  8 
 .../src/main/resources/Ambari-DDL-Derby-CREATE.sql  |  1 +
 .../src/main/resources/Ambari-DDL-MySQL-CREATE.sql  |  1 +
 .../src/main/resources/Ambari-DDL-Oracle-CREATE.sql |  1 +
 .../src/main/resources/Ambari-DDL-Postgres-CREATE.sql   |  1 +
 .../main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql|  1 +
 .../src/main/resources/Ambari-DDL-SQLServer-CREATE.sql  |  1 +
 .../ambari/server/upgrade/UpgradeCatalog270Test.java|  6 ++
 11 files changed, 58 insertions(+)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/WidgetResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/WidgetResponse.java
index c5b4a0e..32dee17 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/WidgetResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/WidgetResponse.java
@@ -40,6 +40,7 @@ public class WidgetResponse {
   private String widgetValues;
   private String properties;
   private String clusterName;
+  private String tag;
 
   @JsonProperty("id")
   public Long getId() {
@@ -144,6 +145,14 @@ public class WidgetResponse {
 this.clusterName = clusterName;
   }
 
+  public String getTag() {
+return tag;
+  }
+
+  public void setTag(String tag) {
+this.tag = tag;
+  }
+
   @Override
   public String toString() {
 return widgetName;
@@ -175,6 +184,7 @@ public class WidgetResponse {
 response.setProperties(entity.getProperties());
 String clusterName = (entity.getClusterEntity() != null) ? entity.getClusterEntity().getClusterName() : null;
 response.setClusterName(clusterName);
+response.setTag(entity.getTag());
 
 return response;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
index 89a5aa4..5f5d028 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/WidgetResourceProvider.java
@@ -74,6 +74,7 @@ public class WidgetResourceProvider extends AbstractControllerResourceProvider {
   public static final String WIDGET_METRICS_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetInfo", "metrics");
   public static final String WIDGET_VALUES_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetInfo", "values");
   public static final String WIDGET_PROPERTIES_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetInfo", "properties");
+  public static final String WIDGET_TAG_PROPERTY_ID = PropertyHelper.getPropertyId("WidgetInfo", "tag");
   public enum SCOPE {
 CLUSTER,
 USER
@@ -100,6 +101,7 @@ public class WidgetResourceProvider extends AbstractControllerResourceProvider {
   add(WIDGET_METRICS_PROPERTY_ID);
   add(WIDGET_VALUES_PROPERTY_ID);
   add(WIDGET_PROPERTIES_PROPERTY_ID);
+  add(WIDGET_TAG_PROPERTY_ID);
 }
   };
 
@@ -189,6 +191,10 @@ public class WidgetResourceProvider extends AbstractControllerResourceProvider {
   null : gson.toJson(widgetPropertiesMap);
   entity.setProperties(widgetProperties);
 
+  if (properties.containsKey(WIDGET_TAG_PROPERTY_ID)){
+entity.setTag(properties.get(WIDGET_TAG_PROPERTY_ID).toString());
+  }
+
   widgetDAO.create(entity);
   notifyCreate(Type.Widget, request);
   return entity;
@@ -246,6 +252,7 @@ public class WidgetResourceProvider extends AbstractControllerResourceProvider {
   resource.setProperty(WIDGET_SCOPE_PROPERTY_ID, entity.getScope());
   setResourceProperty(resource, WIDGET_VALUES_PROPERTY_ID, entity.getWidgetValu
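
For context on the two hunks above, a minimal sketch of the null-safe copy the provider performs for the new property. Everything here except the guard pattern and the property name is an illustrative assumption (WidgetTagExample, the literal "WidgetInfo/tag" key, and the sample value are not Ambari code):

    import java.util.HashMap;
    import java.util.Map;

    public class WidgetTagExample {
      // Assumed: PropertyHelper.getPropertyId("WidgetInfo", "tag") resolves to "WidgetInfo/tag".
      static final String WIDGET_TAG_PROPERTY_ID = "WidgetInfo/tag";

      public static void main(String[] args) {
        Map<String, Object> properties = new HashMap<>();
        properties.put(WIDGET_TAG_PROPERTY_ID, "capacity-widgets");

        // Same guard as the resource provider: read the tag only when the request carries it.
        String tag = properties.containsKey(WIDGET_TAG_PROPERTY_ID)
            ? properties.get(WIDGET_TAG_PROPERTY_ID).toString()
            : null;
        System.out.println("tag = " + tag); // prints: tag = capacity-widgets
      }
    }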

[ambari] branch trunk updated: [AMBARI-23370] Download client configs fails due to 'clusterLevelParams' not found (#802)

2018-03-29 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6f79c88  [AMBARI-23370] Download client configs fails due to 'clusterLevelParams' not found (#802)
6f79c88 is described below

commit 6f79c888aed062ef60d40bbd647d43f5629b0b2d
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Thu Mar 29 20:55:37 2018 +0200

[AMBARI-23370] Download client configs fails due to 'clusterLevelParams' not found (#802)
---
 .../server/controller/KerberosHelperImpl.java  | 10 +---
 .../internal/ClientConfigResourceProvider.java | 61 +++---
 .../topology/ClusterConfigurationRequest.java  |  5 +-
 .../org/apache/ambari/server/utils/StageUtils.java | 61 +-
 .../controller/AmbariManagementControllerTest.java |  2 +-
 .../apache/ambari/server/utils/StageUtilsTest.java | 24 -
 6 files changed, 45 insertions(+), 118 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d2323c4..3f0ea84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -588,8 +588,6 @@ public class KerberosHelperImpl implements KerberosHelper {
   configurations.put("clusterHostInfo", clusterHostInfoMap);
 }
 
-Map componentToClusterInfoMap = StageUtils.getComponentToClusterInfoKeyMap();
-
 // Iterate through the recommendations to find the recommended host assignments
 for (RecommendationResponse.HostGroup hostGroup : hostGroups) {
   Set> components = hostGroup.getComponents();
@@ -607,13 +605,7 @@ public class KerberosHelperImpl implements KerberosHelper {
   // If the component filter is null or the current component is found in the filter,
   // include it in the map
   if ((componentFilter == null) || componentFilter.contains(componentName)) {
-String key = componentToClusterInfoMap.get(componentName);
-
-if (StringUtils.isEmpty(key)) {
-  // If not found in the componentToClusterInfoMap, then keys are assumed to be
-  // in the form of _hosts (lowercase)
-  key = componentName.toLowerCase() + "_hosts";
-}
+String key = StageUtils.getClusterHostInfoKey(componentName);
 
 Set fqdns = new TreeSet<>();
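
The hunk above collapses the deleted fallback into a single helper on StageUtils. A hedged reconstruction of that helper, inferred only from the deleted lines (the actual method body in StageUtils may differ; it assumes the same StringUtils import the old code used):

    // Components with no explicit entry in the component-to-clusterHostInfo key map
    // fall back to a key of the form "<component_name_lowercased>_hosts".
    public static String getClusterHostInfoKey(String componentName) {
      String key = getComponentToClusterInfoKeyMap().get(componentName);
      if (StringUtils.isEmpty(key)) {
        key = componentName.toLowerCase() + "_hosts";
      }
      return key;
    }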
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index fda3817..8992413 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.controller.ServiceComponentHostRequest;
 import org.apache.ambari.server.controller.ServiceComponentHostResponse;
@@ -369,27 +370,13 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
 }
 osFamily = clusters.getHost(hostName).getOsFamily();
 
-TreeMap hostLevelParams = new TreeMap<>();
-StageUtils.useStackJdkIfExists(hostLevelParams, configs);
-hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
-hostLevelParams.put(STACK_NAME, stackId.getStackName());
-hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
-hostLevelParams.put(DB_NAME, managementController.getServerDB());
-hostLevelParams.put(MYSQL_JDBC_URL, managementController.getMysqljdbcUrl());
-hostLevelParams.put(ORACLE_JDBC_URL, managementController.getOjdbcUrl());
-hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
-hostLevelParams.putAll(managementController.getRcaParameters());
-hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEna

[ambari] branch trunk updated: [AMBARI-23219] Set mariadb the default database for Hive on Amazon Linux 2 distribution (#641)

2018-03-14 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ec62c4d  [AMBARI-23219] Set mariadb the default database for Hive on Amazon Linux 2 distribution (#641)
ec62c4d is described below

commit ec62c4d5ff271000303d1268e38f51a43e335670
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Wed Mar 14 15:02:21 2018 +0100

[AMBARI-23219] Set mariadb the default database for Hive on Amazon Linux 2 distribution (#641)
---
 ambari-common/src/main/python/ambari_commons/resources/os_family.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ambari-common/src/main/python/ambari_commons/resources/os_family.json b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
index 5579378..c731557 100644
--- a/ambari-common/src/main/python/ambari_commons/resources/os_family.json
+++ b/ambari-common/src/main/python/ambari_commons/resources/os_family.json
@@ -79,7 +79,7 @@
 "aliases": {
   "amazon2015": "amazon6",
   "amazon2016": "amazon6",
-  "amazon2017": "amazon6",
+  "amazon2017": "redhat7",
   "suse11sp3": "suse11"
 }
 }
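
The single remapped alias above is what the subject line describes: amazon2017 (Amazon Linux 2) now resolves to the redhat7 family, and per the commit subject that family's Hive default is mariadb. A self-contained sketch of this kind of alias resolution; only the table contents come from os_family.json, while the class and resolve helper are illustrative:

    import java.util.Map;

    public class OsAliasExample {
      // Alias block as it reads after this commit.
      static final Map<String, String> ALIASES = Map.of(
          "amazon2015", "amazon6",
          "amazon2016", "amazon6",
          "amazon2017", "redhat7",
          "suse11sp3", "suse11");

      static String resolve(String osRelease) {
        // Releases without an alias pass through unchanged.
        return ALIASES.getOrDefault(osRelease, osRelease);
      }

      public static void main(String[] args) {
        System.out.println(resolve("amazon2017")); // prints: redhat7
      }
    }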



[ambari] branch trunk updated: AMBARI-23172. Build fails for Debian (jdeb plugin not found). (swagle) (#583)

2018-03-07 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5e10cd  AMBARI-23172. Build fails for Debian (jdeb plugin not found). (swagle) (#583)
e5e10cd is described below

commit e5e10cd81fcf48cbcee82456769145ad8c954e1e
Author: Siddharth 
AuthorDate: Wed Mar 7 12:55:54 2018 -0800

AMBARI-23172. Build fails for Debian (jdeb plugin not found). (swagle) (#583)
---
 ambari-serviceadvisor/pom.xml | 17 +
 1 file changed, 17 insertions(+)

diff --git a/ambari-serviceadvisor/pom.xml b/ambari-serviceadvisor/pom.xml
index dad3d36..5399bdd 100644
--- a/ambari-serviceadvisor/pom.xml
+++ b/ambari-serviceadvisor/pom.xml
@@ -136,6 +136,23 @@
 
   
   
+org.vafer
+jdeb
+1.0.1
+
+  
+none
+
+  jdeb
+
+  
+
+
+  true
+  false
+
+  
+  
 org.apache.rat
 apache-rat-plugin
 0.12



[ambari] branch trunk updated: AMBARI-23137. Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#540)

2018-03-05 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cc4a4d6  AMBARI-23137. Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#540)
cc4a4d6 is described below

commit cc4a4d6e2806f53b6aca190cd6cf3c734a995cc5
Author: Siddharth 
AuthorDate: Mon Mar 5 12:16:21 2018 -0800

AMBARI-23137. Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#540)
---
 .../common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py  | 6 --
 .../common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py  | 4 
 .../resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml| 1 +
 .../src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml  | 1 +
 4 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 197bd2e..e82e73a 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -175,8 +175,10 @@ class NameNodeDefault(NameNode):
 hdfs_binary = self.get_hdfs_binary()
 namenode_upgrade.prepare_upgrade_check_for_previous_dir()
 namenode_upgrade.prepare_upgrade_enter_safe_mode(hdfs_binary)
-namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
-namenode_upgrade.prepare_upgrade_backup_namenode_dir()
+if not params.skip_namenode_save_namespace_express:
+  namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
+if not params.skip_namenode_namedir_backup_express:
+  namenode_upgrade.prepare_upgrade_backup_namenode_dir()
 namenode_upgrade.prepare_upgrade_finalize_previous_upgrades(hdfs_binary)
 
 # Call -rollingUpgrade prepare
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 8222aff..7e06829 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -133,6 +133,10 @@ if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,
 else:
   hadoop_secure_dn_user = '""'
 
+# Parameters for upgrade packs
+skip_namenode_save_namespace_express = default("/configurations/cluster-env/stack_upgrade_express_skip_namenode_save_namespace", False)
+skip_namenode_namedir_backup_express = default("/configurations/cluster-env/stack_upgrade_express_skip_backup_namenode_dir", False)
+
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 limits_conf_dir = "/etc/security/limits.d"
 
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index d3b2e56..10b042d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -168,6 +168,7 @@
   
 
   
+
 
   scripts/hbase_upgrade.py
   take_snapshot
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 4f10ea5..aff9cb0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -69,6 +69,7 @@
   false
 
   
+
 
   scripts/hbase_upgrade.py
   take_snapshot



[ambari] branch trunk updated: AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#543)

2018-03-05 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f2b006c  AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#543)
f2b006c is described below

commit f2b006cce249eb0f62ff9928937605eaa2bb8e46
Author: aonishuk 
AuthorDate: Mon Mar 5 21:24:04 2018 +0200

AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#543)
---
 .../ambari_agent/dummy_files/alert_definitions.json   |  4 ++--
 .../HIVE/0.12.0.2.0/package/scripts/mysql_server.py   |  6 +++---
 .../HIVE/0.12.0.2.0/package/scripts/mysql_service.py  | 15 ++-
 .../HIVE/0.12.0.2.0/package/scripts/mysql_users.py|  5 +
 .../HIVE/0.12.0.2.0/package/scripts/params_linux.py   |  1 -
 .../HIVE/0.12.0.2.0/package/scripts/status_params.py  |  7 +++
 .../test/python/stacks/2.0.6/HIVE/test_mysql_server.py|  2 ++
 7 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json b/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
index 341017c..d9a82a7 100644
--- a/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
+++ b/ambari-agent/src/test/python/ambari_agent/dummy_files/alert_definitions.json
@@ -7,9 +7,9 @@
   {
 "name": "namenode_process", 
 "service": "HDFS", 
-"component": "NAMENODE", 
-"interval": 6, 
 "enabled": true, 
+"interval": 6, 
+"component": "NAMENODE", 
 "label": "NameNode process", 
 "source": {
   "reporting": {
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
index d213aa1..76a8b55 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
@@ -45,18 +45,18 @@ class MysqlServer(Script):
   def start(self, env, rolling_restart=False):
 import params
 env.set_params(params)
-mysql_service(daemon_name=params.daemon_name, action='start')
+mysql_service(action='start')
 
   def stop(self, env, rolling_restart=False):
 import params
 env.set_params(params)
-mysql_service(daemon_name=params.daemon_name, action='stop')
+mysql_service(action='stop')
 
   def status(self, env):
 import status_params
 env.set_params(status_params)
 
-mysql_service(daemon_name=status_params.daemon_name, action='status')
+mysql_service(action='status')
 
 
 if __name__ == "__main__":
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
index 7862774..fe6d7b9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
@@ -18,12 +18,25 @@ limitations under the License.
 
 """
 
+import os
 from resource_management.core.resources.system import Execute
 from resource_management.core.exceptions import ComponentIsNotRunning, Fail
 from resource_management.libraries.functions.format import format
 
 
-def mysql_service(daemon_name=None, action='start'): 
+def get_daemon_name():
+  import status_params
+
+  for possible_daemon_name in status_params.POSSIBLE_DAEMON_NAMES:
+daemon_path = os.path.join(status_params.SERVICES_DIR, possible_daemon_name)
+if os.path.exists(daemon_path):
+  return possible_daemon_name
+
+  raise Fail("Could not find service daemon for mysql")
+
+def mysql_service(action='start'): 
+  daemon_name = get_daemon_name()
+  
   status_cmd = format("pgrep -l '^{process_name}$'")
   cmd = ('service', daemon_name, action)
 
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
index d34c2bc..f394dda 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
@@ -21,6 +21,7 @@ limitations u

[ambari] branch branch-2.6 updated: [AMBARI-23137] Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#534)

2018-03-04 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new f406a23  [AMBARI-23137] Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#534)
f406a23 is described below

commit f406a2368b0adba5ec9cd6d8e02b9745f31f924d
Author: Siddharth 
AuthorDate: Sun Mar 4 17:46:09 2018 -0800

[AMBARI-23137] Allow users to skip backup steps on Express Upgrades to save time. (swagle) (#534)

* AMBARI-23137. Allow users to skip backup steps on Express Upgrades to save time. (swagle)
---
 .../common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py  | 6 --
 .../common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py  | 4 
 .../resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml| 1 +
 .../src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml  | 1 +
 4 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 3f39a49..aa00bbe 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -158,8 +158,10 @@ class NameNodeDefault(NameNode):
 hdfs_binary = self.get_hdfs_binary()
 namenode_upgrade.prepare_upgrade_check_for_previous_dir()
 namenode_upgrade.prepare_upgrade_enter_safe_mode(hdfs_binary)
-namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
-namenode_upgrade.prepare_upgrade_backup_namenode_dir()
+if not params.skip_namenode_save_namespace_express:
+  namenode_upgrade.prepare_upgrade_save_namespace(hdfs_binary)
+if not params.skip_namenode_namedir_backup_express:
+  namenode_upgrade.prepare_upgrade_backup_namenode_dir()
 namenode_upgrade.prepare_upgrade_finalize_previous_upgrades(hdfs_binary)
 
 # Call -rollingUpgrade prepare
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 5e1f4ac..df8ce99 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -131,6 +131,10 @@ if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,
 else:
   hadoop_secure_dn_user = '""'
 
+# Parameters for upgrade packs
+skip_namenode_save_namespace_express = default("/configurations/cluster-env/stack_upgrade_express_skip_namenode_save_namespace", False)
+skip_namenode_namedir_backup_express = default("/configurations/cluster-env/stack_upgrade_express_skip_backup_namenode_dir", False)
+
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 limits_conf_dir = "/etc/security/limits.d"
 
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index d506f1f..d81dd4b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -176,6 +176,7 @@
   
 
   
+
 
   scripts/hbase_upgrade.py
   take_snapshot
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 17a6394..0218029 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -69,6 +69,7 @@
   false
 
   
+
 
   scripts/hbase_upgrade.py
   take_snapshot



[ambari] branch branch-2.6 updated: [AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#523)

2018-03-02 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 5bc23e6  [AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#523)
5bc23e6 is described below

commit 5bc23e6c54a30d9584b3dc927a078aeaef0059e5
Author: Siddharth 
AuthorDate: Fri Mar 2 08:10:59 2018 -0800

[AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#523)
---
 ambari-server/docs/configuration/index.md  | 18 --
 .../ambari/server/configuration/Configuration.java | 13 +
 .../org/apache/ambari/server/state/UpgradeContext.java | 14 ++
 .../ambari/server/state/stack/upgrade/Grouping.java|  9 +++--
 .../server/state/stack/upgrade/ParallelScheduler.java  |  3 ++-
 pom.xml|  2 +-
 6 files changed, 53 insertions(+), 6 deletions(-)

diff --git a/ambari-server/docs/configuration/index.md b/ambari-server/docs/configuration/index.md
index e1a6899..743557d 100644
--- a/ambari-server/docs/configuration/index.md
+++ b/ambari-server/docs/configuration/index.md
@@ -114,9 +114,9 @@ The following are the properties which can be used to configure Ambari.
 | auto.group.creation | The auto group creation by Ambari |`false` | 
 | bootstrap.dir | The directory on the Ambari Server file system used for storing Ambari Agent bootstrap information such as request responses. |`/var/run/ambari-server/bootstrap` | 
 | bootstrap.master_host_name | The host name of the Ambari Server which will be used by the Ambari Agents for communication. | | 
-| bootstrap.script | The location and name of the Python script used to bootstrap new Ambari Agent hosts. |`/usr/lib/python2.6/site-packages/ambari_server/bootstrap.py` | 
+| bootstrap.script | The location and name of the Python script used to bootstrap new Ambari Agent hosts. |`/usr/lib/ambari-server/lib/ambari_server/bootstrap.py` | 
 | bootstrap.setup_agent.password | The password to set on the `AMBARI_PASSPHRASE` environment variable before invoking the bootstrap script. |`password` | 
-| bootstrap.setup_agent.script | The location and name of the Python script executed on the Ambari Agent host during the bootstrap process. |`/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py` | 
+| bootstrap.setup_agent.script | The location and name of the Python script executed on the Ambari Agent host during the bootstrap process. |`/usr/lib/ambari-server/lib/ambari_server/setupAgent.py` | 
 | client.api.acceptor.count | Count of acceptors to configure for the jetty connector used for Ambari API. | | 
 | client.api.port | The port that client connections will use with the REST API. The Ambari Web client runs on this port. |`8080` | 
 | client.api.ssl.cert_pass_file | The filename which contains the password for the keystores, truststores, and certificates for the REST API when it's protected by SSL. |`https.pass.txt` | 
@@ -135,7 +135,11 @@ The following are the properties which can be used to configure Ambari.
 | db.oracle.jdbc.name | The name of the Oracle JDBC JAR connector. |`ojdbc6.jar` | 
 | default.kdcserver.port | The port used to communicate with the Kerberos Key Distribution Center. |`88` | 
 | extensions.path | The location on the Ambari Server where stack extensions exist.The following are examples of valid values:`/var/lib/ambari-server/resources/extensions` | | 
+| gpl.license.accepted | Whether user accepted GPL license. |`false` | 
+| http.cache-control | The value that will be used to set the `Cache-Control` HTTP response header. |`no-store` | 
+| http.pragma | The value that will be used to set the `PRAGMA` HTTP response header. |`no-cache` | 
 | http.strict-transport-security | When using SSL, this will be used to set the `Strict-Transport-Security` response header. |`max-age=31536000` | 
+| http.x-content-type-options | The value that will be used to set the `X-CONTENT-TYPE` HTTP response header. |`nosniff` | 
 | http.x-frame-options | The value that will be used to set the `X-Frame-Options` HTTP response header. |`DENY` | 
 | http.x-xss-protection | The value that will be used to set the `X-XSS-Protection` HTTP response header. |`1; mode=block` | 
 | java.home | The location of the JDK on the Ambari Agent hosts.The following are examples of valid values:`/usr/jdk64/jdk1.7.0_45` | | 
@@ -172,6 +176,7 @@ The following are the properties which can be used to configure Ambari.
 | recovery.window_in_minutes | The length of a recovery window, in minutes, in which recovery attempts can be retried. This property is related to `recovery.max_count`. | | 
 | repo.validation.suffixes.default | The suffixes to use when validating most types of reposit

[ambari] branch trunk updated: [AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#522)

2018-03-01 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7345490  [AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#522)
7345490 is described below

commit 7345490d8992da2e8ae0ea7efcc654d4d6dac15d
Author: Siddharth 
AuthorDate: Thu Mar 1 16:06:09 2018 -0800

[AMBARI-23127] Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle) (#522)

* AMBARI-23127. Allow default value of number of parallel tasks run during Upgrade to be customizable. (swagle)

* AMBARI-23127. Configuration markdown regenerated. (swagle)
---
 ambari-server/docs/configuration/index.md  | 10 ++
 .../apache/ambari/server/configuration/Configuration.java  | 13 +
 .../org/apache/ambari/server/state/UpgradeContext.java | 14 ++
 .../apache/ambari/server/state/stack/upgrade/Grouping.java |  9 +++--
 .../server/state/stack/upgrade/ParallelScheduler.java  |  3 ++-
 pom.xml|  2 +-
 6 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/ambari-server/docs/configuration/index.md b/ambari-server/docs/configuration/index.md
index 2aa6475..54d6fb2 100644
--- a/ambari-server/docs/configuration/index.md
+++ b/ambari-server/docs/configuration/index.md
@@ -50,6 +50,10 @@ The following are the properties which can be used to configure Ambari.
 | agent.stack.retry.tries | The number of times an Ambari Agent should retry package installation when it fails due to a repository error.  This property is related to `agent.stack.retry.on_repo_unavailability`. |`5` | 
 | agent.task.timeout | The time, in seconds, before agent commands are killed. This does not include package installation commands. |`900` | 
 | agent.threadpool.size.max | The size of the Jetty connection pool used for handling incoming Ambari Agent requests. |`25` | 
+| agents.registration.queue.size | Queue size for agents in registration. |`200` | 
+| agents.reports.processing.period | Period in seconds with agents reports will be processed. |`1` | 
+| agents.reports.processing.start.timeout | Timeout in seconds before start processing of agents' reports. |`5` | 
+| agents.reports.thread.pool.size | Thread pool size for agents reports processing. |`10` | 
 | alerts.ambari.snmp.dispatcher.udp.port | The UDP port to use when binding the Ambari SNMP dispatcher on Ambari Server startup. If no port is specified, then a random port will be used. | | 
 | alerts.cache.enabled | Determines whether current alerts should be cached. Enabling this can increase performance on large cluster, but can also result in lost alert data if the cache is not flushed frequently. |`false` | 
 | alerts.cache.flush.interval | The time, in minutes, after which cached alert information is flushed to the database This property is related to `alerts.cache.enabled`. |`10` | 
@@ -70,6 +74,7 @@ The following are the properties which can be used to configure Ambari.
 | api.csrfPrevention.enabled | Determines whether Cross-Site Request Forgery attacks are prevented by looking for the `X-Requested-By` header. |`true` | 
 | api.gzip.compression.enabled | Determines whether data sent to and from the Ambari service should be compressed. |`true` | 
 | api.gzip.compression.min.size | Used in conjunction with `api.gzip.compression.enabled`, determines the mininum size that an HTTP request must be before it should be compressed. This is measured in bytes. |`10240` | 
+| api.heartbeat.interval | Server to API STOMP endpoint heartbeat interval in milliseconds. |`1` | 
 | api.ssl | Determines whether SSL is used in for secure connections to Ambari. When enabled, ambari-server setup-https must be run in order to properly configure keystores. |`false` | 
 | auditlog.enabled | Determines whether audit logging is enabled. |`true` | 
 | auditlog.logger.capacity | The size of the worker queue for audit logger events. This property is related to `auditlog.enabled`. |`1` | 
@@ -132,6 +137,7 @@ The following are the properties which can be used to configure Ambari.
 | logsearch.portal.connect.timeout | The time, in milliseconds, that the Ambari Server will wait while attempting to connect to the LogSearch Portal service. |`5000` | 
 | logsearch.portal.external.address | Address of an external LogSearch Portal service. (managed outside of Ambari) Using Ambari Credential store is required for this feature (credential: 'logsearch.admin.credential') | | 
 | logsearch.portal.read.timeout | The time, in milliseconds, that the Ambari Server will wait while attempting to read a response from the LogSearch Portal service. |`5000` | 
+| messaging.threadpool.size | Thread pool 

[ambari] branch branch-2.6 updated: AMBARI-23116 Add empty kafka broker handling to stackadvisor (#508)

2018-03-01 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 3037730  AMBARI-23116 Add empty kafka broker handling to stackadvisor (#508)
3037730 is described below

commit 3037730bd2fcf6858b5292852724c41c0606f284
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Thu Mar 1 22:45:29 2018 +0100

AMBARI-23116 Add empty kafka broker handling to stackadvisor (#508)

* AMBARI-23116 added empty kafka broker handling to stackadvisor

* AMBARI-23116 fixed unit test and removed unnecessary check
---
 .../src/main/resources/stacks/HDP/2.3/services/stack_advisor.py| 7 +--
 .../src/test/python/stacks/2.3/common/test_stack_advisor.py| 5 +
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 80b2033..ca5ed4d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -337,6 +337,10 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
 servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 kafka_broker = getServicesSiteProperties(services, "kafka-broker")
+kafka_env = getServicesSiteProperties(services, "kafka-env")
+
+if not kafka_env: #Kafka check not required
+  return
 
 security_enabled = self.isSecurityEnabled(services)
 
@@ -345,8 +349,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 putKafkaBrokerAttributes = self.putPropertyAttribute(configurations, "kafka-broker")
 
 if security_enabled:
-  kafka_env = getServicesSiteProperties(services, "kafka-env")
-  kafka_user = kafka_env.get('kafka_user') if kafka_env is not None else None
+  kafka_user = kafka_env.get('kafka_user')
 
   if kafka_user is not None:
 kafka_super_users = kafka_broker.get('super.users') if kafka_broker is not None else None
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 689e25a..129174c 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -324,6 +324,11 @@ class TestHDP23StackAdvisor(TestCase):
   },
   "property_attributes": {}
 },
+"kafka-env": {
+  "properties": {
+"kafka_user" : "custom_kafka"
+  }
+},
 "kafka-broker": {
   "properties": {
 "authorizer.class.name" : "kafka.security.auth.SimpleAclAuthorizer"



[ambari] branch branch-2.6 updated: AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#488)

2018-03-01 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 74b6a45  AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#488)
74b6a45 is described below

commit 74b6a4515094b18b73f2e1e54f72e88435e2102f
Author: aonishuk 
AuthorDate: Thu Mar 1 21:28:26 2018 +0200

AMBARI-23101. Rhel6: Deploy with NewMySQL for Hive fails (aonishuk) (#488)
---
 .../HIVE/0.12.0.2.0/package/scripts/mysql_server.py |  6 +++---
 .../HIVE/0.12.0.2.0/package/scripts/mysql_service.py| 17 -
 .../HIVE/0.12.0.2.0/package/scripts/mysql_users.py  |  5 +
 .../HIVE/0.12.0.2.0/package/scripts/params_linux.py |  1 -
 .../HIVE/0.12.0.2.0/package/scripts/status_params.py|  9 +++--
 .../test/python/stacks/2.0.6/HIVE/test_mysql_server.py  |  2 ++
 6 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
index 851dc02..0f3757e 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
@@ -46,18 +46,18 @@ class MysqlServer(Script):
   def start(self, env, rolling_restart=False):
 import params
 env.set_params(params)
-mysql_service(daemon_name=params.daemon_name, action='start')
+mysql_service(action='start')
 
   def stop(self, env, rolling_restart=False):
 import params
 env.set_params(params)
-mysql_service(daemon_name=params.daemon_name, action='stop')
+mysql_service(action='stop')
 
   def status(self, env):
 import status_params
 env.set_params(status_params)
 
-mysql_service(daemon_name=status_params.daemon_name, action='status')
+mysql_service(action='status')
 
 
 if __name__ == "__main__":
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
index 8b98ed1..1666269 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
@@ -18,10 +18,25 @@ limitations under the License.
 
 """
 
+import os
 from resource_management import *
 
+SERVICES_DIR = '/etc/init.d'
+POSSIBLE_DAEMON_NAMES = ['mysql', 'mysqld', 'mariadb']
 
-def mysql_service(daemon_name=None, action='start'): 
+def get_daemon_name():
+  import status_params
+  
+  for possible_daemon_name in status_params.POSSIBLE_DAEMON_NAMES:
+daemon_path = os.path.join(status_params.SERVICES_DIR, possible_daemon_name)
+if os.path.exists(daemon_path):
+  return possible_daemon_name
+
+  raise Fail("Could not find service daemon for mysql")
+
+def mysql_service(action='start'): 
+  daemon_name = get_daemon_name()
+  
   status_cmd = format("pgrep -l '^{process_name}$'")
   cmd = ('service', daemon_name, action)
 
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
index c023548..0ccf765 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
@@ -19,6 +19,7 @@ limitations under the License.
 """
 
 from resource_management import *
+from mysql_service import get_daemon_name
 
 # Used to add hive access to the needed components
 def mysql_adduser():
@@ -30,6 +31,8 @@ def mysql_adduser():
   )
   hive_server_host = format("{hive_server_host}")
   hive_metastore_host = format("{hive_metastore_host}")
+  
+  daemon_name = get_daemon_name()
 
   add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
   add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
@@ -54,6 +57,8 @@ def mysql_deluser():
   )
   hive_server_host = format("{hive_server_host}")
   hive_metastore_host = format("{hive_metastore_host}")
+  
+  daemon_name = get_daemon_name()
 
   del_hiveserver_cm

[ambari] branch trunk updated: AMBARI-23084 Background Ops Modal Updates - Get user name (#476)

2018-02-27 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 723210c  AMBARI-23084 Background Ops Modal Updates - Get user name (#476)
723210c is described below

commit 723210ca5e1af42eeea7c69f914db6b391c16f0f
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Tue Feb 27 17:22:45 2018 +0100

AMBARI-23084 Background Ops Modal Updates - Get user name (#476)

* AMBARI-23084 Background Ops Modal Updates - Get username
---
 .../ambari/server/actionmanager/ActionManager.java  |  2 ++
 .../org/apache/ambari/server/actionmanager/Request.java | 16 
 .../controller/internal/RequestResourceProvider.java|  5 -
 .../controller/internal/RequestStageContainer.java  |  5 +
 .../ambari/server/orm/entities/RequestEntity.java   | 17 +
 .../apache/ambari/server/upgrade/UpgradeCatalog270.java |  6 ++
 .../src/main/resources/Ambari-DDL-Derby-CREATE.sql  |  1 +
 .../src/main/resources/Ambari-DDL-MySQL-CREATE.sql  |  1 +
 .../src/main/resources/Ambari-DDL-Oracle-CREATE.sql |  1 +
 .../src/main/resources/Ambari-DDL-Postgres-CREATE.sql   |  1 +
 .../main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql|  1 +
 .../src/main/resources/Ambari-DDL-SQLServer-CREATE.sql  |  1 +
 .../controller/internal/RequestStageContainerTest.java  |  3 +++
 .../ambari/server/upgrade/UpgradeCatalog270Test.java| 12 
 14 files changed, 71 insertions(+), 1 deletion(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
index e6be43b..2015e34 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.controller.ExecuteActionRequest;
+import org.apache.ambari.server.security.authorization.AuthorizationHelper;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.utils.CommandUtils;
 import org.apache.ambari.server.utils.StageUtils;
@@ -79,6 +80,7 @@ public class ActionManager {
 
   public void sendActions(List stages, String clusterHostInfo, ExecuteActionRequest actionRequest) throws AmbariException {
     Request request = requestFactory.createNewFromStages(stages, clusterHostInfo, actionRequest);
+request.setUserName(AuthorizationHelper.getAuthenticatedName());
 sendActions(request, actionRequest);
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
index eb4143f..76320bb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Request.java
@@ -59,6 +59,7 @@ public class Request {
   private long startTime;
   private long endTime;
   private String clusterHostInfo;
+  private String userName;
 
   /**
* If true, this request can not be executed in parallel with any another
@@ -250,6 +251,7 @@ public class Request {
 requestEntity.setStatus(status);
 requestEntity.setDisplayStatus(displayStatus);
 requestEntity.setClusterHostInfo(clusterHostInfo);
+requestEntity.setUserName(userName);
 //TODO set all fields
 
 if (resourceFilters != null) {
@@ -423,6 +425,20 @@ public class Request {
   }
 
   /**
+   * Returns the user name associated with the request.
+   */
+  public String getUserName() {
+return userName;
+  }
+
+  /**
+   * Sets the user name
+   */
+  public void setUserName(String userName) {
+this.userName = userName;
+  }
+
+  /**
* @param entity  the request entity
* @return a list of {@link RequestResourceFilter} from the entity, or {@code null}
*         if none are defined
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index b2f9e58..2ce19ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -127,6 +127,7 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
   public static final String REQUEST_PROGRESS_PERCENT_ID = REQUESTS + "/progr

[ambari] branch branch-2.6 updated: Revert "AMBARI-22934. [API] Updating current stack repo without GPL repos in the body does not throw any error. (mpapirkovkyy)" (#457)

2018-02-23 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 89dfc60  Revert "AMBARI-22934. [API] Updating current stack repo without GPL repos in the body does not throw any error. (mpapirkovkyy)" (#457)
89dfc60 is described below

commit 89dfc60ea77877dc0213596930f9c83ae7c3317e
Author: Siddharth 
AuthorDate: Fri Feb 23 09:02:01 2018 -0800

Revert "AMBARI-22934. [API] Updating current stack repo without GPL repos 
in the body does not throw any error. (mpapirkovkyy)" (#457)

This reverts commit cad4b63ec782e34b2b9a77b0ee56f20602a198ab.
---
 .../controller/AmbariManagementControllerImpl.java |  3 +-
 .../RepositoryVersionResourceProvider.java | 30 -
 .../stack/upgrade/RepositoryVersionHelper.java | 36 +---
 .../RepositoryVersionResourceProviderTest.java | 49 --
 .../stack/upgrade/RepositoryVersionHelperTest.java | 43 ---
 5 files changed, 3 insertions(+), 158 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index f7d4d7a..2f49bf3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -190,6 +190,7 @@ import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.RepoTag;
 import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
@@ -4505,7 +4506,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 for (RepositoryXml.Repo repo : os.getRepos()) {
   RepositoryResponse resp = new RepositoryResponse(repo.getBaseUrl(), os.getFamily(),
   repo.getRepoId(), repo.getRepoName(), repo.getDistribution(), repo.getComponents(), repo.getMirrorsList(),
-  repo.getBaseUrl(), repo.getLatestUri(), Collections.emptyList(), repo.getTags());
+  repo.getBaseUrl(), repo.getLatestUri(), Collections.emptyList(), Collections.emptySet());
 
   resp.setVersionDefinitionId(versionDefinitionId);
   resp.setStackName(stackId.getStackName());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
index a3fe56e..73336ae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
@@ -63,7 +63,6 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.repository.ManifestServiceInfo;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
-import org.apache.ambari.server.state.stack.RepoTag;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.ObjectUtils;
@@ -497,10 +496,6 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc
   }
 }
 
-if (RepositoryVersionHelper.shouldContainGPLRepo(repositoryVersion.getStackId(), repositoryVersion.getVersion())) {
-  validateGPLRepoPresence(repositoryVersion);
-}
-
 if (!RepositoryVersionEntity.isVersionInStack(repositoryVersion.getStackId(), repositoryVersion.getVersion())) {
   throw new AmbariException(MessageFormat.format("Version {0} needs to belong to stack {1}",
   repositoryVersion.getVersion(), repositoryVersion.getStackName() + "-" + repositoryVersion.getStackVersion()));
@@ -508,31 +503,6 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc
   }
 
   /**
-   * Checks HDP repository version contains GPL repo for each os.
-   * @param repositoryVersion repository version to check.
-   * @throws AmbariException in case repository version id HDP and should contain GPL repo, bug shouldn't.
-   */
-  private static void validateGPLRepoPresence(RepositoryVersionEnti
[ambari] branch trunk updated: Reverts AMBARI-22934. [API] Updating current stack repo without GPL repos in the body does not throw any error. (#456)

2018-02-23 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b78b61b  Reverts AMBARI-22934. [API] Updating current stack repo without GPL repos in the body does not throw any error. (#456)
b78b61b is described below

commit b78b61b1f092d5e68fc3f08396d533cd1d99d67a
Author: Siddharth 
AuthorDate: Fri Feb 23 08:59:08 2018 -0800

Reverts AMBARI-22934. [API] Updating current stack repo without GPL repos in the body does not throw any error. (#456)
---
 .../controller/AmbariManagementControllerImpl.java |  3 +-
 .../RepositoryVersionResourceProvider.java | 30 --
 .../stack/upgrade/RepositoryVersionHelper.java | 36 ---
 .../RepositoryVersionResourceProviderTest.java | 58 --
 .../stack/upgrade/RepositoryVersionHelperTest.java | 70 --
 5 files changed, 2 insertions(+), 195 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 90d6151..1c921f2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -219,6 +219,7 @@ import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
 import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.RepoTag;
 import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
@@ -4388,7 +4389,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 for (RepositoryXml.Repo repo : os.getRepos()) {
   RepositoryResponse resp = new RepositoryResponse(repo.getBaseUrl(), os.getFamily(),
   repo.getRepoId(), repo.getRepoName(), repo.getDistribution(), repo.getComponents(), repo.getMirrorsList(),
-  repo.getBaseUrl(), repo.getTags());
+  repo.getBaseUrl(), Collections.emptySet());
 
   resp.setVersionDefinitionId(versionDefinitionId);
   resp.setStackName(stackId.getStackName());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
index ac961d0..8704cb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
@@ -63,7 +63,6 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.repository.ManifestServiceInfo;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
-import org.apache.ambari.server.state.stack.RepoTag;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.ObjectUtils;
@@ -495,10 +494,6 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc
   }
 }
 
-if (RepositoryVersionHelper.shouldContainGPLRepo(repositoryVersion.getStackId(), repositoryVersion.getVersion())) {
-  validateGPLRepoPresence(repositoryVersion);
-}
-
 if (!RepositoryVersionEntity.isVersionInStack(repositoryVersion.getStackId(), repositoryVersion.getVersion())) {
   throw new AmbariException(MessageFormat.format("Version {0} needs to belong to stack {1}",
   repositoryVersion.getVersion(), repositoryVersion.getStackName() + "-" + repositoryVersion.getStackVersion()));
@@ -506,31 +501,6 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc
   }
 
   /**
-   * Checks HDP repository version contains GPL repo for each os.
-   * @param repositoryVersion repository version to check.
-   * @throws AmbariException in case repository version id HDP and should contain GPL repo, bug shouldn't.
-   */
-  private static void validateGPLRepoPresence(RepositoryVersionEntity repositoryVersion) throws AmbariException {
-if (!repositoryVersion.getStackName().equals("HDP")) {
-  return;
-}
-for (RepoOsEntity os : repositoryVersion.getRepoOsEntities()) {
-  boolean hasGPLRepo = fals

[ambari] branch trunk updated: AMBARI-23015 Added NaN handling to JSON handling in MetricsRetrievalService.java (#396)

2018-02-16 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3a52f27  AMBARI-23015 Added NaN handling to JSON handling in MetricsRetrievalService.java (#396)
3a52f27 is described below

commit 3a52f278961406d198f4e89c30e49f989f26c143
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Fri Feb 16 22:05:55 2018 +0100

AMBARI-23015 Added NaN handling to JSON handling in MetricsRetrievalService.java (#396)

[AMBARI-23015] Added NaN handling to JSON handling in MetricsRetrievalService
---
 .../state/services/MetricsRetrievalService.java|  2 ++
 .../services/MetricsRetrievalServiceTest.java  | 36 ++
 2 files changed, 38 insertions(+)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
index 72ef1c0..10f2b05 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/MetricsRetrievalService.java
@@ -39,6 +39,7 @@ import org.apache.ambari.server.controller.jmx.JMXMetricHolder;
 import org.apache.ambari.server.controller.utilities.ScalingThreadPoolExecutor;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
 import org.apache.commons.io.IOUtils;
+import org.codehaus.jackson.JsonParser;
 import org.codehaus.jackson.map.DeserializationConfig;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectReader;
@@ -182,6 +183,7 @@ public class MetricsRetrievalService extends AbstractService {
   public MetricsRetrievalService() {
 ObjectMapper jmxObjectMapper = new ObjectMapper();
 jmxObjectMapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);
+jmxObjectMapper.configure(JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS, true);
 m_jmxObjectReader = jmxObjectMapper.reader(JMXMetricHolder.class);
   }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/services/MetricsRetrievalServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/services/MetricsRetrievalServiceTest.java
index e6a6f81..4017a8e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/services/MetricsRetrievalServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/services/MetricsRetrievalServiceTest.java
@@ -196,6 +196,42 @@ public class MetricsRetrievalServiceTest extends EasyMockSupport {
   }
 
   /**
+   * Tests handling NaN in JSON.
+   */
+  @Test
+  public void testJsonNaN() throws Exception {
+
+InputStream jmxInputStream = IOUtils.toInputStream("{ \"beans\": [ " +
+" {\n" +
+"\"name\" : 
\"Hadoop:service=HBase,name=RegionServer,sub=Server\",\n" +
+"\"modelerType\" : \"RegionServer,sub=Server\",  
\"l1CacheMissCount\" : 0,\n" +
+"\"l1CacheHitRatio\" : NaN,\n" +
+"\"l1CacheMissRatio\" : NaN,\n" +
+"\"l2CacheHitCount\" : 0" +
+" }] " +
+"}");
+
+StreamProvider streamProvider = createNiceMock(StreamProvider.class);
+
+EasyMock.expect(streamProvider.readFrom(JMX_URL)).andReturn(jmxInputStream).once();
+
+replayAll();
+
+m_service.startAsync();
+m_service.awaitRunning(METRICS_SERVICE_TIMEOUT, TimeUnit.SECONDS);
+
+// make the service synchronous
+m_service.setThreadPoolExecutor(new SynchronousThreadPoolExecutor());
+
+JMXMetricHolder jmxMetricHolder = m_service.getCachedJMXMetric(JMX_URL);
+Assert.assertNull(jmxMetricHolder);
+
+m_service.submitRequest(MetricSourceType.JMX, streamProvider, JMX_URL);
+jmxMetricHolder = m_service.getCachedJMXMetric(JMX_URL);
+Assert.assertNotNull(jmxMetricHolder);
+  }
+
+  /**
* Tests that many requests to the same URL do not invoke the stream provider
* more than once.
*/

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.
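
A note on the change itself: NaN is not a legal token in strict JSON (RFC 8259), which is why the JMX payload above trips the default ObjectMapper and why the commit enables ALLOW_NON_NUMERIC_NUMBERS. Python's standard json module accepts these tokens by default, so a minimal sketch of the lenient-versus-strict behavior looks like this (illustration only, not Ambari code):

import json

# Lenient parse: NaN/Infinity are accepted out of the box, much like
# Jackson with JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS enabled.
doc = json.loads('{"l1CacheHitRatio": NaN, "l2CacheHitCount": 0}')
print(doc["l1CacheHitRatio"])   # nan (a float)

# Strict parse: intercept the constant tokens to reproduce the pre-fix
# failure mode, where the metric payload could not be deserialized.
def reject(token):
    raise ValueError("non-numeric number not allowed: " + token)

try:
    json.loads('{"l1CacheHitRatio": NaN}', parse_constant=reject)
except ValueError as err:
    print(err)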


[ambari] branch trunk updated: Adding namenode clusterId to ambari API (#377)

2018-02-15 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 236ca47  Adding namenode clusterId to ambari API (#377)
236ca47 is described below

commit 236ca47828a7f64d792980d7e088891ba06306e1
Author: majorendre <34535487+majoren...@users.noreply.github.com>
AuthorDate: Thu Feb 15 21:24:22 2018 +0100

Adding namenode clusterId to ambari API (#377)

[AMBARI-22974] Adding namenode clusterId to ambari API
---
 .../src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
index c66387d..3b80462 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
@@ -3935,6 +3935,11 @@
   "metric": 
"Hadoop:service=NameNode,name=NameNodeInfo.CorruptFiles",
   "pointInTime": true,
   "temporal": false
+},
+"metrics/dfs/namenode/ClusterId": {
+  "metric": "Hadoop:service=NameNode,name=NameNodeInfo.ClusterId",
+  "pointInTime": true,
+  "temporal": false
 }
   }
 }

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.
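
With this mapping in place, the ClusterId value becomes reachable through the normal Ambari REST fields parameter. A minimal sketch, assuming the standard Ambari v1 component-metrics URL shape; the host, cluster name, and credentials below are placeholders:

import requests  # third-party HTTP client; any client works

AMBARI = "http://ambari.example.com:8080"   # placeholder
CLUSTER = "mycluster"                       # placeholder

url = (AMBARI + "/api/v1/clusters/" + CLUSTER +
       "/services/HDFS/components/NAMENODE"
       "?fields=metrics/dfs/namenode/ClusterId")
resp = requests.get(url, auth=("admin", "admin"),
                    headers={"X-Requested-By": "ambari"})
# The metric is nested under the same path used as the key in metrics.json.
print(resp.json()["metrics"]["dfs"]["namenode"]["ClusterId"])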


[ambari] 02/02: improved test

2018-02-14 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 7d441cc4587c42a9cc73160a92681152ca1c67fa
Author: majorendre 
AuthorDate: Mon Feb 12 12:40:44 2018 +0100

improved test

Added external name services to TestNamenodeHaUtils
---
 ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py 
b/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
index 6f2e5aa..18f316f 100644
--- a/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
+++ b/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
@@ -62,6 +62,7 @@ class TestNamenodeHaUtils(TestCase):
 # federated config dfs.internal.nameservices in hdfs-site
 hdfs_site = {
   "dfs.internal.nameservices": "ns1,ns2",
+  "dfs.nameservices": "ns1,ns2,exns1,exns2"
 }
 
 self.assertEqual(["ns1","ns2"], get_nameservices(hdfs_site))

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.


[ambari] branch trunk updated (b4b8e16 -> 7d441cc)

2018-02-14 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git.


from b4b8e16  Upgrade Ambari to Guice 4.1 (#358)
 new 6d2c6ba  Federation support added to name_node_ha_utils.py
 new 7d441cc  improved test

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../resource_management/TestNamenodeHaUtils.py |  16 ++-
 .../libraries/functions/namenode_ha_utils.py   | 109 +
 2 files changed, 82 insertions(+), 43 deletions(-)

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.


[ambari] 01/02: Federation support added to name_node_ha_utils.py

2018-02-14 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 6d2c6bab0ba57c362bc06871f866b1d6a914e5eb
Author: majorendre 
AuthorDate: Thu Feb 8 11:10:29 2018 +0100

Federation support added to name_node_ha_utils.py

So far the code could handle only a single name service. Added support for 
multiple name services.
---
 .../resource_management/TestNamenodeHaUtils.py |  15 ++-
 .../libraries/functions/namenode_ha_utils.py   | 109 +
 2 files changed, 81 insertions(+), 43 deletions(-)

diff --git 
a/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py 
b/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
index 2fc4904..6f2e5aa 100644
--- a/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
+++ b/ambari-agent/src/test/python/resource_management/TestNamenodeHaUtils.py
@@ -19,7 +19,7 @@ limitations under the License.
 '''
 from unittest import TestCase
 from resource_management.libraries.functions.namenode_ha_utils import \
-  get_nameservice
+  get_nameservices
 
 
 class TestNamenodeHaUtils(TestCase):
@@ -39,7 +39,7 @@ class TestNamenodeHaUtils(TestCase):
   "dfs.namenode.rpc-address.HAB.nn2": "hostb2:8020",
 }
 
-self.assertEqual("HAA", get_nameservice(hdfs_site))
+self.assertEqual(["HAA"], get_nameservices(hdfs_site))
 
 # dfs.internal.nameservices not in hdfs-site
 hdfs_site = {
@@ -52,9 +52,16 @@ class TestNamenodeHaUtils(TestCase):
   "dfs.namenode.rpc-address.HAB.nn2": "hostb2:8020",
 }
 
-self.assertEqual("HAA", get_nameservice(hdfs_site))
+self.assertEqual(["HAA"], get_nameservices(hdfs_site))
 
 # Non HA
 hdfs_site = {}
 
-self.assertEqual(None, get_nameservice(hdfs_site))
+self.assertEqual([], get_nameservices(hdfs_site))
+
+# federated config dfs.internal.nameservices in hdfs-site
+hdfs_site = {
+  "dfs.internal.nameservices": "ns1,ns2",
+}
+
+self.assertEqual(["ns1","ns2"], get_nameservices(hdfs_site))
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index 7a2635f..d3a9c2d 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -28,7 +28,7 @@ from resource_management.libraries.functions.hdfs_utils 
import is_https_enabled_
 
 
 __all__ = ["get_namenode_states", "get_active_namenode",
-   "get_property_for_active_namenode", "get_nameservice"]
+   "get_property_for_active_namenode", "get_nameservices"]
 
 HDFS_NN_STATE_ACTIVE = 'active'
 HDFS_NN_STATE_STANDBY = 'standby'
@@ -65,15 +65,32 @@ def get_namenode_states(hdfs_site, security_enabled, 
run_user, times=10, sleep_t
   doRetries.attempt = 0
   return doRetries(hdfs_site, security_enabled, run_user)
 
+
 def get_namenode_states_noretries(hdfs_site, security_enabled, run_user, 
last_retry=True):
   """
+  returns data for all name nodes of all name services
+  """
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+
+  name_services = get_nameservices(hdfs_site)
+  for name_service in name_services:
+active, standby, unknown = 
_get_namenode_states_noretries_single_ns(hdfs_site, name_service, 
security_enabled, run_user, last_retry)
+active_namenodes += active
+standby_namenodes += standby
+unknown_namenodes += unknown
+  return active_namenodes, standby_namenodes, unknown_namenodes
+
+
+def _get_namenode_states_noretries_single_ns(hdfs_site, name_service, 
security_enabled, run_user, last_retry=True):
+  """
   return format [('nn1', 'hdfs://hostname1:port1'), ('nn2', 
'hdfs://hostname2:port2')] , [], []
   """
   active_namenodes = []
   standby_namenodes = []
   unknown_namenodes = []
-  
-  name_service = get_nameservice(hdfs_site)
+
   nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
 
   # now we have something like 'nn1,nn2,nn3,nn4'
@@ -90,7 +107,7 @@ def get_namenode_states_noretries(hdfs_site, 
security_enabled, run_user, last_re
 else:
   key = NAMENODE_HTTPS_FRAGMENT.format(name_service,nn_unique_id)
   protocol = "https"
-
+
 if key in hdfs_site:
   # use str() to ensure that unicode strings do not have the u' in them
   value = str(hdfs_site[key])
@@ -101,11 +118,11 @@ def get_namenode_states_
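
The behavior the diff (and the tests above) pin down: get_nameservices() now returns a list, preferring dfs.internal.nameservices, falling back to dfs.nameservices, and yielding [] on a non-HA cluster. A simplified sketch of that lookup order; the real helper also consults the dfs.ha.namenodes.* keys:

def get_nameservices(hdfs_site):
    # Prefer the internal name services; external ones (exns1, exns2 in the
    # federated test case above) are deliberately excluded from the result.
    for key in ("dfs.internal.nameservices", "dfs.nameservices"):
        value = hdfs_site.get(key)
        if value:
            return [ns.strip() for ns in str(value).split(",")]
    return []

assert get_nameservices({}) == []
assert get_nameservices({"dfs.internal.nameservices": "ns1,ns2",
                         "dfs.nameservices": "ns1,ns2,exns1,exns2"}) == ["ns1", "ns2"]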

[ambari] branch branch-2.6 updated: AMBARI-22918 Decommission RegionServer fails when kerberos is enabled

2018-02-13 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new f40a400  AMBARI-22918 Decommission RegionServer fails when kerberos is 
enabled
f40a400 is described below

commit f40a400da39cff9bd2c32ea6c5f33a256467e1e8
Author: Toshihiro Suzuki 
AuthorDate: Wed Feb 7 09:37:50 2018 +0900

AMBARI-22918 Decommission RegionServer fails when kerberos is enabled
---
 .../HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
index 022465a..24ed397 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
@@ -63,7 +63,7 @@ def hbase_decommission(env):
 for host in hosts:
   if host:
 regiondrainer_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_drainer} remove {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove 
{host}")
 Execute(regiondrainer_cmd,
 user=params.hbase_user,
 logoutput=True
@@ -75,9 +75,9 @@ def hbase_decommission(env):
 for host in hosts:
   if host:
 regiondrainer_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_drainer} add {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add 
{host}")
 regionmover_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_mover} unload {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload 
{host}")
 
 Execute(regiondrainer_cmd,
 user=params.hbase_user,

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.
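
The substance of the fix is where {master_security_config} (typically a -Djava.security.auth.login.config=... JAAS option) is delivered: as a bare argument on the hbase command line it was never applied to the JVM running org.jruby.Main, which is the likely reason decommission failed under Kerberos; exported through HBASE_OPTS, the hbase launcher script folds it into the JVM options. A rough illustration of the rendered commands, with hypothetical parameter values standing in for what params.py supplies at runtime:

# Hypothetical values, for illustration only.
params = dict(
    kinit_cmd="kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase;",
    master_security_config="-Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf",
    hbase_cmd="hbase",
    hbase_conf_dir="/etc/hbase/conf",
    region_mover="/usr/hdp/current/hbase-master/bin/region_mover.rb",
    host="rs1.example.com",
)

before = ("{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} "
          "{master_security_config} org.jruby.Main {region_mover} "
          "unload {host}").format(**params)
after = ('{kinit_cmd} HBASE_OPTS="$HBASE_OPTS {master_security_config}" '
         "{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main "
         "{region_mover} unload {host}").format(**params)
print(after)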


[ambari] branch trunk updated: AMBARI-22918 Decommission RegionServer fails when kerberos is enabled

2018-02-13 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fd46cc5  AMBARI-22918 Decommission RegionServer fails when kerberos is 
enabled
fd46cc5 is described below

commit fd46cc507d93a7f069df523997c16f7336b45302
Author: Toshihiro Suzuki 
AuthorDate: Tue Feb 6 17:33:42 2018 +0900

AMBARI-22918 Decommission RegionServer fails when kerberos is enabled
---
 .../HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
index 7358674..5853f75 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
@@ -64,7 +64,7 @@ def hbase_decommission(env):
 for host in hosts:
   if host:
 regiondrainer_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_drainer} remove {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove 
{host}")
 Execute(regiondrainer_cmd,
 user=params.hbase_user,
 logoutput=True
@@ -76,9 +76,9 @@ def hbase_decommission(env):
 for host in hosts:
   if host:
 regiondrainer_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_drainer} add {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add 
{host}")
 regionmover_cmd = format(
-  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {region_mover} unload {host}")
+  "{kinit_cmd} HBASE_OPTS=\"$HBASE_OPTS {master_security_config}\" 
{hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload 
{host}")
 
 Execute(regiondrainer_cmd,
 user=params.hbase_user,

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.


[ambari] branch trunk updated: AMBARI-22891: Logging improvement during the Upgrade when there is an invalid Alert definition

2018-02-13 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6d1dc6c  AMBARI-22891: Logging improvement during the Upgrade when 
there is an invalid Alert definition
6d1dc6c is described below

commit 6d1dc6c4e5baa5b8044806ef0c29f6d15050d70e
Author: root 
AuthorDate: Tue Feb 6 16:04:02 2018 -0800

AMBARI-22891: Logging improvement during the Upgrade when there is an invalid 
Alert definition
---
 .../org/apache/ambari/server/state/alert/AlertDefinitionFactory.java | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
index ecf025f..c7acd54 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
@@ -196,6 +196,7 @@ public class AlertDefinitionFactory {
   Source source = m_gson.fromJson(sourceJson, Source.class);
   definition.setSource(source);
 } catch (Exception exception) {
+  LOG.error("Alert defintion is invalid for  Id : " + 
entity.getDefinitionId() + " Name: "+  entity.getDefinitionName() );
   LOG.error(
   "Unable to deserialize the alert definition source during coercion",
   exception);

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.


[ambari] branch branch-2.6 updated: AMBARI-22891: Logging improvement during the Upgrade when there is an invalid Alert definition

2018-02-05 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new be34314  AMBARI-22891: Logging improvement during the Upgrade when 
there is an invalid Alert definition
be34314 is described below

commit be34314a4c60cf04e595a35ff790445f27e49679
Author: root 
AuthorDate: Mon Feb 5 15:33:17 2018 -0800

AMBARI-22891: Logging improvement during the Upgrade when there is an invalid 
Alert definition
---
 .../org/apache/ambari/server/state/alert/AlertDefinitionFactory.java | 1 +
 1 file changed, 1 insertion(+)

diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
index 769920f..fb5d018 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java
@@ -196,6 +196,7 @@ public class AlertDefinitionFactory {
   Source source = m_gson.fromJson(sourceJson, Source.class);
   definition.setSource(source);
 } catch (Exception exception) {
+  LOG.error("Alert defintion is invalid for  Id : " + 
entity.getDefinitionId() + " Name: "+  entity.getDefinitionName() );
   LOG.error(
   "Unable to deserialize the alert definition source during coercion",
   exception);

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.


[ambari] branch branch-2.6 updated: AMBARI-22696 Whitelist execute latency from Storm Ambari metrics

2018-01-22 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 3730c03  AMBARI-22696 Whitelist execute latency from Storm Ambari 
metrics
3730c03 is described below

commit 3730c03f5c0ae3a9ac510baf53a5c2666e16012e
Author: Jungtaek Lim 
AuthorDate: Tue Jan 23 05:33:21 2018 +0900

AMBARI-22696 Whitelist execute latency from Storm Ambari metrics
---
 .../resources/common-services/STORM/1.0.1/configuration/storm-site.xml  | 2 +-
 .../src/main/resources/stacks/HDP/2.5/services/stack_advisor.py | 1 +
 ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py   | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
index 558beaf..71e217e 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
@@ -128,7 +128,7 @@
   
   
 topology.metrics.consumer.register
-[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", 
"parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", 
"__process-latency", "__receive\\.population$", "__sendqueue\\.population$", 
"__execute-count", "__emit-count", "__ack-count", "__fail-count", 
"memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", "GC/.+\\.count$", 
"GC/.+\\.timeMs$"]}]
+[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", 
"parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", 
"__process-latency", "__execute-latency", "__receive\\.population$", 
"__sendqueue\\.population$", "__execute-count", "__emit-count", "__ack-count", 
"__fail-count", "memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", 
"GC/.+\\.count$", "GC/.+\\.timeMs$"]}]
 
 
   false
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 4187dc9..bf87be2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -542,6 +542,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
'[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '
'"parallelism.hint": 1, '
'"whitelist": ["kafkaOffset\\\..+/", 
"__complete-latency", "__process-latency", '
+   '"__execute-latency", '
'"__receive\\\.population$", 
"__sendqueue\\\.population$", "__execute-count", "__emit-count", '
'"__ack-count", "__fail-count", 
"memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '
'"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')
diff --git 
a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 8f27d79..7bdfcae 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -6040,6 +6040,7 @@ class TestHDP25StackAdvisor(TestCase):
 
self.assertEquals(configurations['storm-site']['properties']['topology.metrics.consumer.register'],
 '[{"class": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '

   '"parallelism.hint": 1, '

   '"whitelist": ["kafkaOffset\\\..+/", 
"__complete-latency", "__process-latency", '
+   
   '"__execute-latency", '

   '"__receive\\\.population$", 
"__sendqueue\\\.population$", "__execute-count", "__emit-count", '

   '"__ack-count", "__fail-count", 
"memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '

   '"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')

-- 
To stop receiving notification emails like this one, please contact
swa...@apache.org.
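
The whitelist entries are regular expressions that the metrics sink matches against metric names, so the one-token addition of "__execute-latency" is all that is needed to let bolt execute latency through. A quick sanity check of entries against sample names, assuming the sink applies them as unanchored regex searches (the $-anchored entries suggest as much):

import re

whitelist = ["kafkaOffset\\..+/", "__complete-latency", "__process-latency",
             "__execute-latency", "__receive\\.population$",
             "__execute-count", "GC/.+\\.timeMs$"]

def is_whitelisted(metric_name):
    # True if any whitelist pattern matches somewhere in the metric name.
    return any(re.search(pattern, metric_name) for pattern in whitelist)

print(is_whitelisted("__execute-latency"))      # True after this change
print(is_whitelisted("GC/PSMarkSweep.timeMs"))  # True
print(is_whitelisted("__transfer-count"))       # False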


[ambari] branch trunk updated: AMBARI-22696 Whitelist execute latency from Storm Ambari metrics

2018-01-22 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b275722  AMBARI-22696 Whitelist execute latency from Storm Ambari 
metrics
b275722 is described below

commit b275722355c7e67af918a18a8b90c684e5837af0
Author: Jungtaek Lim 
AuthorDate: Tue Jan 23 05:21:28 2018 +0900

AMBARI-22696 Whitelist execute latency from Storm Ambari metrics
---
 .../common-services/STORM/1.0.1.3.0/configuration/storm-site.xml| 2 +-
 .../main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py   | 1 +
 .../resources/common-services/STORM/1.0.1/configuration/storm-site.xml  | 2 +-
 .../src/main/resources/stacks/HDP/2.5/services/stack_advisor.py | 1 +
 ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py   | 1 +
 5 files changed, 5 insertions(+), 2 deletions(-)

diff --git 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
index 6b97fb6..61cd168 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/configuration/storm-site.xml
@@ -918,7 +918,7 @@
 
 
 topology.metrics.consumer.register
-[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", 
"parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", 
"__process-latency", "__receive\\.population$", "__sendqueue\\.population$", 
"__execute-count", "__emit-count", "__ack-count", "__fail-count", 
"memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", "GC/.+\\.count$", 
"GC/.+\\.timeMs$"]}]
+[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", 
"parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", 
"__process-latency", "__execute-latency", "__receive\\.population$", 
"__sendqueue\\.population$", "__execute-count", "__emit-count", "__ack-count", 
"__fail-count", "memory/heap\\.usedBytes$", "memory/nonHeap\\.usedBytes$", 
"GC/.+\\.count$", "GC/.+\\.timeMs$"]}]
 
 
 false
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
index 1d6bbe0..95a9dd9 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
+++ 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/service_advisor.py
@@ -304,6 +304,7 @@ class StormRecommender(service_advisor.ServiceAdvisor):
'[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", '
'"parallelism.hint": 1, '
'"whitelist": ["kafkaOffset\\\..+/", 
"__complete-latency", "__process-latency", '
+   '"__execute-latency", '
'"__receive\\\.population$", 
"__sendqueue\\\.population$", "__execute-count", "__emit-count", '
'"__ack-count", "__fail-count", 
"memory/heap\\\.usedBytes$", "memory/nonHeap\\\.usedBytes$", '
'"GC/.+\\\.count$", "GC/.+\\\.timeMs$"]}]')
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
index 558beaf..71e217e 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/storm-site.xml
@@ -128,7 +128,7 @@
   
   
 topology.metrics.consumer.register
-[{"class": 
"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink", 
"parallelism.hint": 1, "whitelist": ["kafkaOffset\\..+/", "__complete-latency", 
"__process-latency", "__receive\\.population$", "__sendqueue\\.population$", 
"__execute-count", "__emit-count", "__ac

[ambari] branch branch-3.0-ams updated (b34445b -> 28061fd)

2018-01-16 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a change to branch branch-3.0-ams
in repository https://gitbox.apache.org/repos/asf/ambari.git.


from b34445b  Merge pull request #68 from swagle/branch-3.0-ams
 new e4b75c7  AMBARI-22556 - Reduce load from STS health check (Mingjie 
Tang via jonathanhurley)
 new b931bfb  AMBARI-22563. Packages Cannot Be Installed When Yum 
Transactions Fail (Dmytro Grinenko via ncole)
 new f5002f4  AMBARI-22485 : Allow Ambari to support non-kerberos SASL 
mechanisms for Kafka - Addendum, fix incorrect config tag (ydavis via 
mradhakrishnan)
 new d084341  AMBARI-22445. Warn the user appropriately for default MySQL 
server install for Hive (Part 2) (vsubramanian)
 new f312fc0  AMBARI-22578. hive2 queries fails after adding any service to 
the cluster. (jaimin via yusaku)
 new 16ddd20  AMBARI-22353. Remove properties.json And Switch To Adding 
Properties to ResourceProviders Dynamically (dlysnichenko)
 new 50d5382  AMBARI-22582 Clean up Configs page. (atkach)
 new fe04da1  AMBARI-22586 - Remove Beacon conf-select Changes Since It's 
Not Part of HDP (jonathanhurley)
 new 32dcbb1  AMBARI-22587. Storm service check failed during PU due to CNF 
StormAtlasHook (ncole)
 new 47820ef  AMBARI-22585. Fix the wording on IPA integration requirements 
in the Enable Kerberos Wizard (rlevas)
 new 13914ce  Revert "AMBARI-22572. During cluster installation bower 
cannot resolve angularjs version (alexantonenko)"
 new 954a370  Revert "AMBARI-22566. Upgrade Angular for Ambari Admin View 
(alexantonenko)"
 new b8d4eaa  AMBARI-22566. Upgrade Angular for Ambari Admin View 
(alexantonenko)
 new 1a8fe72  AMBARI-22590 - Messages for some services during PU package 
installation indicate circular dependency (jonathanhurley)
 new 82c03ea  AMBARI-22594. Livy server start fails during EU with 'Address 
already in use' error (dlysnichenko)
 new 332dad5  AMBARI-22590 - Messages for some services during PU package 
installation indicate circular dependency (part2) (jonathanhurley)
 new 3cabd90  AMBARI-22591.MD interpreter fails with NPE 
(Zeppelin)(Prabhjyot Singh via Venkata Sairam)
 new c958f52  AMBARI-22598 - Pig service check failed after PU with 
LzoCodec CNF (jonathanhurley)
 new d265e2e  AMBARI-22583. Ambari should not force accounts created in IPA 
to be added a user named 'ambari-managed-principals' (rlevas)
 new fc138a4  AMBARI-22597. Jetty Session Timeout Is Overridden By Views 
Initialization. (mpapirkovskyy)
 new 6d6e21e  AMBARI-22583. Ambari should not force accounts created in IPA 
to be added a user named 'ambari-managed-principals' [amended] (rlevas)
 new 5eb446b  Revert "AMBARI-22583. Ambari should not force accounts 
created in IPA to be added a user named 'ambari-managed-principals' [amended] 
(rlevas)"
 new 3e5af95  Revert "AMBARI-22583. Ambari should not force accounts 
created in IPA to be added a user named 'ambari-managed-principals' (rlevas)"
 new 58d9617  AMBARI-22583. Ambari should not force accounts created in IPA 
to be added a user named 'ambari-managed-principals' (rlevas)
 new 37a4574  AMBARI-22492. A bad WebHDFS request is issued when starting 
Hive Metastore (amagyar)
 new 166adb5  AMBARI-22606. Service actions for R4ML not loaded (akovalenko)
 new 4d45852  AMBARI-22608. Update HBASE 2.0.0.3.0 with proper pid file 
name for Phoenix Query Server. (Sergey Soldatov via sshridhar).
 new 5d53703  AMBARI-22492. A bad WebHDFS request is issued when starting 
Hive Metastore addendum (amagyar)
 new efc2322  AMBARI-22611. Log Search IT: Expose the right port for 
Selenium in docker-compose file (oleewere)
 new cd850bf  AMBARI-22605. Tez service check hangs when adding service. 
(mpapirkovskyy)
 new 146f00b  AMBARI-22610 Log Search UI: fixes for search box 
autocomplete. (ababiichuk)
 new ea9dd7f  AMBARI-22589 Ambari web UI stack version page is empty due to 
NPE when target stack does not contain all services from the current stack 
(dili)
 new 2d548af  AMBARI-22595. Livy2 keytabs are not getting configured 
automatically in zeppelin. (prabhjyotsingh via sshridhar).
 new 25b146b  AMBARI-22612 Log Search UI: Fixes for Top-Level Buttons. 
(Istvan Tobias via ababiichuk)
 new 4a53d0b  AMBARI-22609. Install wizard stacks on select version step 
(alexantonenko)
 new ee88e79  AMBARI-22613 - Hive Queries Failing with Missing SnappyCodec 
Compression Type Due to Hive's Use of MapReduce Admin Env Property 
(jonathanhurley)
 new b07ec55  AMBARI-22616 noplugins switch should not be used for yum 
repos backed by RedHat Satellite/Spacewalk (dgrinenko)
 new c90a626  AMBARI-22617. Install Wizard Customize Services step has no 
vertical scroll (akovalenko)
 new d6980c2  AMBARI-22615 Log Search U

[ambari] branch branch-2.6 updated: AMBARI-22635: Ambari should create a dummy core-site.xml for Ranger plugins when namenode is not installed.

2018-01-11 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 21fb855  AMBARI-22635: Ambari should create a dummy core-site.xml for 
Ranger plugins when namenode is not installed.
21fb855 is described below

commit 21fb855c6be7adb473c13cb677474a74da3c5003
Author: Vishal Suvagia 
AuthorDate: Thu Dec 14 18:16:59 2017 +0530

AMBARI-22635: Ambari should create a dummy core-site.xml for Ranger plugins 
when namenode is not installed.
---
 .../libraries/functions/setup_ranger_plugin_xml.py   | 14 +++---
 .../KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py| 15 ---
 .../KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py  | 16 
 .../STORM/0.9.1/package/scripts/setup_ranger_storm.py| 15 ---
 4 files changed, 43 insertions(+), 17 deletions(-)

diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 485c1a6..78692cb 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -240,14 +240,14 @@ def setup_ranger_plugin_keystore(service_name, 
audit_db_is_enabled, stack_versio
 mode = 0640
   )
 
-def setup_core_site_for_required_plugins(component_user, component_group, 
create_core_site_path, config):
+def setup_core_site_for_required_plugins(component_user, component_group, 
create_core_site_path, configurations = {}, configuration_attributes = {}):
   XmlConfig('core-site.xml',
-conf_dir=create_core_site_path,
-configurations=config['configurations']['core-site'],
-configuration_attributes=config['configuration_attributes']['core-site'],
-owner=component_user,
-group=component_group,
-mode=0644
+conf_dir = create_core_site_path,
+configurations = configurations,
+configuration_attributes = configuration_attributes,
+owner = component_user,
+group = component_group,
+mode = 0644
   )
 
 def get_audit_configs(config):
diff --git 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
index 9aa09df..16eff94 100644
--- 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
+++ 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
@@ -81,9 +81,18 @@ def setup_ranger_kafka():
 group = params.user_group,
 mode = 0755
   )
-if params.stack_supports_core_site_for_ranger_plugin and 
params.enable_ranger_kafka and params.has_namenode and 
params.kerberos_security_enabled:
-  Logger.info("Stack supports core-site.xml creation for Ranger plugin, 
creating create core-site.xml from namenode configuraitions")
-  
setup_core_site_for_required_plugins(component_user=params.kafka_user,component_group=params.user_group,create_core_site_path
 = params.conf_dir, config = params.config)
+if params.stack_supports_core_site_for_ranger_plugin and 
params.enable_ranger_kafka and params.kerberos_security_enabled:
+  if params.has_namenode:
+Logger.info("Stack supports core-site.xml creation for Ranger plugin 
and Namenode is installed, creating create core-site.xml from namenode 
configurations")
+setup_core_site_for_required_plugins(component_user = 
params.kafka_user, component_group = params.user_group,
+ create_core_site_path = 
params.conf_dir, configurations = params.config['configurations']['core-site'],
+ configuration_attributes = 
params.config['configuration_attributes']['core-site'])
+  else:
+Logger.info("Stack supports core-site.xml creation for Ranger plugin 
and Namenode is not installed, creating create core-site.xml from default 
configurations")
+setup_core_site_for_required_plugins(component_user = 
params.kafka_user, component_group = params.user_group,
+ create_core_site_path = 
params.conf_dir, configurations = { 'hadoop.security.authentication' : 
'kerberos' if params.kerberos_security_enabled else 'simple' },
+ configuration_attributes = {})
+
 else:
   Logger.info("Stack does not support core-site.xml creation for Ranger 
plugin, skipping cor
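
The new else branch boils down to synthesizing a one-property dummy core-site.xml when no NameNode is installed, so Ranger plugins can still read hadoop.security.authentication. A condensed sketch of the decision; XmlConfig and the params object are Ambari library pieces, and the dict below is the entire dummy payload:

def core_site_payload(has_namenode, config, kerberos_security_enabled):
    # With a NameNode, reuse the cluster's real core-site; without one,
    # fall back to a minimal dummy carrying only the auth mode.
    if has_namenode:
        return (config['configurations']['core-site'],
                config['configuration_attributes']['core-site'])
    dummy = {'hadoop.security.authentication':
                 'kerberos' if kerberos_security_enabled else 'simple'}
    return dummy, {}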

[ambari] 01/01: Merge pull request #68 from swagle/branch-3.0-ams

2018-01-08 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch branch-3.0-ams
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit b34445b4ac748cb2a91761739b7178718cf3879c
Merge: a4293b7 2d40fc1
Author: Siddharth 
AuthorDate: Mon Jan 8 14:17:56 2018 -0800

Merge pull request #68 from swagle/branch-3.0-ams

[AMBARI-22744][ambari-metrics] Fix issues with webapp deployment with new 
Hadoop common changes. Addendum. (swagle)

 .../yarn/server/applicationhistoryservice/AMSApplicationServer.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" .


[ambari] branch branch-3.0-ams updated (a4293b7 -> b34445b)

2018-01-08 Thread swagle
This is an automated email from the ASF dual-hosted git repository.

swagle pushed a change to branch branch-3.0-ams
in repository https://gitbox.apache.org/repos/asf/ambari.git.


from a4293b7  AMBARI-22744. Fix issues with webapp deployment with new 
Hadoop common changes. (swagle)
 add 2d40fc1  AMBARI-22744. Fix issues with webapp deployment with new 
Hadoop common changes. Addendum. (swagle)
 new b34445b  Merge pull request #68 from swagle/branch-3.0-ams

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../yarn/server/applicationhistoryservice/AMSApplicationServer.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

-- 
To stop receiving notification emails like this one, please contact
['"commits@ambari.apache.org" '].


[2/2] ambari git commit: AMBARI-22744. Fix issues with webapp deployment with new Hadoop common changes. (swagle)

2018-01-05 Thread swagle
AMBARI-22744. Fix issues with webapp deployment with new Hadoop common changes. 
(swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a4293b7d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a4293b7d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a4293b7d

Branch: refs/heads/branch-3.0-ams
Commit: a4293b7dcff343600c1497fba27a9c21c6621ebb
Parents: 25c1812
Author: Siddharth Wagle 
Authored: Fri Jan 5 13:34:57 2018 -0800
Committer: Siddharth Wagle 
Committed: Fri Jan 5 13:34:57 2018 -0800

--
 .../AMSApplicationServer.java   | 10 ++--
 .../metrics/loadsimulator/LoadRunner.java   | 40 +++---
 .../loadsimulator/MetricsLoadSimulator.java |  6 +--
 .../loadsimulator/MetricsSenderWorker.java  | 21 +++-
 .../data/HostMetricsGenerator.java  |  8 ++-
 .../data/MetricsGeneratorConfigurer.java| 12 ++---
 .../loadsimulator/net/RestMetricsSender.java|  9 ++--
 .../metrics/loadsimulator/util/Json.java|  4 +-
 .../timeline/HBaseTimelineMetricsService.java   |  1 -
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  1 -
 .../timeline/TimelineMetricConfiguration.java   |  2 -
 .../TimelineMetricDistributedCache.java |  6 +--
 .../metrics/timeline/TimelineMetricStore.java   | 12 ++---
 .../timeline/TimelineMetricStoreWatcher.java| 14 ++---
 .../timeline/TimelineMetricsIgniteCache.java| 57 ++--
 .../aggregators/AbstractTimelineAggregator.java | 35 ++--
 .../timeline/aggregators/DownSamplerUtils.java  | 11 ++--
 .../TimelineMetricAggregatorFactory.java| 20 +++
 .../TimelineMetricAppAggregator.java| 20 +++
 .../TimelineMetricClusterAggregator.java| 20 +++
 ...cClusterAggregatorSecondWithCacheSource.java | 14 ++---
 .../TimelineMetricFilteringHostAggregator.java  | 14 ++---
 .../TimelineMetricHostAggregator.java   | 16 +++---
 .../aggregators/TimelineMetricReadHelper.java   | 10 ++--
 .../timeline/aggregators/TopNDownSampler.java   | 14 ++---
 .../v2/TimelineMetricClusterAggregator.java | 16 +++---
 .../TimelineMetricFilteringHostAggregator.java  | 14 ++---
 .../v2/TimelineMetricHostAggregator.java| 14 ++---
 .../availability/AggregationTaskRunner.java | 24 -
 .../availability/CheckpointManager.java |  4 +-
 .../MetricCollectorHAController.java| 26 +
 .../OnlineOfflineStateModelFactory.java |  4 +-
 .../discovery/TimelineMetricMetadataKey.java|  4 +-
 .../TimelineMetricMetadataManager.java  | 43 +++
 .../discovery/TimelineMetricMetadataSync.java   |  7 +--
 ...tTimelineMetricsSeriesAggregateFunction.java |  9 ++--
 .../metrics/timeline/query/Condition.java   |  4 +-
 .../timeline/query/ConditionBuilder.java|  6 +--
 .../timeline/query/ConnectionProvider.java  |  2 -
 .../timeline/query/DefaultCondition.java| 13 ++---
 .../query/DefaultPhoenixDataSource.java | 12 ++---
 .../metrics/timeline/query/EmptyCondition.java  |  4 +-
 .../query/PhoenixConnectionProvider.java|  5 +-
 .../timeline/query/PhoenixTransactSQL.java  | 15 +++---
 .../query/SplitByMetricNamesCondition.java  |  4 +-
 .../metrics/timeline/query/TopNCondition.java   |  3 +-
 .../timeline/source/RawMetricsSource.java   |  1 -
 .../timeline/uuid/HashBasedUuidGenStrategy.java |  8 +--
 .../timeline/uuid/MetricUuidGenStrategy.java|  1 -
 .../timeline/uuid/RandomUuidGenStrategy.java|  6 +--
 .../timeline/TimelineWriter.java|  4 +-
 .../webapp/TimelineWebServices.java |  3 --
 ambari-metrics/pom.xml  |  2 +-
 53 files changed, 306 insertions(+), 329 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a4293b7d/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/AMSApplicationServer.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/AMSApplicationServer.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/AMSApplicationServer.java
index f576362..38d46ef 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/AMSApplicationServer.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/AMSApplicationServer.java
@@ -20,7 +20,6 @@ package 
org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import org.apache.commons.logging.Log;
 import

[1/2] ambari git commit: AMBARI-22744. Fix issues with webapp deployment with new Hadoop common changes. (swagle)

2018-01-05 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-3.0-ams 25c181210 -> a4293b7dc


http://git-wip-us.apache.org/repos/asf/ambari/blob/a4293b7d/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
index 96af877..fa5f55a 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/discovery/TimelineMetricMetadataSync.java
@@ -17,9 +17,6 @@
  */
 package 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -27,6 +24,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
+
 /**
  * Sync metadata info with the store
  */

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4293b7d/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/function/AbstractTimelineMetricsSeriesAggregateFunction.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/function/AbstractTimelineMetricsSeriesAggregateFunction.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/function/AbstractTimelineMetricsSeriesAggregateFunction.java
index 634e51d..5a5dde4 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/function/AbstractTimelineMetricsSeriesAggregateFunction.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/function/AbstractTimelineMetricsSeriesAggregateFunction.java
@@ -17,10 +17,6 @@
  */
 package 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function;
 
-import com.google.common.base.Joiner;
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -29,6 +25,11 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import com.google.common.base.Joiner;
+
 public abstract class AbstractTimelineMetricsSeriesAggregateFunction
 implements TimelineMetricsSeriesAggregateFunction {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a4293b7d/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
index 4e04e6c..8d8cca3 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/query/Condition.java
@@ -1,9 +1,9 @@
 package 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query;
 
-import org.apache.hadoop.metrics2.sink.timeline.Precision;
-
 import java.util.List;
 
+import org.apache.hadoop.metrics2.sink.timeline.Precision;
+
 /**
  * License

[5/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
deleted file mode 100644
index 4c8d745..000
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ /dev/null
@@ -1,784 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice;
-
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.file.tfile.TFile;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptFinishDataProto;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationAttemptStartDataProto;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationFinishDataProto;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ApplicationStartDataProto;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerFinishDataProto;
-import 
org.apache.hadoop.yarn.proto.ApplicationHistoryServerProtos.ContainerStartDataProto;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptFinishData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptHistoryData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationAttemptStartData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationFinishData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationHistoryData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ApplicationStartData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerFinishData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerStartData;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptFinishDataPBImpl;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationAttemptStartDataPBImpl;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationFinishDataPBImpl;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.ApplicationStartDat

[2/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
index 741bb3c..c4cebd6 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/AbstractMiniHBaseClusterTest.java
@@ -20,9 +20,9 @@ package 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.OUT_OFF_BAND_DATA_TIME_ALLOWANCE;
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.UPSERT_METRICS_SQL;
+import static 
org.apache.phoenix.end2end.ParallelStatsDisabledIT.tearDownMiniCluster;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.powermock.api.easymock.PowerMock.mockStatic;
 
 import java.io.IOException;
 import java.sql.Connection;
@@ -40,8 +40,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;
@@ -204,7 +204,7 @@ public abstract class AbstractMiniHBaseClusterTest extends 
BaseTest {
   new PhoenixHBaseAccessor(new TimelineMetricConfiguration(new 
Configuration(), metricsConf),
 new PhoenixConnectionProvider() {
   @Override
-  public HBaseAdmin getHBaseAdmin() throws IOException {
+  public Admin getHBaseAdmin() throws IOException {
 try {
   return driver.getConnectionQueryServices(null, null).getAdmin();
 } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index 57f9796..2a5dd0b 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -30,6 +30,7 @@ import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.ti
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME;
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.METRICS_RECORD_TABLE_NAME;
 import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES;
+import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.PHOENIX_TABLES_REGEX_PATTERN;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -43,12 +44,14 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-impo

[6/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and 
phoenix upgraded. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8329f46b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8329f46b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8329f46b

Branch: refs/heads/branch-3.0-ams
Commit: 8329f46b930a7b4faf98835824d3384b946f076b
Parents: 397af77
Author: Siddharth Wagle 
Authored: Thu Dec 21 15:56:53 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Dec 21 15:56:53 2017 -0800

--
 .../conf/unix/ambari-metrics-collector  |6 +-
 .../ambari-metrics-timelineservice/pom.xml  |6 +-
 .../AMSApplicationServer.java   |  145 ++
 .../ApplicationHistoryClientService.java|  215 ---
 .../ApplicationHistoryManager.java  |  146 --
 .../ApplicationHistoryManagerImpl.java  |  250 ---
 .../ApplicationHistoryReader.java   |  117 --
 .../ApplicationHistoryServer.java   |  208 ---
 .../ApplicationHistoryStore.java|   37 -
 .../ApplicationHistoryWriter.java   |  112 --
 .../FileSystemApplicationHistoryStore.java  |  784 --
 .../MemoryApplicationHistoryStore.java  |  274 
 .../NullApplicationHistoryStore.java|  127 --
 .../metrics/timeline/PhoenixHBaseAccessor.java  |  141 +-
 .../timeline/TimelineMetricConfiguration.java   |3 -
 .../query/PhoenixConnectionProvider.java|3 +-
 .../timeline/query/PhoenixTransactSQL.java  |3 +
 .../timeline/EntityIdentifier.java  |  100 --
 .../timeline/LeveldbTimelineStore.java  | 1473 --
 .../timeline/MemoryTimelineStore.java   |  360 -
 .../timeline/package-info.java  |   20 -
 .../webapp/AHSController.java   |   55 -
 .../webapp/AHSLogsPage.java |   55 -
 .../webapp/AHSView.java |   90 --
 .../webapp/AHSWebApp.java   |   66 -
 .../webapp/AHSWebServices.java  |  162 --
 .../webapp/AMSController.java   |   37 +
 .../webapp/AMSWebApp.java   |   42 +
 .../webapp/AppAttemptPage.java  |   69 -
 .../webapp/AppPage.java |   71 -
 .../webapp/ContainerPage.java   |   41 -
 .../webapp/NavBlock.java|   51 -
 .../webapp/TimelineWebServices.java |  250 +--
 .../ApplicationHistoryStoreTestUtils.java   |   84 -
 .../TestApplicationHistoryClientService.java|  209 ---
 .../TestApplicationHistoryManagerImpl.java  |   76 -
 .../TestApplicationHistoryServer.java   |  267 
 .../TestFileSystemApplicationHistoryStore.java  |  233 ---
 .../TestMemoryApplicationHistoryStore.java  |  206 ---
 .../timeline/AbstractMiniHBaseClusterTest.java  |6 +-
 .../timeline/ITPhoenixHBaseAccessor.java|   47 +-
 .../timeline/TestLeveldbTimelineStore.java  |  253 ---
 .../timeline/TestMemoryTimelineStore.java   |   83 -
 .../timeline/TimelineStoreTestUtils.java|  789 --
 .../webapp/TestAHSWebApp.java   |  199 ---
 .../webapp/TestAHSWebServices.java  |  302 
 .../webapp/TestTimelineWebServices.java |  297 +---
 ambari-metrics/pom.xml  |   18 +-
 48 files changed, 405 insertions(+), 8183 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
 
b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
index 552be48..de764ec 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
@@ -25,7 +25,7 @@ HBASE_RS_PID=/var/run/ams-hbase/hbase-${USER}-regionserver.pid
 
 HBASE_DIR=/usr/lib/ams-hbase
 
-DAEMON_NAME=timelineserver
+DAEMON_NAME=ams-metrics-collector
 
 COLLECTOR_CONF_DIR=/etc/ambari-metrics-collector/conf
 HBASE_CONF_DIR=/etc/ams-hbase/conf
@@ -238,7 +238,7 @@ function start()
 echo "$(date) Launching in distributed mode. Assuming Hbase daemons up and 
running." | tee -a $STARTUPFILE
   fi
 
-   
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
+   
CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.AMSApplicationServer'
# YARN_OPTS="${YARN_OPTS} ${YARN_TIMEL

[1/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-3.0-ams 397af778b -> 8329f46b9


http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
index b093a2a..83e2a27 100644
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
+++ 
b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestTimelineWebServices.java
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertEquals;
 
 import javax.ws.rs.core.MediaType;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
@@ -33,7 +31,6 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TestTimelineMetricStore;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricStore;
 import 
org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TimelineStore;
-import 
org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.TestMemoryTimelineStore;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.Test;
@@ -49,10 +46,10 @@ import 
com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
+import junit.framework.Assert;
 
-public class TestTimelineWebServices extends JerseyTest {
 
-  private static TimelineStore store;
+public class TestTimelineWebServices extends JerseyTest {
   private static TimelineMetricStore metricStore;
   private long beforeTime;
 
@@ -63,13 +60,11 @@ public class TestTimelineWebServices extends JerseyTest {
   bind(YarnJacksonJaxbJsonProvider.class);
   bind(TimelineWebServices.class);
   bind(GenericExceptionHandler.class);
-  try{
-store = mockTimelineStore();
+  try {
 metricStore = new TestTimelineMetricStore();
   } catch (Exception e) {
 Assert.fail();
   }
-  bind(TimelineStore.class).toInstance(store);
   bind(TimelineMetricStore.class).toInstance(metricStore);
   serve("/*").with(GuiceContainer.class);
 }
@@ -84,59 +79,30 @@ public class TestTimelineWebServices extends JerseyTest {
 }
   }
 
-  private TimelineStore mockTimelineStore()
-  throws Exception {
-beforeTime = System.currentTimeMillis() - 1;
-TestMemoryTimelineStore store = new TestMemoryTimelineStore();
-store.setup();
-return store.getTimelineStore();
-  }
-
   public TestTimelineWebServices() {
 super(new WebAppDescriptor.Builder(
-"org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
-.contextListenerClass(GuiceServletConfig.class)
-.filterClass(com.google.inject.servlet.GuiceFilter.class)
-.contextPath("jersey-guice-filter")
-.servletPath("/")
-.clientConfig(new 
DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
-.build());
+  "org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
+  .contextListenerClass(GuiceServletConfig.class)
+  .filterClass(com.google.inject.servlet.GuiceFilter.class)
+  .contextPath("jersey-guice-filter")
+  .servletPath("/")
+  .clientConfig(new DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
+  .build());
   }
 
   @Test
   public void testAbout() throws Exception {
 WebResource r = resource();
 ClientResponse response = r.path("ws").path("v1").path("timeline")
-.accept(MediaType.APPLICATION_JSON)
-.get(ClientResponse.class);
+  .accept(MediaType.APPLICATION_JSON)
+  .get(ClientResponse.class);
 assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
 TimelineWebServices.AboutInfo about =
-response.getEntity(TimelineWebServices.AboutInfo.class);
+  response.getEntity(TimelineWebServices.AboutInfo.class);
 Assert.assertNotNull(about);
-Assert.assertEquals("Timeline API", about.getAbout());
-  }
-
-  private static void verifyEntities(TimelineEntities entities) {
-Assert.assertNotNull(entities);
-

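The refactor above drops the TimelineStore binding and keeps only the TimelineMetricStore; the web-service test still relies on the same Guice servlet wiring. A condensed sketch of that pattern (a stand-in module, not the full test class; assumes guice-servlet and jersey-guice on the classpath):

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;

public class SketchGuiceServletConfig extends GuiceServletContextListener {
  @Override
  protected Injector getInjector() {
    return Guice.createInjector(new ServletModule() {
      @Override
      protected void configureServlets() {
        // Bindings made here become injectable into Jersey resources, e.g.
        // bind(TimelineMetricStore.class).toInstance(new TestTimelineMetricStore());
        serve("/*").with(GuiceContainer.class); // route everything through Jersey
      }
    });
  }
}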
[3/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
deleted file mode 100644
index 3064d2d..000
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
-
-import java.util.Collections;
-import java.util.Set;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.webapp.WebServices;
-import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
-import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-@Singleton
-@Path("/ws/v1/applicationhistory")
-public class AHSWebServices extends WebServices {
-
-  @Inject
-  public AHSWebServices(ApplicationBaseProtocol appBaseProt) {
-super(appBaseProt);
-  }
-
-  @GET
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public AppsInfo get(@Context HttpServletRequest req,
-  @Context HttpServletResponse res) {
-return getApps(req, res, null, Collections.<String> emptySet(), null, null,
-  null, null, null, null, null, null, Collections.<String> emptySet());
-  }
-
-  @GET
-  @Path("/apps")
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  @Override
-  public AppsInfo getApps(@Context HttpServletRequest req,
-  @Context HttpServletResponse res, @QueryParam("state") String stateQuery,
-  @QueryParam("states") Set<String> statesQuery,
-  @QueryParam("finalStatus") String finalStatusQuery,
-  @QueryParam("user") String userQuery,
-  @QueryParam("queue") String queueQuery,
-  @QueryParam("limit") String count,
-  @QueryParam("startedTimeBegin") String startedBegin,
-  @QueryParam("startedTimeEnd") String startedEnd,
-  @QueryParam("finishedTimeBegin") String finishBegin,
-  @QueryParam("finishedTimeEnd") String finishEnd,
-  @QueryParam("applicationTypes") Set<String> applicationTypes) {
-init(res);
-validateStates(stateQuery, statesQuery);
-return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
-  userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
-  finishEnd, applicationTypes);
-  }
-
-  @GET
-  @Path("/apps/{appid}")
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  @Override
-  public AppInfo getApp(@Context HttpServletRequest req,
-  @Context HttpServletResponse res, @PathParam("appid") String appId) {
-init(res);
-return super.getApp(req, res, appId);
-  }
-
-  @GET
-  @Path("/apps/{appid}/appattempts")
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  @Override
-  public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
-  @Context HttpServletResponse res, @PathParam("appid") String appId) {
-init(res);
-return super.getAppAtte

[4/6] ambari git commit: AMBARI-22688. Fix AMS compilation issues and unit test with hbase, hadoop and phoenix upgraded. (swagle)

2017-12-21 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/8329f46b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
--
diff --git 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
 
b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
deleted file mode 100644
index edd4842..000
--- 
a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/timeline/LeveldbTimelineStore.java
+++ /dev/null
@@ -1,1473 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.applicationhistoryservice.timeline;
-
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.collections.map.LRUMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
-import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
-import 
org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity;
-import 
org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.fusesource.leveldbjni.JniDBFactory;
-import org.iq80.leveldb.DB;
-import org.iq80.leveldb.DBIterator;
-import org.iq80.leveldb.Options;
-import org.iq80.leveldb.ReadOptions;
-import org.iq80.leveldb.WriteBatch;
-import org.iq80.leveldb.WriteOptions;
-
-import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper.readReverseOrderedLong;
-import static 
org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.GenericObjectMapper.writeReverseOrderedLong;
-
-/**
- * An implementation of an application timeline store backed by leveldb.
- *
- * There are three sections of the db, the start time section,
- * the entity section, and the indexed entity section.
- *
- * The start time section is used to retrieve the unique start time for
- * a given entity. Its values each contain a start time while its keys are of
- * the form:
- * 
- *   START_TIME_LOOKUP_PREFIX + entity type + entity id
- *
- * The entity section is ordered by entity type, then entity start time
- * descending, then entity ID. There are four sub-sections of the entity
- * section: events, primary filters, related entities,
- * and other info. The event entries have event info serialized into their
- * values. The other info entries have values corresponding to the values of
- * the other info name/value map for the entry (note the names are contained
- * in the key). All other entries have empty values. The key structure is as
- * follows:
- * 
- *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id
- *
- *   ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id +
- * EVENTS_COLU

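The javadoc above is cut off by the archive, but the key layout it describes can be made concrete. A simplified sketch of how such keys are assembled (the prefix byte values are hypothetical placeholders, and writeReverseOrderedLong below only approximates the GenericObjectMapper helper named in the imports):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class TimelineKeySketch {
  // Hypothetical prefix values; the real constants live in LeveldbTimelineStore.
  static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes(StandardCharsets.UTF_8);
  static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes(StandardCharsets.UTF_8);

  // Encode a long so lexicographic byte order equals descending numeric order:
  // larger (more recent) start times sort first, as the entity section requires.
  static byte[] writeReverseOrderedLong(long l) {
    byte[] b = new byte[8];
    long v = Long.MAX_VALUE - l;
    for (int i = 7; i >= 0; i--) { b[i] = (byte) v; v >>>= 8; }
    return b;
  }

  static byte[] concat(byte[]... parts) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    for (byte[] p : parts) out.write(p);
    return out.toByteArray();
  }

  // START_TIME_LOOKUP_PREFIX + entity type + entity id
  static byte[] startTimeKey(String type, String id) throws IOException {
    return concat(START_TIME_LOOKUP_PREFIX,
        type.getBytes(StandardCharsets.UTF_8), id.getBytes(StandardCharsets.UTF_8));
  }

  // ENTITY_ENTRY_PREFIX + entity type + revstarttime + entity id
  static byte[] entityKey(String type, long startTime, String id) throws IOException {
    return concat(ENTITY_ENTRY_PREFIX, type.getBytes(StandardCharsets.UTF_8),
        writeReverseOrderedLong(startTime), id.getBytes(StandardCharsets.UTF_8));
  }
}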
ambari git commit: AMBARI-22686. Disabled stack still appears in the UI if VDF is available. (swagle)

2017-12-21 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/trunk 36d0271f7 -> e404100b9


AMBARI-22686. Disabled stack still appears in the UI if VDF is available. 
(swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e404100b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e404100b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e404100b

Branch: refs/heads/trunk
Commit: e404100b99ffac8f14e36b46770466deeb5760d7
Parents: 36d0271
Author: Siddharth Wagle 
Authored: Thu Dec 21 13:30:14 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Dec 21 13:30:14 2017 -0800

--
 .../server/api/services/AmbariMetaInfo.java | 12 
 .../server/api/services/AmbariMetaInfoTest.java | 32 ++--
 .../RepositoryVersionResourceProviderTest.java  |  5 +--
 3 files changed, 37 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/e404100b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index b1eba8f..9fee0ae 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -139,7 +139,7 @@ public class AmbariMetaInfo {
   private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private String commonKerberosDescriptorFileLocation;
-  private Map<String, VersionDefinitionXml> versionDefinitions = null;
+  Map<String, VersionDefinitionXml> versionDefinitions = null;
 
 
   @Inject
@@ -1368,12 +1368,12 @@ public class AmbariMetaInfo {
 versionDefinitions = new HashMap<>();
 
 for (StackInfo stack : getStacks()) {
-  for (VersionDefinitionXml definition : stack.getVersionDefinitions()) {
-versionDefinitions.put(String.format("%s-%s-%s", stack.getName(),
-stack.getVersion(), definition.release.version), definition);
-  }
-
   if (stack.isActive() && stack.isValid()) {
+for (VersionDefinitionXml definition : stack.getVersionDefinitions()) {
+  versionDefinitions.put(String.format("%s-%s-%s", stack.getName(),
+stack.getVersion(), definition.release.version), definition);
+}
+
 try {
   // !!! check for a "latest-vdf" one.  This will be used for the 
default if one is not found.
   VersionDefinitionXml xml = stack.getLatestVersionDefinition();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e404100b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 9285526..f98cffd 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -133,8 +133,7 @@ public class AmbariMetaInfoTest {
   private static final int OS_CNT = 4;
 
   private static TestAmbariMetaInfo metaInfo = null;
-  private final static Logger LOG =
-  LoggerFactory.getLogger(AmbariMetaInfoTest.class);
+  private final static Logger LOG = 
LoggerFactory.getLogger(AmbariMetaInfoTest.class);
   private static final String FILE_NAME = "hbase-site.xml";
   private static final String HADOOP_ENV_FILE_NAME = "hadoop-env.xml";
   private static final String HDFS_LOG4J_FILE_NAME = "hdfs-log4j.xml";
@@ -1902,6 +1901,35 @@ public class AmbariMetaInfoTest {
 Assert.assertEquals("src/test/resources/widgets.json", 
widgetsFile.getPath());
   }
 
+  @Test
+  public void testGetVersionDefinitionsForDisabledStack() throws 
AmbariException {
+Map<String, VersionDefinitionXml> versionDefinitions = metaInfo.getVersionDefinitions();
+Assert.assertNotNull(versionDefinitions);
+// Check presence
+Map.Entry<String, VersionDefinitionXml> vdfEntry = null;
+for (Map.Entry<String, VersionDefinitionXml> entry : versionDefinitions.entrySet()) {
+  if (entry.getKey().equals("HDP-2.2.1")) {
+vdfEntry = entry;
+  }
+}
+Assert.assertNotNull("Candidate stack and vdf for test case.", vdfEntry);
+StackInfo stackInfo = metaInfo.getStack("HDP", "2.2.1");
+// Strange that this is not immutable but works for this test !
+stackInfo.setActive(false);
+
+// Hate to use reflection hence changed con

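Two things happen in this commit: the VDF registration loop now runs only for stacks that are both active and valid, so a disabled stack contributes no version definitions, and the versionDefinitions field became package-private so the test can reset it without reflection. A condensed sketch of the guarded loop (stubbed types standing in for StackInfo and VersionDefinitionXml, not the real classes):

import java.util.HashMap;
import java.util.Map;

class VdfGuardSketch {
  // Hypothetical stand-in for StackInfo.
  static class Stack {
    String name, version;
    boolean active, valid;
    Map<String, String> vdfs = new HashMap<>(); // release version -> definition
  }

  static Map<String, String> register(Iterable<Stack> stacks) {
    Map<String, String> versionDefinitions = new HashMap<>();
    for (Stack stack : stacks) {
      if (stack.active && stack.valid) { // disabled stacks contribute no VDFs
        for (Map.Entry<String, String> d : stack.vdfs.entrySet()) {
          versionDefinitions.put(
              String.format("%s-%s-%s", stack.name, stack.version, d.getKey()),
              d.getValue());
        }
      }
    }
    return versionDefinitions;
  }
}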
ambari git commit: AMBARI-22686. Disabled stack still appears in the UI if VDF is available. (swagle)

2017-12-21 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 0cf1defb1 -> e04470dc5


AMBARI-22686. Disabled stack still appears in the UI if VDF is available. 
(swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e04470dc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e04470dc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e04470dc

Branch: refs/heads/branch-2.6
Commit: e04470dc54c30ab0a637f1bb243a95999cfb5b0a
Parents: 0cf1def
Author: Siddharth Wagle 
Authored: Thu Dec 21 13:17:14 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Dec 21 13:17:14 2017 -0800

--
 .../server/api/services/AmbariMetaInfo.java | 13 
 .../server/api/services/AmbariMetaInfoTest.java | 32 ++--
 .../RepositoryVersionResourceProviderTest.java  |  5 +--
 3 files changed, 38 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/e04470dc/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 4ac8574..11346c4 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -140,7 +140,8 @@ public class AmbariMetaInfo {
   private File extensionsRoot;
   private File serverVersionFile;
   private File customActionRoot;
-  private Map<String, VersionDefinitionXml> versionDefinitions = null;
+
+  Map<String, VersionDefinitionXml> versionDefinitions = null;
 
 
   @Inject
@@ -1428,12 +1429,12 @@ public class AmbariMetaInfo {
 versionDefinitions = new HashMap<>();
 
 for (StackInfo stack : getStacks()) {
-  for (VersionDefinitionXml definition : stack.getVersionDefinitions()) {
-versionDefinitions.put(String.format("%s-%s-%s", stack.getName(),
-stack.getVersion(), definition.release.version), definition);
-  }
-
   if (stack.isActive() && stack.isValid()) {
+for (VersionDefinitionXml definition : stack.getVersionDefinitions()) {
+  versionDefinitions.put(String.format("%s-%s-%s", stack.getName(),
+stack.getVersion(), definition.release.version), definition);
+}
+
 try {
   // !!! check for a "latest-vdf" one.  This will be used for the 
default if one is not found.
   VersionDefinitionXml xml = stack.getLatestVersionDefinition();

http://git-wip-us.apache.org/repos/asf/ambari/blob/e04470dc/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index e9567f1..1fe4641 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -133,8 +133,7 @@ public class AmbariMetaInfoTest {
   private static final int OS_CNT = 4;
 
   private static TestAmbariMetaInfo metaInfo = null;
-  private final static Logger LOG =
-  LoggerFactory.getLogger(AmbariMetaInfoTest.class);
+  private final static Logger LOG = 
LoggerFactory.getLogger(AmbariMetaInfoTest.class);
   private static final String FILE_NAME = "hbase-site.xml";
   private static final String HADOOP_ENV_FILE_NAME = "hadoop-env.xml";
   private static final String HDFS_LOG4J_FILE_NAME = "hdfs-log4j.xml";
@@ -1906,6 +1905,35 @@ public class AmbariMetaInfoTest {
 
Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
+  @Test
+  public void testGetVersionDefinitionsForDisabledStack() throws 
AmbariException {
+Map<String, VersionDefinitionXml> versionDefinitions = metaInfo.getVersionDefinitions();
+Assert.assertNotNull(versionDefinitions);
+// Check presence
+Map.Entry<String, VersionDefinitionXml> vdfEntry = null;
+for (Map.Entry<String, VersionDefinitionXml> entry : versionDefinitions.entrySet()) {
+  if (entry.getKey().equals("HDP-2.2.1")) {
+vdfEntry = entry;
+  }
+}
+Assert.assertNotNull("Candidate stack and vdf for test case.", vdfEntry);
+StackInfo stackInfo = metaInfo.getStack("HDP", "2.2.1");
+// Strange that this is not immutable but works for this test !
+stackInfo.setActive(false);
+
+// Hate to use reflection hence changed contract to be package priv

[2/2] ambari git commit: AMBARI-22514, AMBARI-22653. Ambari Infra Manager: solr data exporting jobs and integration test environment. (Krisztian Kasa via swagle)

2017-12-21 Thread swagle
AMBARI-22514, AMBARI-22653. Ambari Infra Manager: solr data exporting jobs and 
integration test environment. (Krisztian Kasa via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/36d0271f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/36d0271f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/36d0271f

Branch: refs/heads/trunk
Commit: 36d0271f74a70f5cfeca0e5ca0ebeb795fab6138
Parents: a15fc7f
Author: Siddharth Wagle 
Authored: Thu Dec 21 13:24:03 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Dec 21 13:24:03 2017 -0800

--
 ambari-infra/ambari-infra-manager-it/pom.xml| 155 +
 .../org/apache/ambari/infra/InfraClient.java|  93 
 .../ambari/infra/InfraManagerStories.java   | 108 +
 .../ambari/infra/OffsetDateTimeConverter.java   |  39 
 .../ambari/infra/steps/AbstractInfraSteps.java  | 223 +++
 .../ambari/infra/steps/ExportJobsSteps.java | 106 +
 .../src/test/resources/log4j.properties |  16 ++
 .../resources/stories/infra_api_tests.story |  23 ++
 .../ambari-infra-manager/docker/Dockerfile  |   6 +-
 .../docker/docker-compose.yml   |  81 +++
 .../docker/infra-manager-docker-compose.sh  | 105 +
 .../apache/ambari/infra/job/ObjectSource.java   |  23 ++
 .../infra/job/archive/AbstractFileAction.java   |  33 +++
 .../infra/job/archive/CompositeFileAction.java  |   7 +-
 .../ambari/infra/job/archive/Document.java  |   1 -
 .../archive/DocumentExportConfiguration.java|  74 +++---
 .../job/archive/DocumentExportJobListener.java  |  23 ++
 .../job/archive/DocumentExportProperties.java   | 140 +---
 .../job/archive/DocumentExportPropertyMap.java  |  38 
 .../job/archive/DocumentExportStepListener.java |  47 
 .../infra/job/archive/DocumentItemReader.java   |   8 +-
 .../infra/job/archive/DocumentIterator.java |   5 +-
 .../infra/job/archive/DocumentSource.java   |   7 +-
 .../ambari/infra/job/archive/FileAction.java|   2 +-
 .../job/archive/LocalDocumentItemWriter.java|   8 +-
 .../ambari/infra/job/archive/S3Properties.java  |  57 ++---
 .../ambari/infra/job/archive/S3Uploader.java|  23 +-
 .../infra/job/archive/SolrDocumentIterator.java |   3 +-
 .../infra/job/archive/SolrDocumentSource.java   |  22 +-
 .../infra/job/archive/SolrQueryBuilder.java |  28 ++-
 .../infra/job/archive/SolrQueryProperties.java  |  40 +++-
 .../infra/job/archive/TarGzCompressor.java  |   2 +-
 .../src/main/resources/infra-manager.properties |  48 +++-
 .../archive/DocumentExportPropertiesTest.java   |  54 +
 .../job/archive/DocumentItemReaderTest.java |   8 +-
 .../archive/LocalDocumentItemWriterTest.java|   8 +-
 .../infra/job/archive/SolrQueryBuilderTest.java |  18 +-
 .../job/archive/SolrQueryPropertiesTest.java|  54 +
 ambari-infra/pom.xml|   5 +-
 39 files changed, 1532 insertions(+), 209 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/36d0271f/ambari-infra/ambari-infra-manager-it/pom.xml
--
diff --git a/ambari-infra/ambari-infra-manager-it/pom.xml 
b/ambari-infra/ambari-infra-manager-it/pom.xml
new file mode 100644
index 000..97e8ea0
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager-it/pom.xml
@@ -0,0 +1,155 @@
+
+
+http://maven.apache.org/POM/4.0.0";
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+
+  
+ambari-infra
+org.apache.ambari
+2.0.0.0-SNAPSHOT
+  
+
+  Ambari Infra Manager Integration Tests
+  http://maven.apache.org
+  4.0.0
+
+  ambari-infra-manager-it
+
+  
+4.0.5
+2.20
+localhost
+NONE
+  
+
+  
+
+  org.apache.solr
+  solr-solrj
+  ${solr.version}
+
+
+  com.amazonaws
+  aws-java-sdk-s3
+  1.11.5
+
+
+  commons-io
+  commons-io
+  2.5
+
+
+
+  org.slf4j
+  slf4j-api
+  1.7.20
+
+
+  org.slf4j
+  slf4j-log4j12
+  1.7.20
+
+
+
+  org.jbehave
+  jbehave-core
+  ${jbehave.version}
+  test
+
+
+  junit
+  junit
+  test
+
+
+  org.easymock
+  easymock
+  3.4
+  test
+
+
+  org.hamcrest
+  hamcrest-all
+  1.3
+  test
+
+  
+
+  
+target/classes
+
+  
+src/test/java/
+
+  **/*.story
+
+  
+  
+src/test/resources
+  
+
+  
+
+  
+
+  it
+  
+
+  it
+
+  
+  
+
+  
+

[1/2] ambari git commit: AMBARI-22514, AMBARI-22653. Ambari Infra Manager: solr data exporting jobs and integration test environment. (Krisztian Kasa via swagle)

2017-12-21 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/trunk a15fc7fc1 -> 36d0271f7


http://git-wip-us.apache.org/repos/asf/ambari/blob/36d0271f/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrDocumentSource.java
--
diff --git 
a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrDocumentSource.java
 
b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrDocumentSource.java
index 2181ba3..5ded9ac 100644
--- 
a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrDocumentSource.java
+++ 
b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrDocumentSource.java
@@ -18,6 +18,8 @@
  */
 package org.apache.ambari.infra.job.archive;
 
+import org.apache.ambari.infra.job.CloseableIterator;
+import org.apache.ambari.infra.job.ObjectSource;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
@@ -29,27 +31,29 @@ import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.time.format.DateTimeFormatter;
 
-public class SolrDocumentSource implements DocumentSource {
+public class SolrDocumentSource implements ObjectSource<Document> {
   public static final DateTimeFormatter SOLR_DATETIME_FORMATTER = 
DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX");
   private static final Logger LOG = 
LoggerFactory.getLogger(SolrDocumentSource.class);
 
-  private final String zkHost;
+  private final String zkConnectionString;
   private final SolrQueryProperties properties;
-  private final String endValue;
+  private final String start;
+  private final String end;
 
-  public SolrDocumentSource(String zkHost, SolrQueryProperties properties, 
String endValue) {
-this.zkHost = zkHost;
+  public SolrDocumentSource(String zkConnectionString, SolrQueryProperties 
properties, String start, String end) {
+this.zkConnectionString = zkConnectionString;
 this.properties = properties;
-this.endValue = endValue;
+this.start = start;
+this.end = end;
   }
 
   @Override
-  public DocumentIterator open(Document current, int rows) {
-CloudSolrClient client = new 
CloudSolrClient.Builder().withZkHost(zkHost).build();
+  public CloseableIterator<Document> open(Document current, int rows) {
+CloudSolrClient client = new 
CloudSolrClient.Builder().withZkHost(zkConnectionString).build();
 client.setDefaultCollection(properties.getCollection());
 
 SolrQuery query = properties.toQueryBuilder()
-.setEndValue(endValue)
+.setInterval(start, end)
 .setDocument(current)
 .build();
 query.setRows(rows);

http://git-wip-us.apache.org/repos/asf/ambari/blob/36d0271f/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrQueryBuilder.java
--
diff --git 
a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrQueryBuilder.java
 
b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrQueryBuilder.java
index d0f6d40..b3ea14e 100644
--- 
a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrQueryBuilder.java
+++ 
b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/job/archive/SolrQueryBuilder.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static org.apache.commons.lang.StringUtils.isBlank;
 import static org.apache.solr.client.solrj.SolrQuery.ORDER.asc;
 
 public class SolrQueryBuilder {
@@ -32,6 +33,7 @@ public class SolrQueryBuilder {
   public static final Pattern PARAMETER_PATTERN = 
Pattern.compile("\\$\\{[a-z]+\\}");
 
   private String queryText;
+  private String startValue;
   private String endValue;
   private String filterQueryText;
   private Document document;
@@ -51,6 +53,12 @@ public class SolrQueryBuilder {
 return this;
   }
 
+  public SolrQueryBuilder setInterval(String startValue, String endValue) {
+this.startValue = startValue;
+this.endValue = endValue;
+return this;
+  }
+
   public SolrQueryBuilder setFilterQueryText(String filterQueryText) {
 this.filterQueryText = filterQueryText;
 return this;
@@ -71,19 +79,21 @@ public class SolrQueryBuilder {
 SolrQuery solrQuery = new SolrQuery();
 
 String query = queryText;
-query = setEndValueOn(query);
+query = setValueOn(query, "${start}", startValue);
+query = setValueOn(query, "${end}", endValue);
 
 solrQuery.setQuery(query);
 
 if (filterQueryText != null) {
   String filterQuery = filterQueryText;
-  filterQuery = setEndValueOn(filterQuery);
+  filterQuery = setValueOn(filterQue

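The hunk above is truncated before the body of setValueOn is shown; a self-contained sketch consistent with its call sites, assuming a plain literal replacement of the ${start} and ${end} markers (the helper body and null handling are assumptions, not the committed code):

public class SolrQuerySubstitutionSketch {
  // Replace a ${...} marker with a value, leaving the text untouched when
  // no value was supplied (start and end are optional in the builder).
  static String setValueOn(String text, String parameter, String value) {
    if (text == null || value == null) {
      return text;
    }
    return text.replace(parameter, value);
  }

  public static void main(String[] args) {
    String queryText = "logtime:[${start} TO ${end}]"; // illustrative query only
    String query = setValueOn(queryText, "${start}", "2017-12-01T00:00:00.000Z");
    query = setValueOn(query, "${end}", "2017-12-21T00:00:00.000Z");
    // prints: logtime:[2017-12-01T00:00:00.000Z TO 2017-12-21T00:00:00.000Z]
    System.out.println(query);
  }
}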
ambari git commit: AMBARI-22635. Ambari should create a dummy core-site.xml for Ranger plugins when namenode is not installed. (Vishal Suvagia via swagle)

2017-12-13 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/trunk e46575a17 -> a3d4c3da5


AMBARI-22635. Ambari should create a dummy core-site.xml for Ranger plugins 
when namenode is not installed. (Vishal Suvagia via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a3d4c3da
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a3d4c3da
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a3d4c3da

Branch: refs/heads/trunk
Commit: a3d4c3da56eeec22e864978a7a8be39586948ff5
Parents: e46575a
Author: Siddharth Wagle 
Authored: Wed Dec 13 13:41:33 2017 -0800
Committer: Siddharth Wagle 
Committed: Wed Dec 13 13:41:33 2017 -0800

--
 .../libraries/functions/setup_ranger_plugin_xml.py  | 14 +++---
 .../package/scripts/setup_ranger_kafka.py   | 14 +++---
 .../0.8.1/package/scripts/setup_ranger_kafka.py | 15 ---
 .../0.5.0.2.2/package/scripts/setup_ranger_knox.py  | 16 
 .../0.5.0.3.0/package/scripts/setup_ranger_knox.py  | 16 
 .../0.9.1/package/scripts/setup_ranger_storm.py | 15 ---
 .../1.0.1.3.0/package/scripts/setup_ranger_storm.py | 15 ---
 7 files changed, 78 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a3d4c3da/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
--
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 485c1a6..78692cb 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -240,14 +240,14 @@ def setup_ranger_plugin_keystore(service_name, 
audit_db_is_enabled, stack_versio
 mode = 0640
   )
 
-def setup_core_site_for_required_plugins(component_user, component_group, 
create_core_site_path, config):
+def setup_core_site_for_required_plugins(component_user, component_group, 
create_core_site_path, configurations = {}, configuration_attributes = {}):
   XmlConfig('core-site.xml',
-conf_dir=create_core_site_path,
-configurations=config['configurations']['core-site'],
-configuration_attributes=config['configuration_attributes']['core-site'],
-owner=component_user,
-group=component_group,
-mode=0644
+conf_dir = create_core_site_path,
+configurations = configurations,
+configuration_attributes = configuration_attributes,
+owner = component_user,
+group = component_group,
+mode = 0644
   )
 
 def get_audit_configs(config):

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3d4c3da/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
 
b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
index e9719aa..3a0b991 100644
--- 
a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
+++ 
b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/setup_ranger_kafka.py
@@ -81,9 +81,17 @@ def setup_ranger_kafka():
 group = params.user_group,
 mode = 0755
   )
-if params.stack_supports_core_site_for_ranger_plugin and 
params.enable_ranger_kafka and params.has_namenode and params.security_enabled:
-  Logger.info("Stack supports core-site.xml creation for Ranger plugin, 
creating create core-site.xml from namenode configuraitions")
-  
setup_core_site_for_required_plugins(component_user=params.kafka_user,component_group=params.user_group,create_core_site_path
 = params.conf_dir, config = params.config)
+if params.stack_supports_core_site_for_ranger_plugin and 
params.enable_ranger_kafka and params.security_enabled:
+  if params.has_namenode:
+Logger.info("Stack supports core-site.xml creation for Ranger plugin 
and Namenode is installed, creating core-site.xml from namenode 
configurations")
+setup_core_site_for_required_plugins(component_user = 
params.kafka_user, component_group = params.user_group,
+ create_core_site_path = 
params.conf_dir, configurations = params.config['configurations']['core-site']

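The signature change above lets callers hand setup_core_site_for_required_plugins explicit configuration dictionaries instead of the whole command config. As a language-neutral illustration of what ultimately gets rendered (a hypothetical helper, not Ambari's XmlConfig resource; XML escaping omitted for brevity), a minimal Java sketch that writes such a map in the Hadoop core-site.xml layout:

import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.Map;

public class CoreSiteWriterSketch {
  // Render name/value pairs in the <configuration><property> layout
  // that Hadoop clients (and Ranger plugins) expect.
  static void writeCoreSite(Map<String, String> props, String path) throws IOException {
    try (Writer w = Files.newBufferedWriter(Paths.get(path), StandardCharsets.UTF_8)) {
      w.write("<configuration>\n");
      for (Map.Entry<String, String> e : props.entrySet()) {
        w.write("  <property>\n");
        w.write("    <name>" + e.getKey() + "</name>\n");
        w.write("    <value>" + e.getValue() + "</value>\n");
        w.write("  </property>\n");
      }
      w.write("</configuration>\n");
    }
  }

  public static void main(String[] args) throws IOException {
    Map<String, String> props = new LinkedHashMap<>();
    props.put("hadoop.security.authentication", "kerberos"); // illustrative value
    writeCoreSite(props, "core-site.xml");
  }
}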
ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-05 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 e75e743fe -> 7ec13024b


AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP 
repos. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7ec13024
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7ec13024
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7ec13024

Branch: refs/heads/branch-2.6
Commit: 7ec13024b409f1aa341e8960e8ab45adbda37284
Parents: e75e743
Author: Siddharth Wagle 
Authored: Tue Dec 5 10:12:13 2017 -0800
Committer: Siddharth Wagle 
Committed: Tue Dec 5 10:12:24 2017 -0800

--
 .../libraries/functions/lzo_utils.py| 42 ++--
 .../4.0/properties/stack_features.json  |  5 ---
 2 files changed, 22 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/7ec13024/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
--
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
index 68ee607..9af016d 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
@@ -42,16 +42,13 @@ def get_lzo_packages():
   elif OSCheck.is_ubuntu_family():
 lzo_packages += ["liblzo2-2"]
 
-
-  stack_version_unformatted = 
stack_features.get_stack_feature_version(Script.get_config()) # only used to 
check stack_feature, NOT as package version!
-  if stack_version_unformatted and 
check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_unformatted):
-if OSCheck.is_ubuntu_family():
-  lzo_packages += 
[script_instance.format_package_name("hadooplzo-${stack_version}") ,
-   
script_instance.format_package_name("hadooplzo-${stack_version}-native")]
-else:
-  lzo_packages += 
[script_instance.format_package_name("hadooplzo_${stack_version}"),
-   
script_instance.format_package_name("hadooplzo_${stack_version}-native")]
-
+  if OSCheck.is_ubuntu_family():
+lzo_packages += 
[script_instance.format_package_name("hadooplzo-${stack_version}") ,
+ 
script_instance.format_package_name("hadooplzo-${stack_version}-native")]
+  else:
+lzo_packages += 
[script_instance.format_package_name("hadooplzo_${stack_version}"),
+ 
script_instance.format_package_name("hadooplzo_${stack_version}-native")]
+
   return lzo_packages
 
 def should_install_lzo():
@@ -59,16 +56,21 @@ def should_install_lzo():
   Return true if lzo is enabled via core-site.xml and GPL license (required 
for lzo) is accepted.
   """
   config = Script.get_config()
-  io_compression_codecs = 
default("/configurations/core-site/io.compression.codecs", None)
-  lzo_enabled = io_compression_codecs is not None and 
"com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-  if not lzo_enabled:
-return False
-
-  is_gpl_license_accepted = default("/hostLevelParams/gpl_license_accepted", 
False)
-  if not is_gpl_license_accepted:
-Logger.warning(INSTALLING_LZO_WITHOUT_GPL)
-return False
+  stack_version_unformatted = stack_features.get_stack_feature_version(config)
+  if check_stack_feature(StackFeature.LZO, stack_version_unformatted):
+io_compression_codecs = 
default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and 
"com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+if not lzo_enabled:
+  return False
+
+is_gpl_license_accepted = default("/hostLevelParams/gpl_license_accepted", 
False)
+if not is_gpl_license_accepted:
+  Logger.warning(INSTALLING_LZO_WITHOUT_GPL)
+  return False
+  else:
+Logger.info("This stack does not indicate that it supports LZO 
installation.")
+return False # No LZO support
 
   return True
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7ec13024/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
index d29efb2..0fb89ee 100755
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/

[7/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present 
in IOP repos. YARN service issue. (swagle)"

This reverts commit ab1d01bd1872a20a02c467894b1a294dfe96ba1b.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d8003b39
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d8003b39
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d8003b39

Branch: refs/heads/branch-2.6
Commit: d8003b39f60e110762875e5ac4da8f6f46fa32c1
Parents: bfca1bd
Author: Siddharth Wagle 
Authored: Tue Dec 5 07:54:41 2017 -0800
Committer: Siddharth Wagle 
Committed: Tue Dec 5 07:54:41 2017 -0800

--
 .../alert_hive_interactive_thrift_port.py   |  230 ---
 .../HIVE/package/alerts/alert_hive_metastore.py |  276 
 .../package/alerts/alert_hive_thrift_port.py|  286 
 .../package/alerts/alert_llap_app_status.py |  303 
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 ---
 .../package/etc/hive-schema-0.12.0.mysql.sql|  777 --
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 -
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 --
 .../services/HIVE/package/files/addMysqlUser.sh |   39 -
 .../services/HIVE/package/files/hcatSmoke.sh|   41 -
 .../services/HIVE/package/files/hiveSmoke.sh|   24 -
 .../HIVE/package/files/hiveTezSetup.cmd |   58 -
 .../services/HIVE/package/files/hiveserver2.sql |   23 -
 .../HIVE/package/files/hiveserver2Smoke.sh  |   32 -
 .../services/HIVE/package/files/pigSmoke.sh |   18 -
 .../HIVE/package/files/removeMysqlUser.sh   |   33 -
 .../HIVE/package/files/startMetastore.sh|   25 -
 .../HIVE/package/files/templetonSmoke.sh|   95 --
 .../services/HIVE/package/scripts/__init__.py   |   19 -
 .../4.2.5/services/HIVE/package/scripts/hcat.py |   81 -
 .../HIVE/package/scripts/hcat_client.py |   79 -
 .../HIVE/package/scripts/hcat_service_check.py  |   86 --
 .../4.2.5/services/HIVE/package/scripts/hive.py |  562 ---
 .../HIVE/package/scripts/hive_client.py |   62 -
 .../HIVE/package/scripts/hive_interactive.py|  360 -
 .../HIVE/package/scripts/hive_metastore.py  |  203 ---
 .../HIVE/package/scripts/hive_server.py |  161 --
 .../package/scripts/hive_server_interactive.py  |  626 
 .../HIVE/package/scripts/hive_server_upgrade.py |  134 --
 .../HIVE/package/scripts/hive_service.py|  185 ---
 .../package/scripts/hive_service_interactive.py |  108 --
 .../HIVE/package/scripts/mysql_server.py|   64 -
 .../HIVE/package/scripts/mysql_service.py   |   49 -
 .../HIVE/package/scripts/mysql_users.py |   70 -
 .../HIVE/package/scripts/mysql_utils.py |   35 -
 .../services/HIVE/package/scripts/params.py |   30 -
 .../HIVE/package/scripts/params_linux.py|  873 ---
 .../HIVE/package/scripts/params_windows.py  |   74 -
 .../HIVE/package/scripts/service_check.py   |  192 ---
 .../HIVE/package/scripts/setup_ranger_hive.py   |  156 --
 .../scripts/setup_ranger_hive_interactive.py|   77 -
 .../HIVE/package/scripts/status_params.py   |  124 --
 .../services/HIVE/package/scripts/webhcat.py|  135 --
 .../HIVE/package/scripts/webhcat_server.py  |   88 --
 .../HIVE/package/scripts/webhcat_service.py |   96 --
 .../package/scripts/webhcat_service_check.py|  128 --
 .../hadoop-metrics2-hivemetastore.properties.j2 |   56 -
 .../hadoop-metrics2-hiveserver2.properties.j2   |   55 -
 .../templates/hadoop-metrics2-llapdaemon.j2 |   54 -
 .../hadoop-metrics2-llaptaskscheduler.j2|   54 -
 .../HIVE/package/templates/hive.conf.j2 |   35 -
 .../package/templates/startHiveserver2.sh.j2|   24 -
 .../templates/startHiveserver2Interactive.sh.j2 |   24 -
 .../package/templates/templeton_smoke.pig.j2|   24 -
 .../package/templates/zkmigrator_jaas.conf.j2   |   26 -
 .../services/OOZIE/package/scripts/oozie.py |   11 +-
 .../4.2/services/HDFS/package/scripts/hdfs.py   |3 +-
 .../4.2/services/HIVE/package/scripts/hive.py   |3 +-
 .../4.2/services/OOZIE/package/scripts/oozie.py |   13 +-
 .../4.2/services/YARN/package/scripts/yarn.py   |5 +-
 60 files changed, 15 insertions(+), 9841 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
de

[1/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 bfca1bd12 -> d8003b39f


http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
deleted file mode 100644
index c4e99fa..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
-from resource_management.libraries.functions.security_commons import 
build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
-  FILE_TYPE_XML
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class WebHCatServer(Script):
-  def install(self, env):
-import params
-self.install_packages(env)
-
-  def start(self, env, upgrade_type=None):
-import params
-env.set_params(params)
-self.configure(env) # FOR SECURITY
-webhcat_service(action='start', upgrade_type=upgrade_type)
-
-  def stop(self, env, upgrade_type=None):
-import params
-env.set_params(params)
-webhcat_service(action='stop')
-
-  def configure(self, env):
-import params
-env.set_params(params)
-webhcat()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class WebHCatServerWindows(WebHCatServer):
-  def status(self, env):
-import status_params
-env.set_params(status_params)
-check_windows_service_status(status_params.webhcat_server_win_service_name)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class WebHCatServerDefault(WebHCatServer):
-  def status(self, env):
-import status_params
-env.set_params(status_params)
-check_process_status(status_params.webhcat_pid_file)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-Logger.info("Executing WebHCat Stack Upgrade pre-restart")
-import params
-env.set_params(params)
-
-if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version): 
-  stack_select.select_packages(params.version)
-
-  def get_log_folder(self):
-import params
-return params.hcat_log_dir
-  
-  def get_user(self):
-import params
-return params.webhcat_user
-
-if __name__ == "__main__":
-  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
deleted file mode 100644
index ec8a0b7..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS I

[2/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
deleted file mode 100644
index 18e2978..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
+++ /dev/null
@@ -1,873 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import status_params
-import ambari_simplejson as json # simplejson is much faster compared to the 
Python 2.6 json module and has the same function set.
-import os
-
-from urlparse import urlparse
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from ambari_commons.os_check import OSCheck
-
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.copy_tarball import 
STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import 
get_not_managed_resources
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
-from resource_management.libraries.functions.stack_features import 
get_stack_feature_version
-from resource_management.libraries.functions.get_port_from_url import 
get_port_from_url
-from resource_management.libraries.functions import upgrade_summary
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries import functions
-from resource_management.libraries.functions.setup_atlas_hook import 
has_atlas_in_cluster
-from ambari_commons.ambari_metrics_helper import 
select_metric_collector_hosts_from_hostnames
-from resource_management.libraries.functions.setup_ranger_plugin_xml import 
get_audit_configs, generate_ranger_service_config
-from resource_management.libraries.functions.get_architecture import 
get_architecture
-from resource_management.libraries.functions.version import get_major_version
-
-from resource_management.core.utils import PasswordString
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import checked_call
-from ambari_commons.credential_store_helper import 
get_password_from_credential_store
-
-# Default log4j version; put config files under /etc/hive/conf
-log4j_version = '1'
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-architecture = get_architecture()
-sudo = AMBARI_SUDO_BINARY
-
-credential_store_enabled = False
-if 'credentialStoreEnabled' in config:
-  credential_store_enabled = config['credentialStoreEnabled']
-
-stack_root = status_params.stack_root
-stack_name = status_params.stack_name
-stack_name_uppercase = stack_name.upper()
-agent_stack_retry_on_unavailability = 
config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", 
int)
-
-# Needed since this is an Atlas Hook service.
-cluster_name = config['clusterName']
-
-# node hostname
-hostname = config["hostname"]
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = status_params.stack_version_unformatted
-stack_version_formatted_major = status_params.stack_version_formatted_major
-
-# this is not available on INSTALL action because <stack-selector-tool> is not 
available
-stack_version_formatted = functions.get_stack_version('hive-server2')
-major_stack_version = get_major_version(stack_version_formatted_major)
-
-# New Cl
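
The params_linux.py module deleted above resolves typed configuration values with calls such as expect("/hostLevelParams/agent_stack_retry_count", int). A rough sketch of that path-plus-type lookup, with a hand-rolled expect rather than the library's implementation:

def expect(config, path, expected_type):
    """Walk a /-separated path into nested dicts and enforce the value's type."""
    value = config
    for key in path.strip("/").split("/"):
        value = value[key]
    if not isinstance(value, expected_type):
        raise TypeError("%s: expected %s, got %r"
                        % (path, expected_type.__name__, value))
    return value

config = {"hostLevelParams": {"agent_stack_retry_count": 5,
                              "agent_stack_retry_on_unavailability": "false"}}
print(expect(config, "/hostLevelParams/agent_stack_retry_count", int))  # 5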

[5/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
deleted file mode 100644
index bc6486b..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
+++ /dev/null
@@ -1,1406 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-"SD_ID" bigint NOT NULL,
-"BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-"INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-"CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-"SD_ID" bigint NOT NULL,
-"COMMENT" character varying(256) DEFAULT NULL::character varying,
-"COLUMN_NAME" character varying(128) NOT NULL,
-"TYPE_NAME" character varying(4000) NOT NULL,
-"INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-"CD_ID" bigint NOT NULL,
-"COMMENT" character varying(4000),
-"COLUMN_NAME" character varying(128) NOT NULL,
-"TYPE_NAME" character varying(4000),
-"INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-"DB_ID" bigint NOT NULL,
-"PARAM_KEY" character varying(180) NOT NULL,
-"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-"DB_ID" bigint NOT NULL,
-"DESC" character varying(4000) DEFAULT NULL::character varying,
-"DB_LOCATION_URI" character varying(4000) NOT NULL,
-"NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-"DB_GRANT_ID" bigint NOT NULL,
-"CREATE_TIME" bigint NOT NULL,
-"DB_ID" bigint,
-"GRANT_OPTION" smallint NOT NULL,
-"GRANTOR" character varying(128) DEFAULT NULL::character varying,
-"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-"DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-"USER_GRANT_ID" bigint NOT NULL,
-"CREATE_TIME" bigint NOT NULL,
-"GRANT_OPTION" smallint NOT NULL,
-"GRANTOR" character varying(128) DEFAULT NULL::character varying,
-"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-"USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-"INDEX_ID" bigint NOT NULL,
-"CREATE_TIME" bigint NOT NULL,
-"DEFERRED_REBUILD" boolean NOT NULL,
-"INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character 
varying,
-"INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-"INDEX_TBL_ID" bigint,
-"LAST_ACCESS_TIME" bigint NOT NULL,
-"ORIG_TBL_ID" bigint,
-"SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-"INDEX_ID" bigint NOT NULL,
-"PARAM_KEY" character varying(256) NOT NULL,
-"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-"CLASS_NAME" character vary

[4/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index b860c6e..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,562 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import glob
-from urlparse import urlparse
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import copy_tarball
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.core.resources.system import File, Execute, Directory
-from resource_management.core.source import StaticFile, Template, 
DownloadSource, InlineTemplate
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.format import format
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import as_sudo
-from resource_management.core.shell import quote_bash_args
-from resource_management.core.logger import Logger
-from resource_management.core import utils
-from resource_management.libraries.functions.setup_atlas_hook import 
has_atlas_in_cluster, setup_atlas_hook
-from resource_management.libraries.functions.security_commons import 
update_credential_provider_path
-from resource_management.libraries.functions.lzo_utils import 
install_lzo_if_needed
-from ambari_commons.constants import SERVICE
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hive(name=None):
-  import params
-
-  XmlConfig("hive-site.xml",
-conf_dir = params.hive_conf_dir,
-configurations = params.config['configurations']['hive-site'],
-owner=params.hive_user,
-
configuration_attributes=params.config['configuration_attributes']['hive-site']
-  )
-
-  if name in ["hiveserver2","metastore"]:
-# Manually overriding service logon user & password set by the 
installation package
-service_name = params.service_map[name]
-ServiceConfig(service_name,
-  action="change_user",
-  username = params.hive_user,
-  password = Script.get_password(params.hive_user))
-Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), 
logoutput=True, user=params.hadoop_user)
-
-  if name == 'metastore':
-if params.init_metastore_schema:
-  check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd 
--service schematool -info '
-'-dbType {hive_metastore_db_type} '
-'-userName {hive_metastore_user_name} '
-'-passWord 
{hive_metastore_user_passwd!p}'
-'&set EXITCODE=%ERRORLEVEL%&exit /B 
%EXITCODE%"', #cmd "feature", propagate the process exit code manually
-hive_bin=params.hive_bin,
-
hive_metastore_db_type=params.hive_metastore_db_type,
-
hive_metastore_user_name=params.hive_metastore_user_name,
-
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
-  try:
-Execute(check_schema_created_cmd)
-  except Fail:
-create_schema_cmd = format('cmd /c {hi
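
The Windows branch of hive.py above probes the metastore schema with "schematool -info" and, only when that probe fails, falls back to creating it. The same check-then-init pattern in plain Python (commands are illustrative and assume a hive binary on PATH):

import subprocess

def ensure_metastore_schema(db_type, user, password):
    """Probe the schema; initialize it only if the probe fails."""
    common = ["hive", "--service", "schematool",
              "-dbType", db_type, "-userName", user, "-passWord", password]
    try:
        subprocess.check_call(common + ["-info"])
    except (subprocess.CalledProcessError, OSError):
        subprocess.check_call(common + ["-initSchema"])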

[3/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
deleted file mode 100644
index 32322cd..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
+++ /dev/null
@@ -1,626 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-# Python Imports
-import subprocess
-import os
-import re
-import time
-import shutil
-from datetime import datetime
-import json
-
-# Ambari Commons & Resource Management imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.check_process_status import 
check_process_status
-from resource_management.core.source import InlineTemplate
-from resource_management.core.resources.system import Execute, Directory
-
-# Imports needed for Rolling/Express Upgrade
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-
-from resource_management.core import shell
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from ambari_commons import OSCheck, OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.security_commons import 
build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
-  FILE_TYPE_XML
-
-# Local Imports
-from setup_ranger_hive import setup_ranger_hive
-from hive_service_interactive import hive_service_interactive
-from hive_interactive import hive_interactive
-from hive_server import HiveServerDefault
-from setup_ranger_hive_interactive import setup_ranger_hive_interactive
-
-import traceback
-
-class HiveServerInteractive(Script):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveServerInteractiveDefault(HiveServerInteractive):
-def install(self, env):
-  import params
-  self.install_packages(env)
-
-def configure(self, env):
-  import params
-  env.set_params(params)
-  hive_interactive(name='hiveserver2')
-
-def pre_upgrade_restart(self, env, upgrade_type=None):
-  Logger.info("Executing Hive Server Interactive Stack Upgrade 
pre-restart")
-  import params
-  env.set_params(params)
-
-  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version):
-stack_select.select_packages(params.version)
-
-# Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
-resource_created = copy_to_hdfs(
-  "hive2",
-  params.user_group,
-  params.hdfs_user,
-  skip=params.sysprep_skip_copy_tarballs_hdfs)
-
-resource_created = copy_to_hdfs(
-  "tez_hive2",
-  params.user_group,
-  params.hdfs_user,
-  skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
-
-if resource_created:
-  params.HdfsResource(None, action="execute")
-
-def start(self, env, upgrade_type=None):
-  import params
-  env.set_params(params)
-  self.configure(env)
-
-  if params.security_enabled:
-# Do the security setup, internally calls do_kinit()
-self.setup_security()
-
-  # TODO : We need to have conditional [re]start of LLAP once the "status check 
command" for LLAP is ready.
-  # Check status and based on that decide on [re]starting.
-
-
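
pre_upgrade_restart above copies two tarballs to HDFS and remembers, in a single resource_created flag, whether either copy actually changed anything before triggering the deferred HdfsResource execution. The accumulation idiom in isolation (copy_if_missing is a hypothetical stand-in for copy_to_hdfs):

def copy_if_missing(name, present):
    """Pretend upload: return True only when something actually changed."""
    changed = name not in present
    present.add(name)
    return changed

present = {"tez_hive2"}  # tez tarball already uploaded
resource_created = copy_if_missing("hive2", present)
resource_created = copy_if_missing("tez_hive2", present) or resource_created
if resource_created:
    print("running deferred HDFS actions")  # fires: hive2 was missing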

[6/7] ambari git commit: Revert "AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. YARN service issue. (swagle)"

2017-12-05 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/d8003b39/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
deleted file mode 100644
index b0415b1..000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
+++ /dev/null
@@ -1,777 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- --
--- Server version  5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, 
FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin 
DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` 
(`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` 
(`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT 
NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COL
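
The MySQL dump above pins CHARACTER SET latin1 COLLATE latin1_bin on identifier columns, making comparisons byte-wise and therefore case-sensitive. Roughly the same contrast, demonstrated with sqlite3's built-in BINARY and NOCASE collations:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (name_bin TEXT COLLATE BINARY, "
             "name_ci TEXT COLLATE NOCASE)")
conn.execute("INSERT INTO t VALUES ('Default', 'Default')")
print(conn.execute("SELECT COUNT(*) FROM t WHERE name_bin = 'default'").fetchone())  # (0,)
print(conn.execute("SELECT COUNT(*) FROM t WHERE name_ci = 'default'").fetchone())   # (1,)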

[7/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP 
repos. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ab1d01bd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ab1d01bd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ab1d01bd

Branch: refs/heads/branch-2.6
Commit: ab1d01bd1872a20a02c467894b1a294dfe96ba1b
Parents: dc969b4
Author: Siddharth Wagle 
Authored: Mon Dec 4 13:51:54 2017 -0800
Committer: Siddharth Wagle 
Committed: Mon Dec 4 13:52:06 2017 -0800

--
 .../alert_hive_interactive_thrift_port.py   |  230 +++
 .../HIVE/package/alerts/alert_hive_metastore.py |  276 
 .../package/alerts/alert_hive_thrift_port.py|  286 
 .../package/alerts/alert_llap_app_status.py |  303 
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 +++
 .../package/etc/hive-schema-0.12.0.mysql.sql|  777 ++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 +
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 ++
 .../services/HIVE/package/files/addMysqlUser.sh |   39 +
 .../services/HIVE/package/files/hcatSmoke.sh|   41 +
 .../services/HIVE/package/files/hiveSmoke.sh|   24 +
 .../HIVE/package/files/hiveTezSetup.cmd |   58 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh  |   32 +
 .../services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh   |   33 +
 .../HIVE/package/files/startMetastore.sh|   25 +
 .../HIVE/package/files/templetonSmoke.sh|   95 ++
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../4.2.5/services/HIVE/package/scripts/hcat.py |   81 +
 .../HIVE/package/scripts/hcat_client.py |   79 +
 .../HIVE/package/scripts/hcat_service_check.py  |   86 ++
 .../4.2.5/services/HIVE/package/scripts/hive.py |  562 +++
 .../HIVE/package/scripts/hive_client.py |   62 +
 .../HIVE/package/scripts/hive_interactive.py|  360 +
 .../HIVE/package/scripts/hive_metastore.py  |  203 +++
 .../HIVE/package/scripts/hive_server.py |  161 ++
 .../package/scripts/hive_server_interactive.py  |  626 
 .../HIVE/package/scripts/hive_server_upgrade.py |  134 ++
 .../HIVE/package/scripts/hive_service.py|  185 +++
 .../package/scripts/hive_service_interactive.py |  108 ++
 .../HIVE/package/scripts/mysql_server.py|   64 +
 .../HIVE/package/scripts/mysql_service.py   |   49 +
 .../HIVE/package/scripts/mysql_users.py |   70 +
 .../HIVE/package/scripts/mysql_utils.py |   35 +
 .../services/HIVE/package/scripts/params.py |   30 +
 .../HIVE/package/scripts/params_linux.py|  873 +++
 .../HIVE/package/scripts/params_windows.py  |   74 +
 .../HIVE/package/scripts/service_check.py   |  192 +++
 .../HIVE/package/scripts/setup_ranger_hive.py   |  156 ++
 .../scripts/setup_ranger_hive_interactive.py|   77 +
 .../HIVE/package/scripts/status_params.py   |  124 ++
 .../services/HIVE/package/scripts/webhcat.py|  135 ++
 .../HIVE/package/scripts/webhcat_server.py  |   88 ++
 .../HIVE/package/scripts/webhcat_service.py |   96 ++
 .../package/scripts/webhcat_service_check.py|  128 ++
 .../hadoop-metrics2-hivemetastore.properties.j2 |   56 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   55 +
 .../templates/hadoop-metrics2-llapdaemon.j2 |   54 +
 .../hadoop-metrics2-llaptaskscheduler.j2|   54 +
 .../HIVE/package/templates/hive.conf.j2 |   35 +
 .../package/templates/startHiveserver2.sh.j2|   24 +
 .../templates/startHiveserver2Interactive.sh.j2 |   24 +
 .../package/templates/templeton_smoke.pig.j2|   24 +
 .../package/templates/zkmigrator_jaas.conf.j2   |   26 +
 .../services/OOZIE/package/scripts/oozie.py |   11 +-
 .../4.2/services/HDFS/package/scripts/hdfs.py   |3 +-
 .../4.2/services/HIVE/package/scripts/hive.py   |3 +-
 .../4.2/services/OOZIE/package/scripts/oozie.py |   13 +-
 .../4.2/services/YARN/package/scripts/yarn.py   |5 +-
 60 files changed, 9841 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
new file mode 100644
index 000..8d48412
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks
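
This commit restores several alert scripts (alert_hive_thrift_port.py and friends) whose core job is deciding OK versus CRITICAL from whether a daemon port answers within a timeout. A stripped-down sketch of such a check; host, port, and threshold here are hypothetical:

import socket
import time

def check_port(host, port, timeout=5.0):
    """Return (state, detail) in the spirit of an Ambari port alert."""
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return "OK", "connected in %.3fs" % (time.time() - start)
    except OSError as err:
        return "CRITICAL", str(err)

print(check_port("127.0.0.1", 10000))  # 10000: HiveServer2's default Thrift port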

[1/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 dc969b4f7 -> ab1d01bd1


http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
new file mode 100644
index 000..c4e99fa
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_server.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.security_commons import 
build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
+  FILE_TYPE_XML
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class WebHCatServer(Script):
+  def install(self, env):
+import params
+self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+import params
+env.set_params(params)
+self.configure(env) # FOR SECURITY
+webhcat_service(action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+import params
+env.set_params(params)
+webhcat_service(action='stop')
+
+  def configure(self, env):
+import params
+env.set_params(params)
+webhcat()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class WebHCatServerWindows(WebHCatServer):
+  def status(self, env):
+import status_params
+env.set_params(status_params)
+check_windows_service_status(status_params.webhcat_server_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class WebHCatServerDefault(WebHCatServer):
+  def status(self, env):
+import status_params
+env.set_params(status_params)
+check_process_status(status_params.webhcat_pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+Logger.info("Executing WebHCat Stack Upgrade pre-restart")
+import params
+env.set_params(params)
+
+if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version): 
+  stack_select.select_packages(params.version)
+
+  def get_log_folder(self):
+import params
+return params.hcat_log_dir
+  
+  def get_user(self):
+import params
+return params.webhcat_user
+
+if __name__ == "__main__":
+  WebHCatServer().execute()
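
pre_upgrade_restart above only calls stack_select.select_packages when the target version supports rolling upgrade, as reported by check_stack_feature. A toy version of that gate; the feature table and minimum version below are made up for illustration:

FEATURES = {"rolling_upgrade": "2.2.0.0"}  # feature -> minimum stack version

def version_tuple(version):
    return tuple(int(part) for part in version.split("."))

def check_stack_feature(feature, version):
    return version_tuple(version) >= version_tuple(FEATURES[feature])

params_version = "2.6.4.0"
if params_version and check_stack_feature("rolling_upgrade", params_version):
    print("selecting stack packages for", params_version)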

http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
new file mode 100644
index 000..ec8a0b7
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/webhcat_service.py
@@ -0,0 +1,96 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS

[5/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
new file mode 100644
index 000..bc6486b
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1406 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+"SD_ID" bigint NOT NULL,
+"BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+"INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+"CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+"SD_ID" bigint NOT NULL,
+"COMMENT" character varying(256) DEFAULT NULL::character varying,
+"COLUMN_NAME" character varying(128) NOT NULL,
+"TYPE_NAME" character varying(4000) NOT NULL,
+"INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+"CD_ID" bigint NOT NULL,
+"COMMENT" character varying(4000),
+"COLUMN_NAME" character varying(128) NOT NULL,
+"TYPE_NAME" character varying(4000),
+"INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+"DB_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(180) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+"DB_ID" bigint NOT NULL,
+"DESC" character varying(4000) DEFAULT NULL::character varying,
+"DB_LOCATION_URI" character varying(4000) NOT NULL,
+"NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+"DB_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DB_ID" bigint,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+"USER_GRANT_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"GRANT_OPTION" smallint NOT NULL,
+"GRANTOR" character varying(128) DEFAULT NULL::character varying,
+"GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+"PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+"USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+"INDEX_ID" bigint NOT NULL,
+"CREATE_TIME" bigint NOT NULL,
+"DEFERRED_REBUILD" boolean NOT NULL,
+"INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character 
varying,
+"INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+"INDEX_TBL_ID" bigint,
+"LAST_ACCESS_TIME" bigint NOT NULL,
+"ORIG_TBL_ID" bigint,
+"SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+"INDEX_ID" bigint NOT NULL,
+"PARAM_KEY" character varying(256) NOT NULL,
+"PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; 
Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+"CLASS_NAME" character varying(

[3/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
new file mode 100644
index 000..32322cd
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive_server_interactive.py
@@ -0,0 +1,626 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import subprocess
+import os
+import re
+import time
+import shutil
+from datetime import datetime
+import json
+
+# Ambari Commons & Resource Management imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.check_process_status import 
check_process_status
+from resource_management.core.source import InlineTemplate
+from resource_management.core.resources.system import Execute, Directory
+
+# Imports needed for Rolling/Express Upgrade
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+
+from resource_management.core import shell
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.security_commons import 
build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
+  FILE_TYPE_XML
+
+# Local Imports
+from setup_ranger_hive import setup_ranger_hive
+from hive_service_interactive import hive_service_interactive
+from hive_interactive import hive_interactive
+from hive_server import HiveServerDefault
+from setup_ranger_hive_interactive import setup_ranger_hive_interactive
+
+import traceback
+
+class HiveServerInteractive(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerInteractiveDefault(HiveServerInteractive):
+def install(self, env):
+  import params
+  self.install_packages(env)
+
+def configure(self, env):
+  import params
+  env.set_params(params)
+  hive_interactive(name='hiveserver2')
+
+def pre_upgrade_restart(self, env, upgrade_type=None):
+  Logger.info("Executing Hive Server Interactive Stack Upgrade 
pre-restart")
+  import params
+  env.set_params(params)
+
+  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version):
+stack_select.select_packages(params.version)
+
+# Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
+resource_created = copy_to_hdfs(
+  "hive2",
+  params.user_group,
+  params.hdfs_user,
+  skip=params.sysprep_skip_copy_tarballs_hdfs)
+
+resource_created = copy_to_hdfs(
+  "tez_hive2",
+  params.user_group,
+  params.hdfs_user,
+  skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
+
+if resource_created:
+  params.HdfsResource(None, action="execute")
+
+def start(self, env, upgrade_type=None):
+  import params
+  env.set_params(params)
+  self.configure(env)
+
+  if params.security_enabled:
+# Do the security setup, internally calls do_kinit()
+self.setup_security()
+
+  # TODO : We need to have conditional [re]start of LLAP once the "status check 
command" for LLAP is ready.
+  # Check status and based on that decide on [re]starting.
+
+  # 
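
hive_server_interactive.py imports a @retry decorator for probes such as the LLAP status check flagged in the TODO above. One way such a decorator can look; the argument names below are guesses, not the library's actual signature:

import functools
import time

def retry(times=3, sleep_time=1, err_class=Exception):
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            for attempt in range(times):
                try:
                    return fn(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise  # out of attempts, propagate
                    time.sleep(sleep_time)
        return inner
    return wrap

calls = {"n": 0}

@retry(times=3, sleep_time=0, err_class=RuntimeError)
def probe_llap():
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("LLAP not running yet")
    return "RUNNING"

print(probe_llap())  # RUNNING, after two failed attempts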

[6/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
new file mode 100644
index 000..b0415b1
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- --
+-- Server version  5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, 
FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin 
DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` 
(`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT 
NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` 
(`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT 
NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE

[2/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
new file mode 100644
index 000..18e2978
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/params_linux.py
@@ -0,0 +1,873 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import status_params
+import ambari_simplejson as json # simplejson is much faster compared to the 
Python 2.6 json module and has the same function set.
+import os
+
+from urlparse import urlparse
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.copy_tarball import 
STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import 
get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.stack_features import 
get_stack_feature_version
+from resource_management.libraries.functions.get_port_from_url import 
get_port_from_url
+from resource_management.libraries.functions import upgrade_summary
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries import functions
+from resource_management.libraries.functions.setup_atlas_hook import 
has_atlas_in_cluster
+from ambari_commons.ambari_metrics_helper import 
select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import 
get_audit_configs, generate_ranger_service_config
+from resource_management.libraries.functions.get_architecture import 
get_architecture
+from resource_management.libraries.functions.version import get_major_version
+
+from resource_management.core.utils import PasswordString
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import checked_call
+from ambari_commons.credential_store_helper import 
get_password_from_credential_store
+
+# Default log4j version; put config files under /etc/hive/conf
+log4j_version = '1'
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+architecture = get_architecture()
+sudo = AMBARI_SUDO_BINARY
+
+credential_store_enabled = False
+if 'credentialStoreEnabled' in config:
+  credential_store_enabled = config['credentialStoreEnabled']
+
+stack_root = status_params.stack_root
+stack_name = status_params.stack_name
+stack_name_uppercase = stack_name.upper()
+agent_stack_retry_on_unavailability = 
config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", 
int)
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+# node hostname
+hostname = config["hostname"]
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted_major = status_params.stack_version_formatted_major
+
+# this is not available on INSTALL action because <stack-selector-tool> is not 
available
+stack_version_formatted = functions.get_stack_version('hive-server2')
+major_stack_version = get_major_version(stack_version_formatted_major)
+
+# New Cluste
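
params_linux.py imports PasswordString, and hive.py formats the metastore password with {hive_metastore_user_passwd!p} so the value is masked when the command line is logged. The essence of that trick, reduced to a str subclass whose repr hides the value (a simplified sketch, not the library class):

class PasswordString(str):
    """Behaves like the real password when used, but masks itself in repr()."""
    def __repr__(self):
        return "[PROTECTED]"

password = PasswordString("s3cret")
print("executed with: -passWord %s" % password)   # real value reaches the process
print("logged as:     -passWord %r" % password)   # [PROTECTED]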

[4/7] ambari git commit: AMBARI-22588. Hive Client restart fails: hadooplzo package not present in IOP repos. (swagle)

2017-12-04 Thread swagle
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab1d01bd/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
new file mode 100644
index 000..b860c6e
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,562 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import glob
+from urlparse import urlparse
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import copy_tarball
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.source import StaticFile, Template, 
DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_sudo
+from resource_management.core.shell import quote_bash_args
+from resource_management.core.logger import Logger
+from resource_management.core import utils
+from resource_management.libraries.functions.setup_atlas_hook import 
has_atlas_in_cluster, setup_atlas_hook
+from resource_management.libraries.functions.security_commons import 
update_credential_provider_path
+from resource_management.libraries.functions.lzo_utils import 
install_lzo_if_needed
+from ambari_commons.constants import SERVICE
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive(name=None):
+  import params
+
+  XmlConfig("hive-site.xml",
+conf_dir = params.hive_conf_dir,
+configurations = params.config['configurations']['hive-site'],
+owner=params.hive_user,
+
configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+  if name in ["hiveserver2","metastore"]:
+# Manually overriding service logon user & password set by the 
installation package
+service_name = params.service_map[name]
+ServiceConfig(service_name,
+  action="change_user",
+  username = params.hive_user,
+  password = Script.get_password(params.hive_user))
+Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), 
logoutput=True, user=params.hadoop_user)
+
+  if name == 'metastore':
+    if params.init_metastore_schema:
+      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
+                                        '-dbType {hive_metastore_db_type} '
+                                        '-userName {hive_metastore_user_name} '
+                                        '-passWord {hive_metastore_user_passwd!p}'
+                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', # cmd "feature", propagate the process exit code manually
+                                        hive_bin=params.hive_bin,
+                                        hive_metastore_db_type=params.hive_metastore_db_type,
+                                        hive_metastore_user_name=params.hive_metastore_user_name,
+                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+      try:
+        Execute(check_schema_created_cmd)
+      except Fail:
+        create_schema_cmd = format('cmd /c {hive_b
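The diff is cut off here, but the surrounding logic is a standard check-then-create schema flow: probe the metastore with schematool -info and initialize it only when the probe fails. A minimal sketch of that pattern, with the truncated create command completed as an assumption (-initSchema mirrors Hive's schematool CLI; this is not the committed text):

  try:
    Execute(check_schema_created_cmd)
  except Fail:
    # Assumption: the truncated command initializes the schema with
    # schematool -initSchema using the same connection parameters.
    create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool '
                               '-initSchema -dbType {hive_metastore_db_type} '
                               '-userName {hive_metastore_user_name} '
                               '-passWord {hive_metastore_user_passwd!p}',
                               hive_bin=params.hive_bin,
                               hive_metastore_db_type=params.hive_metastore_db_type,
                               hive_metastore_user_name=params.hive_metastore_user_name,
                               hive_metastore_user_passwd=params.hive_metastore_user_passwd)
    Execute(create_schema_cmd)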

[2/2] ambari git commit: Revert "AMBARI-22575. Broken Python Unit Tests in branch-2.6 (aonishuk)"

2017-12-04 Thread swagle
Revert "AMBARI-22575. Broken Python Unit Tests in branch-2.6 (aonishuk)"

This reverts commit 51e26b86f8fef18f533a238f224d43d76a08bbaa.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/83cbde7d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/83cbde7d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/83cbde7d

Branch: refs/heads/branch-2.6
Commit: 83cbde7d75fbb9994b2c281ccae7f8b45f073225
Parents: 613dccb
Author: Siddharth Wagle 
Authored: Mon Dec 4 10:38:13 2017 -0800
Committer: Siddharth Wagle 
Committed: Mon Dec 4 10:38:13 2017 -0800

--
 .../stacks/2.0.6/OOZIE/test_oozie_server.py | 152 +--
 .../stacks/2.2/configs/oozie-upgrade.json   |  39 +
 2 files changed, 2 insertions(+), 189 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/83cbde7d/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
--
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py 
b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 09c4227..10a83d6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -28,44 +28,6 @@ from resource_management.libraries import functions
 from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
 import tempfile
 
-REPOSITORY_FILE_DICT = {
-"resolved": True, 
-"repoVersion": "2.6.4.0-52", 
-"repositories": [
-{
-"tags": [], 
-"ambariManaged": True, 
-"baseUrl": 
"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.4.0-52";,
 
-"repoName": "HDP", 
-"repoId": "HDP-2.6-repo-1", 
-"applicableServices": []
-}, 
-{
-"repoName": "HDP-GPL", 
-"tags": [
-"GPL"
-], 
-"ambariManaged": True, 
-"baseUrl": 
"http://s3.amazonaws.com/dev.hortonworks.com/HDP-GPL/centos6/2.x/BUILDS/2.6.4.0-52";,
 
-"repoId": "HDP-2.6-GPL-repo-1"
-}, 
-{
-"repoName": "HDP-UTILS", 
-"tags": [], 
-"ambariManaged": True, 
-"baseUrl": 
"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos6";, 
-"repoId": "HDP-UTILS-1.1.0.22-repo-1"
-}
-], 
-"feature": {
-"preInstalled": False, 
-"scoped": True
-}, 
-"stackName": "HDP", 
-"repoVersionId": 1, 
-"repoFileName": "ambari-hdp-1"
-}
-
 def format_package_name_side_effect(name):
   return name.replace("${stack_version}", "1_2_3_4")
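This side effect stands in for format_package_name in the tests below, stamping a concrete version into stack-versioned package names; a quick illustration (not part of the diff):

  format_package_name_side_effect("hadooplzo_${stack_version}")   # -> "hadooplzo_1_2_3_4"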
 
@@ -1257,34 +1219,6 @@ class TestOozieServer(RMFTestCase):
   sudo = True )
 
self.assertResourceCalled('Directory', '/usr/hdp/current/oozie-server/libext', mode = 0777)
-
-self.assertResourceCalled('Repository', 'HDP-2.6-repo-1',
-append_to_file = False,
-base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.4.0-52',
-action = ['create'],
-components = [u'HDP', 'main'],
-repo_template = UnknownConfigurationMock(),
-repo_file_name = 'ambari-hdp-1',
-mirror_list = None,
-)
-self.assertResourceCalled('Repository', 'HDP-2.6-GPL-repo-1',
-append_to_file = True,
-base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP-GPL/centos6/2.x/BUILDS/2.6.4.0-52',
-action = ['create'],
-components = [u'HDP-GPL', 'main'],
-repo_template = UnknownConfigurationMock(),
-repo_file_name = 'ambari-hdp-1',
-mirror_list = None,
-)
-self.assertResourceCalled('Repository', 'HDP-UTILS-1.1.0.22-repo-1',
-append_to_file = True,
-base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos6',
-action = ['create'],
-components = [u'HDP-UTILS', 'main'],
-repo_template = UnknownConfigurationMock(),
-repo_file_name = 'ambari-hdp-1',
-mirror_list = None,
-)
self.assertResourceCalled('Package', ('lzo'), retry_count = 5, retry_on_repo_unavailability = False)
self.assertResourceCalled('Package', ('hadooplzo_1_2_3_4'), retry_count = 5, retry_on_repo_unavailability = False)
self.assertResourceCalled('Package', ('hadooplzo_1_2_3_4-native'), retry_count = 5, retry_on_repo_unavailability = False)
@@ -1326,7 +1260,6 @@ class TestOozieServer(RMFTestCase):
 version = '2.3.0.0-1234'
 json_content['commandPar
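For context, the revert keeps the package assertions above while dropping the repository ones; the RMFTestCase pattern they follow looks like this (a condensed sketch; the assertNoMoreResources() closing check is an assumption about these tests):

  # Each assertion verifies that the script under test declared exactly this
  # resource with exactly these arguments, in order.
  self.assertResourceCalled('Package', 'lzo',
      retry_count = 5,
      retry_on_repo_unavailability = False)
  self.assertNoMoreResources()   # assumption: the usual closing check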

[1/2] ambari git commit: Revert "AMBARI-22561. Need to address HDP-GPL repo update after user accepts license in post-install scenario. Breaks HDF deploy. (aonishuk)"

2017-12-04 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 2c181ac20 -> 83cbde7d7


Revert "AMBARI-22561. Need to address HDP-GPL repo update after user accepts 
license in post-install scenario. Breaks HDF deploy. (aonishuk)"

This reverts commit 3ac717a497efc60ca9b51f8ca7bef21f3df26fb3.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/613dccbd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/613dccbd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/613dccbd

Branch: refs/heads/branch-2.6
Commit: 613dccbd202374bae1664c5f09c41e1b97d77410
Parents: 2c181ac
Author: Siddharth Wagle 
Authored: Mon Dec 4 10:35:29 2017 -0800
Committer: Siddharth Wagle 
Committed: Mon Dec 4 10:35:29 2017 -0800

--
 .../libraries/functions/lzo_utils.py|  9 +-
 .../libraries/functions/repository_util.py  | 92 
 .../libraries/script/script.py  | 10 +--
 .../ambari/server/agent/CommandRepository.java  |  6 --
 .../custom_actions/scripts/install_packages.py  |  8 +-
 .../scripts/repo_initialization.py  |  9 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |  2 -
 7 files changed, 51 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/613dccbd/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
--
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
index c505969..68ee607 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
@@ -54,9 +54,6 @@ def get_lzo_packages():
 
   return lzo_packages
 
-def is_gpl_license_accepted():
-  return default("/hostLevelParams/gpl_license_accepted", False)
-
 def should_install_lzo():
   """
  Return true if lzo is enabled via core-site.xml and GPL license (required for lzo) is accepted.
@@ -68,7 +65,8 @@ def should_install_lzo():
   if not lzo_enabled:
 return False
 
-  if not is_gpl_license_accepted():
+  is_gpl_license_accepted = default("/hostLevelParams/gpl_license_accepted", False)
+  if not is_gpl_license_accepted:
 Logger.warning(INSTALLING_LZO_WITHOUT_GPL)
 return False
 
@@ -81,9 +79,6 @@ def install_lzo_if_needed():
   if not should_install_lzo():
 return
 
-  # If user has just accepted GPL license. GPL repository can not yet be present.
-  Script.repository_util.create_repo_files()
-
   lzo_packages = get_lzo_packages()
 
   config = Script.get_config()
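After this revert, should_install_lzo() reads the acceptance flag inline rather than through the removed is_gpl_license_accepted() helper. Reassembled from the hunks above (the lzo_enabled derivation from core-site is an assumption based on the docstring):

  def should_install_lzo():
    """
    Return true if lzo is enabled via core-site.xml and GPL license (required for lzo) is accepted.
    """
    # Assumption: lzo_enabled comes from io.compression.codecs in core-site,
    # as the docstring describes; the GPL guard below is verbatim from the hunk.
    io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
    lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
    if not lzo_enabled:
      return False

    is_gpl_license_accepted = default("/hostLevelParams/gpl_license_accepted", False)
    if not is_gpl_license_accepted:
      Logger.warning(INSTALLING_LZO_WITHOUT_GPL)
      return False

    return True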

http://git-wip-us.apache.org/repos/asf/ambari/blob/613dccbd/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
--
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
index 5d73b5d..f1c8ef1 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
@@ -17,75 +17,60 @@ limitations under the License.
 
 """
 
-from ambari_commons.os_check import OSCheck
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
 from resource_management.libraries.resources.repository import Repository
-from resource_management.libraries.functions.is_empty import is_empty
 import ambari_simplejson as json
 
 
-__all__ = ["RepositoryUtil", "CommandRepository"]
+__all__ = ["create_repo_files", "CommandRepository"]
 
 # components_list = repoName + postfix
 UBUNTU_REPO_COMPONENTS_POSTFIX = "main"
 
-class RepositoryUtil:
-  def __init__(self, config, tags_to_skip):
-    self.tags_to_skip = tags_to_skip
-
-    # repo templates
-    repo_file = config['repositoryFile']
-    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
-    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
+def create_repo_files(template, command_repository):
+  """
+  Creates repositories in a consistent manner for all types
+  :param command_repository: a CommandRepository instance
+  :type command_repository CommandRepository
+  :return: a dictionary with repo ID => repo file name mapping
+  """
 
-    if is_empty(repo_file):
-      return
+  if command_repository.version_id is None:
+    raise Fail("The command repository was not parsed correctly")
 
-self.template = repo_rhel_suse if OSCheck.is
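The hunk is truncated here, but the restored module-level API is clear from its docstring: create_repo_files(template, command_repository) replaces the RepositoryUtil class. A hedged usage sketch (constructing CommandRepository from config['repositoryFile'] is an assumption; the template selection mirrors the removed constructor above):

  config = Script.get_config()
  repo_file = config['repositoryFile']
  if not is_empty(repo_file):
    # Pick the OS-appropriate template, as RepositoryUtil.__init__ did.
    template = config['configurations']['cluster-env']['repo_suse_rhel_template'] \
        if OSCheck.is_redhat_family() or OSCheck.is_suse_family() \
        else config['configurations']['cluster-env']['repo_ubuntu_template']
    repo_ids_to_files = create_repo_files(template, CommandRepository(repo_file))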

ambari git commit: AMBARI-22555. Update server setup to deny GPL as default. Fixed for silent setup. (swagle)

2017-11-30 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/trunk 21e37b164 -> 4aef8fe07


AMBARI-22555. Update server setup to deny GPL as default. Fixed for silent 
setup. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4aef8fe0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4aef8fe0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4aef8fe0

Branch: refs/heads/trunk
Commit: 4aef8fe07fdabc684e2a25c4a4b78073710a5f77
Parents: 21e37b1
Author: Siddharth Wagle 
Authored: Thu Nov 30 11:41:48 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Nov 30 11:41:48 2017 -0800

--
 .../src/main/python/ambari_server/serverConfiguration.py| 4 ++--
 ambari-server/src/main/python/ambari_server/serverSetup.py  | 5 ++---
 ambari-server/src/main/python/ambari_server/serverUpgrade.py| 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/4aef8fe0/ambari-server/src/main/python/ambari_server/serverConfiguration.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py 
b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index d6ab453..27b4472 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -1147,7 +1147,7 @@ def update_ambari_env():
   return 0
  
 # default should be false / not accepted 
-def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
+def write_gpl_license_accepted(default_prompt_value = False, text = GPL_LICENSE_PROMPT_TEXT):
   properties = get_ambari_properties()
   if properties == -1:
 err = "Error getting ambari properties"
@@ -1157,7 +1157,7 @@ def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
  if GPL_LICENSE_ACCEPTED_PROPERTY in properties.keys() and properties.get_property(GPL_LICENSE_ACCEPTED_PROPERTY).lower() == "true":
 return True
 
-  result = get_YN_input(text, False)
+  result = get_YN_input(text, default_prompt_value)
 
   properties.process_pair(GPL_LICENSE_ACCEPTED_PROPERTY, str(result).lower())
   update_properties(properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4aef8fe0/ambari-server/src/main/python/ambari_server/serverSetup.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py 
b/ambari-server/src/main/python/ambari_server/serverSetup.py
index af45584..3b41fff 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -1182,9 +1182,8 @@ def setup(options):
 err = 'Downloading or installing JDK failed: {0}. Exiting.'.format(e)
 raise FatalException(e.code, err)
 
-  if not get_silent() or options.accept_gpl:
-    print 'Checking GPL software agreement...'
-    write_gpl_license_accepted()
+  print 'Checking GPL software agreement...'
+  write_gpl_license_accepted(default_prompt_value=options.accept_gpl)
 
   print 'Completing setup...'
   retcode = configure_os_settings()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4aef8fe0/ambari-server/src/main/python/ambari_server/serverUpgrade.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py 
b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index c2b7de8..dd847e3 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -179,7 +179,7 @@ def check_gpl_license_approved(upgrade_response):
  if 'lzo_enabled' not in upgrade_response or upgrade_response['lzo_enabled'].lower() != "true":
 return
 
-  while not write_gpl_license_accepted(LZO_ENABLED_GPL_TEXT) and not get_YN_input(INSTALLED_LZO_WITHOUT_GPL_TEXT, False):
+  while not write_gpl_license_accepted(text = LZO_ENABLED_GPL_TEXT) and not get_YN_input(INSTALLED_LZO_WITHOUT_GPL_TEXT, False):
 pass
 
 #
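Taken together, the two hunks mean the GPL question is always asked during setup, with --accept-gpl only flipping the default answer; in silent mode the default is returned without a console read. A hedged sketch of those semantics (the silent short-circuit in get_YN_input is an assumption about Ambari's prompt helper, not code from this commit):

  def get_YN_input_sketch(prompt, default):
    if get_silent():
      return default                  # silent setup: the default answer wins
    answer = raw_input(prompt).strip().lower()
    return answer.startswith('y') if answer else default

  # ambari-server setup --silent              -> default False -> GPL denied
  # ambari-server setup --silent --accept-gpl -> default True  -> GPL accepted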



ambari git commit: AMBARI-22555. Update server setup to deny GPL as default. Fixed for silent setup. (swagle)

2017-11-30 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 2b49f4582 -> b89a94b22


AMBARI-22555. Update server setup to deny GPL as default. Fixed for silent 
setup. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b89a94b2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b89a94b2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b89a94b2

Branch: refs/heads/branch-2.6
Commit: b89a94b221b27148f36bc0b6a31c8610eed97996
Parents: 2b49f45
Author: Siddharth Wagle 
Authored: Thu Nov 30 10:43:00 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Nov 30 11:27:30 2017 -0800

--
 .../src/main/python/ambari_server/serverConfiguration.py| 4 ++--
 ambari-server/src/main/python/ambari_server/serverSetup.py  | 5 ++---
 ambari-server/src/main/python/ambari_server/serverUpgrade.py| 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/b89a94b2/ambari-server/src/main/python/ambari_server/serverConfiguration.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py 
b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 2ab2484..5658f67 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -1144,7 +1144,7 @@ def update_ambari_env():
   return 0
 
 # default should be false / not accepted
-def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
+def write_gpl_license_accepted(default_prompt_value = False, text = GPL_LICENSE_PROMPT_TEXT):
   properties = get_ambari_properties()
   if properties == -1:
 err = "Error getting ambari properties"
@@ -1154,7 +1154,7 @@ def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
  if GPL_LICENSE_ACCEPTED_PROPERTY in properties.keys() and properties.get_property(GPL_LICENSE_ACCEPTED_PROPERTY).lower() == "true":
 return True
 
-  result = get_YN_input(text, False)
+  result = get_YN_input(text, default_prompt_value)
 
   properties.process_pair(GPL_LICENSE_ACCEPTED_PROPERTY, str(result).lower())
   update_properties(properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b89a94b2/ambari-server/src/main/python/ambari_server/serverSetup.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py 
b/ambari-server/src/main/python/ambari_server/serverSetup.py
index 50992d1..909497f 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -1128,9 +1128,8 @@ def setup(options):
 err = 'Downloading or installing JDK failed: {0}. Exiting.'.format(e)
 raise FatalException(e.code, err)
 
-  if not get_silent() or options.accept_gpl:
-    print 'Checking GPL software agreement...'
-    write_gpl_license_accepted()
+  print 'Checking GPL software agreement...'
+  write_gpl_license_accepted(default_prompt_value=options.accept_gpl)
 
   print 'Completing setup...'
   retcode = configure_os_settings()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b89a94b2/ambari-server/src/main/python/ambari_server/serverUpgrade.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py 
b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 06443d3..d5c7c7f 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -179,7 +179,7 @@ def check_gpl_license_approved(upgrade_response):
  if 'lzo_enabled' not in upgrade_response or upgrade_response['lzo_enabled'].lower() != "true":
 return
 
-  while not write_gpl_license_accepted(LZO_ENABLED_GPL_TEXT) and not get_YN_input(INSTALLED_LZO_WITHOUT_GPL_TEXT, False):
+  while not write_gpl_license_accepted(text = LZO_ENABLED_GPL_TEXT) and not get_YN_input(INSTALLED_LZO_WITHOUT_GPL_TEXT, False):
 pass
 
 #



ambari git commit: AMBARI-22555. Update server setup to deny GPL as default. (swagle)

2017-11-30 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/trunk 4240f8496 -> 1a803ccab


AMBARI-22555. Update server setup to deny GPL as default. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1a803cca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1a803cca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1a803cca

Branch: refs/heads/trunk
Commit: 1a803ccab0eeaa9bd6cf45d1be1139b7a240df1e
Parents: 4240f84
Author: Siddharth Wagle 
Authored: Thu Nov 30 09:47:46 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Nov 30 09:47:46 2017 -0800

--
 .../python/ambari_server/serverConfiguration.py| 17 ++---
 .../src/main/python/ambari_server/serverUpgrade.py | 14 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py |  2 +-
 .../stacks/2.0.6/common/test_stack_advisor.py  |  4 ++--
 4 files changed, 18 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/1a803cca/ambari-server/src/main/python/ambari_server/serverConfiguration.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py 
b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index f744fa0..d6ab453 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -196,14 +196,8 @@ SETUP_OR_UPGRADE_MSG = "- If this is a new setup, then run the \"ambari-server s
 "- If this is an upgrade of an existing setup, run the \"ambari-server upgrade\" command.\n" \
 "Refer to the Ambari documentation for more information on setup and upgrade."
 
-GPL_LICENSE_PROMPT_TEXT = """To download GPL licensed products like lzo you 
must accept the license terms below:
-LICENSE_LINE_1
-LICENSE_LINE_2
-LICENSE_LINE_3
-LICENSE_LINE_4
-LICENSE_LINE_5
-LICENSE_LINE_6
-Do you accept the GPL License Agreement [y/n] (y)?"""
+GPL_LICENSE_PROMPT_TEXT = """GPL License for LZO: 
https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+Enable Ambari Server to download and install GPL Licensed LZO packages [y/n] 
(n)? """
 
 DEFAULT_DB_NAME = "ambari"
 
@@ -1151,8 +1145,9 @@ def update_ambari_env():
 return -1
 
   return 0
-  
-def write_gpl_license_accepted():
+ 
+# default should be false / not accepted 
+def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
   properties = get_ambari_properties()
   if properties == -1:
 err = "Error getting ambari properties"
@@ -1162,7 +1157,7 @@ def write_gpl_license_accepted():
  if GPL_LICENSE_ACCEPTED_PROPERTY in properties.keys() and properties.get_property(GPL_LICENSE_ACCEPTED_PROPERTY).lower() == "true":
 return True
 
-  result = get_YN_input(GPL_LICENSE_PROMPT_TEXT, True)
+  result = get_YN_input(text, False)
 
   properties.process_pair(GPL_LICENSE_ACCEPTED_PROPERTY, str(result).lower())
   update_properties(properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1a803cca/ambari-server/src/main/python/ambari_server/serverUpgrade.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py 
b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 57a1ae0..c2b7de8 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -74,10 +74,14 @@ SCHEMA_UPGRADE_DEBUG = False
 
 SUSPEND_START_MODE = False
 
-INSALLED_LZO_WITHOUT_GPL_TEXT = "By saying no, Ambari will not automatically install LZO on any  new host in the cluster." + \
-"It is up to you to ensure LZO is installed and configured appropriately." + \
-"Without LZO being installed and configured data compressed with LZO will not be readable. " + \
-"Are you sure you want to proceed? [y/n] (n)?"
+INSTALLED_LZO_WITHOUT_GPL_TEXT = "By saying no, Ambari will not automatically install LZO on any new host in the cluster.  " \
+                                 "It is up to you to ensure LZO is installed and configured appropriately.  " \
+                                 "Without LZO being installed and configured, data compressed with LZO will not be readable.  " \
+                                 "Are you sure you want to proceed? [y/n] (n)? "
+
+LZO_ENABLED_GPL_TEXT = "GPL License for LZO: https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html\n" \
+   "Your cluster is configured to use L

ambari git commit: AMBARI-22555. Update server setup to deny GPL as default. (swagle)

2017-11-30 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-2.6 70418a031 -> 51bd023a2


AMBARI-22555. Update server setup to deny GPL as default. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51bd023a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51bd023a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51bd023a

Branch: refs/heads/branch-2.6
Commit: 51bd023a2e3cfeafebe6a0e65820f82c0dd0cbd5
Parents: 70418a0
Author: Siddharth Wagle 
Authored: Thu Nov 30 09:43:09 2017 -0800
Committer: Siddharth Wagle 
Committed: Thu Nov 30 09:43:20 2017 -0800

--
 .../main/python/ambari_server/serverConfiguration.py | 15 +--
 .../src/main/python/ambari_server/serverUpgrade.py   | 14 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py   |  2 +-
 .../python/stacks/2.0.6/common/test_stack_advisor.py |  4 ++--
 4 files changed, 17 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/51bd023a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py 
b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 8697e4c..2ab2484 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -190,14 +190,8 @@ SETUP_OR_UPGRADE_MSG = "- If this is a new setup, then run the \"ambari-server s
 "- If this is an upgrade of an existing setup, run the \"ambari-server upgrade\" command.\n" \
 "Refer to the Ambari documentation for more information on setup and upgrade."
 
-GPL_LICENSE_PROMPT_TEXT = """To download GPL licensed products like lzo you 
must accept the license terms below:
-LICENSE_LINE_1
-LICENSE_LINE_2
-LICENSE_LINE_3
-LICENSE_LINE_4
-LICENSE_LINE_5
-LICENSE_LINE_6
-Do you accept the GPL License Agreement [y/n] (y)?"""
+GPL_LICENSE_PROMPT_TEXT = """GPL License for LZO: 
https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+Enable Ambari Server to download and install GPL Licensed LZO packages [y/n] 
(n)? """
 
 DEFAULT_DB_NAME = "ambari"
 
@@ -1149,7 +1143,8 @@ def update_ambari_env():
 
   return 0
 
-def write_gpl_license_accepted():
+# default should be false / not accepted
+def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
   properties = get_ambari_properties()
   if properties == -1:
 err = "Error getting ambari properties"
@@ -1159,7 +1154,7 @@ def write_gpl_license_accepted(text = GPL_LICENSE_PROMPT_TEXT):
  if GPL_LICENSE_ACCEPTED_PROPERTY in properties.keys() and properties.get_property(GPL_LICENSE_ACCEPTED_PROPERTY).lower() == "true":
 
-  result = get_YN_input(GPL_LICENSE_PROMPT_TEXT, True)
+  result = get_YN_input(text, False)
 
   properties.process_pair(GPL_LICENSE_ACCEPTED_PROPERTY, str(result).lower())
   update_properties(properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/51bd023a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
--
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py 
b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index a864609..06443d3 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -74,10 +74,14 @@ SCHEMA_UPGRADE_DEBUG = False
 
 SUSPEND_START_MODE = False
 
-INSALLED_LZO_WITHOUT_GPL_TEXT = "By saying no, Ambari will not automatically install LZO on any  new host in the cluster." + \
-"It is up to you to ensure LZO is installed and configured appropriately." + \
-"Without LZO being installed and configured data compressed with LZO will not be readable. " + \
-"Are you sure you want to proceed? [y/n] (n)?"
+INSTALLED_LZO_WITHOUT_GPL_TEXT = "By saying no, Ambari will not automatically install LZO on any new host in the cluster.  " \
+                                 "It is up to you to ensure LZO is installed and configured appropriately.  " \
+                                 "Without LZO being installed and configured, data compressed with LZO will not be readable.  " \
+                                 "Are you sure you want to proceed? [y/n] (n)? "
+
+LZO_ENABLED_GPL_TEXT = "GPL License for LZO: https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html\n" \
+   "Your cluster is configured to use L

[ambari] Git Push Summary

2017-11-29 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-22457 [deleted] ed50bc383


ambari git commit: AMBARI-22484. Stack advisor should disallow lzo enable without accepting license agreement. (Myroslav Papirkovskyy via swagle)

2017-11-20 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-22457 1fce5650b -> 9d55c0f50


AMBARI-22484. Stack advisor should disallow lzo enable without accepting 
license agreement. (Myroslav Papirkovskyy via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9d55c0f5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9d55c0f5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9d55c0f5

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 9d55c0f5034c2765e4428d9b3ad56c455be54bfc
Parents: 1fce565
Author: Siddharth Wagle 
Authored: Mon Nov 20 10:58:47 2017 -0800
Committer: Siddharth Wagle 
Committed: Mon Nov 20 10:58:47 2017 -0800

--
 .../stackadvisor/StackAdvisorRequest.java   | 11 
 .../commands/StackAdvisorCommand.java   |  2 ++
 .../ambari/server/controller/AmbariServer.java  |  3 ++-
 .../internal/StackAdvisorResourceProvider.java  |  9 +--
 .../GPLLicenseNotAcceptedException.java | 28 
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 23 +++-
 .../ValidationResourceProviderTest.java |  4 ++-
 .../stacks/2.0.6/common/test_stack_advisor.py   | 25 +
 8 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/9d55c0f5/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
index 64180e5..bc1e079 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRequest.java
@@ -48,6 +48,7 @@ public class StackAdvisorRequest {
  private List changedConfigurations = new LinkedList();
   private Set configGroups;
   private Map userContext = new HashMap();
+  private Boolean gplLicenseAccepted;
 
   public String getStackName() {
 return stackName;
@@ -117,6 +118,10 @@ public class StackAdvisorRequest {
 this.configGroups = configGroups;
   }
 
+  public Boolean getGplLicenseAccepted() {
+return gplLicenseAccepted;
+  }
+
   private StackAdvisorRequest(String stackName, String stackVersion) {
 this.stackName = stackName;
 this.stackVersion = stackVersion;
@@ -189,6 +194,12 @@ public class StackAdvisorRequest {
   return this;
 }
 
+public StackAdvisorRequestBuilder withGPLLicenseAccepted(Boolean gplLicenseAccepted) {
+  this.instance.gplLicenseAccepted = gplLicenseAccepted;
+  return this;
+}
+
 public StackAdvisorRequest build() {
   return this.instance;
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9d55c0f5/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
index 5440462..48924f8 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/commands/StackAdvisorCommand.java
@@ -94,6 +94,7 @@ public abstract class StackAdvisorCommand extend
   private static final String CONFIGURATIONS_PROPERTY = "configurations";
  private static final String CHANGED_CONFIGURATIONS_PROPERTY = "changed-configurations";
   private static final String USER_CONTEXT_PROPERTY = "user-context";
+  private static final String GPL_LICENSE_ACCEPTED = "gpl-license-accepted";
  private static final String AMBARI_SERVER_CONFIGURATIONS_PROPERTY = "ambari-server-properties";
 
   private File recommendationsDir;
@@ -205,6 +206,7 @@ public abstract class StackAdvisorCommand extend
 
 JsonNode userContext = mapper.valueToTree(request.getUserContext());
 root.put(USER_CONTEXT_PROPERTY, userContext);
+root.put(GPL_LICENSE_ACCEPTED, request.getGplLicenseAccepted());
   }
 
   private void populateConfigGroups(ObjectNode root,

http://git-wip-us.apache.org/repos/asf/ambari/blob/9d55c0f5/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
--

ambari git commit: AMBARI-22480. Validate blueprint does not allow lzo enable without setup with license agreement. (Myroslav Papirkovskyy via swagle)

2017-11-20 Thread swagle
Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-22457 096fedbd3 -> 1fce5650b


AMBARI-22480. Validate blueprint does not allow lzo enable without setup with 
license agreement. (Myroslav Papirkovskyy via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1fce5650
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1fce5650
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1fce5650

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 1fce5650ba92f68a49e8750369aea8e9d3dcf6bf
Parents: 096fedb
Author: Siddharth Wagle 
Authored: Mon Nov 20 10:56:04 2017 -0800
Committer: Siddharth Wagle 
Committed: Mon Nov 20 10:56:04 2017 -0800

--
 .../internal/BlueprintResourceProvider.java | 36 +-
 .../ambari/server/topology/Blueprint.java   | 40 ++--
 .../ambari/server/topology/BlueprintImpl.java   | 17 ++---
 .../server/topology/BlueprintValidator.java |  4 +-
 .../server/topology/BlueprintValidatorImpl.java | 21 +-
 .../server/topology/BlueprintImplTest.java  | 69 ++--
 .../topology/BlueprintValidatorImplTest.java| 18 +++--
 7 files changed, 143 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/1fce5650/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
index 1c127c0..b313c8b 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java
@@ -18,11 +18,17 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.gson.Gson;
-import org.apache.ambari.server.orm.entities.BlueprintSettingEntity;
-import org.apache.ambari.server.utils.SecretReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.DuplicateResourceException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -41,28 +47,24 @@ import org.apache.ambari.server.orm.dao.BlueprintDAO;
 import org.apache.ambari.server.orm.entities.BlueprintConfigEntity;
 import org.apache.ambari.server.orm.entities.BlueprintConfiguration;
 import org.apache.ambari.server.orm.entities.BlueprintEntity;
+import org.apache.ambari.server.orm.entities.BlueprintSettingEntity;
 import org.apache.ambari.server.orm.entities.HostGroupComponentEntity;
 import org.apache.ambari.server.orm.entities.HostGroupEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.stack.NoSuchStackException;
-import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.BlueprintFactory;
+import org.apache.ambari.server.topology.GPLLicenseNotAcceptedException;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.SecurityConfiguration;
 import org.apache.ambari.server.topology.SecurityConfigurationFactory;
+import org.apache.ambari.server.utils.SecretReference;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.gson.Gson;
 
 
 /**
@@ -514,7 +516,7 @@ public class BlueprintResourceProvider extends AbstractControllerResourceProvide
 
 try {
   blueprint.validateRequiredProperties();
-} catch (InvalidTopologyException e) {
+} catch (InvalidTopologyException | GPLLicenseNotAcceptedException e) {
   throw new IllegalArgumentException("Blueprint configuration validation failed: " + e.getMessage(), e);
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1fce5650/ambari-server/src/
