ambari git commit: AMBARI-15871. App Timeline Server start fails on a kerberized cluster due to absence of hdfs keytab. (vbrodetskyi)

2016-06-09 Thread vbrodetskyi
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 94038e5c3 -> b0b4ebee9


AMBARI-15871. App Timeline Server start fails on a kerberized cluster due to 
absence of hdfs keytab. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b0b4ebee
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b0b4ebee
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b0b4ebee

Branch: refs/heads/branch-2.4
Commit: b0b4ebee9969930c3594761d3efc3577fdcb442a
Parents: 94038e5
Author: Vitaly Brodetskyi 
Authored: Wed Jun 8 18:19:09 2016 +0300
Committer: Vitaly Brodetskyi 
Committed: Wed Jun 8 18:19:09 2016 +0300

--
 .../main/resources/common-services/YARN/2.1.0.2.0/kerberos.json   | 3 +++
 .../src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json | 3 +++
 .../main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json | 3 +++
 .../src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json | 3 +++
 4 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/b0b4ebee/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index 9afe668..4093431 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -151,6 +151,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0b4ebee/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index 240f61e..2fdce8a 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -152,6 +152,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0b4ebee/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index 9606b59..b02b3e9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -154,6 +154,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0b4ebee/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index 22fed42..0d67e59 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -161,6 +161,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }



ambari git commit: AMBARI-15871. App Timeline Server start fails on a kerberized cluster due to absence of hdfs keytab. (vbrodetskyi)

2016-06-09 Thread vbrodetskyi
Repository: ambari
Updated Branches:
  refs/heads/trunk f9d9ff1b3 -> 47c9b2160


AMBARI-15871. App Timeline Server start fails on a kerberized cluster due to 
absence of hdfs keytab. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/47c9b216
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/47c9b216
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/47c9b216

Branch: refs/heads/trunk
Commit: 47c9b2160b0220bc4a5738d7e9d702189c5d5d2f
Parents: f9d9ff1
Author: Vitaly Brodetskyi 
Authored: Wed Jun 8 18:22:48 2016 +0300
Committer: Vitaly Brodetskyi 
Committed: Wed Jun 8 18:22:48 2016 +0300

--
 .../main/resources/common-services/YARN/2.1.0.2.0/kerberos.json   | 3 +++
 .../src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json | 3 +++
 .../main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json | 3 +++
 .../src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json | 3 +++
 4 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/47c9b216/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index 9afe668..4093431 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -151,6 +151,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/47c9b216/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index 240f61e..2fdce8a 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -152,6 +152,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/47c9b216/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index 9606b59..b02b3e9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -154,6 +154,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/47c9b216/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index 22fed42..0d67e59 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -161,6 +161,9 @@
   "keytab": {
 "configuration": 
"yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
   }
+},
+{
+  "name": "/HDFS/NAMENODE/hdfs"
 }
   ]
 }



ambari git commit: AMBARI-17125 Wizards operation gets into inconsistent state when logged into from a different browser

2016-06-09 Thread atkach
Repository: ambari
Updated Branches:
  refs/heads/trunk 47c9b2160 -> 1671d615a


AMBARI-17125 Wizards operation gets into inconsistent state when logged into 
from a different browser


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1671d615
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1671d615
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1671d615

Branch: refs/heads/trunk
Commit: 1671d615a7b4a88314bbcbaed860029fc20ffdb2
Parents: 47c9b21
Author: Andrii Tkach 
Authored: Wed Jun 8 19:21:08 2016 +0300
Committer: Andrii Tkach 
Committed: Thu Jun 9 12:52:16 2016 +0300

--
 .../global/background_operations_controller.js  |  2 +-
 .../controllers/global/cluster_controller.js|  5 +++
 .../main/admin/stack_and_upgrade_controller.js  | 43 ++--
 .../global/cluster_controller_test.js   | 27 
 .../admin/stack_and_upgrade_controller_test.js  | 16 
 5 files changed, 71 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/1671d615/ambari-web/app/controllers/global/background_operations_controller.js
--
diff --git 
a/ambari-web/app/controllers/global/background_operations_controller.js 
b/ambari-web/app/controllers/global/background_operations_controller.js
index a26dbfd..518f5ba 100644
--- a/ambari-web/app/controllers/global/background_operations_controller.js
+++ b/ambari-web/app/controllers/global/background_operations_controller.js
@@ -211,7 +211,7 @@ App.BackgroundOperationsController = Em.Controller.extend({
 
 data.items.forEach(function (request) {
   if (this.isUpgradeRequest(request)) {
-if (!App.get('upgradeIsRunning') && !App.get('testMode') && 
Em.get(request, 'Requests.request_status') !== 'COMPLETED') {
+if (!App.get('upgradeIsRunning') && !App.get('testMode')) {
   restoreUpgradeState = true;
 }
 return;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1671d615/ambari-web/app/controllers/global/cluster_controller.js
--
diff --git a/ambari-web/app/controllers/global/cluster_controller.js 
b/ambari-web/app/controllers/global/cluster_controller.js
index 1741704..d8a3664 100644
--- a/ambari-web/app/controllers/global/cluster_controller.js
+++ b/ambari-web/app/controllers/global/cluster_controller.js
@@ -302,6 +302,11 @@ App.ClusterController = 
Em.Controller.extend(App.ReloadPopupMixin, {
   var lastUpgradeData = 
data.items.sortProperty('Upgrade.request_id').pop();
   var dbUpgradeState = App.db.get('MainAdminStackAndUpgrade', 
'upgradeState');
 
+  //completed upgrade shouldn't be restored
+  if (lastUpgradeData && lastUpgradeData.Upgrade.request_status === 
"COMPLETED") {
+return;
+  }
+
   if (!Em.isNone(dbUpgradeState)) {
 App.set('upgradeState', dbUpgradeState);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1671d615/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
--
diff --git 
a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js 
b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 20cfe40..01a9978 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -390,6 +390,9 @@ App.MainAdminStackAndUpgradeController = 
Em.Controller.extend(App.LocalStorage,
 isRetryPending: false
   });
 }
+if (data.Upgrade.request_status === 'COMPLETED') {
+  this.finish();
+}
   },
 
   /**
@@ -1541,28 +1544,26 @@ App.MainAdminStackAndUpgradeController = 
Em.Controller.extend(App.LocalStorage,
* and clean auxiliary data
*/
   finish: function () {
-if (App.get('upgradeState') === 'COMPLETED') {
-  var upgradeVersion = this.get('upgradeVersion') && 
this.get('upgradeVersion').match(/[a-zA-Z]+\-\d+\.\d+/);
-  this.setDBProperties({
-upgradeId: undefined,
-upgradeState: 'INIT',
-upgradeVersion: undefined,
-currentVersion: undefined,
-upgradeTypeDisplayName: undefined,
-upgradeType: undefined,
-failuresTolerance: undefined,
-isDowngrade: undefined,
-downgradeAllowed: undefined
-  });
-  App.clusterStatus.setClusterStatus({
-localdb: App.db.data
-  });
-  if (upgradeVersion && upgradeVersion[0]) {
-App.set('currentStackVersion', upgradeVersion[0]);
-  }
-  App.set('upgradeState', 'INIT');
+var upgradeVersion = this.get('upgradeVersion') && 
this.get('upgradeVersion').match(/[a-zA-Z]+\-\d+

ambari git commit: AMBARI-17125 Wizards operation gets into inconsistent state when logged into from a different browser. (atkach)

2016-06-09 Thread atkach
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 b0b4ebee9 -> 4115f382c


AMBARI-17125 Wizards operation gets into inconsistent state when logged into 
from a different browser. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4115f382
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4115f382
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4115f382

Branch: refs/heads/branch-2.4
Commit: 4115f382c722fa3a0c10ada5f4691761f8401ca6
Parents: b0b4ebe
Author: Andrii Tkach 
Authored: Wed Jun 8 19:21:08 2016 +0300
Committer: Andrii Tkach 
Committed: Thu Jun 9 12:56:42 2016 +0300

--
 .../global/background_operations_controller.js  |  2 +-
 .../controllers/global/cluster_controller.js|  5 +++
 .../main/admin/stack_and_upgrade_controller.js  | 43 ++--
 .../global/cluster_controller_test.js   | 27 
 .../admin/stack_and_upgrade_controller_test.js  | 16 
 5 files changed, 71 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/4115f382/ambari-web/app/controllers/global/background_operations_controller.js
--
diff --git 
a/ambari-web/app/controllers/global/background_operations_controller.js 
b/ambari-web/app/controllers/global/background_operations_controller.js
index a26dbfd..518f5ba 100644
--- a/ambari-web/app/controllers/global/background_operations_controller.js
+++ b/ambari-web/app/controllers/global/background_operations_controller.js
@@ -211,7 +211,7 @@ App.BackgroundOperationsController = Em.Controller.extend({
 
 data.items.forEach(function (request) {
   if (this.isUpgradeRequest(request)) {
-if (!App.get('upgradeIsRunning') && !App.get('testMode') && 
Em.get(request, 'Requests.request_status') !== 'COMPLETED') {
+if (!App.get('upgradeIsRunning') && !App.get('testMode')) {
   restoreUpgradeState = true;
 }
 return;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4115f382/ambari-web/app/controllers/global/cluster_controller.js
--
diff --git a/ambari-web/app/controllers/global/cluster_controller.js 
b/ambari-web/app/controllers/global/cluster_controller.js
index 1741704..d8a3664 100644
--- a/ambari-web/app/controllers/global/cluster_controller.js
+++ b/ambari-web/app/controllers/global/cluster_controller.js
@@ -302,6 +302,11 @@ App.ClusterController = 
Em.Controller.extend(App.ReloadPopupMixin, {
   var lastUpgradeData = 
data.items.sortProperty('Upgrade.request_id').pop();
   var dbUpgradeState = App.db.get('MainAdminStackAndUpgrade', 
'upgradeState');
 
+  //completed upgrade shouldn't be restored
+  if (lastUpgradeData && lastUpgradeData.Upgrade.request_status === 
"COMPLETED") {
+return;
+  }
+
   if (!Em.isNone(dbUpgradeState)) {
 App.set('upgradeState', dbUpgradeState);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4115f382/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
--
diff --git 
a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js 
b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 20cfe40..01a9978 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -390,6 +390,9 @@ App.MainAdminStackAndUpgradeController = 
Em.Controller.extend(App.LocalStorage,
 isRetryPending: false
   });
 }
+if (data.Upgrade.request_status === 'COMPLETED') {
+  this.finish();
+}
   },
 
   /**
@@ -1541,28 +1544,26 @@ App.MainAdminStackAndUpgradeController = 
Em.Controller.extend(App.LocalStorage,
* and clean auxiliary data
*/
   finish: function () {
-if (App.get('upgradeState') === 'COMPLETED') {
-  var upgradeVersion = this.get('upgradeVersion') && 
this.get('upgradeVersion').match(/[a-zA-Z]+\-\d+\.\d+/);
-  this.setDBProperties({
-upgradeId: undefined,
-upgradeState: 'INIT',
-upgradeVersion: undefined,
-currentVersion: undefined,
-upgradeTypeDisplayName: undefined,
-upgradeType: undefined,
-failuresTolerance: undefined,
-isDowngrade: undefined,
-downgradeAllowed: undefined
-  });
-  App.clusterStatus.setClusterStatus({
-localdb: App.db.data
-  });
-  if (upgradeVersion && upgradeVersion[0]) {
-App.set('currentStackVersion', upgradeVersion[0]);
-  }
-  App.set('upgradeState', 'INIT');
+var upgradeVersion = this.get('upgradeVersion') && 
this.get('upgradeVersion').m

[1/2] ambari git commit: AMBARI-17087. takeover_config_merge.py should provide XML, yaml, properties-diff capability (aonishuk)

2016-06-09 Thread aonishuk
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 4115f382c -> 49a04d5b5
  refs/heads/trunk 1671d615a -> 31bc2d529


AMBARI-17087. takeover_config_merge.py should provide XML, yaml, 
properties-diff capability (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31bc2d52
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31bc2d52
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31bc2d52

Branch: refs/heads/trunk
Commit: 31bc2d52952de42e9e3f7c98596e8750c7554a80
Parents: 1671d61
Author: Andrew Onishuk 
Authored: Thu Jun 9 14:15:48 2016 +0300
Committer: Andrew Onishuk 
Committed: Thu Jun 9 14:15:48 2016 +0300

--
 .../resources/scripts/takeover_config_merge.py  | 185 +--
 1 file changed, 174 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/31bc2d52/ambari-server/src/main/resources/scripts/takeover_config_merge.py
--
diff --git a/ambari-server/src/main/resources/scripts/takeover_config_merge.py 
b/ambari-server/src/main/resources/scripts/takeover_config_merge.py
index 075f99f..e975318 100644
--- a/ambari-server/src/main/resources/scripts/takeover_config_merge.py
+++ b/ambari-server/src/main/resources/scripts/takeover_config_merge.py
@@ -30,6 +30,7 @@ import xml
 import xml.etree.ElementTree as ET
 import StringIO
 import ConfigParser
+from optparse import OptionGroup
 
 logger = logging.getLogger('AmbariTakeoverConfigMerge')
 
@@ -150,6 +151,8 @@ class XmlParser(Parser):  # Used DOM parser to read data 
into a map
 class ConfigMerge:
 
   CONTENT_UNKNOWN_FILES_MAPPING_FILE = {}
+  LEFT_INPUT_DIR = "/tmp/left"
+  RIGHT_INPUT_DIR = "/tmp/right"
   INPUT_DIR = '/etc/hadoop'
   OUTPUT_DIR = '/tmp'
   OUT_FILENAME = 'ambari_takeover_config_merge.out'
@@ -165,14 +168,18 @@ class ConfigMerge:
 
 
   config_files_map = {}
+  left_file_paths = None
+  right_file_paths = None
 
-  def __init__(self, config_files_map):
+  def __init__(self, config_files_map=None, left_file_paths=None, 
right_file_paths=None):
 self.config_files_map = config_files_map
+self.left_file_paths = left_file_paths
+self.right_file_paths = right_file_paths
 
   @staticmethod
-  def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS):
+  def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, 
directory=INPUT_DIR):
 filePaths = {}
-for dirName, subdirList, fileList in os.walk(ConfigMerge.INPUT_DIR, 
followlinks=True):
+for dirName, subdirList, fileList in os.walk(directory, followlinks=True):
   for file in fileList:
 root, ext = os.path.splitext(file)
 if ext in extensions:
@@ -333,6 +340,124 @@ class ConfigMerge:
   logger.info("Script successfully finished")
   return 0
 
+  def perform_diff(self):
+configurations_conflicts = {}
+attributes_conflicts = {}
+file_conflicts = []
+matches_configs = []
+
+for right_configs_names in self.right_file_paths:
+  for left_configs_names in self.left_file_paths:
+if right_configs_names == left_configs_names:
+  matches_configs.append(right_configs_names)
+
+for match_config in matches_configs:
+  configurations_conflicts[match_config], 
attributes_conflicts[match_config] = 
ConfigMerge.configuration_diff(self.left_file_paths[match_config], 
self.right_file_paths[match_config])
+
+file_conflicts = ConfigMerge.get_missing_files(self.right_file_paths, 
matches_configs, ConfigMerge.LEFT_INPUT_DIR) + \
+ ConfigMerge.get_missing_files(self.left_file_paths, 
matches_configs, ConfigMerge.RIGHT_INPUT_DIR)
+
+configuration_diff_output = None
+configuration_diff_output = ConfigMerge.format_diff_output(file_conflicts, 
configurations_conflicts, attributes_conflicts)
+
+if configuration_diff_output and configuration_diff_output != "":
+  conflict_filename = os.path.join(ConfigMerge.OUTPUT_DIR, "file-diff.txt")
+  logger.warn(
+"You have file diff conflicts. Please check 
{0}".format(conflict_filename))
+  with open(conflict_filename, "w") as fp:
+fp.write(configuration_diff_output)
+
+logger.info("Script successfully finished")
+return 0
+
+  @staticmethod
+  def format_diff_output(file_conflicts, configurations_conflicts, 
attributes_conflicts):
+output = ""
+if file_conflicts:
+  output += "=== File diff conflicts == \n\n"
+  for file_conflict in file_conflicts:
+output+=str(file_conflict)+"\n"
+
+if configurations_conflicts:
+  output += "\n\n=== Property diff conflicts == "
+  for config_name, property in configurations_conflicts.iteritems():
+  if property:
+output+= "\n\n||| "

[2/2] ambari git commit: AMBARI-17087. takeover_config_merge.py should provide XML, yaml, properties-diff capability (aonishuk)

2016-06-09 Thread aonishuk
AMBARI-17087. takeover_config_merge.py should provide XML, yaml, 
properties-diff capability (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/49a04d5b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/49a04d5b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/49a04d5b

Branch: refs/heads/branch-2.4
Commit: 49a04d5b5b8cd5fd637235901ecb3b56943c3d88
Parents: 4115f38
Author: Andrew Onishuk 
Authored: Thu Jun 9 14:15:51 2016 +0300
Committer: Andrew Onishuk 
Committed: Thu Jun 9 14:15:51 2016 +0300

--
 .../resources/scripts/takeover_config_merge.py  | 185 +--
 1 file changed, 174 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/49a04d5b/ambari-server/src/main/resources/scripts/takeover_config_merge.py
--
diff --git a/ambari-server/src/main/resources/scripts/takeover_config_merge.py 
b/ambari-server/src/main/resources/scripts/takeover_config_merge.py
index 075f99f..e975318 100644
--- a/ambari-server/src/main/resources/scripts/takeover_config_merge.py
+++ b/ambari-server/src/main/resources/scripts/takeover_config_merge.py
@@ -30,6 +30,7 @@ import xml
 import xml.etree.ElementTree as ET
 import StringIO
 import ConfigParser
+from optparse import OptionGroup
 
 logger = logging.getLogger('AmbariTakeoverConfigMerge')
 
@@ -150,6 +151,8 @@ class XmlParser(Parser):  # Used DOM parser to read data 
into a map
 class ConfigMerge:
 
   CONTENT_UNKNOWN_FILES_MAPPING_FILE = {}
+  LEFT_INPUT_DIR = "/tmp/left"
+  RIGHT_INPUT_DIR = "/tmp/right"
   INPUT_DIR = '/etc/hadoop'
   OUTPUT_DIR = '/tmp'
   OUT_FILENAME = 'ambari_takeover_config_merge.out'
@@ -165,14 +168,18 @@ class ConfigMerge:
 
 
   config_files_map = {}
+  left_file_paths = None
+  right_file_paths = None
 
-  def __init__(self, config_files_map):
+  def __init__(self, config_files_map=None, left_file_paths=None, 
right_file_paths=None):
 self.config_files_map = config_files_map
+self.left_file_paths = left_file_paths
+self.right_file_paths = right_file_paths
 
   @staticmethod
-  def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS):
+  def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, 
directory=INPUT_DIR):
 filePaths = {}
-for dirName, subdirList, fileList in os.walk(ConfigMerge.INPUT_DIR, 
followlinks=True):
+for dirName, subdirList, fileList in os.walk(directory, followlinks=True):
   for file in fileList:
 root, ext = os.path.splitext(file)
 if ext in extensions:
@@ -333,6 +340,124 @@ class ConfigMerge:
   logger.info("Script successfully finished")
   return 0
 
+  def perform_diff(self):
+configurations_conflicts = {}
+attributes_conflicts = {}
+file_conflicts = []
+matches_configs = []
+
+for right_configs_names in self.right_file_paths:
+  for left_configs_names in self.left_file_paths:
+if right_configs_names == left_configs_names:
+  matches_configs.append(right_configs_names)
+
+for match_config in matches_configs:
+  configurations_conflicts[match_config], 
attributes_conflicts[match_config] = 
ConfigMerge.configuration_diff(self.left_file_paths[match_config], 
self.right_file_paths[match_config])
+
+file_conflicts = ConfigMerge.get_missing_files(self.right_file_paths, 
matches_configs, ConfigMerge.LEFT_INPUT_DIR) + \
+ ConfigMerge.get_missing_files(self.left_file_paths, 
matches_configs, ConfigMerge.RIGHT_INPUT_DIR)
+
+configuration_diff_output = None
+configuration_diff_output = ConfigMerge.format_diff_output(file_conflicts, 
configurations_conflicts, attributes_conflicts)
+
+if configuration_diff_output and configuration_diff_output != "":
+  conflict_filename = os.path.join(ConfigMerge.OUTPUT_DIR, "file-diff.txt")
+  logger.warn(
+"You have file diff conflicts. Please check 
{0}".format(conflict_filename))
+  with open(conflict_filename, "w") as fp:
+fp.write(configuration_diff_output)
+
+logger.info("Script successfully finished")
+return 0
+
+  @staticmethod
+  def format_diff_output(file_conflicts, configurations_conflicts, 
attributes_conflicts):
+output = ""
+if file_conflicts:
+  output += "=== File diff conflicts == \n\n"
+  for file_conflict in file_conflicts:
+output+=str(file_conflict)+"\n"
+
+if configurations_conflicts:
+  output += "\n\n=== Property diff conflicts == "
+  for config_name, property in configurations_conflicts.iteritems():
+  if property:
+output+= "\n\n||| " + config_name + " |||\n"
+output+= "\n".join(str(p) for p in property)
+
+if attributes_conflicts:
+  

ambari git commit: AMBARI-16980 : History tab takes long to populate when there are more entries in the history table. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/trunk 31bc2d529 -> e88ca22cf


AMBARI-16980 : History tab takes long to populate when there are more entries in 
the history table. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e88ca22c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e88ca22c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e88ca22c

Branch: refs/heads/trunk
Commit: e88ca22cfb7f97c36886a925d8197f1fee477070
Parents: 31bc2d5
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:49:07 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:49:07 2016 +0530

--
 .../hive/persistence/utils/ItemNotFound.java|  18 ++
 .../view/hive/resources/jobs/Aggregator.java| 284 +++
 .../view/hive/resources/jobs/JobService.java|  85 +-
 .../jobs/OperationHandleResourceManager.java|  12 +-
 .../hive/resources/jobs/atsJobs/ATSParser.java  |  82 +-
 .../jobs/atsJobs/ATSRequestsDelegate.java   |   6 +-
 .../jobs/atsJobs/ATSRequestsDelegateImpl.java   |  35 ++-
 .../hive/resources/jobs/atsJobs/IATSParser.java |   8 +-
 .../view/hive/resources/jobs/viewJobs/Job.java  |  23 +-
 .../jobs/viewJobs/JobControllerImpl.java|  28 +-
 .../hive/resources/jobs/viewJobs/JobImpl.java   |  17 +-
 .../hive/resources/jobs/viewJobs/JobInfo.java   |  78 +
 .../app/components/number-range-widget.js   |  15 +-
 .../ui/hive-web/app/controllers/history.js  | 201 +
 .../ui/hive-web/app/initializers/i18n.js|   3 +-
 .../resources/ui/hive-web/app/models/job.js |   3 +-
 .../resources/ui/hive-web/app/routes/history.js |  16 +-
 .../ui/hive-web/app/services/history.js | 204 +
 .../ui/hive-web/app/templates/history.hbs   |  68 ++---
 .../ui/hive-web/app/utils/constants.js  |   2 +-
 .../view/hive/resources/jobs/ATSParserTest.java |  28 +-
 .../hive/resources/jobs/AggregatorTest.java |  69 -
 22 files changed, 1049 insertions(+), 236 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/e88ca22c/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
index 3b7e51a..06976b9 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
@@ -22,4 +22,22 @@ package org.apache.ambari.view.hive.persistence.utils;
  * Thrown when item was not found in DB
  */
 public class ItemNotFound extends Exception {
+  public ItemNotFound() {
+  }
+
+  public ItemNotFound(String message) {
+super(message);
+  }
+
+  public ItemNotFound(String message, Throwable cause) {
+super(message, cause);
+  }
+
+  public ItemNotFound(Throwable cause) {
+super(cause);
+  }
+
+  public ItemNotFound(String message, Throwable cause, boolean 
enableSuppression, boolean writableStackTrace) {
+super(message, cause, enableSuppression, writableStackTrace);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88ca22c/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
index f119ff3..5164a4d 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
@@ -29,6 +29,7 @@ import 
org.apache.ambari.view.hive.resources.jobs.atsJobs.IATSParser;
 import org.apache.ambari.view.hive.resources.jobs.atsJobs.TezDagId;
 import org.apache.ambari.view.hive.resources.jobs.viewJobs.Job;
 import org.apache.ambari.view.hive.resources.jobs.viewJobs.JobImpl;
+import org.apache.ambari.view.hive.resources.jobs.viewJobs.JobInfo;
 import org.apache.commons.beanutils.PropertyUtils;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.binary.Hex;
@@ -37,10 +38,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.InvocationTargetException;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Set;
 
 /**
  * View Jobs and ATS Jobs aggregator.

ambari git commit: AMBARI-16980 : History tab takes long to populate when there are more entries in the history table. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 49a04d5b5 -> e149d0864


AMBARI-16980 : History tab takes long to populate when there are more entries in 
the history table. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e149d086
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e149d086
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e149d086

Branch: refs/heads/branch-2.4
Commit: e149d0864a773529d210260e7f99cfda1ae97a60
Parents: 49a04d5
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:49:07 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:50:12 2016 +0530

--
 .../hive/persistence/utils/ItemNotFound.java|  18 ++
 .../view/hive/resources/jobs/Aggregator.java| 284 +++
 .../view/hive/resources/jobs/JobService.java|  85 +-
 .../jobs/OperationHandleResourceManager.java|  12 +-
 .../hive/resources/jobs/atsJobs/ATSParser.java  |  82 +-
 .../jobs/atsJobs/ATSRequestsDelegate.java   |   6 +-
 .../jobs/atsJobs/ATSRequestsDelegateImpl.java   |  35 ++-
 .../hive/resources/jobs/atsJobs/IATSParser.java |   8 +-
 .../view/hive/resources/jobs/viewJobs/Job.java  |  23 +-
 .../jobs/viewJobs/JobControllerImpl.java|  28 +-
 .../hive/resources/jobs/viewJobs/JobImpl.java   |  17 +-
 .../hive/resources/jobs/viewJobs/JobInfo.java   |  78 +
 .../app/components/number-range-widget.js   |  15 +-
 .../ui/hive-web/app/controllers/history.js  | 201 +
 .../ui/hive-web/app/initializers/i18n.js|   3 +-
 .../resources/ui/hive-web/app/models/job.js |   3 +-
 .../resources/ui/hive-web/app/routes/history.js |  16 +-
 .../ui/hive-web/app/services/history.js | 204 +
 .../ui/hive-web/app/templates/history.hbs   |  68 ++---
 .../ui/hive-web/app/utils/constants.js  |   2 +-
 .../view/hive/resources/jobs/ATSParserTest.java |  28 +-
 .../hive/resources/jobs/AggregatorTest.java |  69 -
 22 files changed, 1049 insertions(+), 236 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/e149d086/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
index 3b7e51a..06976b9 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/persistence/utils/ItemNotFound.java
@@ -22,4 +22,22 @@ package org.apache.ambari.view.hive.persistence.utils;
  * Thrown when item was not found in DB
  */
 public class ItemNotFound extends Exception {
+  public ItemNotFound() {
+  }
+
+  public ItemNotFound(String message) {
+super(message);
+  }
+
+  public ItemNotFound(String message, Throwable cause) {
+super(message, cause);
+  }
+
+  public ItemNotFound(Throwable cause) {
+super(cause);
+  }
+
+  public ItemNotFound(String message, Throwable cause, boolean 
enableSuppression, boolean writableStackTrace) {
+super(message, cause, enableSuppression, writableStackTrace);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e149d086/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
index f119ff3..5164a4d 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/Aggregator.java
@@ -29,6 +29,7 @@ import 
org.apache.ambari.view.hive.resources.jobs.atsJobs.IATSParser;
 import org.apache.ambari.view.hive.resources.jobs.atsJobs.TezDagId;
 import org.apache.ambari.view.hive.resources.jobs.viewJobs.Job;
 import org.apache.ambari.view.hive.resources.jobs.viewJobs.JobImpl;
+import org.apache.ambari.view.hive.resources.jobs.viewJobs.JobInfo;
 import org.apache.commons.beanutils.PropertyUtils;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.binary.Hex;
@@ -37,10 +38,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.InvocationTargetException;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Set;
 
 /**
  * View Jobs and ATS Jobs ag

ambari git commit: AMBARI-16836 : View Instance: Data Migration. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/trunk e88ca22cf -> ddb201f8c


AMBARI-16836 : View Instance: Data Migration. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ddb201f8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ddb201f8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ddb201f8

Branch: refs/heads/trunk
Commit: ddb201f8ce8ecdc2a563a5ae69c071655e481b04
Parents: e88ca22
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:53:44 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:54:14 2016 +0530

--
 .../api/services/ViewDataMigrationService.java  | 143 ++--
 .../view/ViewDataMigrationContextImpl.java  |  44 +++-
 .../server/view/ViewDataMigrationUtility.java   | 228 +++
 .../ambari/server/view/ViewExtractor.java   |   7 +-
 .../apache/ambari/server/view/ViewRegistry.java | 147 +++-
 .../main/python/ambari_server/serverUpgrade.py  |   5 -
 .../services/ViewDataMigrationServiceTest.java  | 178 ++-
 .../view/ViewDataMigrationContextImplTest.java  |  55 ++---
 .../view/ViewDataMigrationUtilityTest.java  | 184 +++
 .../ambari/server/view/ViewRegistryTest.java|   6 +-
 10 files changed, 658 insertions(+), 339 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/ddb201f8/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
index c6846ce..4a71ce2 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
@@ -18,11 +18,9 @@
 package org.apache.ambari.server.api.services;
 
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.view.ViewDataMigrationContextImpl;
+import org.apache.ambari.server.view.ViewDataMigrationUtility;
 import org.apache.ambari.server.view.ViewRegistry;
-import org.apache.ambari.view.migration.ViewDataMigrationContext;
 import org.apache.ambari.view.migration.ViewDataMigrationException;
-import org.apache.ambari.view.migration.ViewDataMigrator;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -31,7 +29,6 @@ import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Response;
-import java.util.Map;
 
 /**
  * Service responsible for data migration between view instances.
@@ -57,7 +54,15 @@ public class ViewDataMigrationService extends BaseService {
*/
   private final String instanceName;
 
-  private ViewRegistry viewRegistry;
+  /**
+   * The singleton view registry.
+   */
+  ViewRegistry viewRegistry;
+
+  /**
+   * The view data migration utility.
+   */
+  private ViewDataMigrationUtility viewDataMigrationUtility;
 
   /**
* Constructor.
@@ -77,8 +82,8 @@ public class ViewDataMigrationService extends BaseService {
* Migrates view instance persistence data from origin view instance
* specified in the path params.
*
-   * @param originViewVersionthe origin view version
-   * @param originInstanceName   the origin view instance name
+   * @param originViewVersion  the origin view version
+   * @param originInstanceName the origin view instance name
*/
   @PUT
   @Path("{originVersion}/{originInstanceName}")
@@ -93,127 +98,25 @@ public class ViewDataMigrationService extends BaseService {
 LOG.info("Data Migration to view instance " + viewName + "/" + viewVersion 
+ "/" + instanceName +
 " from " + viewName + "/" + originViewVersion + "/" + 
originInstanceName);
 
-ViewInstanceEntity instanceDefinition = getViewInstanceEntity(viewName, 
viewVersion, instanceName);
-ViewInstanceEntity originInstanceDefinition = 
getViewInstanceEntity(viewName, originViewVersion, originInstanceName);
+ViewInstanceEntity instanceDefinition = viewRegistry.getInstanceDefinition(
+viewName, viewVersion, instanceName);
+ViewInstanceEntity originInstanceDefinition = 
viewRegistry.getInstanceDefinition(
+viewName, originViewVersion, originInstanceName);
 
-ViewDataMigrationContextImpl migrationContext = 
getViewDataMigrationContext(instanceDefinition, originInstanceDefinition);
-
-ViewDataMigrator dataMigrator = getViewDataMigrator(instanceDefinition, 
migrationContext);
-
-LOG.debug("Running before-migration hook");
-if (!dataMigrator.beforeMigr

ambari git commit: AMBARI-16836 : View Instance: Data Migration. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 e149d0864 -> a20fd0e26


AMBARI-16836 : View Instance: Data Migration. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a20fd0e2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a20fd0e2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a20fd0e2

Branch: refs/heads/branch-2.4
Commit: a20fd0e26e6a9d08a025a3ef6ba7c2092df39f61
Parents: e149d08
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:53:44 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:53:44 2016 +0530

--
 .../api/services/ViewDataMigrationService.java  | 143 ++--
 .../view/ViewDataMigrationContextImpl.java  |  44 +++-
 .../server/view/ViewDataMigrationUtility.java   | 228 +++
 .../ambari/server/view/ViewExtractor.java   |   7 +-
 .../apache/ambari/server/view/ViewRegistry.java | 147 +++-
 .../main/python/ambari_server/serverUpgrade.py  |   5 -
 .../services/ViewDataMigrationServiceTest.java  | 178 ++-
 .../view/ViewDataMigrationContextImplTest.java  |  55 ++---
 .../view/ViewDataMigrationUtilityTest.java  | 184 +++
 .../ambari/server/view/ViewRegistryTest.java|   6 +-
 10 files changed, 658 insertions(+), 339 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a20fd0e2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
index c6846ce..4a71ce2 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ViewDataMigrationService.java
@@ -18,11 +18,9 @@
 package org.apache.ambari.server.api.services;
 
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.view.ViewDataMigrationContextImpl;
+import org.apache.ambari.server.view.ViewDataMigrationUtility;
 import org.apache.ambari.server.view.ViewRegistry;
-import org.apache.ambari.view.migration.ViewDataMigrationContext;
 import org.apache.ambari.view.migration.ViewDataMigrationException;
-import org.apache.ambari.view.migration.ViewDataMigrator;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -31,7 +29,6 @@ import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Response;
-import java.util.Map;
 
 /**
  * Service responsible for data migration between view instances.
@@ -57,7 +54,15 @@ public class ViewDataMigrationService extends BaseService {
*/
   private final String instanceName;
 
-  private ViewRegistry viewRegistry;
+  /**
+   * The singleton view registry.
+   */
+  ViewRegistry viewRegistry;
+
+  /**
+   * The view data migration utility.
+   */
+  private ViewDataMigrationUtility viewDataMigrationUtility;
 
   /**
* Constructor.
@@ -77,8 +82,8 @@ public class ViewDataMigrationService extends BaseService {
* Migrates view instance persistence data from origin view instance
* specified in the path params.
*
-   * @param originViewVersionthe origin view version
-   * @param originInstanceName   the origin view instance name
+   * @param originViewVersion  the origin view version
+   * @param originInstanceName the origin view instance name
*/
   @PUT
   @Path("{originVersion}/{originInstanceName}")
@@ -93,127 +98,25 @@ public class ViewDataMigrationService extends BaseService {
 LOG.info("Data Migration to view instance " + viewName + "/" + viewVersion 
+ "/" + instanceName +
 " from " + viewName + "/" + originViewVersion + "/" + 
originInstanceName);
 
-ViewInstanceEntity instanceDefinition = getViewInstanceEntity(viewName, 
viewVersion, instanceName);
-ViewInstanceEntity originInstanceDefinition = 
getViewInstanceEntity(viewName, originViewVersion, originInstanceName);
+ViewInstanceEntity instanceDefinition = viewRegistry.getInstanceDefinition(
+viewName, viewVersion, instanceName);
+ViewInstanceEntity originInstanceDefinition = 
viewRegistry.getInstanceDefinition(
+viewName, originViewVersion, originInstanceName);
 
-ViewDataMigrationContextImpl migrationContext = 
getViewDataMigrationContext(instanceDefinition, originInstanceDefinition);
-
-ViewDataMigrator dataMigrator = getViewDataMigrator(instanceDefinition, 
migrationContext);
-
-LOG.debug("Running before-migration hook");
-if (!dataMigrator.

ambari git commit: AMBARI-17085 : corrected the initialization condition of view entities. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/trunk ddb201f8c -> 140c9f13d


AMBARI-17085 : corrected the initialization condition of view entities. 
(Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/140c9f13
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/140c9f13
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/140c9f13

Branch: refs/heads/trunk
Commit: 140c9f13d6b663847473d50ad3b3bed74c3de84f
Parents: ddb201f
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:58:01 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:58:01 2016 +0530

--
 .../org/apache/ambari/server/view/persistence/DataStoreImpl.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/140c9f13/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
index 039fd6f..a604458 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
@@ -255,7 +255,6 @@ public class DataStoreImpl implements DataStore {
 if (!initialized) {
   synchronized (this) {
 if (!initialized) {
-  initialized = true;
   try {
 for (ViewEntityEntity viewEntityEntity : 
viewInstanceEntity.getEntities()){
 
@@ -266,9 +265,9 @@ public class DataStoreImpl implements DataStore {
   entityMap.put(name, viewEntityEntity);
   entityClassMap.put(clazz, name);
 }
-
 configureTypes(jpaDynamicHelper, classLoader);
 
+initialized = true;
   } catch (Exception e) {
 throwPersistenceException("Can't initialize data store for view " +
 viewInstanceEntity.getViewName() + "." + 
viewInstanceEntity.getName(), e);



ambari git commit: AMBARI-17085 : corrected the initialization condition of view entities. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 a20fd0e26 -> 48caf26ad


AMBARI-17085 : corrected the initialization condition of view entities. 
(Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/48caf26a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/48caf26a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/48caf26a

Branch: refs/heads/branch-2.4
Commit: 48caf26ada6cd9247bd049cc40d7976e17bc9ffb
Parents: a20fd0e
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 17:58:01 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 17:58:51 2016 +0530

--
 .../org/apache/ambari/server/view/persistence/DataStoreImpl.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/48caf26a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
index 039fd6f..a604458 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/view/persistence/DataStoreImpl.java
@@ -255,7 +255,6 @@ public class DataStoreImpl implements DataStore {
 if (!initialized) {
   synchronized (this) {
 if (!initialized) {
-  initialized = true;
   try {
 for (ViewEntityEntity viewEntityEntity : 
viewInstanceEntity.getEntities()){
 
@@ -266,9 +265,9 @@ public class DataStoreImpl implements DataStore {
   entityMap.put(name, viewEntityEntity);
   entityClassMap.put(clazz, name);
 }
-
 configureTypes(jpaDynamicHelper, classLoader);
 
+initialized = true;
   } catch (Exception e) {
 throwPersistenceException("Can't initialize data store for view " +
 viewInstanceEntity.getViewName() + "." + 
viewInstanceEntity.getName(), e);



ambari git commit: AMBARI-17081 : hive view upload table adding support for date and time format as per hive. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 48caf26ad -> 0debb0628


AMBARI-17081 : hive view upload table adding support for date and time format 
as per hive. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0debb062
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0debb062
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0debb062

Branch: refs/heads/branch-2.4
Commit: 0debb0628d64890f3d8c3f47d8d3cbd859f32031
Parents: 48caf26
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 18:02:08 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 18:02:08 2016 +0530

--
 .../resources/uploads/parsers/ParseUtils.java   | 62 ++--
 .../hive/resources/uploads/parsers/Parser.java  |  4 +-
 .../hive/resources/upload/ParseUtilsTest.java   | 54 +
 3 files changed, 98 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/0debb062/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
index 3261bfa..e4e2853 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
@@ -18,23 +18,28 @@
 
 package org.apache.ambari.view.hive.resources.uploads.parsers;
 
+import org.apache.directory.api.util.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
 
-import static org.apache.ambari.view.hive.client.ColumnDescription.*;
+import static org.apache.ambari.view.hive.client.ColumnDescription.DataTypes;
 
 public class ParseUtils {
 
   protected final static Logger LOG =
 LoggerFactory.getLogger(ParseUtils.class);
 
-  final public static String[] DATE_FORMATS = {"mm/dd/", "dd/mm/", 
"mm-dd-" /*add more formatss*/};
+  final public static DataTypes[] dataTypeList = {DataTypes.BOOLEAN, 
DataTypes.INT, DataTypes.BIGINT, DataTypes.DOUBLE, DataTypes.CHAR, 
DataTypes.TIMESTAMP, DataTypes.DATE, DataTypes.STRING};
+  private static final String HIVE_DATE_FORMAT = "-MM-dd";
+
+  // no strict checking required as it is done by Date parsing
+  private static final String HIVE_DATE_FORMAT_REGEX = 
"^[0-9]{4}-[0-9]?[1-9]-[0-9]?[0-9]$";
 
-  final public static DataTypes [] dataTypeList = 
{DataTypes.BOOLEAN,DataTypes.INT,DataTypes.BIGINT,DataTypes.DOUBLE,DataTypes.CHAR,DataTypes.DATE,DataTypes.STRING};
 
   public static boolean isInteger(Object object) {
 if (object == null)
@@ -59,16 +64,11 @@ public class ParseUtils {
   return true;
 
 String strValue = object.toString();
-if (strValue.equalsIgnoreCase("true") || 
strValue.equalsIgnoreCase("false"))
-  return true;
-else
-  return false;
+return strValue.equalsIgnoreCase("true") || 
strValue.equalsIgnoreCase("false");
   }
 
   public static boolean isString(Object object) {
-if (object == null)
-  return false;
-else return true; // any non null can always be interpreted as a string
+return object != null;
   }
 
   public static boolean isLong(Object object) {
@@ -109,10 +109,8 @@ public class ParseUtils {
   return true;
 
 String str = object.toString().trim();
-if (str.length() == 1)
-  return true;
+return str.length() == 1;
 
-return false;
   }
 
   public static boolean isDate(Object object) {
@@ -123,13 +121,36 @@ public class ParseUtils {
   return true;
 
 String str = object.toString();
-for (String format : DATE_FORMATS) {
-  try {
-Date i = new SimpleDateFormat(format).parse(str);
-return true;
-  } catch (Exception e) {
+if (Strings.isNotEmpty(str)) {
+  str = str.trim();
+  if (str.matches(HIVE_DATE_FORMAT_REGEX)) {
+try {
+  SimpleDateFormat sdf = new SimpleDateFormat(HIVE_DATE_FORMAT);
+  sdf.setLenient(false);
+  Date date = sdf.parse(str);
+  return true;
+} catch (Exception e) {
+  LOG.debug("error while parsing as date string {}, format {}", str, 
HIVE_DATE_FORMAT, e);
+}
   }
 }
+return false;
+  }
+
+  public static boolean isTimeStamp(Object object) {
+if (object == null)
+  return false;
+
+if (object instanceof Date)
+  return true;
+
+String str = object.toString();
+

ambari git commit: AMBARI-17081 : hive view upload table adding support for date and time format as per hive. (Nitiraj Rathore via dipayanb)

2016-06-09 Thread dbhowmick
Repository: ambari
Updated Branches:
  refs/heads/trunk 140c9f13d -> 9c6ab0be9


AMBARI-17081 : hive view upload table adding support for date and time format 
as per hive. (Nitiraj Rathore via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9c6ab0be
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9c6ab0be
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9c6ab0be

Branch: refs/heads/trunk
Commit: 9c6ab0be9f488f0d89d032c23af0138b729994f8
Parents: 140c9f1
Author: Dipayan Bhowmick 
Authored: Thu Jun 9 18:02:08 2016 +0530
Committer: Dipayan Bhowmick 
Committed: Thu Jun 9 18:02:54 2016 +0530

--
 .../resources/uploads/parsers/ParseUtils.java   | 62 ++--
 .../hive/resources/uploads/parsers/Parser.java  |  4 +-
 .../hive/resources/upload/ParseUtilsTest.java   | 54 +
 3 files changed, 98 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/9c6ab0be/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
--
diff --git 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
index 3261bfa..e4e2853 100644
--- 
a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
+++ 
b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/uploads/parsers/ParseUtils.java
@@ -18,23 +18,28 @@
 
 package org.apache.ambari.view.hive.resources.uploads.parsers;
 
+import org.apache.directory.api.util.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.List;
 
-import static org.apache.ambari.view.hive.client.ColumnDescription.*;
+import static org.apache.ambari.view.hive.client.ColumnDescription.DataTypes;
 
 public class ParseUtils {
 
   protected final static Logger LOG =
 LoggerFactory.getLogger(ParseUtils.class);
 
-  final public static String[] DATE_FORMATS = {"mm/dd/", "dd/mm/", 
"mm-dd-" /*add more formatss*/};
+  final public static DataTypes[] dataTypeList = {DataTypes.BOOLEAN, 
DataTypes.INT, DataTypes.BIGINT, DataTypes.DOUBLE, DataTypes.CHAR, 
DataTypes.TIMESTAMP, DataTypes.DATE, DataTypes.STRING};
+  private static final String HIVE_DATE_FORMAT = "-MM-dd";
+
+  // no strict checking required as it is done by Date parsing
+  private static final String HIVE_DATE_FORMAT_REGEX = 
"^[0-9]{4}-[0-9]?[1-9]-[0-9]?[0-9]$";
 
-  final public static DataTypes [] dataTypeList = 
{DataTypes.BOOLEAN,DataTypes.INT,DataTypes.BIGINT,DataTypes.DOUBLE,DataTypes.CHAR,DataTypes.DATE,DataTypes.STRING};
 
   public static boolean isInteger(Object object) {
 if (object == null)
@@ -59,16 +64,11 @@ public class ParseUtils {
   return true;
 
 String strValue = object.toString();
-if (strValue.equalsIgnoreCase("true") || 
strValue.equalsIgnoreCase("false"))
-  return true;
-else
-  return false;
+return strValue.equalsIgnoreCase("true") || 
strValue.equalsIgnoreCase("false");
   }
 
   public static boolean isString(Object object) {
-if (object == null)
-  return false;
-else return true; // any non null can always be interpreted as a string
+return object != null;
   }
 
   public static boolean isLong(Object object) {
@@ -109,10 +109,8 @@ public class ParseUtils {
   return true;
 
 String str = object.toString().trim();
-if (str.length() == 1)
-  return true;
+return str.length() == 1;
 
-return false;
   }
 
   public static boolean isDate(Object object) {
@@ -123,13 +121,36 @@ public class ParseUtils {
   return true;
 
 String str = object.toString();
-for (String format : DATE_FORMATS) {
-  try {
-Date i = new SimpleDateFormat(format).parse(str);
-return true;
-  } catch (Exception e) {
+if (Strings.isNotEmpty(str)) {
+  str = str.trim();
+  if (str.matches(HIVE_DATE_FORMAT_REGEX)) {
+try {
+  SimpleDateFormat sdf = new SimpleDateFormat(HIVE_DATE_FORMAT);
+  sdf.setLenient(false);
+  Date date = sdf.parse(str);
+  return true;
+} catch (Exception e) {
+  LOG.debug("error while parsing as date string {}, format {}", str, 
HIVE_DATE_FORMAT, e);
+}
   }
 }
+return false;
+  }
+
+  public static boolean isTimeStamp(Object object) {
+if (object == null)
+  return false;
+
+if (object instanceof Date)
+  return true;
+
+String str = object.toString();
+try {
+   

ambari git commit: AMBARI-17141 "Settings" button shown to cluster administrator. (atkach)

2016-06-09 Thread atkach
Repository: ambari
Updated Branches:
  refs/heads/trunk 9c6ab0be9 -> f4dae12eb


AMBARI-17141 "Settings" button shown to cluster administrator. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f4dae12e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f4dae12e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f4dae12e

Branch: refs/heads/trunk
Commit: f4dae12ebe27035c478f884a4d15012a12c0dd0d
Parents: 9c6ab0b
Author: Andrii Tkach 
Authored: Thu Jun 9 13:43:27 2016 +0300
Committer: Andrii Tkach 
Committed: Thu Jun 9 16:50:40 2016 +0300

--
 ambari-web/app/templates/application.hbs| 2 +-
 ambari-web/app/templates/common/modal_popup.hbs | 8 
 ambari-web/app/utils/host_progress_popup.js | 7 ---
 3 files changed, 5 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/f4dae12e/ambari-web/app/templates/application.hbs
--
diff --git a/ambari-web/app/templates/application.hbs 
b/ambari-web/app/templates/application.hbs
index 9c17223..93f0681 100644
--- a/ambari-web/app/templates/application.hbs
+++ b/ambari-web/app/templates/application.hbs
@@ -81,7 +81,7 @@
   {{/if}}
 {{/if}}
 {{#if isExistingClusterDataLoaded}}
-  {{#isAuthorized "CLUSTER.UPGRADE_DOWNGRADE_STACK"}}
+  {{#isAuthorized "AMBARI.MANAGE_SETTINGS"}}
 {{t app.settings}}
   {{/isAuthorized}}
 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/f4dae12e/ambari-web/app/templates/common/modal_popup.hbs
--
diff --git a/ambari-web/app/templates/common/modal_popup.hbs 
b/ambari-web/app/templates/common/modal_popup.hbs
index e4537e9..2e6d13e 100644
--- a/ambari-web/app/templates/common/modal_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popup.hbs
@@ -47,10 +47,10 @@
   {{view view.footerClass}}
 {{else}}
   
-{{#if view.hasFooterCheckbox}}
-  {{view Ember.Checkbox 
classNames="checkbox" checkedBinding="view.isNotShowBgChecked"}}  
-  {{t app.settings.notShowBgOperations}}
-{{/if}}
+  {{#isAuthorized "AMBARI.MANAGE_SETTINGS"}}
+{{view Ember.Checkbox 
classNames="checkbox" checkedBinding="view.isNotShowBgChecked"}}
+   {{t app.settings.notShowBgOperations}}
+  {{/isAuthorized}}
 {{#if view.third}}
   {{view.third}}
 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/f4dae12e/ambari-web/app/utils/host_progress_popup.js
--
diff --git a/ambari-web/app/utils/host_progress_popup.js 
b/ambari-web/app/utils/host_progress_popup.js
index 20ea220..6e0dd2f 100644
--- a/ambari-web/app/utils/host_progress_popup.js
+++ b/ambari-web/app/utils/host_progress_popup.js
@@ -851,13 +851,6 @@ App.HostPopup = Em.Object.create({
   classNames: ['sixty-percent-width-modal', 'host-progress-popup', 
'full-height-modal'],
 
   /**
-   * for the checkbox: do not show this dialog again
-   *
-   * @type {bool}
-   */
-  hasFooterCheckbox: true,
-
-  /**
* Auto-display BG-popup
*
* @type {bool}



ambari git commit: AMBARI-17121 Unit-test failing coz of issue "YARN service check fails if there is no queue named 'default'" (dsen)

2016-06-09 Thread dsen
Repository: ambari
Updated Branches:
  refs/heads/trunk f4dae12eb -> b57a7cfa2


AMBARI-17121 Unit-test failing coz of issue "YARN service check fails if there 
is no queue named 'default'" (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b57a7cfa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b57a7cfa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b57a7cfa

Branch: refs/heads/trunk
Commit: b57a7cfa2ff4d2668dc421826ffaa84e8425d26a
Parents: f4dae12
Author: Dmytro Sen 
Authored: Thu Jun 9 16:52:32 2016 +0300
Committer: Dmytro Sen 
Committed: Thu Jun 9 16:52:47 2016 +0300

--
 .../YARN/2.1.0.2.0/package/scripts/params_linux.py| 2 +-
 .../src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py | 2 ++
 ambari-server/src/test/python/stacks/2.0.6/configs/default.json   | 3 ++-
 ambari-server/src/test/python/stacks/2.0.6/configs/secured.json   | 3 ++-
 ambari-server/src/test/python/stacks/2.1/configs/default.json | 3 ++-
 ambari-server/src/test/python/stacks/2.1/configs/secured.json | 3 ++-
 6 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/b57a7cfa/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 4d281a8..29fb3c1 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -173,7 +173,7 @@ mapred_log_dir_prefix = 
config['configurations']['mapred-env']['mapred_log_dir_p
 mapred_env_sh_template = config['configurations']['mapred-env']['content']
 yarn_env_sh_template = config['configurations']['yarn-env']['content']
 yarn_nodemanager_recovery_dir = 
default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = 
config['configurations']['yarn-env']['service_check.queue.name']
+service_check_queue_name = 
default('/configurations/yarn-env/service_check.queue.name','default')
 
 if len(rm_hosts) > 1:
   additional_rm_host = rm_hosts[1]

http://git-wip-us.apache.org/repos/asf/ambari/blob/b57a7cfa/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index b7c19ec..c2fd8a7 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -1392,6 +1392,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, 
configurations, services, hosts):
 validationItems = [ ]
+if not services:
+  return self.toConfigurationValidationProblems(validationItems, 
"yarn-env")
 yarnEnvProperties = getSiteProperties(configurations, "yarn-env")
 capacity_scheduler_properties, received_as_key_value_pair = 
self.getCapacitySchedulerProperties(services)
 leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b57a7cfa/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
--
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 86138d1..04aa828 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -546,7 +546,8 @@
 "resourcemanager_heapsize": "1024",
 "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
 "min_user_id": "1000",
-"is_supported_yarn_ranger": "false"
+"is_supported_yarn_ranger": "false",
+"service_check.queue.name": "default"
 },
 "hadoop-env": {
 "hdfs_tmp_dir": "/tmp",

http://git-wip-us.apache.org/repos/asf/ambari/blob/b57a7cfa/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
--
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index

ambari git commit: AMBARI-17121 Unit-test failing coz of issue "YARN service check fails if there is no queue named 'default'" (dsen)

2016-06-09 Thread dsen
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 0debb0628 -> 09050cb09


AMBARI-17121 Unit-test failing coz of issue "YARN service check fails if there 
is no queue named 'default'" (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/09050cb0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/09050cb0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/09050cb0

Branch: refs/heads/branch-2.4
Commit: 09050cb09f4f6fbd4190c7b05a3138c3085cb0bc
Parents: 0debb06
Author: Dmytro Sen 
Authored: Thu Jun 9 16:52:32 2016 +0300
Committer: Dmytro Sen 
Committed: Thu Jun 9 16:53:32 2016 +0300

--
 .../YARN/2.1.0.2.0/package/scripts/params_linux.py| 2 +-
 .../src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py | 2 ++
 ambari-server/src/test/python/stacks/2.0.6/configs/default.json   | 3 ++-
 ambari-server/src/test/python/stacks/2.0.6/configs/secured.json   | 3 ++-
 ambari-server/src/test/python/stacks/2.1/configs/default.json | 3 ++-
 ambari-server/src/test/python/stacks/2.1/configs/secured.json | 3 ++-
 6 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/09050cb0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 4d281a8..29fb3c1 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -173,7 +173,7 @@ mapred_log_dir_prefix = 
config['configurations']['mapred-env']['mapred_log_dir_p
 mapred_env_sh_template = config['configurations']['mapred-env']['content']
 yarn_env_sh_template = config['configurations']['yarn-env']['content']
 yarn_nodemanager_recovery_dir = 
default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = 
config['configurations']['yarn-env']['service_check.queue.name']
+service_check_queue_name = 
default('/configurations/yarn-env/service_check.queue.name','default')
 
 if len(rm_hosts) > 1:
   additional_rm_host = rm_hosts[1]

http://git-wip-us.apache.org/repos/asf/ambari/blob/09050cb0/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index b7c19ec..c2fd8a7 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -1392,6 +1392,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, 
configurations, services, hosts):
 validationItems = [ ]
+if not services:
+  return self.toConfigurationValidationProblems(validationItems, 
"yarn-env")
 yarnEnvProperties = getSiteProperties(configurations, "yarn-env")
 capacity_scheduler_properties, received_as_key_value_pair = 
self.getCapacitySchedulerProperties(services)
 leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)

http://git-wip-us.apache.org/repos/asf/ambari/blob/09050cb0/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
--
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 86138d1..04aa828 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -546,7 +546,8 @@
 "resourcemanager_heapsize": "1024",
 "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
 "min_user_id": "1000",
-"is_supported_yarn_ranger": "false"
+"is_supported_yarn_ranger": "false",
+"service_check.queue.name": "default"
 },
 "hadoop-env": {
 "hdfs_tmp_dir": "/tmp",

http://git-wip-us.apache.org/repos/asf/ambari/blob/09050cb0/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
--
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json 
b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.

ambari git commit: AMBARI-17141 "Settings" button shown to cluster administrator. (atkach)

2016-06-09 Thread atkach
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 09050cb09 -> a7edab221


AMBARI-17141 "Settings" button shown to cluster administrator. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a7edab22
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a7edab22
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a7edab22

Branch: refs/heads/branch-2.4
Commit: a7edab2211856e096514bb0dd4db920d5d4d836c
Parents: 09050cb
Author: Andrii Tkach 
Authored: Thu Jun 9 13:43:27 2016 +0300
Committer: Andrii Tkach 
Committed: Thu Jun 9 16:57:31 2016 +0300

--
 ambari-web/app/templates/application.hbs| 2 +-
 ambari-web/app/templates/common/modal_popup.hbs | 8 
 ambari-web/app/utils/host_progress_popup.js | 7 ---
 3 files changed, 5 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a7edab22/ambari-web/app/templates/application.hbs
--
diff --git a/ambari-web/app/templates/application.hbs 
b/ambari-web/app/templates/application.hbs
index 9c17223..93f0681 100644
--- a/ambari-web/app/templates/application.hbs
+++ b/ambari-web/app/templates/application.hbs
@@ -81,7 +81,7 @@
   {{/if}}
 {{/if}}
 {{#if isExistingClusterDataLoaded}}
-  {{#isAuthorized "CLUSTER.UPGRADE_DOWNGRADE_STACK"}}
+  {{#isAuthorized "AMBARI.MANAGE_SETTINGS"}}
 {{t app.settings}}
   {{/isAuthorized}}
 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7edab22/ambari-web/app/templates/common/modal_popup.hbs
--
diff --git a/ambari-web/app/templates/common/modal_popup.hbs 
b/ambari-web/app/templates/common/modal_popup.hbs
index e4537e9..2e6d13e 100644
--- a/ambari-web/app/templates/common/modal_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popup.hbs
@@ -47,10 +47,10 @@
   {{view view.footerClass}}
 {{else}}
   
-{{#if view.hasFooterCheckbox}}
-  {{view Ember.Checkbox 
classNames="checkbox" checkedBinding="view.isNotShowBgChecked"}}  
-  {{t app.settings.notShowBgOperations}}
-{{/if}}
+  {{#isAuthorized "AMBARI.MANAGE_SETTINGS"}}
+{{view Ember.Checkbox 
classNames="checkbox" checkedBinding="view.isNotShowBgChecked"}}
+   {{t app.settings.notShowBgOperations}}
+  {{/isAuthorized}}
 {{#if view.third}}
   {{view.third}}
 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7edab22/ambari-web/app/utils/host_progress_popup.js
--
diff --git a/ambari-web/app/utils/host_progress_popup.js 
b/ambari-web/app/utils/host_progress_popup.js
index 20ea220..6e0dd2f 100644
--- a/ambari-web/app/utils/host_progress_popup.js
+++ b/ambari-web/app/utils/host_progress_popup.js
@@ -851,13 +851,6 @@ App.HostPopup = Em.Object.create({
   classNames: ['sixty-percent-width-modal', 'host-progress-popup', 
'full-height-modal'],
 
   /**
-   * for the checkbox: do not show this dialog again
-   *
-   * @type {bool}
-   */
-  hasFooterCheckbox: true,
-
-  /**
* Auto-display BG-popup
*
* @type {bool}



[04/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
index a513c68..2bee181 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
@@ -24,238 +24,204 @@
 namenode_host
 
 NameNode Host.
-
-
+
   
   
 dfs_namenode_name_dir
 /hadoop/hdfs/namenode
 NameNode Directories.
-
-
+
   
   
 snamenode_host
 
 Secondary NameNode.
-
-
+
   
   
 dfs_namenode_checkpoint_dir
 /hadoop/hdfs/namesecondary
 Secondary NameNode checkpoint dir.
-
-
+
   
   
 datanode_hosts
 
 List of Datanode Hosts.
-
-
+
   
   
 dfs_datanode_data_dir
 /hadoop/hdfs/data
 Data directories for Data Nodes.
-
-
+
   
   
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 dfs_webhdfs_enabled
 true
 WebHDFS enabled
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 640
 NameNode maximum new generation size
-
-
+
   
   
 namenode_opt_permsize
 128
 NameNode permanent generation size
-
-
+
   
   
 namenode_opt_maxpermsize
 256
 NameNode maximum permanent generation size
-
-
+
   
   
 datanode_du_reserved
 1
 Reserved space for HDFS
-
-
+
   
   
 dtnode_heapsize
 1024
 DataNode maximum Java heap size
-
-
+
   
   
 dfs_datanode_failed_volume_tolerated
 0
 DataNode volumes failure toleration
-
-
+
   
   
 dfs_namenode_checkpoint_period
 21600
 HDFS Maximum Checkpoint Delay
-
-
+
   
   
 fs_checkpoint_size
 0.5
 FS Checkpoint Size.
-
-
+
   
   
 proxyuser_group
 users
 Proxy user group.
-
-
+
   
   
 dfs_exclude
 
 HDFS Exclude hosts.
-
-
+
   
   
 dfs_replication
 3
 Default Block Replication.
-
-
+
   
   
 dfs_block_local_path_access_user
 hbase
 Default Block Replication.
-
-
+
   
   
 dfs_datanode_address
 50010
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_http_address
 50075
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_data_dir_perm
 750
 Datanode dir perms.
-
-
+
   
   
 security_enabled
 false
 Hadoop Security
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 kadmin_pw
 
 Kerberos realm admin password
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 Kerberos keytab path.
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 KeyTab Directory.
-
-
+
   
   
 namenode_formatted_mark_dir
 /var/run/hadoop/hdfs/namenode/formatted/
 Formatteed Mark Directory.
-
-
+
   
   
 hdfs_user
 hdfs
 User and Groups.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index 93cc9ab..a31a481 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -26,8 +26,7 @@
 The ACL is a comma-separated list of user and group names. The user and
 group list is

[07/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
index 1be7157..ac4279f 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
@@ -28,39 +28,34 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.socket.write.timeout
 0
 DFS Client write socket timeout
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -69,8 +64,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -82,8 +76,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -92,8 +85,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -102,31 +94,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -137,8 +125,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -148,27 +135,23 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 
-
-
+
   
   
 dfs.datanode.http.address
 
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -176,8 +159,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -185,8 +167,7 @@ literal string "local" or a host:port for 
HDFS.
 
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -195,29 +176,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 4096
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -226,8 +203,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -236,8 +212,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -249,28 +224,24 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
-
-
+
   
   
 dfs.block.access.token.enable
@@ -279,8 +250,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+   

[23/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
index e297041..43e2473 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -22,8 +22,7 @@
 yarn.application.classpath
 
$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*
 Classpath for typical applications.
-
-
+
   
   
 hadoop.registry.rm.enabled
@@ -31,8 +30,7 @@
 
   Is the registry enabled: does the RM start it up, create the user and 
system paths, and purge service records when containers, application attempts 
and applications complete
 
-
-
+
   
   
 hadoop.registry.zk.quorum
@@ -40,15 +38,13 @@
 
   List of hostname:port pairs defining the zookeeper quorum binding for 
the registry
 
-
-
+
   
   
 yarn.nodemanager.recovery.enabled
 true
 Enable the node manager to recover after 
starting
-
-
+
   
   
 yarn.nodemanager.recovery.dir
@@ -57,22 +53,19 @@
   The local filesystem directory in which the node manager will store
   state when recovery is enabled.
 
-
-
+
   
   
 yarn.client.nodemanager-connect.retry-interval-ms
 1
 Time interval between each attempt to connect to 
NM
-
-
+
   
   
 yarn.client.nodemanager-connect.max-wait-ms
 6
 Max time to wait to establish a connection to NM
-
-
+
   
   
 yarn.resourcemanager.recovery.enabled
@@ -81,8 +74,7 @@
   Enable RM to recover state after starting.
   If true, then yarn.resourcemanager.store.class must be specified.
 
-
-
+
   
   
 yarn.resourcemanager.work-preserving-recovery.enabled
@@ -90,8 +82,7 @@
 
   Enable RM work preserving recovery. This configuration is private to 
YARN for experimenting the feature.
 
-
-
+
   
   
 yarn.resourcemanager.store.class
@@ -102,8 +93,7 @@
   the store is implicitly fenced; meaning a single ResourceManager
   is able to use the store at any point in time.
 
-
-
+
   
   
 yarn.resourcemanager.zk-address
@@ -111,43 +101,37 @@
 
   List Host:Port of the ZooKeeper servers to be used by the RM. comma 
separated host:port pairs, each corresponding to a zk server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is 
used the example would look like: 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be 
rooted at "/app/a" and all paths would be relative to this root - ie 
getting/setting/etc...  "/foo/bar" would result in operations being run on 
"/app/a/foo/bar" (from the server perspective).
 
-
-
+
   
   
 yarn.resourcemanager.zk-state-store.parent-path
 /rmstore
 Full path of the ZooKeeper znode where RM state will be 
stored. This must be supplied when using 
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the 
value for yarn.resourcemanager.store.class
-
-
+
   
   
 yarn.resourcemanager.zk-acl
 world:anyone:rwcda
 ACL's to be used for ZooKeeper znodes.
-
-
+
   
   
 
yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
 1
 Set the amount of time RM waits before allocating new 
containers on work-preserving-recovery. Such wait period gives RM a chance to 
settle down resyncing with NMs in the cluster on recovery, before assigning new 
containers to applications.
-
-
+
   
   
 yarn.resourcemanager.connect.retry-interval.ms
 3
 How often to try connecting to the 
ResourceManager.
-
-
+
   
   
 yarn.resourcemanager.connect.max-wait.ms
 90
 Maximum time to wait to establish connection to 
ResourceManager
-
-
+
   
   
 yarn.resourcemanager.zk-retry-interval-ms
@@ -157,78 +141,67 @@
   automatically from yarn.resourcemanager.zk-timeout-ms and
   yarn.resourcemanager.zk-num-retries."
 
-
-
+
   
   
 yarn.resourcemanager.zk-num-retries
 1000
 Number of times RM tries to connect to 
ZooKeeper.
-
-
+
   
   
 yarn.resourcemanager.zk-timeout-ms
 1
 ZooKeeper session timeout in milliseconds. Session expiration 
is manag

[55/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
index 1be7157..ac4279f 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.1/services/HDFS/configuration/hdfs-site.xml
@@ -28,39 +28,34 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.socket.write.timeout
 0
 DFS Client write socket timeout
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -69,8 +64,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -82,8 +76,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -92,8 +85,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -102,31 +94,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -137,8 +125,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -148,27 +135,23 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 
-
-
+
   
   
 dfs.datanode.http.address
 
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -176,8 +159,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -185,8 +167,7 @@ literal string "local" or a host:port for 
HDFS.
 
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -195,29 +176,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 4096
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -226,8 +203,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -236,8 +212,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -249,28 +224,24 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
-
-
+
   
   
 dfs.block.access.token.enable
@@ -279,8 +250,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+   

[53/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
index d724c44..450bc21 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -23,29 +23,25 @@
 io.sort.mb
 100
 No description
-
-
+
   
   
 io.sort.record.percent
 .2
 No description
-
-
+
   
   
 io.sort.spill.percent
 0.1
 No description
-
-
+
   
   
 io.sort.factor
 100
 No description
-
-
+
   
   
   
@@ -53,8 +49,7 @@
 
 No description
 true
-
-
+
   
   
 
@@ -62,44 +57,38 @@
 
 No description
 true
-
-
+
   
   
 mapred.reduce.parallel.copies
 30
 No description
-
-
+
   
   
 mapred.tasktracker.map.tasks.maximum
 
 No description
-
-
+
   
   
 mapred.map.tasks.speculative.execution
 false
 If true, then multiple instances of some map tasks
may be executed in parallel.
-
-
+
   
   
 mapred.reduce.tasks.speculative.execution
 false
 If true, then multiple instances of some reduce tasks
may be executed in parallel.
-
-
+
   
   
 mapred.reduce.slowstart.completed.maps
 0.05
-
-
+
   
   
 mapred.inmem.merge.threshold
@@ -110,8 +99,7 @@
   0 indicates we want to DON'T have any threshold and instead depend only on
   the ramfs's memory consumption to trigger the merge.
   
-
-
+
   
   
 mapred.job.shuffle.merge.percent
@@ -121,8 +109,7 @@
   storing in-memory map outputs, as defined by
   mapred.job.shuffle.input.buffer.percent.
   
-
-
+
   
   
 mapred.job.shuffle.input.buffer.percent
@@ -130,8 +117,7 @@
 The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
   
-
-
+
   
   
 mapred.map.output.compression.codec
@@ -139,8 +125,7 @@
 If the map outputs are compressed, how should they be
   compressed
 
-
-
+
   
   
 mapred.output.compression.type
@@ -148,8 +133,7 @@
 If the job outputs are to compressed as SequenceFiles, how 
should
they be compressed? Should be one of NONE, RECORD or BLOCK.
   
-
-
+
   
   
 mapred.job.reduce.input.buffer.percent
@@ -159,8 +143,7 @@
   remaining map outputs in memory must consume less than this threshold before
   the reduce can begin.
   
-
-
+
   
   
 mapreduce.reduce.input.limit
@@ -169,15 +152,13 @@
   is 10 Gb.)  If the estimated input size of the reduce is greater than
   this value, job is failed. A value of -1 means that there is no limit
   set. 
-
-
+
   
   
   
 mapred.compress.map.output
 
-
-
+
   
   
 mapred.task.timeout
@@ -186,46 +167,39 @@
   terminated if it neither reads an input, writes an output, nor
   updates its status string.
   
-
-
+
   
   
 jetty.connector
 org.mortbay.jetty.nio.SelectChannelConnector
 No description
-
-
+
   
   
 mapred.child.root.logger
 INFO,TLA
-
-
+
   
   
 mapred.child.java.opts
 -Xmx512m
 No description
-
-
+
   
   
 mapred.cluster.reduce.memory.mb
 
-
-
+
   
   
 mapred.job.map.memory.mb
 1024
-
-
+
   
   
 mapred.job.reduce.memory.mb
 1024
-
-
+
   
   
 mapred.max.tracker.blacklists
@@ -233,41 +207,35 @@
 
 if node is reported blacklisted by 16 successful jobs within 
timeout-window, it will be graylisted
   
-
-
+
   
   
 mapred.healthChecker.script.path
 
-
-
+
   
   
 mapred.healthChecker.script.timeout
 6
-
-
+
   
   
 mapred.task.maxvmem
 
 true
 No description
-
-
+
   
   
 mapreduce.fileoutputcommitter.marksuccessfuljobs
 false
-
-
+
   
   
 mapreduce.tasktracker.keytab.file
 
 The filename of the keytab for the task tracker
-
-
+
   
   
 mapreduce.jobtracker.split.metainfo.maxsize
@@ -276,53 +244,49 @@
 If the size of the split metainfo file is larger than this, 
the JobTracker will fail the job during
 initialize.

-
-
+
   
   
 mapreduce.jobhistory.keytab.file
 
 
 The keytab for the job history server principal.
-
-
+
   
 

[13/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", n

[49/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
index 37c35d8..2c0aeca 100644
--- 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
@@ -24,168 +24,144 @@
 namenode_host
 
 NameNode Host.
-
-
+
   
   
 dfs_name_dir
 /hadoop/hdfs/namenode
 NameNode Directories.
-
-
+
   
   
 snamenode_host
 
 Secondary NameNode.
-
-
+
   
   
 fs_checkpoint_dir
 /hadoop/hdfs/namesecondary
 Secondary NameNode checkpoint dir.
-
-
+
   
   
 datanode_hosts
 
 List of Datanode Hosts.
-
-
+
   
   
 dfs_data_dir
 /hadoop/hdfs/data
 Data directories for Data Nodes.
-
-
+
   
   
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 dfs_webhdfs_enabled
 true
 WebHDFS enabled
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 640
 NameNode maximum new generation size
-
-
+
   
   
 namenode_opt_permsize
 128
 NameNode permanent generation size
-
-
+
   
   
 namenode_opt_maxpermsize
 256
 NameNode maximum permanent generation size
-
-
+
   
   
 datanode_du_reserved
 1
 Reserved space for HDFS
-
-
+
   
   
 dtnode_heapsize
 1024
 DataNode maximum Java heap size
-
-
+
   
   
 dfs_datanode_failed_volume_tolerated
 0
 DataNode volumes failure toleration
-
-
+
   
   
 fs_checkpoint_period
 21600
 HDFS Maximum Checkpoint Delay
-
-
+
   
   
 fs_checkpoint_size
 0.5
 FS Checkpoint Size.
-
-
+
   
   
 security_enabled
 false
 Hadoop Security
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 KeyTab Directory.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
index c60b565..d5fa858 100644
--- 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
+++ 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
@@ -24,64 +24,55 @@
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 200
 NameNode maximum new generation size
-
-
+

[16/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
 
b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
index 7c5365b..c706178 100644
--- 
a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
+++ 
b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
@@ -21,190 +21,163 @@ limitations under the License.
 ambari.hive.db.schema.name
 hive
 Database name used as the Hive Metastore
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 jdbc
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 com.mysql.jdbc.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionUserName
 hive
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
  
 password to use against metastore database
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.kerberos.keytab.file
 
 The path to the Kerberos Keytab file containing the metastore
  thrift server's service principal.
-
-
+
   
   
 hive.metastore.kerberos.principal
 
 The service principal for the metastore thrift server. The 
special
 string _HOST will be replaced automatically with the correct host 
name.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 thrift://localhost:9083
 URI for client to contact metastore server
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 false
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.security.metastore.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 The authorization manager class name to be used in the 
metastore for authorization. The user-defined authorization class should 
implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
  
-
-
+
   
   
 hive.security.authenticator.manager
 org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator
 Hive client authenticator manager class name. The 
user-defined authenticator class should implement interface 
org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  
-
-
+
   
   
 hive.server2.enable.doAs
 true
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 true
-
-
+
   
   
 fs.file.impl.disable.cache
 true
-
-
+
   
   
 hive.enforce.bucketing
 true
 Whether bucketing is enforced. If true, while inserting into 
the table, bucketing is enforced.
-
-
+
   
   
 hive.enforce.sorting
 true
 Whether sorting is enforced. If true, while inserting into 
the table, sorting is enforced.
-
-
+
   
   
 hive.map.aggr
 true
 Whether to use map-side aggregation in Hive Group By 
queries.
-
-
+
   
   
 hive.optimize.bucke

[41/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
index 22d00d3..d5a36c2 100644
--- 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
@@ -24,8 +24,7 @@
 ambari-qa
 Policy user for KNOX
 This user must be system user and also present at Ranger 
admin portal
-
-
+
   
   
 common.name.for.certificate
@@ -34,8 +33,7 @@
 
   true
 
-
-
+
   
   
 ranger-knox-plugin-enabled
@@ -52,16 +50,14 @@
   boolean
   false
 
-
-
+
   
   
 REPOSITORY_CONFIG_USERNAME
 admin
 Ranger repository config user
 Used for repository creation on ranger admin
-
-
+
   
   
 REPOSITORY_CONFIG_PASSWORD
@@ -72,16 +68,14 @@
 
   password
 
-
-
+
   
   
 KNOX_HOME
 /usr/local/knox-server
 Knox Home
 Knox home folder
-
-
+
   
   
 XAAUDIT.DB.IS_ENABLED
@@ -97,8 +91,7 @@
 xasecure.audit.destination.db
   
 
-
-
+
   
   
 XAAUDIT.HDFS.IS_ENABLED
@@ -114,8 +107,7 @@
 xasecure.audit.destination.hdfs
   
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINATION_DIRECTORY
@@ -128,71 +120,61 @@
 xasecure.audit.destination.hdfs.dir
   
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY
 __REPLACE__LOG_DIR/hadoop/%app-type%/audit
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY
 __REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FILE
 %hostname%-audit.log
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS
 900
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS
 86400
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS
 60
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FILE
 %time:MMdd-HHmm.ss%.log
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS
 60
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS
 600
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT
@@ -201,30 +183,26 @@
 
   password
 
-
-
+
   
   
 SSL_KEYSTORE_FILE_PATH
 /etc/hadoop/conf/ranger-plugin-keystore.jks
 
-
-
+
   
   
 SSL_KEYSTORE_PASSWORD
 myKeyFilePassword
 PASSWORD
 
-
-
+
   
   
 SSL_TRUSTSTORE_FILE_PATH
 /etc/hadoop/conf/ranger-plugin-truststore.jks
 
-
-
+
   
   
 SSL_TRUSTSTORE_PASSWORD
@@ -234,43 +212,37 @@
 
   password
 
-
-
+
   
   
 POLICY_MGR_URL
 {{policymgr_mgr_url}}
 Policy Manager url
-
-
+
   
   
 SQL_CONNECTOR_JAR
 {{sql_connector_jar}}
 Location of DB client library (please check the location of 
the jar file)
-
-
+
   
   
 XAAUDIT.DB.FLAVOUR
 {{xa_audit_db_flavor}}
 The database type to be used (mysql/oracle)
-
-
+
   
   
 XAAUDIT.DB.DATABASE_NAME
 {{xa_audit_db_name}}
 Audit database name
-
-
+
   
   
 XAAUDIT.DB.USER_NAME
 {{xa_audit_db_user}}
 Audit database user
-
-
+
   
   
 XAAUDIT.DB.PASSWORD
@@ -280,21 +252,18 @@
 
   password
 
-
-
+
   
   
 XAAUDIT.DB.HOSTNAME
 {{xa_db_host}}
 Audit database hostname
-
-
+
   
   
 REPOSITORY_NAME
 {{repo_name}}
 Ranger repository name
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
index a3d3d4f..9847c8b 100644
--- 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
+++ 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
@@ -128,7 +128,6 @@
 ranger-knox-plugin-enabled
   
 
-
-
+
   
 


[21/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
index 7112a18..185056e 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
@@ -21,14 +21,12 @@
   
 yarn.node-labels.manager-class
 true
-
-
+
   
   
 yarn.timeline-service.recovery.enabled
 true
-
-
+
   
   
 yarn.acl.enable
@@ -40,8 +38,7 @@
 ranger-yarn-plugin-enabled
   
 
-
-
+
   
   
 yarn.authorization-provider
@@ -52,8 +49,7 @@
 ranger-yarn-plugin-enabled
   
 
-
-
+
   
   
 yarn.admin.acl
@@ -62,37 +58,32 @@
 
   true
 
-
-
+
   
   
   
 yarn.timeline-service.version
 1.5
 Timeline service version we’re currently 
using.
-
-
+
   
   
 yarn.timeline-service.store-class
 
org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
 Main storage class for YARN timeline server.
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.active-dir
 /ats/active/
 DFS path to store active application’s timeline 
data
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.done-dir
 /ats/done/
 DFS path to store done application’s timeline 
data
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
@@ -101,36 +92,31 @@
 
   true
 
-
-
+
   
   
   
 yarn.timeline-service.entity-group-fs-store.summary-store
 
 
org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
 
 60
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
 
 3600
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.retain-seconds
 
 604800
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
index 43eadea..4e4d23d 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
@@ -22,14 +22,12 @@
 mapreduce_shuffle,spark_shuffle
 Auxilliary services of NodeManager. A valid service name 
should only contain a-zA-Z0-9_ and can
   not start with numbers
-
-
+
   
   
 yarn.nodemanager.aux-services.spark_shuffle.class
 org.apache.spark.network.yarn.YarnShuffleService
 The auxiliary service class to use for Spark
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
index 2c4426b..0b45f48 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
@@ -24,182 +24,156 @@
 atlas.server.ha.enabled
 false
 Atlas high availability feature toggle.
-
-
+
   
   
 atlas.server.ids
-
+
 List of Atlas server ids for HA feature.
 
   false
   false
 
-
-
+
   
   
 atlas.server.address.id1
-
+
 Mapping of Atlas server ids to hosts.
 
   false
   false
 
-
-
+
   
   
 atlas.graph.storage.backend
 hbase
 
-
-
+
   
   
 atlas.graph.storage.hostname
-
+
 
-
-
+
   
   
 atlas.audit.hbase.zookeeper.quorum
-
+
 
-
-
+
   
   
 atlas.graph.index.search.backend
 solr5
 The Atlas indexing backend (e.g. solr5).
-
-
+
   
   
 atlas.graph

[15/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datan

[32/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
index 9c73abf..805a2f2 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 true
 Whether to enable WebHDFS feature
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
  Number of failed disks a DataNode would tolerate before it 
stops offering service
 true
-
-
+
   
   
 dfs.datanode.data.dir
@@ -65,8 +61,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -75,8 +70,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
   
@@ -273,8 +247,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.permissions.enabled
@@ -286,22 +259,19 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.superusergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 dfs.block.access.token.enable
@@ -310,8 +280,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+
   
   
 dfs.namenode.kerberos.principal
@@ -319,8 +288,7 @@ If "false", no access tokens are checked on accessing 
datanodes.
 
 Kerberos principal name for the NameNode
 
-
-
+
   
   
 dfs.secondary.namenode.kerberos.principal
@@ -328,8 +296,7 @@ Kerberos principal name for the NameNode
 
 Kerberos principal name for the secondary NameNode.
 
-
-
+
   
   
 dfs.namenode.secondary.http-address
 localhost:50090
 Address of secondary namenode web server
-
-
+
   
   
 dfs.web.authentication.kerberos.principal
@@ -364,8 +328,7 @@ Kerberos principal name for the NameNode
   The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
   HTTP SPENGO specification.
 
-
-
+
   
   
 dfs.web.authentication.kerberos.keytab
@@ -374,8 +337,7 @@ Kerberos principal name for the NameNode
   The Kerberos keytab file with the credentials for the
   HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
 
-
-
+
   
   
 dfs.datanode.kerberos.principal
@@ -383,8 +345,7 @@ Kerberos principal name for the NameNode
 
 The Kerberos principal that the DataNode runs as. "_HOST" is replaced 
by the real host name.
 
-
-
+
   
   
 dfs.namenode.keytab.file
@@ -392,8 +353,7 @@ Kerberos principal name for the NameNode
 
 Combined keytab file containing the namenode service and host 
principals.
 
-
-
+
   
   
 dfs.secondary.namenode.keytab.file
@@ -401,8 +361,7 @@ Kerberos principal name for the NameNode
 
 Combined keytab file containing the namenode service and host 
principals.
 
-
-
+
   
   
 dfs.datanode.keytab.file
@@ -410,15 +369,13 @@ Kerberos principal name for the NameNode
 
 The filename of the keytab file for the DataNode.
 
-
-
+
   
   
 dfs.namenode.https-address
 localhost:50470
 The https address where namenode binds
-
-
+
   
   
 dfs.datanode.data.dir.perm
@@ -427,8 +384,7 @@ Kerberos principal name for the NameNode
 directories. The datanode will not come up if the permissions are
 different on existing dfs.datanode.data.dir directories. If the directories
 don't exist, they will be created with this permission.
-
-
+
   
   
 dfs.namenode.accesstime.precision
@@ -437,15 +393,13 @@ don't exist, they will be created with this

[24/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
index 8f28baf..2e15558 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
@@ -24,44 +24,38 @@
 audit_log_level
 OFF
 Log level for audit logging
-
-
+
   
   
 monitor_forwarding_log_level
 WARN
 Log level for logging forwarded to the Accumulo
   Monitor
-
-
+
   
   
 debug_log_size
 512M
 Size of each debug rolling log file
-
-
+
   
   
 debug_num_logs
 10
 Number of rolling debug log files to keep
-
-
+
   
   
 info_log_size
 512M
 Size of each info rolling log file
-
-
+
   
   
 info_num_logs
 10
 Number of rolling info log files to keep
-
-
+
   
   
 content
@@ -115,7 +109,6 @@ log4j.appender.A1.layout=org.apache.log4j.PatternLayout
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index f0b16c6..6b79265 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -23,29 +23,25 @@
 fs.AbstractFileSystem.glusterfs.impl
 org.apache.hadoop.fs.local.GlusterFs
 GlusterFS Abstract File System Implementation
-
-
+
   
   
 fs.glusterfs.impl
 GlusterFS fs impl
 org.apache.hadoop.fs.glusterfs.GlusterFileSystem
-
-
+
   
   
 fs.defaultFS
 glusterfs:///localhost:8020
-
-
+
   
   
   
 ha.failover-controller.active-standby-elector.zk.op.retries
 120
 ZooKeeper Failover Controller retries setting for your 
environment
-
-
+
   
   
   
@@ -55,24 +51,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.
-
-
+
   
   
 io.serializations
 org.apache.hadoop.io.serializer.WritableSerialization
  A list of comma-delimited serialization classes that can be 
used for obtaining serializers and deserializers.
 
-
-
+
   
   
 io.compression.codecs
 
org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
 A list of the compression codec classes that can be used
  for compression/decompression.
-
-
+
   
   
 fs.trash.interval
@@ -83,8 +76,7 @@
 If trash is disabled server side then the client side configuration is 
checked.
 If trash is enabled on the server side then the value configured on 
the server is used and the client configuration value is ignored.
 
-
-
+
   
   
   
@@ -93,8 +85,7 @@
 Defines the threshold number of connections after which
connections will be inspected for idleness.
   
-
-
+
   
   
 ipc.client.connection.maxidletime
@@ -102,15 +93,13 @@
 The maximum time after which a client will bring down the
connection to the server.
   
-
-
+
   
   
 ipc.client.connect.max.retries
 50
 Defines the maximum number of retries for IPC 
connections.
-
-
+
   
   
 ipc.server.tcpnodelay
@@ -121,8 +110,7 @@
   decrease latency
   with a cost of more/smaller packets.
 
-
-
+
   
   
   
@@ -133,8 +121,7 @@
 not be exposed to public. Enable this option if the interfaces
 are only reachable by those who have the right authorization.
   
-
-
+
   
   
 hadoop.security.authentication
@@ -143,8 +130,7 @@
Set the authentication for the cluster. Valid values are: simple or
kerberos.

-
-
+
   
   
 hadoop.security.authorization
@@ -152,8 +138,7 @@
 
  Enable authorization for differe

[01/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
Repository: ambari
Updated Branches:
  refs/heads/branch-2.4 a7edab221 -> a998371a4
  refs/heads/trunk b57a7cfa2 -> 4c5cf30ee


http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
index 37c35d8..2c0aeca 100644
--- 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/global.xml
@@ -24,168 +24,144 @@
 namenode_host
 
 NameNode Host.
-
-
+
   
   
 dfs_name_dir
 /hadoop/hdfs/namenode
 NameNode Directories.
-
-
+
   
   
 snamenode_host
 
 Secondary NameNode.
-
-
+
   
   
 fs_checkpoint_dir
 /hadoop/hdfs/namesecondary
 Secondary NameNode checkpoint dir.
-
-
+
   
   
 datanode_hosts
 
 List of Datanode Hosts.
-
-
+
   
   
 dfs_data_dir
 /hadoop/hdfs/data
 Data directories for Data Nodes.
-
-
+
   
   
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 dfs_webhdfs_enabled
 true
 WebHDFS enabled
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 640
 NameNode maximum new generation size
-
-
+
   
   
 namenode_opt_permsize
 128
 NameNode permanent generation size
-
-
+
   
   
 namenode_opt_maxpermsize
 256
 NameNode maximum permanent generation size
-
-
+
   
   
 datanode_du_reserved
 1
 Reserved space for HDFS
-
-
+
   
   
 dtnode_heapsize
 1024
 DataNode maximum Java heap size
-
-
+
   
   
 dfs_datanode_failed_volume_tolerated
 0
 DataNode volumes failure toleration
-
-
+
   
   
 fs_checkpoint_period
 21600
 HDFS Maximum Checkpoint Delay
-
-
+
   
   
 fs_checkpoint_size
 0.5
 FS Checkpoint Size.
-
-
+
   
   
 security_enabled
 false
 Hadoop Security
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 KeyTab Directory.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
index c60b565..d5fa858 100644
--- 
a/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
+++ 
b/ambari-server/src/test/resources/stacks_with_common_services/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
@@ -24,64 +24,55 @@
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-X

[47/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Patch 1 - change validation rules and available fields (dlysnichenko)

2016-06-09 Thread dmitriusan
AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Patch 1 - 
change validation rules and available fields (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3f487193
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3f487193
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3f487193

Branch: refs/heads/branch-2.4
Commit: 3f4871939db94e9e05d0b3e43995db106c33dcc1
Parents: a7edab2
Author: Lisnichenko Dmitro 
Authored: Thu Jun 9 16:15:49 2016 +0300
Committer: Lisnichenko Dmitro 
Committed: Thu Jun 9 17:15:48 2016 +0300

--
 .../ambari/server/state/PropertyInfo.java   | 23 +++-
 .../server/state/PropertyUpgradeBehavior.java   | 16 +++---
 .../configurations-set-default-update-policy.sh | 11 +++---
 .../ambari/server/state/PropertyInfoTest.java   | 19 +---
 4 files changed, 19 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f487193/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index fba2daa..c570ab3 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -28,7 +28,6 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlAnyElement;
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlElementWrapper;
 import javax.xml.bind.annotation.XmlList;
 
@@ -51,9 +50,6 @@ public class PropertyInfo {
   private String filename;
   private boolean deleted;
 
-  @XmlElement(name="on-stack-upgrade", required = true)
-  private PropertyUpgradeBehavior propertyStackUpgradeBehavior;
-
   @XmlElement(name="on-ambari-upgrade", required = true)
   private PropertyUpgradeBehavior propertyAmbariUpgradeBehavior;
 
@@ -89,15 +85,10 @@ public class PropertyInfo {
   }
 
   public PropertyInfo() {
-propertyStackUpgradeBehavior = new PropertyUpgradeBehavior();
-propertyStackUpgradeBehavior.setAdd(true);
-propertyStackUpgradeBehavior.setChange(true);
-propertyStackUpgradeBehavior.setDelete(false);
-
 propertyAmbariUpgradeBehavior = new PropertyUpgradeBehavior();
-propertyAmbariUpgradeBehavior.setAdd(false);
-propertyAmbariUpgradeBehavior.setChange(true);
-propertyAmbariUpgradeBehavior.setDelete(true);
+propertyAmbariUpgradeBehavior.setAdd(true);
+propertyAmbariUpgradeBehavior.setUpdate(false);
+propertyAmbariUpgradeBehavior.setDelete(false);
   }
 
   public String getName() {
@@ -148,14 +139,6 @@ public class PropertyInfo {
 this.propertyTypes = propertyTypes;
   }
 
-  public PropertyUpgradeBehavior getPropertyStackUpgradeBehavior() {
-return propertyStackUpgradeBehavior;
-  }
-
-  public void setPropertyStackUpgradeBehavior(PropertyUpgradeBehavior 
propertyStackUpgradeBehavior) {
-this.propertyStackUpgradeBehavior = propertyStackUpgradeBehavior;
-  }
-
   public PropertyUpgradeBehavior getPropertyAmbariUpgradeBehavior() {
 return propertyAmbariUpgradeBehavior;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f487193/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
index de2e342..f6791ee 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
@@ -33,15 +33,15 @@ public class PropertyUpgradeBehavior {
   @XmlAttribute(name="delete", required = true)
   private boolean delete;
 
-  @XmlAttribute(name="change", required = true)
-  private boolean change;
+  @XmlAttribute(name="update", required = true)
+  private boolean update;
 
   public PropertyUpgradeBehavior() {}
 
-  public PropertyUpgradeBehavior(boolean add, boolean delete, boolean change) {
+  public PropertyUpgradeBehavior(boolean add, boolean delete, boolean update) {
 this.add = add;
 this.delete = delete;
-this.change = change;
+this.update = update;
   }
 
   public void setAdd( boolean add )
@@ -54,9 +54,9 @@ public class PropertyUpgradeBehavior {
 this.delete = delete;
   }
 

[05/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
index 1e916a0..450bc21 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -23,29 +23,25 @@
 io.sort.mb
 100
 No description
-
-
+
   
   
 io.sort.record.percent
 .2
 No description
-
-
+
   
   
 io.sort.spill.percent
 0.1
 No description
-
-
+
   
   
 io.sort.factor
 100
 No description
-
-
+
   
   
   
@@ -53,8 +49,7 @@
 
 No description
 true
-
-
+
   
   
 
@@ -62,44 +57,38 @@
 
 No description
 true
-
-
+
   
   
 mapred.reduce.parallel.copies
 30
 No description
-
-
+
   
   
 mapred.tasktracker.map.tasks.maximum
 
 No description
-
-
+
   
   
 mapred.map.tasks.speculative.execution
 false
 If true, then multiple instances of some map tasks
may be executed in parallel.
-
-
+
   
   
 mapred.reduce.tasks.speculative.execution
 false
 If true, then multiple instances of some reduce tasks
may be executed in parallel.
-
-
+
   
   
 mapred.reduce.slowstart.completed.maps
 0.05
-
-
+
   
   
 mapred.inmem.merge.threshold
@@ -110,8 +99,7 @@
   0 indicates we want to DON'T have any threshold and instead depend only on
   the ramfs's memory consumption to trigger the merge.
   
-
-
+
   
   
 mapred.job.shuffle.merge.percent
@@ -121,8 +109,7 @@
   storing in-memory map outputs, as defined by
   mapred.job.shuffle.input.buffer.percent.
   
-
-
+
   
   
 mapred.job.shuffle.input.buffer.percent
@@ -130,8 +117,7 @@
 The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
   
-
-
+
   
   
 mapred.map.output.compression.codec
@@ -139,8 +125,7 @@
 If the map outputs are compressed, how should they be
   compressed
 
-
-
+
   
   
 mapred.output.compression.type
@@ -148,8 +133,7 @@
 If the job outputs are to compressed as SequenceFiles, how 
should
they be compressed? Should be one of NONE, RECORD or BLOCK.
   
-
-
+
   
   
 mapred.job.reduce.input.buffer.percent
@@ -159,8 +143,7 @@
   remaining map outputs in memory must consume less than this threshold before
   the reduce can begin.
   
-
-
+
   
   
 mapreduce.reduce.input.limit
@@ -169,15 +152,13 @@
   is 10 Gb.)  If the estimated input size of the reduce is greater than
   this value, job is failed. A value of -1 means that there is no limit
   set. 
-
-
+
   
   
   
 mapred.compress.map.output
 
-
-
+
   
   
 mapred.task.timeout
@@ -186,46 +167,39 @@
   terminated if it neither reads an input, writes an output, nor
   updates its status string.
   
-
-
+
   
   
 jetty.connector
 org.mortbay.jetty.nio.SelectChannelConnector
 No description
-
-
+
   
   
 mapred.child.root.logger
 INFO,TLA
-
-
+
   
   
 mapred.child.java.opts
 -Xmx512m
 No description
-
-
+
   
   
 mapred.cluster.reduce.memory.mb
 
-
-
+
   
   
 mapred.job.map.memory.mb
 1024
-
-
+
   
   
 mapred.job.reduce.memory.mb
 1024
-
-
+
   
   
 mapred.max.tracker.blacklists
@@ -233,41 +207,35 @@
 
 if node is reported blacklisted by 16 successful jobs within 
timeout-window, it will be graylisted
   
-
-
+
   
   
 mapred.healthChecker.script.path
 
-
-
+
   
   
 mapred.healthChecker.script.timeout
 6
-
-
+
   
   
 mapred.task.maxvmem
 
 true
 No description
-
-
+
   
   
 mapreduce.fileoutputcommitter.marksuccessfuljobs
 false
-
-
+
   
   
 mapreduce.tasktracker.keytab.file
 
 The filename of the keytab for the task tracker
-
-
+
   
   
 mapreduce.jobtracker.split.metainfo.maxsize
@@ -276,57 +244,49 @@
 If the size of the split metainfo file is larger than this, 
the JobTracker will fail the job during
 initialize.

-
-
+
   
   
 mapreduce.jobhistory.keytab.file
 
 
 The keytab for the job history server principal.
-
-
+
   
 

[52/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
index a513c68..2bee181 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
@@ -24,238 +24,204 @@
 namenode_host
 
 NameNode Host.
-
-
+
   
   
 dfs_namenode_name_dir
 /hadoop/hdfs/namenode
 NameNode Directories.
-
-
+
   
   
 snamenode_host
 
 Secondary NameNode.
-
-
+
   
   
 dfs_namenode_checkpoint_dir
 /hadoop/hdfs/namesecondary
 Secondary NameNode checkpoint dir.
-
-
+
   
   
 datanode_hosts
 
 List of Datanode Hosts.
-
-
+
   
   
 dfs_datanode_data_dir
 /hadoop/hdfs/data
 Data directories for Data Nodes.
-
-
+
   
   
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 dfs_webhdfs_enabled
 true
 WebHDFS enabled
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 640
 NameNode maximum new generation size
-
-
+
   
   
 namenode_opt_permsize
 128
 NameNode permanent generation size
-
-
+
   
   
 namenode_opt_maxpermsize
 256
 NameNode maximum permanent generation size
-
-
+
   
   
 datanode_du_reserved
 1
 Reserved space for HDFS
-
-
+
   
   
 dtnode_heapsize
 1024
 DataNode maximum Java heap size
-
-
+
   
   
 dfs_datanode_failed_volume_tolerated
 0
 DataNode volumes failure toleration
-
-
+
   
   
 dfs_namenode_checkpoint_period
 21600
 HDFS Maximum Checkpoint Delay
-
-
+
   
   
 fs_checkpoint_size
 0.5
 FS Checkpoint Size.
-
-
+
   
   
 proxyuser_group
 users
 Proxy user group.
-
-
+
   
   
 dfs_exclude
 
 HDFS Exclude hosts.
-
-
+
   
   
 dfs_replication
 3
 Default Block Replication.
-
-
+
   
   
 dfs_block_local_path_access_user
 hbase
 Default Block Replication.
-
-
+
   
   
 dfs_datanode_address
 50010
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_http_address
 50075
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_data_dir_perm
 750
 Datanode dir perms.
-
-
+
   
   
 security_enabled
 false
 Hadoop Security
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 kadmin_pw
 
 Kerberos realm admin password
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 Kerberos keytab path.
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 KeyTab Directory.
-
-
+
   
   
 namenode_formatted_mark_dir
 /var/run/hadoop/hdfs/namenode/formatted/
 Formatteed Mark Directory.
-
-
+
   
   
 hdfs_user
 hdfs
 User and Groups.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index 93cc9ab..a31a481 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -26,8 +26,7 @@
 The ACL is a comma-separated list of user and group names. The user and
 group list is

[28/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
index 4b236db..647c0f4 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -23,22 +23,19 @@
 yarn.resourcemanager.hostname
 localhost
 The hostname of the RM.
-
-
+
   
   
 yarn.resourcemanager.resource-tracker.address
 localhost:8025
  The address of ResourceManager. 
-
-
+
   
   
 yarn.resourcemanager.scheduler.address
 localhost:8030
 The address of the scheduler interface.
-
-
+
   
   
 yarn.resourcemanager.address
@@ -47,22 +44,19 @@
   The address of the applications manager interface in the
   RM.
 
-
-
+
   
   
 yarn.resourcemanager.admin.address
 localhost:8141
 The address of the RM admin interface.
-
-
+
   
   
 yarn.resourcemanager.scheduler.class
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
 The class to use as the resource scheduler.
-
-
+
   
   
 yarn.scheduler.minimum-allocation-mb
@@ -72,8 +66,7 @@
   in MBs. Memory requests lower than this won't take effect,
   and the specified value will get allocated at minimum.
 
-
-
+
   
   
 yarn.scheduler.maximum-allocation-mb
@@ -83,45 +76,39 @@
   in MBs. Memory requests higher than this won't take effect,
   and will get capped to this value.
 
-
-
+
   
   
 yarn.acl.enable
 false
  Are acls enabled. 
-
-
+
   
   
 yarn.admin.acl
 
  ACL of who can be admin of the YARN cluster. 
-
-
+
   
   
   
 yarn.nodemanager.address
 0.0.0.0:45454
 The address of the container manager in the NM.
-
-
+
   
   
 yarn.nodemanager.resource.memory-mb
 5120
 Amount of physical memory, in MB, that can be allocated
   for containers.
-
-
+
   
   
 yarn.application.classpath
 
/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
 Classpath for typical applications.
-
-
+
   
   
 yarn.nodemanager.vmem-pmem-ratio
@@ -131,37 +118,32 @@
   expressed in terms of physical memory, and virtual memory usage
   is allowed to exceed this allocation by this ratio.
 
-
-
+
   
   
 yarn.nodemanager.container-executor.class
 
org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor
 ContainerExecutor for launching containers
-
-
+
   
   
 yarn.nodemanager.linux-container-executor.group
 hadoop
 Unix group of the NodeManager
-
-
+
   
   
 yarn.nodemanager.aux-services
 mapreduce_shuffle
 Auxilliary services of NodeManager. A valid service name 
should only contain a-zA-Z0-9_ and can
   not start with numbers
-
-
+
   
   
 yarn.nodemanager.aux-services.mapreduce_shuffle.class
 org.apache.hadoop.mapred.ShuffleHandler
 The auxiliary service class to use 
-
-
+
   
   
 yarn.nodemanager.log-dirs
@@ -173,8 +155,7 @@
   named container_{$contid}. Each container directory will contain the 
files
   stderr, stdin, and syslog generated by that container.
 
-
-
+
   
   
 yarn.nodemanager.local-dirs
@@ -186,8 +167,7 @@
   Individual containers' work directories, called container_${contid}, will
   be subdirectories of this.
 
-
-
+
   
   
 yarn.nodemanager.container-monitor.interval-ms
@@ -196,8 +176,7 @@
   The interval, in milliseconds, for which the node manager
   waits  between two cycles of monitoring its containers' memory usage.
 
-
-
+
   
   
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
index 26825ed..41bcb96 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/main/reso

[30/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
index dda3d44..84d38de 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
@@ -21,36 +21,31 @@ limitations under the License.
 hive.heapsize
 1024
 Hive Java heap size
-
-
+
   
   
 ambari.hive.db.schema.name
 hive
 Database name used as the Hive Metastore
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 com.mysql.jdbc.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionUserName
 hive
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
@@ -60,96 +55,83 @@ limitations under the License.
 
   password
 
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 false
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.kerberos.keytab.file
 /etc/security/keytabs/hive.service.keytab
 The path to the Kerberos Keytab file containing the metastore
  thrift server's service principal.
-
-
+
   
   
 hive.metastore.kerberos.principal
 hive/_h...@example.com
 The service principal for the metastore thrift server. The 
special
 string _HOST will be replaced automatically with the correct host 
name.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 thrift://localhost:9083
 URI for client to contact metastore server
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 false
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.security.metastore.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 The authorization manager class name to be used in the 
metastore for authorization. The user-defined authorization class should 
implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
  
-
-
+
   
   
 hive.security.authenticator.manager
 org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator
 Hive client authenticator manager class name. The 
user-defined authenticator class should implement interface 
org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  
-
-
+
   
   
 hive.server2.enable.doAs
@@ -158,50 +140,43 @@ limitations under the License.
   submitted the query. But if the parameter is set to false, the query 
will run as the user that the hiveserver2
   process runs as.
 
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 true
 Disable HDFS filesystem cache.
-
-
+
   
   
 fs.file.impl.disable.cache
 true
 Disable local filesystem cache.
-
-
+
   
   
 hive.enforce.bucketing
 true
 Whether bucketing is enforced. If true, while inserting into 
the table, bucketi

[46/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all 
stack configuration xmls to pass validation (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a998371a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a998371a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a998371a

Branch: refs/heads/branch-2.4
Commit: a998371a43f0cc0375092a2ab06a9001f7014930
Parents: 3f48719
Author: Lisnichenko Dmitro 
Authored: Thu Jun 9 17:06:54 2016 +0300
Committer: Lisnichenko Dmitro 
Committed: Thu Jun 9 17:15:48 2016 +0300

--
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |  51 +-
 .../configuration/accumulo-log4j.xml|  21 +-
 .../1.6.1.2.2.0/configuration/accumulo-site.xml |  51 +-
 .../0.1.0/configuration/ams-env.xml |  21 +-
 .../0.1.0/configuration/ams-grafana-env.xml |  18 +-
 .../0.1.0/configuration/ams-grafana-ini.xml |  15 +-
 .../0.1.0/configuration/ams-hbase-env.xml   |  33 +-
 .../0.1.0/configuration/ams-hbase-log4j.xml |   3 +-
 .../0.1.0/configuration/ams-hbase-policy.xml|   9 +-
 .../configuration/ams-hbase-security-site.xml   |  51 +-
 .../0.1.0/configuration/ams-hbase-site.xml  | 150 ++
 .../0.1.0/configuration/ams-log4j.xml   |   3 +-
 .../0.1.0/configuration/ams-site.xml| 177 +++
 .../0.1.0/configuration/ams-ssl-client.xml  |   9 +-
 .../0.1.0/configuration/ams-ssl-server.xml  |  24 +-
 .../0.1.0/configuration/storm-site.xml  |   3 +-
 .../configuration/application-properties.xml| 105 ++---
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml |  27 +-
 .../0.1.0.2.3/configuration/atlas-log4j.xml |   9 +-
 .../0.5.0.2.1/configuration/falcon-env.xml  |  33 +-
 .../configuration/falcon-runtime.properties.xml |  15 +-
 .../configuration/falcon-startup.properties.xml |  93 ++--
 .../0.5.0.2.1/configuration/oozie-site.xml  |  24 +-
 .../1.4.0.2.0/configuration/flume-conf.xml  |   3 +-
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |  15 +-
 .../GANGLIA/3.5.0/configuration/ganglia-env.xml |  30 +-
 .../HAWQ/2.0.0/configuration/hawq-check-env.xml |   3 +-
 .../HAWQ/2.0.0/configuration/hawq-env.xml   |   6 +-
 .../2.0.0/configuration/hawq-limits-env.xml |  12 +-
 .../HAWQ/2.0.0/configuration/hawq-site.xml  |  57 +--
 .../2.0.0/configuration/hawq-sysctl-env.xml |  72 +--
 .../HAWQ/2.0.0/configuration/hdfs-client.xml|  96 ++--
 .../HAWQ/2.0.0/configuration/yarn-client.xml|  30 +-
 .../0.96.0.2.0/configuration/hbase-env.xml  |  39 +-
 .../0.96.0.2.0/configuration/hbase-log4j.xml|   3 +-
 .../0.96.0.2.0/configuration/hbase-policy.xml   |   9 +-
 .../0.96.0.2.0/configuration/hbase-site.xml | 114 ++---
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |  45 +-
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  54 +--
 .../2.1.0.2.0/configuration/hadoop-policy.xml   |  33 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml |   3 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  | 144 ++
 .../HDFS/2.1.0.2.0/configuration/ssl-client.xml |  21 +-
 .../HDFS/2.1.0.2.0/configuration/ssl-server.xml |  24 +-
 .../HIVE/0.12.0.2.0/configuration/hcat-env.xml  |   3 +-
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  48 +-
 .../configuration/hive-exec-log4j.xml   |   3 +-
 .../0.12.0.2.0/configuration/hive-log4j.xml |   3 +-
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml | 147 ++
 .../0.12.0.2.0/configuration/webhcat-env.xml|   3 +-
 .../0.12.0.2.0/configuration/webhcat-log4j.xml  |   3 +-
 .../0.12.0.2.0/configuration/webhcat-site.xml   |  57 +--
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  | 159 +++
 .../KAFKA/0.8.1/configuration/kafka-env.xml |  27 +-
 .../KAFKA/0.8.1/configuration/kafka-log4j.xml   |   3 +-
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |  69 +--
 .../KAFKA/0.9.0/configuration/kafka-env.xml |   3 +-
 .../0.9.0/configuration/ranger-kafka-audit.xml  |  48 +-
 .../ranger-kafka-plugin-properties.xml  |  21 +-
 .../ranger-kafka-policymgr-ssl.xml  |  18 +-
 .../configuration/ranger-kafka-security.xml |  18 +-
 .../1.10.3-10/configuration/kerberos-env.xml|  75 +--
 .../1.10.3-10/configuration/krb5-conf.xml   |  12 +-
 .../0.5.0.2.2/configuration/admin-topology.xml  |   3 +-
 .../0.5.0.2.2/configuration/gateway-log4j.xml   |   3 +-
 .../0.5.0.2.2/configuration/gateway-site.xml|  21 +-
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml   |  18 +-
 .../KNOX/0.5.0.2.2/configuration/ldap-log4j.xml |   3 +-
 .../ranger-knox-plugin-properties.xml   |  93 ++--
 .../KNOX/0.5.0.2.2/configuration/topology.xml   |   3 +-
 .../KNOX/0.5.0.2.2/configuration/users-ldif.xml |   3 +-
 .../0.5.0/configuration/logfeeder-env.xml   |  42 +-
 .../0.5.0/configuration/logfeeder-log4j.xml |   3 +-
 .../configurati

[29/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
index cfa9c76..495a46f 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
@@ -27,8 +27,7 @@
for the java.library.path value. java.library.path tells the JVM where
to look for native libraries. It is necessary to set this config 
correctly since
Storm uses the ZeroMQ and JZMQ native libs. 
-
-
+
   
   
 storm.local.dir
@@ -36,71 +35,61 @@
 A directory on the local filesystem used by Storm for any 
local
filesystem usage it needs. The directory must exist and the Storm 
daemons must
have permission to read/write from this location.
-
-
+
   
   
 storm.zookeeper.servers
 ['localhost']
 A list of hosts of ZooKeeper servers used to manage the 
cluster.
-
-
+
   
   
 storm.zookeeper.port
 2181
 The port Storm will use to connect to each of the ZooKeeper 
servers.
-
-
+
   
   
 storm.zookeeper.root
 /storm
 The root location at which Storm stores data in 
ZooKeeper.
-
-
+
   
   
 storm.zookeeper.session.timeout
 2
 The session timeout for clients to ZooKeeper.
-
-
+
   
   
 storm.zookeeper.connection.timeout
 15000
 The connection timeout for clients to ZooKeeper.
-
-
+
   
   
 storm.zookeeper.retry.times
 5
 The number of times to retry a Zookeeper 
operation.
-
-
+
   
   
 storm.zookeeper.retry.interval
 1000
 The interval between retries of a Zookeeper 
operation.
-
-
+
   
   
 storm.zookeeper.retry.intervalceiling.millis
 3
 The ceiling of the interval between retries of a Zookeeper 
operation.
-
-
+
   
   
 storm.cluster.mode
 distributed
 The mode this Storm cluster is running in. Either 
"distributed" or "local".
-
-
+
   
   
 storm.local.mode.zmq
@@ -110,65 +99,56 @@
of this flag is to make it easy to run Storm in local mode by 
eliminating
the need for native dependencies, which can be difficult to install.
 
-
-
+
   
   
 storm.thrift.transport
 backtype.storm.security.auth.SimpleTransportPlugin
 The transport plug-in for Thrift client/server 
communication.
-
-
+
   
   
 storm.messaging.transport
 backtype.storm.messaging.netty.Context
 The transporter for communication among Storm 
tasks.
-
-
+
   
   
 nimbus.host
 localhost
 The host that the master server is running on.
-
-
+
   
   
 nimbus.thrift.port
 6627
  Which port the Thrift interface of Nimbus should run on. 
Clients should
connect to this port to upload jars and submit topologies.
-
-
+
   
   
 nimbus.thrift.max_buffer_size
 1048576
 The maximum buffer size thrift should use when reading 
messages.
-
-
+
   
   
 nimbus.childopts
 -Xmx1024m 
-Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf 
-javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM
 This parameter is used by the storm-deploy project to 
configure the jvm options for the nimbus daemon.
-
-
+
   
   
 nimbus.task.timeout.secs
 30
 How long without heartbeating a task can go before nimbus 
will consider the task dead and reassign it to another location.
-
-
+
   
   
 nimbus.supervisor.timeout.secs
 60
 How long before a supervisor can go without heartbeating 
before nimbus considers it dead and stops assigning new work to 
it.
-
-
+
   
   
 nimbus.monitor.freq.secs
@@ -178,15 +158,13 @@
that if a machine ever goes down Nimbus will immediately wake up and 
take action.
This parameter is for checking for failures when there's no explicit 
event like that occuring.
 
-
-
+
   
   
 nimbus.cleanup.inbox.freq.secs
 600
 How often nimbus should wake the cleanup thread to clean the 
inbox.
-
-
+
   
   
 nimbus.inbox.jar.expiration.secs
@@ -198,32 +176,28 @@
Note that the time it takes to delete an inbox jar file is going to be 
somewhat more than
NIMBUS_CLEANUP_INBOX_JAR_EXPI

[38/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
 
b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
index 943082d..037c817 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
@@ -23,42 +23,36 @@
 ranger.plugin.kms.service.name
 {{repo_name}}
 Name of the Ranger service containing policies for this kms 
instance
-
-
+
   
   
 ranger.plugin.kms.policy.source.impl
 org.apache.ranger.admin.client.RangerAdminRESTClient
 Class to retrieve policies from the source
-
-
+
   
   
 ranger.plugin.kms.policy.rest.url
 {{policymgr_mgr_url}}
 URL to Ranger Admin
-
-
+
   
   
 ranger.plugin.kms.policy.rest.ssl.config.file
 /etc/ranger/kms/conf/ranger-policymgr-ssl.xml
 Path to the file containing SSL details to contact Ranger 
Admin
-
-
+
   
   
 ranger.plugin.kms.policy.pollIntervalMs
 3
 How often to poll for changes in policies?
-
-
+
   
   
 ranger.plugin.kms.policy.cache.dir
 /etc/ranger/{{repo_name}}/policycache
 Directory where Ranger policies are cached after successful 
retrieval from the source
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-site.xml
 
b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-site.xml
index 7507c53..5bffe84 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-site.xml
@@ -22,44 +22,37 @@
   
 ranger.service.host
 {{kms_host}}
-
-
+
   
   
 ranger.service.http.port
 {{kms_port}}
-
-
+
   
   
 ranger.service.https.port
 9393
-
-
+
   
   
 ranger.service.shutdown.port
 7085
-
-
+
   
   
 ranger.contextName
 /kms
-
-
+
   
   
 xa.webapp.dir
 ./webapp
-
-
+
   
   
 ranger.service.https.attrib.ssl.enabled
 false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-env.xml
 
b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-env.xml
index 0824e75..f7fcdaf 100644
--- 
a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-env.xml
@@ -41,7 +41,6 @@ export HADOOP_CONF_DIR={{hadoop_conf_dir}}
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-log4j.xml
 
b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-log4j.xml
index cb7ecd0..b62a32e 100644
--- 
a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/configuration/slider-log4j.xml
@@ -87,7 +87,6 @@ log4j.logger.org.apache.zookeeper=WARN
   content
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
 
b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/configuration/spark-defaults.xml
index a7d00e2..003b716 100644
--

[09/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
index c8293d4..a159c60 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HIVE/configuration/hive-site.xml
@@ -22,183 +22,157 @@ limitations under the License.
 false
 controls whether to connect to remove metastore server or
 open a new metastore server in Hive Client JVM
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 com.mysql.jdbc.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionUserName
 
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
 
 password to use against metastore database
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.kerberos.keytab.file
 
 The path to the Kerberos Keytab file containing the metastore
  thrift server's service principal.
-
-
+
   
   
 hive.metastore.kerberos.principal
 
 The service principal for the metastore thrift server. The 
special
 string _HOST will be replaced automatically with the correct host 
name.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 
 URI for client to contact metastore server
-
-
+
   
   
 hadoop.clientside.fs.operations
 true
 FS operations are owned by client
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 true
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 org.apache.hcatalog.security.HdfsAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.server2.enable.doAs
 true
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 true
-
-
+
   
   
 fs.file.impl.disable.cache
 true
-
-
+
   
   
 hive.enforce.bucketing
 true
 Whether bucketing is enforced. If true, while inserting into 
the table, bucketing is enforced.
-
-
+
   
   
 hive.enforce.sorting
 true
 Whether sorting is enforced. If true, while inserting into 
the table, sorting is enforced.
-
-
+
   
   
 hive.map.aggr
 true
 Whether to use map-side aggregation in Hive Group By 
queries.
-
-
+
   
   
 hive.optimize.bucketmapjoin
 true
-
-
+
   
   
 hive.optimize.bucketmapjoin.sortedmerge
 true
-
-
+
   
   
 hive.mapred.reduce.tasks.speculative.execution
 false
 Whether speculative execution for reducers should be turned 
on.
-
-
+
   
   
 hive.auto.convert.join
 true
 Whether Hive enable the optimization about converting common
   join into mapjoin based on the input file size.
-
-
+
   
   
 hive.auto.convert.sortmerge.join
@@ -206,14 +180,12 @@ limitations under the License.
 Will the join be automatically converted to a sort-merge 
join, if the joined tables pass
   the criteria for sort-merge join.
 
-
-
+
   
   
 hive.auto.convert.sortmerge.join.noconditionaltask
 true
-
-
+
 

[20/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/kms-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/kms-env.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/kms-env.xml
index 570a95f..f3cb3a0 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/kms-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/kms-env.xml
@@ -28,7 +28,6 @@
   password
 
 HSM partition password
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/ranger-kms-audit.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/ranger-kms-audit.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/ranger-kms-audit.xml
index 166b97f..cd2d72a 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/ranger-kms-audit.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/RANGER_KMS/configuration/ranger-kms-audit.xml
@@ -22,44 +22,37 @@
   
 xasecure.audit.destination.db
 true
-
-
+
   
   
 xasecure.audit.destination.db.jdbc.url
 true
-
-
+
   
   
 xasecure.audit.destination.db.user
 true
-
-
+
   
   
 xasecure.audit.destination.db.password
 true
-
-
+
   
   
 xasecure.audit.destination.db.jdbc.driver
 true
-
-
+
   
   
 xasecure.audit.credential.provider.file
 true
-
-
+
   
   
 xasecure.audit.destination.db.batch.filespool.dir
 true
-
-
+
   
   
 xasecure.audit.destination.solr.urls
@@ -74,8 +67,7 @@
 ranger.audit.solr.urls
   
 
-
-
+
   
   
 xasecure.audit.destination.solr.zookeepers
@@ -87,7 +79,6 @@
 ranger.audit.solr.zookeepers
   
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-conf.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-conf.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-conf.xml
index 1f6ee21..37ed586 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-conf.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-conf.xml
@@ -18,50 +18,40 @@
  * limitations under the License.
  */
 -->
-
 
-
-
-livy.environment
-production
-
+  
+livy.environment
+production
+
 Specifies Livy's environment. May either be "production" or 
"development". In "development"
 mode, Livy will enable debugging options, such as reporting 
possible routes on a 404.
 defaults to development
 
-
-
-
-
-
-livy.server.port
-8998
-
+
+  
+  
+livy.server.port
+8998
+
 What port to start the server on. Defaults to 8998.
 
-
-
-
-
-
-livy.server.session.timeout
-360
-
+
+  
+  
+livy.server.session.timeout
+360
+
 Time in milliseconds on how long Livy will wait before timing out 
an idle session.
 Default is one hour.
 
-
-
-
-
-
-livy.impersonation.enabled
-true
-
+
+  
+  
+livy.impersonation.enabled
+true
+
 If livy should use proxy users when submitting a job.
 
-
-
-
-
-
\ No newline at end of file
+
+  
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-env.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-env.xml
index 60a13a9..668a8de 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/configuration/livy-env.xml
@@ -19,70 +19,59 @@
  * limitations under the License.
  */
 -->
-
 
-
-livy_user
-Livy User
-livy
- 

[60/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
index fc70b1b..6f96077 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,27 +129,23 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -169,8 +153,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -178,8 +161,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -188,29 +170,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -219,8 +197,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -229,8 +206,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -242,28 +218,24 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
-
-
+
   
   
 dfs.block.access.token.enable
@@ -272,8 +244,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+
   
   
 dfs.namenode.kerberos.principal
@

[59/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
index 1be7157..ac4279f 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
@@ -28,39 +28,34 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.socket.write.timeout
 0
 DFS Client write socket timeout
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -69,8 +64,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -82,8 +76,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -92,8 +85,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -102,31 +94,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -137,8 +125,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -148,27 +135,23 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 
-
-
+
   
   
 dfs.datanode.http.address
 
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -176,8 +159,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -185,8 +167,7 @@ literal string "local" or a host:port for 
HDFS.
 
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -195,29 +176,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 4096
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -226,8 +203,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -236,8 +212,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -249,28 +224,24 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
-
-
+
   
   
 dfs.block.access.token.enable
@@ -279,8 +250,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+   

[43/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
index a2cb615..145d832 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
@@ -22,8 +22,7 @@
 ha.failover-controller.active-standby-elector.zk.op.retries
 120
 ZooKeeper Failover Controller retries setting for your 
environment
-
-
+
   
   
   
@@ -33,24 +32,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.
-
-
+
   
   
 io.serializations
 org.apache.hadoop.io.serializer.WritableSerialization
  A list of comma-delimited serialization classes that can be 
used for obtaining serializers and deserializers.
 
-
-
+
   
   
 io.compression.codecs
 
org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
 A list of the compression codec classes that can be used
  for compression/decompression.
-
-
+
   
   
   
@@ -61,8 +57,7 @@
 The name of the default file system.  Either the
   literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 fs.trash.interval
@@ -73,8 +68,7 @@
 If trash is disabled server side then the client side configuration is 
checked.
 If trash is enabled on the server side then the value configured on 
the server is used and the client configuration value is ignored.
 
-
-
+
   
   
   
@@ -83,8 +77,7 @@
 Defines the threshold number of connections after which
connections will be inspected for idleness.
   
-
-
+
   
   
 ipc.client.connection.maxidletime
@@ -92,15 +85,13 @@
 The maximum time after which a client will bring down the
connection to the server.
   
-
-
+
   
   
 ipc.client.connect.max.retries
 50
 Defines the maximum number of retries for IPC 
connections.
-
-
+
   
   
 ipc.server.tcpnodelay
@@ -111,8 +102,7 @@
   decrease latency
   with a cost of more/smaller packets.
 
-
-
+
   
   
   
@@ -123,8 +113,7 @@
 not be exposed to public. Enable this option if the interfaces
 are only reachable by those who have the right authorization.
   
-
-
+
   
   
 hadoop.security.authentication
@@ -133,8 +122,7 @@
Set the authentication for the cluster. Valid values are: simple or
kerberos.

-
-
+
   
   
 hadoop.security.authorization
@@ -142,8 +130,7 @@
 
  Enable authorization for different protocols.
   
-
-
+
   
   
 hadoop.security.auth_to_local
@@ -189,8 +176,7 @@ DEFAULT
 
   multiLine
 
-
-
+
   
   
 net.topology.script.file.name
@@ -198,7 +184,6 @@ DEFAULT
 
   Location of topology script used by Hadoop to determine the rack 
location of nodes.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index e4f6263..967991e 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -29,8 +29,7 @@
   directory
   false
 
-
-
+
   
   
 hadoop_pid_dir_prefix
@@ -42,8 +41,7 @@
   false
   true
 
-
-
+
   
   
 hadoop_root_logger
@@ -53,8 +51,7 @@
 
   false
 
-
-
+
   
   
 hadoop_heapsize
@@ -66,8 +63,7 @@
   MB
   false
 
-
-
+
   
   
 namenode_heapsize
@@ -88,8 +84,7 @@
 dfs.datanode.data.dir
   
 
-
-
+
   
   
 namenode_opt_newsize
@@ -110,8 +105,7 @@
   256
   false
 
-
-
+
   
   
 namenode_opt_maxnewsize
@@ -132,8 +126,7 @@
   256
   false
 
-
-
+
   
   
 namenode_opt_permsize
@@ -148,8 +141,7 @@
   128
   false
 
-
-
+ 

[40/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
index 099dbc4..27d201e 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
@@ -36,7 +36,6 @@ fi
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
index b2146b6..3be21af 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
@@ -60,7 +60,6 @@ log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c 
%x - %m%n
   content
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
index 4ddf793..7d70ed0 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
@@ -88,7 +88,6 @@ hcat.bin=/usr/bin/hcat
   true
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
index dcb7ece..4eaa433 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
@@ -162,7 +162,6 @@ under the License.
 
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
index 14fb114..872a9e4 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
@@ -57,7 +57,6 @@
 
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
index 7d3c36f..f5a7574 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
@@ -22,7 +22,6 @@
 
  Path to the PXF keytab file, owned by PXF service and with 
permissions 0400.

-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml

[89/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
index 22d00d3..d5a36c2 100644
--- 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
@@ -24,8 +24,7 @@
 ambari-qa
 Policy user for KNOX
 This user must be system user and also present at Ranger 
admin portal
-
-
+
   
   
 common.name.for.certificate
@@ -34,8 +33,7 @@
 
   true
 
-
-
+
   
   
 ranger-knox-plugin-enabled
@@ -52,16 +50,14 @@
   boolean
   false
 
-
-
+
   
   
 REPOSITORY_CONFIG_USERNAME
 admin
 Ranger repository config user
 Used for repository creation on ranger admin
-
-
+
   
   
 REPOSITORY_CONFIG_PASSWORD
@@ -72,16 +68,14 @@
 
   password
 
-
-
+
   
   
 KNOX_HOME
 /usr/local/knox-server
 Knox Home
 Knox home folder
-
-
+
   
   
 XAAUDIT.DB.IS_ENABLED
@@ -97,8 +91,7 @@
 xasecure.audit.destination.db
   
 
-
-
+
   
   
 XAAUDIT.HDFS.IS_ENABLED
@@ -114,8 +107,7 @@
 xasecure.audit.destination.hdfs
   
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINATION_DIRECTORY
@@ -128,71 +120,61 @@
 xasecure.audit.destination.hdfs.dir
   
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY
 __REPLACE__LOG_DIR/hadoop/%app-type%/audit
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY
 __REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FILE
 %hostname%-audit.log
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS
 900
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS
 86400
 
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS
 60
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FILE
 %time:MMdd-HHmm.ss%.log
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS
 60
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS
 600
 
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT
@@ -201,30 +183,26 @@
 
   password
 
-
-
+
   
   
 SSL_KEYSTORE_FILE_PATH
 /etc/hadoop/conf/ranger-plugin-keystore.jks
 
-
-
+
   
   
 SSL_KEYSTORE_PASSWORD
 myKeyFilePassword
 PASSWORD
 
-
-
+
   
   
 SSL_TRUSTSTORE_FILE_PATH
 /etc/hadoop/conf/ranger-plugin-truststore.jks
 
-
-
+
   
   
 SSL_TRUSTSTORE_PASSWORD
@@ -234,43 +212,37 @@
 
   password
 
-
-
+
   
   
 POLICY_MGR_URL
 {{policymgr_mgr_url}}
 Policy Manager url
-
-
+
   
   
 SQL_CONNECTOR_JAR
 {{sql_connector_jar}}
 Location of DB client library (please check the location of 
the jar file)
-
-
+
   
   
 XAAUDIT.DB.FLAVOUR
 {{xa_audit_db_flavor}}
 The database type to be used (mysql/oracle)
-
-
+
   
   
 XAAUDIT.DB.DATABASE_NAME
 {{xa_audit_db_name}}
 Audit database name
-
-
+
   
   
 XAAUDIT.DB.USER_NAME
 {{xa_audit_db_user}}
 Audit database user
-
-
+
   
   
 XAAUDIT.DB.PASSWORD
@@ -280,21 +252,18 @@
 
   password
 
-
-
+
   
   
 XAAUDIT.DB.HOSTNAME
 {{xa_db_host}}
 Audit database hostname
-
-
+
   
   
 REPOSITORY_NAME
 {{repo_name}}
 Ranger repository name
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
index a3d3d4f..9847c8b 100644
--- 
a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
+++ 
b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/topology.xml
@@ -128,7 +128,6 @@
 ranger-knox-plugin-enabled
   
 
-
-
+
   
 


[69/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
index 7112a18..185056e 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/yarn-site.xml
@@ -21,14 +21,12 @@
   
 yarn.node-labels.manager-class
 true
-
-
+
   
   
 yarn.timeline-service.recovery.enabled
 true
-
-
+
   
   
 yarn.acl.enable
@@ -40,8 +38,7 @@
 ranger-yarn-plugin-enabled
   
 
-
-
+
   
   
 yarn.authorization-provider
@@ -52,8 +49,7 @@
 ranger-yarn-plugin-enabled
   
 
-
-
+
   
   
 yarn.admin.acl
@@ -62,37 +58,32 @@
 
   true
 
-
-
+
   
   
   
 yarn.timeline-service.version
 1.5
 Timeline service version we’re currently 
using.
-
-
+
   
   
 yarn.timeline-service.store-class
 
org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
 Main storage class for YARN timeline server.
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.active-dir
 /ats/active/
 DFS path to store active application’s timeline 
data
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.done-dir
 /ats/done/
 DFS path to store done application’s timeline 
data
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
@@ -101,36 +92,31 @@
 
   true
 
-
-
+
   
   
   
 yarn.timeline-service.entity-group-fs-store.summary-store
 
 
org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
 
 60
-
-
+
   
   
 
yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
 
 3600
-
-
+
   
   
 yarn.timeline-service.entity-group-fs-store.retain-seconds
 
 604800
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
index 43eadea..4e4d23d 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.4/services/YARN/configuration/yarn-site.xml
@@ -22,14 +22,12 @@
 mapreduce_shuffle,spark_shuffle
 Auxilliary services of NodeManager. A valid service name 
should only contain a-zA-Z0-9_ and can
   not start with numbers
-
-
+
   
   
 yarn.nodemanager.aux-services.spark_shuffle.class
 org.apache.spark.network.yarn.YarnShuffleService
 The auxiliary service class to use for Spark
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
index 2c4426b..0b45f48 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/application-properties.xml
@@ -24,182 +24,156 @@
 atlas.server.ha.enabled
 false
 Atlas high availability feature toggle.
-
-
+
   
   
 atlas.server.ids
-
+
 List of Atlas server ids for HA feature.
 
   false
   false
 
-
-
+
   
   
 atlas.server.address.id1
-
+
 Mapping of Atlas server ids to hosts.
 
   false
   false
 
-
-
+
   
   
 atlas.graph.storage.backend
 hbase
 
-
-
+
   
   
 atlas.graph.storage.hostname
-
+
 
-
-
+
   
   
 atlas.audit.hbase.zookeeper.quorum
-
+
 
-
-
+
   
   
 atlas.graph.index.search.backend
 solr5
 The Atlas indexing backend (e.g. solr5).
-
-
+
   
   
 atlas.graph

[18/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
index 27bbbd8..811d593 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
@@ -36,8 +36,7 @@ limitations under the License.
   
   1
 
-
-
+
   
   
 hive.zookeeper.quorum
@@ -49,22 +48,19 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 hive.metastore.connect.retries
 24
 Number of retries while opening a connection to 
metastore
-
-
+
   
   
 hive.metastore.failure.retries
 24
 Number of retries upon failure of Thrift metastore 
calls
-
-
+
   
   
 hive.metastore.client.connect.retry.delay
@@ -73,8 +69,7 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   Number of seconds for the client to wait between consecutive connection 
attempts
 
-
-
+
   
   
 hive.metastore.client.socket.timeout
@@ -83,15 +78,13 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   MetaStore Client socket timeout in seconds
 
-
-
+
   
   
 hive.mapjoin.bucket.cache.size
 1
 
-
-
+
   
   
 hive.security.authorization.manager
@@ -106,23 +99,20 @@ limitations under the License.
 hive_security_authorization
   
 
-
-
+
   
   
 hive.cluster.delegation.token.store.class
 org.apache.hadoop.hive.thrift.ZooKeeperTokenStore
 The delegation token store implementation.
   Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for 
load-balanced cluster.
-
-
+
   
   
 hive.cluster.delegation.token.store.zookeeper.connectString
 localhost:2181
 The ZooKeeper token store connect string.
-
-
+
   
   
 hive.server2.support.dynamic.service.discovery
@@ -132,38 +122,33 @@ limitations under the License.
   when it is brought up. JDBC/ODBC clients should use the ZooKeeper 
ensemble: hive.zookeeper.quorum
   in their connection string.
 
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 false
 true
 Disable HDFS filesystem cache.
-
-
+
   
   
 fs.file.impl.disable.cache
 false
 true
 Disable local filesystem cache.
-
-
+
   
   
 hive.exec.scratchdir
 /tmp/hive
 HDFS root scratch dir for Hive jobs which gets created with 
write all (733) permission. For each connecting user, an HDFS scratch dir: 
${hive.exec.scratchdir}/ is created, with 
${hive.scratch.dir.permission}.
-
-
+
   
   
 hive.exec.submitviachild
 false
 
-
-
+
   
   
 hive.exec.submit.local.task.via.child
@@ -173,8 +158,7 @@ limitations under the License.
   separate JVM (true recommended) or not.
   Avoids the overhead of spawning new JVM, but can lead to out-of-memory 
issues.
 
-
-
+
   
   
 hive.exec.compress.output
@@ -183,8 +167,7 @@ limitations under the License.
   This controls whether the final outputs of a query (to a local/HDFS file 
or a Hive table) is compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.compress.intermediate
@@ -193,8 +176,7 @@ limitations under the License.
   This controls whether intermediate files produced by Hive between 
multiple map-reduce jobs are compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.reducers.bytes.per.reducer
@@ -208,8 +190,7 @@ limitations under the License.
   B
   
 
-
-
+
   
   
 hive.exec.reducers.max
@@ -218,8 +199,7 @@ limitations under the License.
   max number of reducers will be used. If the one specified in the 
configuration parameter mapred.reduce.tasks is
   negative, Hive will use this one as the max number of reducers when 
automatically determine number of reducers.
 
-
-
+
   
   
 hive.exec.pre.hooks
@@ -229,8 +209,7 @@ limitations under the License.
   A pre-execution hook is specified as the name of a Java class which 
implements the
   org.apache.hadoop.hive.ql.ho

[88/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
index 099dbc4..27d201e 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
@@ -36,7 +36,6 @@ fi
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
index b2146b6..3be21af 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
@@ -60,7 +60,6 @@ log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c 
%x - %m%n
   content
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
index 4ddf793..7d70ed0 100644
--- 
a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
@@ -88,7 +88,6 @@ hcat.bin=/usr/bin/hcat
   true
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
index dcb7ece..4eaa433 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-profiles.xml
@@ -162,7 +162,6 @@ under the License.
 
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
index 14fb114..872a9e4 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-public-classpath.xml
@@ -57,7 +57,6 @@
 
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
index 7d3c36f..f5a7574 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
@@ -22,7 +22,6 @@
 
  Path to the PXF keytab file, owned by PXF service and with 
permissions 0400.

-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml

[11/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
index 1be7157..ac4279f 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
@@ -28,39 +28,34 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.socket.write.timeout
 0
 DFS Client write socket timeout
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -69,8 +64,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -82,8 +76,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -92,8 +85,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -102,31 +94,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -137,8 +125,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -148,27 +135,23 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 
-
-
+
   
   
 dfs.datanode.http.address
 
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -176,8 +159,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -185,8 +167,7 @@ literal string "local" or a host:port for 
HDFS.
 
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -195,29 +176,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 4096
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -226,8 +203,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -236,8 +212,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -249,28 +224,24 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
-
-
+
   
   
 dfs.block.access.token.enable
@@ -279,8 +250,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+   

[82/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
index 9019773..6ef189a 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
@@ -21,8 +21,7 @@ limitations under the License.
 hive.heapsize
 1024
 Hive Java heap size
-
-
+
   
   
   
@@ -32,30 +31,26 @@ limitations under the License.
 
   database
 
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 jdbc:postgresql://localhost/hive
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 org.postgresql.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
   
 javax.jdo.option.ConnectionUserName
 hive
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
@@ -65,73 +60,63 @@ limitations under the License.
 
   password
 
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 false
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 thrift://localhost:9083
 URI for client to contact metastore server
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 false
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.security.metastore.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 The authorization manager class name to be used in the 
metastore for authorization. The user-defined authorization class should 
implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
  
-
-
+
   
   
 hive.metastore.pre.event.listeners
@@ -140,8 +125,7 @@ limitations under the License.
   whenever databases, tables, and partitions are created, altered, or 
dropped.
   Set to 
org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
   if metastore-side authorization is desired.
-
-
+
   
   
 hive.metastore.pre.event.listeners
@@ -150,15 +134,13 @@ limitations under the License.
   whenever databases, tables, and partitions are created, altered, or 
dropped.
   Set to 
org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
   if metastore-side authorization is desired.
-
-
+
   
   
 hive.security.authenticator.manager
 org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator
 Hive client authenticator manager class name. The 
user-defined authenticator class should implement interface 
org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  
-
-
+
   
   
 hive.server2.enable.doAs
@@ -167,64 +149,55 @@ limitations under the License.
   submitted the query. But if the parameter is set to false, the query 
will run as the user that the hiveserver2
   process runs as.
 
-
-
+
   
   
 hive.server2.enable.impersonation
 Enable user impersonation for HiveServer2
 true
-
-
+
   
   
 hive.server2.authentication
 Authe

[54/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
index 870eee9..0b38cec 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
@@ -32,8 +32,7 @@
 into /tmp.  Change this configuration else all data will be lost
 on machine restart.
 
-
-
+
   
   
 hbase.cluster.distributed
@@ -43,8 +42,7 @@
   false, startup will run all HBase and ZooKeeper daemons together
   in the one JVM.
 
-
-
+
   
   
 hbase.tmp.dir
@@ -54,30 +52,26 @@
 than '/tmp' (The '/tmp' directory is often cleared on
 machine restart).
 
-
-
+
   
   
 hbase.master.info.bindAddress
 
 The bind address for the HBase Master web UI
 
-
-
+
   
   
 hbase.master.info.port
 
 The port for the HBase Master web UI.
-
-
+
   
   
 hbase.regionserver.info.port
 
 The port for the HBase RegionServer web UI.
-
-
+
   
   
 hbase.regionserver.global.memstore.upperLimit
@@ -85,8 +79,7 @@
 Maximum size of all memstores in a region server before new
   updates are blocked and flushes are forced. Defaults to 40% of heap
 
-
-
+
   
   
 hbase.regionserver.handler.count
@@ -95,8 +88,7 @@
 Same property is used by the Master for count of master handlers.
 Default is 10.
 
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -105,8 +97,7 @@
 HStoreFiles in a region.  Default: 1 day.
 Set to 0 to disable automated major compactions.
 
-
-
+
   
   
 hbase.regionserver.global.memstore.lowerLimit
@@ -117,8 +108,7 @@
   the minimum possible flushing to occur when updates are blocked due to
   memstore limiting.
 
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -130,8 +120,7 @@
 resultant flush files take a long time to compact or split, or
 worse, we OOME
 
-
-
+
   
   
 hbase.hregion.memstore.flush.size
@@ -141,8 +130,7 @@
 exceeds this number of bytes.  Value is checked by a thread that runs
 every hbase.server.thread.wakefrequency.
 
-
-
+
   
   
 hbase.hregion.memstore.mslab.enabled
@@ -153,8 +141,7 @@
   heavy write loads. This can reduce the frequency of stop-the-world
   GC pauses on large heaps.
 
-
-
+
   
   
 hbase.hregion.max.filesize
@@ -164,8 +151,7 @@
 grown to exceed this value, the hosting HRegion is split in two.
 Default: 1G.
 
-
-
+
   
   
 hbase.client.scanner.caching
@@ -177,8 +163,7 @@
 Do not set this value such that the time between invocations is greater
 than the scanner timeout; i.e. hbase.regionserver.lease.period
 
-
-
+
   
   
 zookeeper.session.timeout
@@ -190,8 +175,7 @@
   "The client sends a requested timeout, the server responds with the
   timeout that it can give the client. " In milliseconds.
 
-
-
+
   
   
 hbase.client.keyvalue.maxsize
@@ -203,8 +187,7 @@
 to set this to a fraction of the maximum region size. Setting it to zero
 or less disables the check.
 
-
-
+
   
   
 hbase.hstore.compactionThreshold
@@ -215,8 +198,7 @@
 is run to rewrite all HStoreFiles files as one.  Larger numbers
 put off compaction but when it runs, it takes longer to complete.
 
-
-
+
   
   
 hbase.hstore.blockingStoreFiles
@@ -227,8 +209,7 @@
 blocked for this HRegion until a compaction is completed, or
 until hbase.hstore.blockingWaitTime has been exceeded.
 
-
-
+
   
   
 hfile.block.cache.size
@@ -238,8 +219,7 @@
 used by HFile/StoreFile. Default of 0.25 means allocate 25%.
 Set to 0 to disable but it's not recommended.
 
-
-
+
   
   
   
@@ -295,8 +271,7 @@
 full privileges, regardless of stored ACLs, across the cluster.
 Only used when HBase security is enabled.
 
-
-
+
   
   
 hbase.coprocessor.region.classes
@@ -307,8 +282,7 @@
 it in HBase's classpath and add the fully qualified class name here.
 A coprocessor can also be loaded on demand by setting HTableDescriptor.
 
-
-
+
   
   
 hbase.coprocessor.master.classes
@@ -320,8 +294,7 @@
   implementing your own MasterObserver, just put it in HBase's classpath
   and add the fully qualified class name here.
 
-
-
+
   
   

[35/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
index 0a99ed4..4431ba6 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-env.xml
@@ -24,15 +24,13 @@
 flume_conf_dir
 /etc/flume/conf
 Location to save configuration files
-
-
+
   
   
 flume_log_dir
 /var/log/flume
 Location to save log files
-
-
+
   
   
 flume_user
@@ -44,8 +42,7 @@
   user
   false
 
-
-
+
   
   
   
@@ -86,7 +83,6 @@ export JAVA_HOME={{java_home}}
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-log4j.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-log4j.xml
index cba007f..bbd6335 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-log4j.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/FLUME/configuration/flume-log4j.xml
@@ -29,7 +29,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
index b53cf87..e03b621 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/GANGLIA/configuration/ganglia-env.xml
@@ -24,72 +24,62 @@
 ganglia_conf_dir
 /etc/ganglia/hdp
 Config directory for Ganglia
-
-
+
   
   
 ganglia_runtime_dir
 /var/run/ganglia/hdp
 Run directories for Ganglia
-
-
+
   
   
 gmetad_user
 nobody
 USER GROUP
 User 
-
-
+
   
   
 gmond_user
 nobody
 USER GROUP
 User 
-
-
+
   
   
 rrdcached_base_dir
 /var/lib/ganglia/rrds
 Default directory for saving the rrd files on ganglia 
server
-
-
+
   
   
 rrdcached_timeout
 3600
 (-w) Data is written to disk every timeout seconds. If this 
option is not specified the default interval of 300 seconds will be 
used.
-
-
+
   
   
 rrdcached_flush_timeout
 7200
 (-f) Every timeout seconds the entire cache is searched for 
old values which are written to disk. This only concerns files to which updates 
have stopped, so setting this to a high value, such as 3600 seconds, is 
acceptable in most cases. This timeout defaults to 3600 seconds.
-
-
+
   
   
 rrdcached_delay
 1800
 (-z) If specified, rrdcached will delay writing of each RRD 
for a random number of seconds in the range [0,delay). This will avoid too many 
writes being queued simultaneously. This value should be no greater than the 
value specified in -w. By default, there is no delay.
-
-
+
   
   
 rrdcached_write_threads
 4
 (-t) Specifies the number of threads used for writing RRD 
files. The default is 4. Increasing this number will allow rrdcached to have 
more simultaneous I/O requests into the kernel. This may allow the kernel to 
re-order disk writes, resulting in better disk throughput.
-
-
+
   
   
 additional_clusters
  
 Add additional desired Ganglia metrics cluster in the form 
"name1:port1,name2:port2". Ensure that the names and ports are unique across 
all cluster and ports are available on ganglia server host. Ambari has reserved 
ports 8667-8669 within its own pool.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-env.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE

[64/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
 
b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
index 7c5365b..c706178 100644
--- 
a/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
+++ 
b/ambari-server/src/test/resources/TestAmbaryServer.samples/dummy_common_services/HIVE/0.11.0.2.0.5.0/configuration/hive-site.xml
@@ -21,190 +21,163 @@ limitations under the License.
 ambari.hive.db.schema.name
 hive
 Database name used as the Hive Metastore
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 jdbc
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 com.mysql.jdbc.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionUserName
 hive
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
  
 password to use against metastore database
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.kerberos.keytab.file
 
 The path to the Kerberos Keytab file containing the metastore
  thrift server's service principal.
-
-
+
   
   
 hive.metastore.kerberos.principal
 
 The service principal for the metastore thrift server. The 
special
 string _HOST will be replaced automatically with the correct host 
name.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 thrift://localhost:9083
 URI for client to contact metastore server
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 false
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.security.metastore.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 The authorization manager class name to be used in the 
metastore for authorization. The user-defined authorization class should 
implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
  
-
-
+
   
   
 hive.security.authenticator.manager
 org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator
 Hive client authenticator manager class name. The 
user-defined authenticator class should implement interface 
org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  
-
-
+
   
   
 hive.server2.enable.doAs
 true
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 true
-
-
+
   
   
 fs.file.impl.disable.cache
 true
-
-
+
   
   
 hive.enforce.bucketing
 true
 Whether bucketing is enforced. If true, while inserting into 
the table, bucketing is enforced.
-
-
+
   
   
 hive.enforce.sorting
 true
 Whether sorting is enforced. If true, while inserting into 
the table, sorting is enforced.
-
-
+
   
   
 hive.map.aggr
 true
 Whether to use map-side aggregation in Hive Group By 
queries.
-
-
+
   
   
 hive.optimize.bucke

[87/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/usersync-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/usersync-properties.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/usersync-properties.xml
index 9524e1d..bc26d8e 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/usersync-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/usersync-properties.xml
@@ -23,103 +23,86 @@
   
 SYNC_SOURCE
 true
-
-
+
   
   
 MIN_UNIX_USER_ID_TO_SYNC
 true
-
-
+
   
   
 POLICY_MGR_URL
 true
-
-
+
   
   
 SYNC_INTERVAL
 true
-
-
+
   
   
 SYNC_LDAP_URL
 true
-
-
+
   
   
 SYNC_LDAP_BIND_DN
 true
-
-
+
   
   
 SYNC_LDAP_BIND_PASSWORD
 true
-
-
+
   
   
 CRED_KEYSTORE_FILENAME
 true
-
-
+
   
   
 SYNC_LDAP_USER_SEARCH_BASE
 true
-
-
+
   
   
 SYNC_LDAP_USER_SEARCH_SCOPE
 true
-
-
+
   
   
 SYNC_LDAP_USER_OBJECT_CLASS
 true
-
-
+
   
   
 SYNC_LDAP_USER_SEARCH_FILTER
 true
-
-
+
   
   
 SYNC_LDAP_USER_NAME_ATTRIBUTE
 true
-
-
+
   
   
 SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE
 true
-
-
+
   
   
 SYNC_LDAP_USERNAME_CASE_CONVERSION
 true
-
-
+
   
   
 SYNC_LDAP_GROUPNAME_CASE_CONVERSION
 true
-
-
+
   
   
 logdir
 true
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-log4j.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-log4j.xml
index 2a15750..7bf4294 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-log4j.xml
@@ -103,7 +103,6 @@ log4j.additivity.jdbc.connection=false
   content
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-properties.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-properties.xml
index 3e026ec..60be81f 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/admin-properties.xml
@@ -23,19 +23,16 @@
   
 audit_db_name
 true
-
-
+
   
   
 audit_db_user
 true
-
-
+
   
   
 audit_db_password
 true
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
index 07170f3..cdbbb12 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER/0.6.0/configuration/ranger-admin-site.xml
@@ -19,45 +19,38 @@
   
 ranger.jpa.audit.jdbc.driver
 true
-
-
+
   
   
 ranger.jpa.audit.jdbc.url
 true
-
-
+
   
   
 ranger.jpa.audit.jdbc.user
 true
-
-
+
   
   
 ranger.jpa.audit.jdbc.password
 true
-
-
+
   
   
 ranger.jpa.audit.jdbc.credential.alias
 true
-
-
+
   
   
 ranger.jpa.audit.jdbc.dialect
 true
-
-
+
   
   
 ranger.admin.kerberos.token.valid.seconds
 30
 
-
-
+
   
   
 ranger.admin.kerberos.cookie.domain
@@ -66,15 +59,13 @@
 
   true
 
-
-
+
   
   
 ranger.admin.kerberos.cookie.path
 /
 
-
-
+
   
   
 ranger.spnego.kerberos.principal
@@ -83,8 +74,7 @@
 
   true
 
-
-
+
   
   
 ranger.spnego.kerbero

[81/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
index c595f3a..4bebc19 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
@@ -24,15 +24,13 @@
 yarn_log_dir_prefix
 /var/log/hadoop-yarn
 YARN Log Dir Prefix
-
-
+
   
   
 yarn_pid_dir_prefix
 /var/run/hadoop-yarn
 YARN PID Dir Prefix
-
-
+
   
   
 yarn_user
@@ -44,43 +42,37 @@
   user
   false
 
-
-
+
   
   
 yarn_heapsize
 1024
 Max heapsize for all YARN components using a numerical value 
in the scale of MB
-
-
+
   
   
 resourcemanager_heapsize
 1024
 Max heapsize for ResourceManager using a numerical value in 
the scale of MB
-
-
+
   
   
 nodemanager_heapsize
 1024
 Max heapsize for NodeManager using a numerical value in the 
scale of MB
-
-
+
   
   
 min_user_id
 1000
 Set to 0 to disallow root from submitting jobs. Set to 1000 
to disallow all superusers from submitting jobs
-
-
+
   
   
 apptimelineserver_heapsize
 1024
 Max heapsize for AppTimelineServer using a numerical value in 
the scale of MB
-
-
+
   
   
   
@@ -201,7 +193,6 @@ YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
index cece269..7299f49 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
@@ -67,7 +67,6 @@ 
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Appl
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index fc6708f..cefa82a 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -23,22 +23,19 @@
 yarn.resourcemanager.hostname
 localhost
 The hostname of the RM.
-
-
+
   
   
 yarn.resourcemanager.resource-tracker.address
 localhost:8025
  The address of ResourceManager. 
-
-
+
   
   
 yarn.resourcemanager.scheduler.address
 localhost:8030
 The address of the scheduler interface.
-
-
+
   
   
 yarn.resourcemanager.address
@@ -47,22 +44,19 @@
   The address of the applications manager interface in the
   RM.
 
-
-
+
   
   
 yarn.resourcemanager.admin.address
 localhost:8141
 The address of the RM admin interface.
-
-
+
   
   
 yarn.resourcemanager.scheduler.class
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
 The class to use as the resource scheduler.
-
-
+
   
   
 yarn.scheduler.minimum-allocation-mb
@@ -72,8 +66,7 @@
   in MBs. Memory requests lower than this won't take effect,
   and the specified value will get allocated at minimum.
 
-
-
+
   
   
 yarn.scheduler.maximum-allocation-mb
@@ -83,45 +76,39 @@
   in MBs. Memory requests higher than this won't take effect,
   and will get capped to this value.
 
-
-
+
   
   
 yarn.acl.enable
 false
  Are acls enabled. 
-
-
+
   
   
 yarn.admin.acl
 
  ACL of who can be admin of the YARN cluster. 
-
-
+
   
   
   
 yarn.nodemanager.address
 0.0.0.0:45454
 The address of the container manager in the NM.
-
-
+
   
   
 yarn.nodemanager.resource.memory-m

[79/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
index 4b236db..647c0f4 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -23,22 +23,19 @@
 yarn.resourcemanager.hostname
 localhost
 The hostname of the RM.
-
-
+
   
   
 yarn.resourcemanager.resource-tracker.address
 localhost:8025
  The address of ResourceManager. 
-
-
+
   
   
 yarn.resourcemanager.scheduler.address
 localhost:8030
 The address of the scheduler interface.
-
-
+
   
   
 yarn.resourcemanager.address
@@ -47,22 +44,19 @@
   The address of the applications manager interface in the
   RM.
 
-
-
+
   
   
 yarn.resourcemanager.admin.address
 localhost:8141
 The address of the RM admin interface.
-
-
+
   
   
 yarn.resourcemanager.scheduler.class
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
 The class to use as the resource scheduler.
-
-
+
   
   
 yarn.scheduler.minimum-allocation-mb
@@ -72,8 +66,7 @@
   in MBs. Memory requests lower than this won't take effect,
   and the specified value will get allocated at minimum.
 
-
-
+
   
   
 yarn.scheduler.maximum-allocation-mb
@@ -83,45 +76,39 @@
   in MBs. Memory requests higher than this won't take effect,
   and will get capped to this value.
 
-
-
+
   
   
 yarn.acl.enable
 false
  Are acls enabled. 
-
-
+
   
   
 yarn.admin.acl
 
  ACL of who can be admin of the YARN cluster. 
-
-
+
   
   
   
 yarn.nodemanager.address
 0.0.0.0:45454
 The address of the container manager in the NM.
-
-
+
   
   
 yarn.nodemanager.resource.memory-mb
 5120
 Amount of physical memory, in MB, that can be allocated
   for containers.
-
-
+
   
   
 yarn.application.classpath
 
/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
 Classpath for typical applications.
-
-
+
   
   
 yarn.nodemanager.vmem-pmem-ratio
@@ -131,37 +118,32 @@
   expressed in terms of physical memory, and virtual memory usage
   is allowed to exceed this allocation by this ratio.
 
-
-
+
   
   
 yarn.nodemanager.container-executor.class
 
org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor
 ContainerExecutor for launching containers
-
-
+
   
   
 yarn.nodemanager.linux-container-executor.group
 hadoop
 Unix group of the NodeManager
-
-
+
   
   
 yarn.nodemanager.aux-services
 mapreduce_shuffle
 Auxilliary services of NodeManager. A valid service name 
should only contain a-zA-Z0-9_ and can
   not start with numbers
-
-
+
   
   
 yarn.nodemanager.aux-services.mapreduce_shuffle.class
 org.apache.hadoop.mapred.ShuffleHandler
 The auxiliary service class to use 
-
-
+
   
   
 yarn.nodemanager.log-dirs
@@ -173,8 +155,7 @@
   named container_{$contid}. Each container directory will contain the 
files
   stderr, stdin, and syslog generated by that container.
 
-
-
+
   
   
 yarn.nodemanager.local-dirs
@@ -186,8 +167,7 @@
   Individual containers' work directories, called container_${contid}, will
   be subdirectories of this.
 
-
-
+
   
   
 yarn.nodemanager.container-monitor.interval-ms
@@ -196,8 +176,7 @@
   The interval, in milliseconds, for which the node manager
   waits  between two cycles of monitoring its containers' memory usage.
 
-
-
+
   
   
   
@@ -194,8 +177,7 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
   
@@ -210,8 +192,7 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
 stack_root
@@ -222,35 +203,30 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
 alerts_repeat_tolerance
 1
 The number of consecutive alerts required to transition an 
alert from the SOFT to the HARD state.
-
-
+
   
   
 ignore_bad_mounts
 false
 For properties handled by handle_mounted_dirs this will make 
Ambari not to c

[77/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
index cfa9c76..495a46f 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
@@ -27,8 +27,7 @@
for the java.library.path value. java.library.path tells the JVM where
to look for native libraries. It is necessary to set this config 
correctly since
Storm uses the ZeroMQ and JZMQ native libs. 
-
-
+
   
   
 storm.local.dir
@@ -36,71 +35,61 @@
 A directory on the local filesystem used by Storm for any 
local
filesystem usage it needs. The directory must exist and the Storm 
daemons must
have permission to read/write from this location.
-
-
+
   
   
 storm.zookeeper.servers
 ['localhost']
 A list of hosts of ZooKeeper servers used to manage the 
cluster.
-
-
+
   
   
 storm.zookeeper.port
 2181
 The port Storm will use to connect to each of the ZooKeeper 
servers.
-
-
+
   
   
 storm.zookeeper.root
 /storm
 The root location at which Storm stores data in 
ZooKeeper.
-
-
+
   
   
 storm.zookeeper.session.timeout
 2
 The session timeout for clients to ZooKeeper.
-
-
+
   
   
 storm.zookeeper.connection.timeout
 15000
 The connection timeout for clients to ZooKeeper.
-
-
+
   
   
 storm.zookeeper.retry.times
 5
 The number of times to retry a Zookeeper 
operation.
-
-
+
   
   
 storm.zookeeper.retry.interval
 1000
 The interval between retries of a Zookeeper 
operation.
-
-
+
   
   
 storm.zookeeper.retry.intervalceiling.millis
 3
 The ceiling of the interval between retries of a Zookeeper 
operation.
-
-
+
   
   
 storm.cluster.mode
 distributed
 The mode this Storm cluster is running in. Either 
"distributed" or "local".
-
-
+
   
   
 storm.local.mode.zmq
@@ -110,65 +99,56 @@
of this flag is to make it easy to run Storm in local mode by 
eliminating
the need for native dependencies, which can be difficult to install.
 
-
-
+
   
   
 storm.thrift.transport
 backtype.storm.security.auth.SimpleTransportPlugin
 The transport plug-in for Thrift client/server 
communication.
-
-
+
   
   
 storm.messaging.transport
 backtype.storm.messaging.netty.Context
 The transporter for communication among Storm 
tasks.
-
-
+
   
   
 nimbus.host
 localhost
 The host that the master server is running on.
-
-
+
   
   
 nimbus.thrift.port
 6627
  Which port the Thrift interface of Nimbus should run on. 
Clients should
connect to this port to upload jars and submit topologies.
-
-
+
   
   
 nimbus.thrift.max_buffer_size
 1048576
 The maximum buffer size thrift should use when reading 
messages.
-
-
+
   
   
 nimbus.childopts
 -Xmx1024m 
-Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf 
-javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM
 This parameter is used by the storm-deploy project to 
configure the jvm options for the nimbus daemon.
-
-
+
   
   
 nimbus.task.timeout.secs
 30
 How long without heartbeating a task can go before nimbus 
will consider the task dead and reassign it to another location.
-
-
+
   
   
 nimbus.supervisor.timeout.secs
 60
 How long before a supervisor can go without heartbeating 
before nimbus considers it dead and stops assigning new work to 
it.
-
-
+
   
   
 nimbus.monitor.freq.secs
@@ -178,15 +158,13 @@
that if a machine ever goes down Nimbus will immediately wake up and 
take action.
This parameter is for checking for failures when there's no explicit 
event like that occuring.
 
-
-
+
   
   
 nimbus.cleanup.inbox.freq.secs
 600
 How often nimbus should wake the cleanup thread to clean the 
inbox.
-
-
+
   
   
 nimbus.inbox.jar.expiration.secs
@@ -198,32 +176,28 @@
Note that the time it takes to delete an inbox jar file is going to be 
somewhat more than
NIMBUS_CLEANUP_INBOX_JAR_EXPI

[37/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
index 0d9d48e..f302c18 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
@@ -24,15 +24,13 @@
 nimbus_seeds_supported
 true
 
-
-
+
   
   
 storm_logs_supported
 true
 
-
-
+
   
   
   
@@ -55,7 +53,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-site.xml
index e60565f..66cf8e2 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-site.xml
@@ -28,36 +28,31 @@
   true
   false
 
-
-
+
   
   
 topology.min.replication.count.default
 1
 Default minimum number of nimbus hosts where the code must be 
replicated before leader nimbus can mark the topology as active and create 
assignments. 
-
-
+
   
   
 topology.min.replication.count
 {{actual_topology_min_replication_count}}
 Calculated minimum number of nimbus hosts where the code must 
be replicated before leader nimbus can mark the topology as active and create 
assignments. 
-
-
+
   
   
 topology.max.replication.wait.time.sec.default
 60
 Default maximum wait time for the nimbus host replication to 
achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will 
go ahead and perform topology activation tasks even if required 
nimbus.min.replication.count is not achieved
-
-
+
   
   
 topology.max.replication.wait.time.sec
 {{actual_topology_max_replication_wait_time_sec}}
 Calculated maximum wait time for the nimbus host replication 
to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus 
will go ahead and perform topology activation tasks even if required 
nimbus.min.replication.count is not achieved
-
-
+
   
   
 nimbus.host
@@ -65,7 +60,6 @@
 DONT_ADD_ON_UPGRADE
 Deprecated config in favor of nimbus.seeds used during non HA 
mode.
 true
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-worker-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-worker-log4j.xml
 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-worker-log4j.xml
index 573510c..7e73e1f 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-worker-log4j.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-worker-log4j.xml
@@ -106,7 +106,6 @@
   content
   false
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
 
b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
index 363522b..bdfcb3d 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-env.xml
@@ -30,8 +30,7 @@
   user
   false
 
-
-
+
   
   
 storm_log_dir
@@ -40,8 +39,7 @@
 
   directory
 
-
-
+
   
   
 storm_pid_dir
@@ -50,71 +48,60 @@
 
   directory
 
-
-
+
   
   
 jmxremote_port
 56431
 
-
-
+
   
   
 storm_principal_name
 Storm principal name
-
-
+
   
   
 storm_principal_name
 Storm principal name
-
-
+
   
   
 storm_keytab
 Storm keytab path
-
-
+
   
   
 storm_ui_principal_name
 Storm UI 

[08/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
index 3b1afbd..a40c6ec 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/oozie-site.xml
@@ -25,8 +25,7 @@
 oozie.base.url
 http://localhost:11000/oozie
 Base Oozie URL.
-
-
+
   
   
 oozie.system.id
@@ -34,8 +33,7 @@
 
 The Oozie system ID.
 
-
-
+
   
   
 oozie.systemmode
@@ -43,8 +41,7 @@
 
  System mode for  Oozie at startup.
  
-
-
+
   
   
 oozie.service.AuthorizationService.authorization.enabled
@@ -53,8 +50,7 @@
  Specifies whether security (user name/admin role) is enabled or not.
  If disabled any user can manage Oozie system and manage any job.
  
-
-
+
   
   
 oozie.service.PurgeService.older.than
@@ -62,8 +58,7 @@
 
  Jobs older than this value, in days, will be purged by the PurgeService.
  
-
-
+
   
   
 oozie.service.PurgeService.purge.interval
@@ -71,22 +66,19 @@
 
  Interval at which the purge service will run, in seconds.
  
-
-
+
   
   
 oozie.service.CallableQueueService.queue.size
 1000
 Max callable queue size
-
-
+
   
   
 oozie.service.CallableQueueService.threads
 10
 Number of threads used for executing callables
-
-
+
   
   
 oozie.service.CallableQueueService.callable.concurrency
@@ -98,16 +90,14 @@
  All commands that use action executors (action-start, action-end, 
action-kill and action-check) use
  the action type as the callable type.
  
-
-
+
   
   
 oozie.service.coord.normal.default.timeout
 120
 Default timeout for a coordinator action input check (in 
minutes) for normal job.
   -1 means infinite timeout
-
-
+
   
   
 oozie.db.schema.name
@@ -115,8 +105,7 @@
 
   Oozie DataBase Name
  
-
-
+
   
   
 oozie.service.HadoopAccessorService.jobTracker.whitelist
@@ -124,24 +113,21 @@
 
   Whitelisted job tracker for Oozie service.
   
-
-
+
   
   
 oozie.authentication.type
 simple
 
   
-
-
+
   
   
 oozie.service.HadoopAccessorService.nameNode.whitelist
  
 
   
-
-
+
   
   
 oozie.service.WorkflowAppService.system.libpath
@@ -151,8 +137,7 @@
   This path is added to workflow application if their job properties sets
   the property 'oozie.use.system.libpath' to true.
   
-
-
+
   
   
 use.system.libpath.for.mapreduce.and.pig.jobs
@@ -163,8 +148,7 @@
   specify where the Pig JAR files are. Instead, the ones from the system
   library path are used.
   
-
-
+
   
   
 oozie.authentication.kerberos.name.rules
@@ -176,8 +160,7 @@
 
 
 The mapping from kerberos principal names to local OS user 
names.
-
-
+
   
   
 oozie.service.HadoopAccessorService.hadoop.configurations
@@ -190,8 +173,7 @@
   the Oozie configuration directory; though the path can be absolute 
(i.e. to point
   to Hadoop client conf/ directories in the local filesystem.
   
-
-
+
   
   
 oozie.service.ActionService.executor.ext.classes
@@ -202,14 +184,12 @@
 org.apache.oozie.action.hadoop.SqoopActionExecutor,
 org.apache.oozie.action.hadoop.DistcpActionExecutor
 
-
-
+
   
   
 oozie.service.SchemaService.wf.ext.schemas
 
shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,hive-action-0.3.xsd
-
-
+
   
   
 oozie.service.JPAService.create.db.schema
@@ -220,8 +200,7 @@
 If set to true, it creates the DB schema if it does not exist. If 
the DB schema exists is a NOP.
 If set to false, it does not create the DB schema. If the DB 
schema does not exist it fails start up.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.driver
@@ -229,8 +208,7 @@
 
 JDBC driver class.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.url
@@ -238,8 +216,7 @@
 
 JDBC URL.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.username
@@ -247,8 +224,7 @@
 
 DB user name.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.password
@@ -259,8 +235,7 @@
 IMPORTANT: i

[84/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
index caccac7..aa95dee 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-site.xml
@@ -40,8 +40,7 @@
 mapreduce.map.memory.mb
   
 
-
-
+
   
   
 mapreduce.map.sort.spill.percent
@@ -53,8 +52,7 @@
   is already in progress, so spills may be larger than this threshold when
   it is set to less than .5
 
-
-
+
   
   
 mapreduce.task.io.sort.factor
@@ -63,8 +61,7 @@
   The number of streams to merge at once while sorting files.
   This determines the number of open file handles.
 
-
-
+
   
   
   
@@ -73,8 +70,7 @@
 
   Administrators for MapReduce applications.
 
-
-
+
   
   
 mapreduce.reduce.shuffle.parallelcopies
@@ -83,8 +79,7 @@
   The default number of parallel transfers run by reduce during
   the copy(shuffle) phase.
 
-
-
+
   
   
 mapreduce.map.speculative
@@ -93,8 +88,7 @@
   If true, then multiple instances of some map tasks
   may be executed in parallel.
 
-
-
+
   
   
 mapreduce.reduce.speculative
@@ -103,8 +97,7 @@
   If true, then multiple instances of some reduce tasks may be
   executed in parallel.
 
-
-
+
   
   
 mapreduce.job.reduce.slowstart.completedmaps
@@ -113,8 +106,7 @@
   Fraction of the number of maps in the job which should be complete before
   reduces are scheduled for the job.
 
-
-
+
   
   
 mapreduce.job.counters.max
@@ -122,8 +114,7 @@
 
   Limit on the number of counters allowed per job.
 
-
-
+
   
   
 mapreduce.reduce.shuffle.merge.percent
@@ -134,8 +125,7 @@
   storing in-memory map outputs, as defined by
   mapreduce.reduce.shuffle.input.buffer.percent.
 
-
-
+
   
   
 mapreduce.reduce.shuffle.input.buffer.percent
@@ -144,8 +134,7 @@
   The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
 
-
-
+
   
   
 mapreduce.output.fileoutputformat.compress.type
@@ -154,8 +143,7 @@
   If the job outputs are to compressed as SequenceFiles, how should
   they be compressed? Should be one of NONE, RECORD or BLOCK.
 
-
-
+
   
   
 mapreduce.reduce.input.buffer.percent
@@ -166,8 +154,7 @@
   remaining map outputs in memory must consume less than this threshold 
before
   the reduce can begin.
 
-
-
+
   
   
   
@@ -176,8 +163,7 @@
 
   Should the outputs of the maps be compressed before being sent across 
the network. Uses SequenceFile compression.
 
-
-
+
   
   
 mapreduce.task.timeout
@@ -187,8 +173,7 @@
   terminated if it neither reads an input, writes an output, nor
   updates its status string.
 
-
-
+
   
   
 mapreduce.map.memory.mb
@@ -212,8 +197,7 @@
 yarn.scheduler.minimum-allocation-mb
   
 
-
-
+
   
   
 mapreduce.reduce.memory.mb
@@ -237,8 +221,7 @@
 yarn.scheduler.minimum-allocation-mb
   
 
-
-
+
   
   
 mapreduce.shuffle.port
@@ -248,8 +231,7 @@
   ShuffleHandler is a service run at the NodeManager to facilitate
   transfers of intermediate Map outputs to requesting Reducers.
 
-
-
+
   
   
 mapreduce.jobhistory.intermediate-done-dir
@@ -257,8 +239,7 @@
 
   Directory where history files are written by MapReduce jobs.
 
-
-
+
   
   
 mapreduce.jobhistory.done-dir
@@ -267,24 +248,21 @@
   Directory where history files are managed by the MR JobHistory Server.
 
 NOT_MANAGED_HDFS_PATH
-
-
+
   
   
 mapreduce.jobhistory.address
 localhost:10020
 DONT_ADD_ON_UPGRADE
 Enter your JobHistoryServer hostname.
-
-
+
   
   
 mapreduce.jobhistory.webapp.address
 localhost:19888
 DONT_ADD_ON_UPGRADE
 Enter your JobHistoryServer hostname.
-
-
+
   
   
 mapreduce.framework.name
@@ -293,8 +271,7 @@
   The runtime framework for executing MapReduce jobs. Can be one of local,
   classic or yarn.
 
-
-
+
   
   
 yarn.app.mapreduce.am.staging-dir
@@ -302,8 +279,7 @@
 
   The staging dir used while submitting jobs.
 
-
-
+
   
   
 yarn.app.

[74/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index 18f321c..4af74b1 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -36,8 +36,7 @@ limitations under the License.
   
   1
 
-
-
+
   
   
 hive.zookeeper.quorum
@@ -51,22 +50,19 @@ limitations under the License.
   multiLine
   true
 
-
-
+
   
   
 hive.metastore.connect.retries
 24
 Number of retries while opening a connection to 
metastore
-
-
+
   
   
 hive.metastore.failure.retries
 24
 Number of retries upon failure of Thrift metastore 
calls
-
-
+
   
   
 hive.metastore.client.connect.retry.delay
@@ -75,8 +71,7 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   Number of seconds for the client to wait between consecutive connection 
attempts
 
-
-
+
   
   
 hive.metastore.client.socket.timeout
@@ -85,15 +80,13 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   MetaStore Client socket timeout in seconds
 
-
-
+
   
   
 hive.mapjoin.bucket.cache.size
 1
 
-
-
+
   
   
 hive.security.authorization.manager
@@ -108,24 +101,21 @@ limitations under the License.
 hive_security_authorization
   
 
-
-
+
   
   
 hive.cluster.delegation.token.store.class
 org.apache.hadoop.hive.thrift.ZooKeeperTokenStore
 The delegation token store implementation.
   Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for 
load-balanced cluster.
-
-
+
   
   
 hive.cluster.delegation.token.store.zookeeper.connectString
 localhost:2181
 DONT_ADD_ON_UPGRADE
 The ZooKeeper token store connect string.
-
-
+
   
   
 hive.server2.support.dynamic.service.discovery
@@ -138,38 +128,33 @@ limitations under the License.
 
   boolean
 
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 false
 true
 Disable HDFS filesystem cache.
-
-
+
   
   
 fs.file.impl.disable.cache
 false
 true
 Disable local filesystem cache.
-
-
+
   
   
 hive.exec.scratchdir
 /tmp/hive
 HDFS root scratch dir for Hive jobs which gets created with 
write all (733) permission. For each connecting user, an HDFS scratch dir: 
${hive.exec.scratchdir}/ is created, with 
${hive.scratch.dir.permission}.
-
-
+
   
   
 hive.exec.submitviachild
 false
 
-
-
+
   
   
 hive.exec.submit.local.task.via.child
@@ -179,8 +164,7 @@ limitations under the License.
   separate JVM (true recommended) or not.
   Avoids the overhead of spawning new JVM, but can lead to out-of-memory 
issues.
 
-
-
+
   
   
 hive.exec.compress.output
@@ -189,8 +173,7 @@ limitations under the License.
   This controls whether the final outputs of a query (to a local/HDFS file 
or a Hive table) is compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.compress.intermediate
@@ -199,8 +182,7 @@ limitations under the License.
   This controls whether intermediate files produced by Hive between 
multiple map-reduce jobs are compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.reducers.bytes.per.reducer
@@ -214,8 +196,7 @@ limitations under the License.
   B
   
 
-
-
+
   
   
 hive.exec.reducers.max
@@ -224,8 +205,7 @@ limitations under the License.
   max number of reducers will be used. If the one specified in the 
configuration parameter mapred.reduce.tasks is
   negative, Hive will use this one as the max number of reducers when 
automatically determine number of reducers.
 
-
-
+
   
   
 hive.exec.pre.hooks
@@ -241,8 +221,7 @@ limitations under the License.
 hive_timeline_logging_enabled
   
 
-
-
+
   
   
 hive.exec.post.hooks
@@ -266,8 +245,7 @@ limitations under the License.
 atlas.server.https.port
   
 
-
-
+
   

[27/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
index 1da1589..86becf9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
@@ -41,8 +41,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-create-inst
@@ -69,8 +68,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-create
@@ -97,8 +95,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-job-submit-data
@@ -126,8 +123,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-start
@@ -157,8 +153,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-sla-submit
@@ -172,8 +167,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-sla-create
@@ -187,8 +181,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.HadoopAccessorService.supported.filesystems
@@ -199,7 +192,6 @@
 
   custom
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
index a0eb6bc..2962549 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
@@ -29,8 +29,7 @@
   directory
   false
 
-
-
+
   
   
 hbase_pid_dir
@@ -42,8 +41,7 @@
   false
   true
 
-
-
+
   
   
 hbase_regionserver_xmn_max
@@ -57,16 +55,14 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
 
   MB
 
-
-
+
   
   
 hbase_regionserver_xmn_ratio
 0.2
 RegionServers -Xmn in -Xmx ratio
 Percentage of max heap size (-Xmx) which used for young 
generation heap (-Xmn).
-
-
+
   
   
 hbase_user
@@ -78,8 +74,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
   user
   false
 
-
-
+
   
   
 hbase_max_direct_memory_size
@@ -89,8 +84,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
 
   true
 
-
-
+
   
   
 phoenix_sql_enabled
@@ -111,8 +105,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
   
   1
 
-
-
+
   
   
   
@@ -194,7 +187,6 @@ export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% 
if hbase_max_direct_
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
index 0510a84..1c3eb6e 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
@@ -26,8 +26,7 @@
 A staging directory in default file system (HDFS)
 for bulk loading.
 
-
-
+
   
   
 hbase.hstore.flush.retries.number
@@ -36,8 +35,7 @@
 
 The number of times the region flush operation will be retried.
 
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -56,8 +54,7 @@
   259200
   milliseconds
 
-
-
+
   
   
 hbase.hregion.majorcompaction.jitter
@@ -66,8 +63,7 @@
   a given amount of time either side of hbase.hregion.majorcompaction. The 
smaller the number,
   the closer the compactions will happen to the 
hbase.hregion.majorcompaction
   interval.
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -95,8 +91,7 @@
 
   
 
-
-
+
   
   
 hbase.bucketc

[94/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all 
stack configuration xmls to pass validation (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4c5cf30e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4c5cf30e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4c5cf30e

Branch: refs/heads/trunk
Commit: 4c5cf30eeea1d456f1cc79d888bc1b369ba5e683
Parents: 9699f57
Author: Lisnichenko Dmitro 
Authored: Thu Jun 9 15:25:48 2016 +0300
Committer: Lisnichenko Dmitro 
Committed: Thu Jun 9 17:16:29 2016 +0300

--
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |  51 +-
 .../configuration/accumulo-log4j.xml|  21 +-
 .../1.6.1.2.2.0/configuration/accumulo-site.xml |  51 +-
 .../0.1.0/configuration/ams-env.xml |  21 +-
 .../0.1.0/configuration/ams-grafana-env.xml |  18 +-
 .../0.1.0/configuration/ams-grafana-ini.xml |  15 +-
 .../0.1.0/configuration/ams-hbase-env.xml   |  33 +-
 .../0.1.0/configuration/ams-hbase-log4j.xml |   3 +-
 .../0.1.0/configuration/ams-hbase-policy.xml|   9 +-
 .../configuration/ams-hbase-security-site.xml   |  51 +-
 .../0.1.0/configuration/ams-hbase-site.xml  | 150 ++
 .../0.1.0/configuration/ams-log4j.xml   |   3 +-
 .../0.1.0/configuration/ams-site.xml| 180 +++
 .../0.1.0/configuration/ams-ssl-client.xml  |   9 +-
 .../0.1.0/configuration/ams-ssl-server.xml  |  24 +-
 .../0.1.0/configuration/storm-site.xml  |   3 +-
 .../configuration/application-properties.xml| 105 ++---
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml |  27 +-
 .../0.1.0.2.3/configuration/atlas-log4j.xml |   9 +-
 .../0.5.0.2.1/configuration/falcon-env.xml  |  33 +-
 .../configuration/falcon-runtime.properties.xml |  15 +-
 .../configuration/falcon-startup.properties.xml |  93 ++--
 .../0.5.0.2.1/configuration/oozie-site.xml  |  24 +-
 .../1.4.0.2.0/configuration/flume-conf.xml  |   3 +-
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |  15 +-
 .../GANGLIA/3.5.0/configuration/ganglia-env.xml |  30 +-
 .../HAWQ/2.0.0/configuration/hawq-check-env.xml |   3 +-
 .../HAWQ/2.0.0/configuration/hawq-env.xml   |   6 +-
 .../2.0.0/configuration/hawq-limits-env.xml |  12 +-
 .../HAWQ/2.0.0/configuration/hawq-site.xml  |  57 +--
 .../2.0.0/configuration/hawq-sysctl-env.xml |  72 +--
 .../HAWQ/2.0.0/configuration/hdfs-client.xml|  96 ++--
 .../HAWQ/2.0.0/configuration/yarn-client.xml|  30 +-
 .../0.96.0.2.0/configuration/hbase-env.xml  |  39 +-
 .../0.96.0.2.0/configuration/hbase-log4j.xml|   3 +-
 .../0.96.0.2.0/configuration/hbase-policy.xml   |   9 +-
 .../0.96.0.2.0/configuration/hbase-site.xml | 114 ++---
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |  45 +-
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  54 +--
 .../2.1.0.2.0/configuration/hadoop-policy.xml   |  33 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml |   3 +-
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  | 144 ++
 .../HDFS/2.1.0.2.0/configuration/ssl-client.xml |  21 +-
 .../HDFS/2.1.0.2.0/configuration/ssl-server.xml |  24 +-
 .../HIVE/0.12.0.2.0/configuration/hcat-env.xml  |   3 +-
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  48 +-
 .../configuration/hive-exec-log4j.xml   |   3 +-
 .../0.12.0.2.0/configuration/hive-log4j.xml |   3 +-
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml | 147 ++
 .../0.12.0.2.0/configuration/webhcat-env.xml|   3 +-
 .../0.12.0.2.0/configuration/webhcat-log4j.xml  |   3 +-
 .../0.12.0.2.0/configuration/webhcat-site.xml   |  57 +--
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  | 159 +++
 .../KAFKA/0.8.1/configuration/kafka-env.xml |  27 +-
 .../KAFKA/0.8.1/configuration/kafka-log4j.xml   |   3 +-
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |  69 +--
 .../KAFKA/0.9.0/configuration/kafka-env.xml |   3 +-
 .../0.9.0/configuration/ranger-kafka-audit.xml  |  48 +-
 .../ranger-kafka-plugin-properties.xml  |  21 +-
 .../ranger-kafka-policymgr-ssl.xml  |  18 +-
 .../configuration/ranger-kafka-security.xml |  18 +-
 .../1.10.3-10/configuration/kerberos-env.xml|  75 +--
 .../1.10.3-10/configuration/krb5-conf.xml   |  12 +-
 .../0.5.0.2.2/configuration/admin-topology.xml  |   3 +-
 .../0.5.0.2.2/configuration/gateway-log4j.xml   |   3 +-
 .../0.5.0.2.2/configuration/gateway-site.xml|  21 +-
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml   |  18 +-
 .../KNOX/0.5.0.2.2/configuration/ldap-log4j.xml |   3 +-
 .../ranger-knox-plugin-properties.xml   |  93 ++--
 .../KNOX/0.5.0.2.2/configuration/topology.xml   |   3 +-
 .../KNOX/0.5.0.2.2/configuration/users-ldif.xml |   3 +-
 .../0.5.0/configuration/logfeeder-env.xml   |  42 +-
 .../0.5.0/configuration/logfeeder-log4j.xml |   3 +-
 .../configuration/lo

[63/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/bad-stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datan

[06/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
index 870eee9..0b38cec 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HBASE/configuration/hbase-site.xml
@@ -32,8 +32,7 @@
 into /tmp.  Change this configuration else all data will be lost
 on machine restart.
 
-
-
+
   
   
 hbase.cluster.distributed
@@ -43,8 +42,7 @@
   false, startup will run all HBase and ZooKeeper daemons together
   in the one JVM.
 
-
-
+
   
   
 hbase.tmp.dir
@@ -54,30 +52,26 @@
 than '/tmp' (The '/tmp' directory is often cleared on
 machine restart).
 
-
-
+
   
   
 hbase.master.info.bindAddress
 
 The bind address for the HBase Master web UI
 
-
-
+
   
   
 hbase.master.info.port
 
 The port for the HBase Master web UI.
-
-
+
   
   
 hbase.regionserver.info.port
 
 The port for the HBase RegionServer web UI.
-
-
+
   
   
 hbase.regionserver.global.memstore.upperLimit
@@ -85,8 +79,7 @@
 Maximum size of all memstores in a region server before new
   updates are blocked and flushes are forced. Defaults to 40% of heap
 
-
-
+
   
   
 hbase.regionserver.handler.count
@@ -95,8 +88,7 @@
 Same property is used by the Master for count of master handlers.
 Default is 10.
 
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -105,8 +97,7 @@
 HStoreFiles in a region.  Default: 1 day.
 Set to 0 to disable automated major compactions.
 
-
-
+
   
   
 hbase.regionserver.global.memstore.lowerLimit
@@ -117,8 +108,7 @@
   the minimum possible flushing to occur when updates are blocked due to
   memstore limiting.
 
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -130,8 +120,7 @@
 resultant flush files take a long time to compact or split, or
 worse, we OOME
 
-
-
+
   
   
 hbase.hregion.memstore.flush.size
@@ -141,8 +130,7 @@
 exceeds this number of bytes.  Value is checked by a thread that runs
 every hbase.server.thread.wakefrequency.
 
-
-
+
   
   
 hbase.hregion.memstore.mslab.enabled
@@ -153,8 +141,7 @@
   heavy write loads. This can reduce the frequency of stop-the-world
   GC pauses on large heaps.
 
-
-
+
   
   
 hbase.hregion.max.filesize
@@ -164,8 +151,7 @@
 grown to exceed this value, the hosting HRegion is split in two.
 Default: 1G.
 
-
-
+
   
   
 hbase.client.scanner.caching
@@ -177,8 +163,7 @@
 Do not set this value such that the time between invocations is greater
 than the scanner timeout; i.e. hbase.regionserver.lease.period
 
-
-
+
   
   
 zookeeper.session.timeout
@@ -190,8 +175,7 @@
   "The client sends a requested timeout, the server responds with the
   timeout that it can give the client. " In milliseconds.
 
-
-
+
   
   
 hbase.client.keyvalue.maxsize
@@ -203,8 +187,7 @@
 to set this to a fraction of the maximum region size. Setting it to zero
 or less disables the check.
 
-
-
+
   
   
 hbase.hstore.compactionThreshold
@@ -215,8 +198,7 @@
 is run to rewrite all HStoreFiles files as one.  Larger numbers
 put off compaction but when it runs, it takes longer to complete.
 
-
-
+
   
   
 hbase.hstore.blockingStoreFiles
@@ -227,8 +209,7 @@
 blocked for this HRegion until a compaction is completed, or
 until hbase.hstore.blockingWaitTime has been exceeded.
 
-
-
+
   
   
 hfile.block.cache.size
@@ -238,8 +219,7 @@
 used by HFile/StoreFile. Default of 0.25 means allocate 25%.
 Set to 0 to disable but it's not recommended.
 
-
-
+
   
   
   
@@ -295,8 +271,7 @@
 full privileges, regardless of stored ACLs, across the cluster.
 Only used when HBase security is enabled.
 
-
-
+
   
   
 hbase.coprocessor.region.classes
@@ -307,8 +282,7 @@
 it in HBase's classpath and add the fully qualified class name here.
 A coprocessor can also be loaded on demand by setting HTableDescriptor.
 
-
-
+
   
   
 hbase.coprocessor.master.classes
@@ -320,8 +294,7 @@
   implementing your own MasterObserver, just put it in HBase's classpath
   and add the fully qualified class name here.
 
-
-
+
   
   

[62/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are 

[75/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
index 1da1589..86becf9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/configuration/oozie-site.xml
@@ -41,8 +41,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-create-inst
@@ -69,8 +68,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-create
@@ -97,8 +95,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-job-submit-data
@@ -126,8 +123,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-action-start
@@ -157,8 +153,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-sla-submit
@@ -172,8 +167,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.ELService.ext.functions.coord-sla-create
@@ -187,8 +181,7 @@
 
   custom
 
-
-
+
   
   
 oozie.service.HadoopAccessorService.supported.filesystems
@@ -199,7 +192,6 @@
 
   custom
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
index a0eb6bc..2962549 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
@@ -29,8 +29,7 @@
   directory
   false
 
-
-
+
   
   
 hbase_pid_dir
@@ -42,8 +41,7 @@
   false
   true
 
-
-
+
   
   
 hbase_regionserver_xmn_max
@@ -57,16 +55,14 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
 
   MB
 
-
-
+
   
   
 hbase_regionserver_xmn_ratio
 0.2
 RegionServers -Xmn in -Xmx ratio
 Percentage of max heap size (-Xmx) which used for young 
generation heap (-Xmn).
-
-
+
   
   
 hbase_user
@@ -78,8 +74,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
   user
   false
 
-
-
+
   
   
 hbase_max_direct_memory_size
@@ -89,8 +84,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
 
   true
 
-
-
+
   
   
 phoenix_sql_enabled
@@ -111,8 +105,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds 
this value.
   
   1
 
-
-
+
   
   
   
@@ -194,7 +187,6 @@ export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% 
if hbase_max_direct_
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
index 0510a84..1c3eb6e 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-site.xml
@@ -26,8 +26,7 @@
 A staging directory in default file system (HDFS)
 for bulk loading.
 
-
-
+
   
   
 hbase.hstore.flush.retries.number
@@ -36,8 +35,7 @@
 
 The number of times the region flush operation will be retried.
 
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -56,8 +54,7 @@
   259200
   milliseconds
 
-
-
+
   
   
 hbase.hregion.majorcompaction.jitter
@@ -66,8 +63,7 @@
   a given amount of time either side of hbase.hregion.majorcompaction. The 
smaller the number,
   the closer the compactions will happen to the 
hbase.hregion.majorcompaction
   interval.
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -95,8 +91,7 @@
 
   
 
-
-
+
   
   
 hbase.bucketc

[48/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Patch 1 - change validation rules and available fields (dlysnichenko)

2016-06-09 Thread dmitriusan
AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Patch 1 - 
change validation rules and available fields (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9699f57d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9699f57d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9699f57d

Branch: refs/heads/trunk
Commit: 9699f57d55c3305de2ae01290836caf60c41683e
Parents: b57a7cf
Author: Lisnichenko Dmitro 
Authored: Thu Jun 9 16:15:49 2016 +0300
Committer: Lisnichenko Dmitro 
Committed: Thu Jun 9 17:16:19 2016 +0300

--
 .../ambari/server/state/PropertyInfo.java   | 23 +++-
 .../server/state/PropertyUpgradeBehavior.java   | 16 +++---
 .../configurations-set-default-update-policy.sh | 11 +++---
 .../ambari/server/state/PropertyInfoTest.java   | 19 +---
 4 files changed, 19 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/9699f57d/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index fba2daa..c570ab3 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -28,7 +28,6 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlAnyElement;
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementRef;
 import javax.xml.bind.annotation.XmlElementWrapper;
 import javax.xml.bind.annotation.XmlList;
 
@@ -51,9 +50,6 @@ public class PropertyInfo {
   private String filename;
   private boolean deleted;
 
-  @XmlElement(name="on-stack-upgrade", required = true)
-  private PropertyUpgradeBehavior propertyStackUpgradeBehavior;
-
   @XmlElement(name="on-ambari-upgrade", required = true)
   private PropertyUpgradeBehavior propertyAmbariUpgradeBehavior;
 
@@ -89,15 +85,10 @@ public class PropertyInfo {
   }
 
   public PropertyInfo() {
-propertyStackUpgradeBehavior = new PropertyUpgradeBehavior();
-propertyStackUpgradeBehavior.setAdd(true);
-propertyStackUpgradeBehavior.setChange(true);
-propertyStackUpgradeBehavior.setDelete(false);
-
 propertyAmbariUpgradeBehavior = new PropertyUpgradeBehavior();
-propertyAmbariUpgradeBehavior.setAdd(false);
-propertyAmbariUpgradeBehavior.setChange(true);
-propertyAmbariUpgradeBehavior.setDelete(true);
+propertyAmbariUpgradeBehavior.setAdd(true);
+propertyAmbariUpgradeBehavior.setUpdate(false);
+propertyAmbariUpgradeBehavior.setDelete(false);
   }
 
   public String getName() {
@@ -148,14 +139,6 @@ public class PropertyInfo {
 this.propertyTypes = propertyTypes;
   }
 
-  public PropertyUpgradeBehavior getPropertyStackUpgradeBehavior() {
-return propertyStackUpgradeBehavior;
-  }
-
-  public void setPropertyStackUpgradeBehavior(PropertyUpgradeBehavior 
propertyStackUpgradeBehavior) {
-this.propertyStackUpgradeBehavior = propertyStackUpgradeBehavior;
-  }
-
   public PropertyUpgradeBehavior getPropertyAmbariUpgradeBehavior() {
 return propertyAmbariUpgradeBehavior;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9699f57d/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
index de2e342..f6791ee 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyUpgradeBehavior.java
@@ -33,15 +33,15 @@ public class PropertyUpgradeBehavior {
   @XmlAttribute(name="delete", required = true)
   private boolean delete;
 
-  @XmlAttribute(name="change", required = true)
-  private boolean change;
+  @XmlAttribute(name="update", required = true)
+  private boolean update;
 
   public PropertyUpgradeBehavior() {}
 
-  public PropertyUpgradeBehavior(boolean add, boolean delete, boolean change) {
+  public PropertyUpgradeBehavior(boolean add, boolean delete, boolean update) {
 this.add = add;
 this.delete = delete;
-this.change = change;
+this.update = update;
   }
 
   public void setAdd( boolean add )
@@ -54,9 +54,9 @@ public class PropertyUpgradeBehavior {
 this.delete = delete;
   }
 
-  p

[90/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
 
b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
index e65c690..a82283e 100644
--- 
a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
@@ -24,43 +24,37 @@ limitations under the License.
 templeton.port
 50111
 The HTTP port for the main server.
-
-
+
   
   
 templeton.hadoop.conf.dir
 /etc/hadoop/conf
 The path to the Hadoop configuration.
-
-
+
   
   
 templeton.jar
 /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
 The path to the Templeton jar file.
-
-
+
   
   
 templeton.libjars
 /usr/lib/zookeeper/zookeeper.jar
 Jars to add the the classpath.
-
-
+
   
   
 templeton.hadoop
 /usr/bin/hadoop
 The path to the Hadoop executable.
-
-
+
   
   
 templeton.python
 ${env.PYTHON_CMD}
 The path to the Python executable.
-
-
+
   
   
 templeton.pig.archive
@@ -69,22 +63,19 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.pig.path
 pig.tar.gz/pig/bin/pig
 The path to the Pig executable.
-
-
+
   
   
 templeton.hcat
 /usr/bin/hcat
 The path to the hcatalog executable.
-
-
+
   
   
 templeton.hive.archive
@@ -93,36 +84,31 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.hive.home
 hive.tar.gz/hive
 The path to the Hive home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hcat.home
 hive.tar.gz/hive/hcatalog
 The path to the HCat home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hive.path
 hive.tar.gz/hive/bin/hive
 The path to the Hive executable.
-
-
+
   
   
 templeton.hive.properties
 
hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9083,hive.metastore.sasl.enabled=false
 Properties to set when running hive.
-
-
+
   
   
 templeton.zookeeper.hosts
@@ -132,35 +118,30 @@ limitations under the License.
 
   multiLine
 
-
-
+
   
   
 templeton.storage.class
 org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage
 The class to use as storage
-
-
+
   
   
 templeton.override.enabled
 false
 Enable the override path in 
templeton.override.jars
-
-
+
   
   
 templeton.streaming.jar
 hdfs:///apps/webhcat/hadoop-streaming.jar
 The hdfs path to the Hadoop streaming jar file.
-
-
+
   
   
 templeton.exec.timeout
 6
 Time out for templeton api
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index d717619..9e38f1a 100644
--- 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -30,8 +30,7 @@
 
   directories
 
-
-
+
   
   
 port
@@ -42,8 +41,7 @@
 
   int
 
-
-
+
   
   
 zookeeper.connect
@@ -56,8 +54,7 @@
   path /chroot/path. Note that you must create this path yourself prior to 
starting the broker and consumers must use the
   same connection string.
 
-
-
+
   
   
 message.max.bytes
@@ -67,8 +64,7 @@
   It is important that this property be in sync with the maximum fetch 
size your consumers use or
   else an unruly producer will be able to publish messages too large for 
consumers to consume.
 
-
-
+
   
   
 num.network.threads
@@ -77,8 +73,7 @@
   The number of network threads that the server uses for handling network 
requests.
   You probably don't need to change this.
 
-
-
+
   
   
 num.io.threads
@@ -86,15 +81,13 @@
 
   The number of I/O threads that the server uses for executing requests. 
You should have at least as many threads as you have disks.
 
-
-
+
   
   

[19/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
index 7285d80..a4e974f 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
@@ -23,8 +23,7 @@
   
 oozie_user
 true
-
-
+
   
   
 oozie_database
@@ -34,8 +33,7 @@
 
   false
 
-
-
+
   
   
 oozie_data_dir
@@ -46,8 +44,7 @@
   true
   false
 
-
-
+
   
   
 oozie_log_dir
@@ -58,8 +55,7 @@
   true
   false
 
-
-
+
   
   
 oozie_pid_dir
@@ -70,8 +66,7 @@
   true
   false
 
-
-
+
   
   
   
@@ -147,7 +142,6 @@ set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
index cdf901a..4c954b4 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
@@ -31,8 +31,7 @@
 
   false
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.url
@@ -44,8 +43,7 @@
 
   false
 
-
-
+
   
   
 oozie.service.HadoopAccessorService.hadoop.configurations
@@ -58,8 +56,7 @@
   the Oozie configuration directory; though the path can be absolute (i.e. 
to point
   to Hadoop client conf/ directories in the local filesystem.
 
-
-
+
   
   
   
@@ -100,8 +97,7 @@
   org.apache.oozie.service.JobsConcurrencyService
 
 List of Oozie services
-
-
+
   
   
 oozie.services.ext
@@ -111,7 +107,6 @@
   To add/replace services defined in 'oozie.services' with custom 
implementations.
   Class names must be separated by commas.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
index 2313f17..987821a 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
@@ -23,8 +23,7 @@
   
 sqoop_user
 true
-
-
+
   
   
   
@@ -71,7 +70,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
index 439fe35..7caf599 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
@@ -23,20 +23,17 @@
   
 storm_user
 true
-
-
+
   
   
 storm_log_dir
 c:\hadoop\logs\storm
-
-
+
   
   
 storm_pid_dir
 c:\hadoop\run\storm
-
-
+
   
   
   
@@ -45,7 +42,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
index b68a6d0..b3c86f2 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/

[85/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-site.xml
index cc2c760..47cda25 100644
--- 
a/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/STORM/0.9.1/configuration/storm-site.xml
@@ -27,8 +27,7 @@
for the java.library.path value. java.library.path tells the JVM where
to look for native libraries. It is necessary to set this config 
correctly since
Storm uses the ZeroMQ and JZMQ native libs. 
-
-
+
   
   
 storm.local.dir
@@ -39,8 +38,7 @@
 
   directory
 
-
-
+
   
   
 storm.zookeeper.servers
@@ -51,8 +49,7 @@
   multiLine
   false
 
-
-
+
   
   
 storm.zookeeper.port
@@ -61,8 +58,7 @@
 
   int
 
-
-
+
   
   
 storm.zookeeper.root
@@ -71,8 +67,7 @@
 
   directory
 
-
-
+
   
   
 storm.zookeeper.session.timeout
@@ -81,8 +76,7 @@
 
   int
 
-
-
+
   
   
 storm.zookeeper.connection.timeout
@@ -91,8 +85,7 @@
 
   int
 
-
-
+
   
   
 storm.zookeeper.retry.times
@@ -101,8 +94,7 @@
 
   int
 
-
-
+
   
   
 storm.zookeeper.retry.interval
@@ -112,8 +104,7 @@
   ms
   int
 
-
-
+
   
   
 storm.zookeeper.retry.intervalceiling.millis
@@ -123,15 +114,13 @@
   int
   ms
 
-
-
+
   
   
 storm.cluster.mode
 distributed
 The mode this Storm cluster is running in. Either 
"distributed" or "local".
-
-
+
   
   
 storm.local.mode.zmq
@@ -144,22 +133,19 @@
 
   boolean
 
-
-
+
   
   
 storm.thrift.transport
 backtype.storm.security.auth.SimpleTransportPlugin
 The transport plug-in for Thrift client/server 
communication.
-
-
+
   
   
 storm.messaging.transport
 backtype.storm.messaging.netty.Context
 The transporter for communication among Storm 
tasks.
-
-
+
   
   
 nimbus.host
@@ -170,8 +156,7 @@
   true
   false
 
-
-
+
   
   
 nimbus.thrift.port
@@ -181,8 +166,7 @@
 
   int
 
-
-
+
   
   
 nimbus.thrift.max_buffer_size
@@ -192,8 +176,7 @@
   int
   bytes
 
-
-
+
   
   
 nimbus.childopts
@@ -203,8 +186,7 @@
   multiLine
   false
 
-
-
+
   
   
 nimbus.task.timeout.secs
@@ -214,8 +196,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.supervisor.timeout.secs
@@ -224,8 +205,7 @@
 
   int
 
-
-
+
   
   
 nimbus.monitor.freq.secs
@@ -239,8 +219,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.cleanup.inbox.freq.secs
@@ -250,8 +229,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.inbox.jar.expiration.secs
@@ -267,8 +245,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.task.launch.secs
@@ -279,8 +256,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.reassign
@@ -290,8 +266,7 @@
 
   boolean
 
-
-
+
   
   
 nimbus.file.copy.expiration.secs
@@ -302,8 +277,7 @@
   int
   seconds
 
-
-
+
   
   
 nimbus.topology.validator
@@ -311,8 +285,7 @@
 A custom class that implements ITopologyValidator that is run 
whenever a
topology is submitted. Can be used to provide business-specific logic 
for
whether topologies are allowed to run or not.
-
-
+
   
   
 ui.port
@@ -321,36 +294,31 @@
 
   int
 
-
-
+
   
   
 ui.childopts
 -Xmx768m
 Childopts for Storm UI Java process.
-
-
+
   
   
 logviewer.port
 8000
 HTTP UI port for log viewer.
-
-
+
   
   
 logviewer.childopts
 -Xmx128m
 Childopts for log viewer java process.
-
-
+
   
   
 logviewer.appender.name
 A1
 Appender name used by log viewer to determine log 
directory.
-
-
+
   
   
 drpc.port
@@ -359,8 +327,7 @@
 
   int
 
-
-
+
   
   
 drpc.worker.threads
@@ -369,8 +336,7 @@
 
   int
 
-
-
+
   
   
 drpc.queue.size
@@ -379,8 +345,7 @@
 
   int
 
-
-
+
   
   
 drpc.invocations.port
@@ -389,8 +354,7 @@
 
   int
 
-
-
+
   
   
 drpc.request.timeout.secs
@@ -402,38 +366,33 @@
   int
   seconds
 
-
-
+
   
   
 drpc.

[80/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
index 9c73abf..805a2f2 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hdfs-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 true
 Whether to enable WebHDFS feature
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
  Number of failed disks a DataNode would tolerate before it 
stops offering service
 true
-
-
+
   
   
 dfs.datanode.data.dir
@@ -65,8 +61,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -75,8 +70,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
   
@@ -273,8 +247,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.permissions.enabled
@@ -286,22 +259,19 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.superusergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 dfs.block.access.token.enable
@@ -310,8 +280,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 
-
-
+
   
   
 dfs.namenode.kerberos.principal
@@ -319,8 +288,7 @@ If "false", no access tokens are checked on accessing 
datanodes.
 
 Kerberos principal name for the NameNode
 
-
-
+
   
   
 dfs.secondary.namenode.kerberos.principal
@@ -328,8 +296,7 @@ Kerberos principal name for the NameNode
 
 Kerberos principal name for the secondary NameNode.
 
-
-
+
   
   
 dfs.namenode.secondary.http-address
 localhost:50090
 Address of secondary namenode web server
-
-
+
   
   
 dfs.web.authentication.kerberos.principal
@@ -364,8 +328,7 @@ Kerberos principal name for the NameNode
   The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
   HTTP SPENGO specification.
 
-
-
+
   
   
 dfs.web.authentication.kerberos.keytab
@@ -374,8 +337,7 @@ Kerberos principal name for the NameNode
   The Kerberos keytab file with the credentials for the
   HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
 
-
-
+
   
   
 dfs.datanode.kerberos.principal
@@ -383,8 +345,7 @@ Kerberos principal name for the NameNode
 
 The Kerberos principal that the DataNode runs as. "_HOST" is replaced 
by the real host name.
 
-
-
+
   
   
 dfs.namenode.keytab.file
@@ -392,8 +353,7 @@ Kerberos principal name for the NameNode
 
 Combined keytab file containing the namenode service and host 
principals.
 
-
-
+
   
   
 dfs.secondary.namenode.keytab.file
@@ -401,8 +361,7 @@ Kerberos principal name for the NameNode
 
 Combined keytab file containing the namenode service and host 
principals.
 
-
-
+
   
   
 dfs.datanode.keytab.file
@@ -410,15 +369,13 @@ Kerberos principal name for the NameNode
 
 The filename of the keytab file for the DataNode.
 
-
-
+
   
   
 dfs.namenode.https-address
 localhost:50470
 The https address where namenode binds
-
-
+
   
   
 dfs.datanode.data.dir.perm
@@ -427,8 +384,7 @@ Kerberos principal name for the NameNode
 directories. The datanode will not come up if the permissions are
 different on existing dfs.datanode.data.dir directories. If the directories
 don't exist, they will be created with this permission.
-
-
+
   
   
 dfs.namenode.accesstime.precision
@@ -437,15 +393,13 @@ don't exist, they will be created with this

[86/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-hive-site-override.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-hive-site-override.xml
 
b/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-hive-site-override.xml
index a1968e9..3154f24 100644
--- 
a/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-hive-site-override.xml
+++ 
b/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-hive-site-override.xml
@@ -23,15 +23,13 @@ limitations under the License.
 
   Disable impersonation in Hive Server 2.
 
-
-
+
   
   
 hive.metastore.client.socket.timeout
 1800
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.client.connect.retry.delay
@@ -39,8 +37,7 @@ limitations under the License.
 
   Expects a time value - number of seconds for the client to wait between 
consecutive connection attempts
 
-
-
+
   
   
 hive.server2.thrift.port
@@ -48,8 +45,7 @@ limitations under the License.
 
   TCP port number to listen on, default 10015.
 
-
-
+
   
   
 hive.server2.transport.mode
@@ -58,7 +54,6 @@ limitations under the License.
   Expects one of [binary, http].
   Transport mode of HiveServer2.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-thrift-sparkconf.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-thrift-sparkconf.xml
 
b/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-thrift-sparkconf.xml
index bb636c6..0d07935 100644
--- 
a/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-thrift-sparkconf.xml
+++ 
b/ambari-server/src/main/resources/common-services/SPARK/1.5.2/configuration/spark-thrift-sparkconf.xml
@@ -27,8 +27,7 @@
   This is memory that accounts for things like VM overheads, interned 
strings,
   other native overheads, etc.
 
-
-
+
   
   
 spark.driver.extraLibraryPath
@@ -36,8 +35,7 @@
 
    Set a special library path to use when launching the driver JVM.
 
-
-
+
   
   
 spark.yarn.driver.memoryOverhead
@@ -47,8 +45,7 @@
   This is memory that accounts for things like VM overheads, interned 
strings,
   other native overheads, etc.
 
-
-
+
   
   
 spark.yarn.scheduler.heartbeat.interval-ms
@@ -56,8 +53,7 @@
 
   The interval in ms in which the Spark application master heartbeats into 
the YARN ResourceManager.
 
-
-
+
   
   
 spark.yarn.max.executor.failures
@@ -65,8 +61,7 @@
 
   The maximum number of executor failures before failing the application.
 
-
-
+
   
   
 spark.yarn.queue
@@ -74,8 +69,7 @@
 
   The name of the YARN queue to which the application is submitted.
 
-
-
+
   
   
 spark.yarn.containerLauncherMaxThreads
@@ -83,8 +77,7 @@
 
   The maximum number of threads to use in the application master for 
launching executor containers.
 
-
-
+
   
   
 spark.yarn.submit.file.replication
@@ -93,8 +86,7 @@
   HDFS replication level for the files uploaded into HDFS for the 
application.
   These include things like the Spark jar, the app jar, and any 
distributed cache files/archives.
 
-
-
+
   
   
 spark.yarn.preserve.staging.files
@@ -103,14 +95,12 @@
   Set to true to preserve the staged files (Spark jar, app jar, 
distributed cache files) at the
   end of the job rather then delete them.
 
-
-
+
   
   
 spark.yarn.max.executor.failures
 3
 The maximum number of executor failures before failing the 
application.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
 
b/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
index f57..6dd6c5a 100644
--- 
a/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
+++ 
b/ambari-server/src/main/resources/common-services/SPARK/1.6.0/configuration/spark-defaults.xml
@@ -22,15 +22,13 @@
   
 spark.yarn.services
 true
-
-
+
   
   
 spark

[03/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 6e69879..20a73d6 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -25,8 +25,7 @@
 oozie.base.url
 http://localhost:11000/oozie
 Base Oozie URL.
-
-
+
   
   
 oozie.system.id
@@ -34,8 +33,7 @@
 
 The Oozie system ID.
 
-
-
+
   
   
 oozie.systemmode
@@ -43,8 +41,7 @@
 
  System mode for  Oozie at startup.
  
-
-
+
   
   
 oozie.service.AuthorizationService.security.enabled
@@ -53,8 +50,7 @@
  Specifies whether security (user name/admin role) is enabled or not.
  If disabled any user can manage Oozie system and manage any job.
  
-
-
+
   
   
 oozie.service.PurgeService.older.than
@@ -62,8 +58,7 @@
 
  Jobs older than this value, in days, will be purged by the PurgeService.
  
-
-
+
   
   
 oozie.service.PurgeService.purge.interval
@@ -71,22 +66,19 @@
 
  Interval at which the purge service will run, in seconds.
  
-
-
+
   
   
 oozie.service.CallableQueueService.queue.size
 1000
 Max callable queue size
-
-
+
   
   
 oozie.service.CallableQueueService.threads
 10
 Number of threads used for executing callables
-
-
+
   
   
 oozie.service.CallableQueueService.callable.concurrency
@@ -98,16 +90,14 @@
  All commands that use action executors (action-start, action-end, 
action-kill and action-check) use
  the action type as the callable type.
  
-
-
+
   
   
 oozie.service.coord.normal.default.timeout
 120
 Default timeout for a coordinator action input check (in 
minutes) for normal job.
   -1 means infinite timeout
-
-
+
   
   
 oozie.db.schema.name
@@ -115,8 +105,7 @@
 
   Oozie DataBase Name
  
-
-
+
   
   
 oozie.service.HadoopAccessorService.jobTracker.whitelist
@@ -124,24 +113,21 @@
 
   Whitelisted job tracker for Oozie service.
   
-
-
+
   
   
 oozie.authentication.type
 simple
 
   
-
-
+
   
   
 oozie.service.HadoopAccessorService.nameNode.whitelist
  
 
   
-
-
+
   
   
 oozie.service.WorkflowAppService.system.libpath
@@ -151,8 +137,7 @@
   This path is added to workflow application if their job properties sets
   the property 'oozie.use.system.libpath' to true.
   
-
-
+
   
   
 use.system.libpath.for.mapreduce.and.pig.jobs
@@ -163,8 +148,7 @@
   specify where the Pig JAR files are. Instead, the ones from the system
   library path are used.
   
-
-
+
   
   
 oozie.authentication.kerberos.name.rules
@@ -176,8 +160,7 @@
 
 
 The mapping from kerberos principal names to local OS user 
names.
-
-
+
   
   
 oozie.service.HadoopAccessorService.hadoop.configurations
@@ -190,8 +173,7 @@
   the Oozie configuration directory; though the path can be absolute 
(i.e. to point
   to Hadoop client conf/ directories in the local filesystem.
   
-
-
+
   
   
 oozie.service.ActionService.executor.ext.classes
@@ -202,14 +184,12 @@
 org.apache.oozie.action.hadoop.SqoopActionExecutor,
 org.apache.oozie.action.hadoop.DistcpActionExecutor
 
-
-
+
   
   
 oozie.service.SchemaService.wf.ext.schemas
 
shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd
-
-
+
   
   
 oozie.service.JPAService.create.db.schema
@@ -220,8 +200,7 @@
 If set to true, it creates the DB schema if it does not exist. If 
the DB schema exists is a NOP.
 If set to false, it does not create the DB schema. If the DB 
schema does not exist it fails start up.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.driver
@@ -229,8 +208,7 @@
 
 JDBC driver class.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.url
@@ -238,8 +216,7 @@
 
 JDBC URL.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.username
@@ -247,8 +224,7 @@
 
 DB user name.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.pa

[25/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml
index df9af61..4f7a4e1 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml
@@ -24,22 +24,19 @@ limitations under the License.
 templeton.jar
 
/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar
 The path to the Templeton jar file.
-
-
+
   
   
 templeton.libjars
 /usr/hdp/${hdp.version}/zookeeper/zookeeper.jar
 Jars to add the the classpath.
-
-
+
   
   
 templeton.hadoop
 /usr/hdp/${hdp.version}/hadoop/bin/hadoop
 The path to the Hadoop executable.
-
-
+
   
   
 templeton.pig.archive
@@ -48,22 +45,19 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.pig.path
 pig.tar.gz/pig/bin/pig
 The path to the Pig executable.
-
-
+
   
   
 templeton.hcat
 /usr/hdp/${hdp.version}/hive/bin/hcat
 The path to the hcatalog executable.
-
-
+
   
   
 templeton.hive.archive
@@ -72,43 +66,37 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.hive.home
 hive.tar.gz/hive
 The path to the Hive home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hcat.home
 hive.tar.gz/hive/hcatalog
 The path to the HCat home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hive.path
 hive.tar.gz/hive/bin/hive
 The path to the Hive executable.
-
-
+
   
   
 templeton.sqoop.archive
 hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz
 The path to the Sqoop archive in HDFS.
-
-
+
   
   
 templeton.sqoop.path
 sqoop.tar.gz/sqoop/bin/sqoop
 The path to the Sqoop executable.
-
-
+
   
   
 templeton.sqoop.home
@@ -116,15 +104,13 @@ limitations under the License.
 The path to the Sqoop home within the tar. Has no effect if
   templeton.sqoop.archive is not set.
 
-
-
+
   
   
 templeton.streaming.jar
 
hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar
 The hdfs path to the Hadoop streaming jar file.
-
-
+
   
   
 templeton.hive.extra.files
@@ -137,7 +123,6 @@ limitations under the License.
   This can be used to specify config files, Tez artifacts, etc.  This will 
be sent -files option of hadoop jar command thus
   each path is interpreted by Generic Option Parser.  It can be local or 
hdfs path.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
index c481cde..9c2cd54 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
@@ -24,7 +24,6 @@
 /usr/hdp/current/knox-server
 Knox Home
 Knox home folder
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-env.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-env.xml
index ccc2c00..1392d52 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-env.xml
@@ -108,7 +108,6 @@ export HADOOP_OPTS="-Dhdp.version=${HDP_VERSION} 
${HADOOP_OPTS}"
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml
--
d

[50/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
index 746578e..d927606 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
@@ -24,245 +24,210 @@
 namenode_host
 
 NameNode Host.
-
-
+
   
   
 dfs_namenode_name_dir
 /hadoop/hdfs/namenode
 NameNode Directories.
-
-
+
   
   
 snamenode_host
 
 Secondary NameNode.
-
-
+
   
   
 dfs_namenode_checkpoint_dir
 /hadoop/hdfs/namesecondary
 Secondary NameNode checkpoint dir.
-
-
+
   
   
 datanode_hosts
 
 List of Datanode Hosts.
-
-
+
   
   
 dfs_datanode_data_dir
 /hadoop/hdfs/data
 Data directories for Data Nodes.
-
-
+
   
   
 hdfs_log_dir_prefix
 /var/log/hadoop
 Hadoop Log Dir Prefix
-
-
+
   
   
 hadoop_pid_dir_prefix
 /var/run/hadoop
 Hadoop PID Dir Prefix
-
-
+
   
   
 dfs_webhdfs_enabled
 true
 WebHDFS enabled
-
-
+
   
   
 hadoop_heapsize
 1024
 Hadoop maximum Java heap size
-
-
+
   
   
 namenode_heapsize
 1024
 NameNode Java heap size
-
-
+
   
   
 namenode_opt_newsize
 200
 Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).
-
-
+
   
   
 namenode_opt_maxnewsize
 200
 NameNode maximum new generation size
-
-
+
   
   
 namenode_opt_permsize
 128
 NameNode permanent generation size
-
-
+
   
   
 namenode_opt_maxpermsize
 256
 NameNode maximum permanent generation size
-
-
+
   
   
 datanode_du_reserved
 1073741824
 Reserved space for HDFS
-
-
+
   
   
 dtnode_heapsize
 1024
 DataNode maximum Java heap size
-
-
+
   
   
 dfs_datanode_failed_volume_tolerated
 0
 DataNode volumes failure toleration
-
-
+
   
   
 dfs_namenode_checkpoint_period
 21600
 HDFS Maximum Checkpoint Delay
-
-
+
   
   
 fs_checkpoint_size
 0.5
 FS Checkpoint Size.
-
-
+
   
   
 proxyuser_group
 users
 Proxy user group.
-
-
+
   
   
 dfs_exclude
 
 HDFS Exclude hosts.
-
-
+
   
   
 dfs_replication
 3
 Default Block Replication.
-
-
+
   
   
 dfs_block_local_path_access_user
 hbase
 Default Block Replication.
-
-
+
   
   
 dfs_datanode_address
 50010
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_http_address
 50075
 Port for datanode address.
-
-
+
   
   
 dfs_datanode_data_dir_perm
 750
 Datanode dir perms.
-
-
+
   
   
 security_enabled
 false
 Hadoop Security
-
-
+
   
   
 kerberos_domain
 EXAMPLE.COM
 Kerberos realm.
-
-
+
   
   
 kadmin_pw
 
 Kerberos realm admin password
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 Kerberos keytab path.
-
-
+
   
   
 keytab_path
 /etc/security/keytabs
 KeyTab Directory.
-
-
+
   
   
 namenode_formatted_mark_dir
 /var/run/hadoop/hdfs/namenode/formatted/
 Formatteed Mark Directory.
-
-
+
   
   
 hdfs_user
 hdfs
 User and Groups.
-
-
+
   
   
 lzo_enabled
 true
 LZO compression enabled
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
index 93cc9ab..a31a481 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
@@ -26,8 +26,7 @@

[71/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
index e297041..43e2473 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -22,8 +22,7 @@
 yarn.application.classpath
 
$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*
 Classpath for typical applications.
-
-
+
   
   
 hadoop.registry.rm.enabled
@@ -31,8 +30,7 @@
 
   Is the registry enabled: does the RM start it up, create the user and 
system paths, and purge service records when containers, application attempts 
and applications complete
 
-
-
+
   
   
 hadoop.registry.zk.quorum
@@ -40,15 +38,13 @@
 
   List of hostname:port pairs defining the zookeeper quorum binding for 
the registry
 
-
-
+
   
   
 yarn.nodemanager.recovery.enabled
 true
 Enable the node manager to recover after 
starting
-
-
+
   
   
 yarn.nodemanager.recovery.dir
@@ -57,22 +53,19 @@
   The local filesystem directory in which the node manager will store
   state when recovery is enabled.
 
-
-
+
   
   
 yarn.client.nodemanager-connect.retry-interval-ms
 1
 Time interval between each attempt to connect to 
NM
-
-
+
   
   
 yarn.client.nodemanager-connect.max-wait-ms
 6
 Max time to wait to establish a connection to NM
-
-
+
   
   
 yarn.resourcemanager.recovery.enabled
@@ -81,8 +74,7 @@
   Enable RM to recover state after starting.
   If true, then yarn.resourcemanager.store.class must be specified.
 
-
-
+
   
   
 yarn.resourcemanager.work-preserving-recovery.enabled
@@ -90,8 +82,7 @@
 
   Enable RM work preserving recovery. This configuration is private to 
YARN for experimenting the feature.
 
-
-
+
   
   
 yarn.resourcemanager.store.class
@@ -102,8 +93,7 @@
   the store is implicitly fenced; meaning a single ResourceManager
   is able to use the store at any point in time.
 
-
-
+
   
   
 yarn.resourcemanager.zk-address
@@ -111,43 +101,37 @@
 
   List Host:Port of the ZooKeeper servers to be used by the RM. comma 
separated host:port pairs, each corresponding to a zk server. e.g. 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is 
used the example would look like: 
"127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be 
rooted at "/app/a" and all paths would be relative to this root - ie 
getting/setting/etc...  "/foo/bar" would result in operations being run on 
"/app/a/foo/bar" (from the server perspective).
 
-
-
+
   
   
 yarn.resourcemanager.zk-state-store.parent-path
 /rmstore
 Full path of the ZooKeeper znode where RM state will be 
stored. This must be supplied when using 
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the 
value for yarn.resourcemanager.store.class
-
-
+
   
   
 yarn.resourcemanager.zk-acl
 world:anyone:rwcda
 ACL's to be used for ZooKeeper znodes.
-
-
+
   
   
 
yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
 1
 Set the amount of time RM waits before allocating new 
containers on work-preserving-recovery. Such wait period gives RM a chance to 
settle down resyncing with NMs in the cluster on recovery, before assigning new 
containers to applications.
-
-
+
   
   
 yarn.resourcemanager.connect.retry-interval.ms
 3
 How often to try connecting to the 
ResourceManager.
-
-
+
   
   
 yarn.resourcemanager.connect.max-wait.ms
 90
 Maximum time to wait to establish connection to 
ResourceManager
-
-
+
   
   
 yarn.resourcemanager.zk-retry-interval-ms
@@ -157,78 +141,67 @@
   automatically from yarn.resourcemanager.zk-timeout-ms and
   yarn.resourcemanager.zk-num-retries."
 
-
-
+
   
   
 yarn.resourcemanager.zk-num-retries
 1000
 Number of times RM tries to connect to 
ZooKeeper.
-
-
+
   
   
 yarn.resourcemanager.zk-timeout-ms
 1
 ZooKeeper session timeout in milliseconds. Session expiration 
is manag

[72/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
index 8f28baf..2e15558 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
@@ -24,44 +24,38 @@
 audit_log_level
 OFF
 Log level for audit logging
-
-
+
   
   
 monitor_forwarding_log_level
 WARN
 Log level for logging forwarded to the Accumulo
   Monitor
-
-
+
   
   
 debug_log_size
 512M
 Size of each debug rolling log file
-
-
+
   
   
 debug_num_logs
 10
 Number of rolling debug log files to keep
-
-
+
   
   
 info_log_size
 512M
 Size of each info rolling log file
-
-
+
   
   
 info_num_logs
 10
 Number of rolling info log files to keep
-
-
+
   
   
 content
@@ -115,7 +109,6 @@ log4j.appender.A1.layout=org.apache.log4j.PatternLayout
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index f0b16c6..6b79265 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -23,29 +23,25 @@
 fs.AbstractFileSystem.glusterfs.impl
 org.apache.hadoop.fs.local.GlusterFs
 GlusterFS Abstract File System Implementation
-
-
+
   
   
 fs.glusterfs.impl
 GlusterFS fs impl
 org.apache.hadoop.fs.glusterfs.GlusterFileSystem
-
-
+
   
   
 fs.defaultFS
 glusterfs:///localhost:8020
-
-
+
   
   
   
 ha.failover-controller.active-standby-elector.zk.op.retries
 120
 ZooKeeper Failover Controller retries setting for your 
environment
-
-
+
   
   
   
@@ -55,24 +51,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.
-
-
+
   
   
 io.serializations
 org.apache.hadoop.io.serializer.WritableSerialization
  A list of comma-delimited serialization classes that can be 
used for obtaining serializers and deserializers.
 
-
-
+
   
   
 io.compression.codecs
 
org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
 A list of the compression codec classes that can be used
  for compression/decompression.
-
-
+
   
   
 fs.trash.interval
@@ -83,8 +76,7 @@
 If trash is disabled server side then the client side configuration is 
checked.
 If trash is enabled on the server side then the value configured on 
the server is used and the client configuration value is ignored.
 
-
-
+
   
   
   
@@ -93,8 +85,7 @@
 Defines the threshold number of connections after which
connections will be inspected for idleness.
   
-
-
+
   
   
 ipc.client.connection.maxidletime
@@ -102,15 +93,13 @@
 The maximum time after which a client will bring down the
connection to the server.
   
-
-
+
   
   
 ipc.client.connect.max.retries
 50
 Defines the maximum number of retries for IPC 
connections.
-
-
+
   
   
 ipc.server.tcpnodelay
@@ -121,8 +110,7 @@
   decrease latency
   with a cost of more/smaller packets.
 
-
-
+
   
   
   
@@ -133,8 +121,7 @@
 not be exposed to public. Enable this option if the interfaces
 are only reachable by those who have the right authorization.
   
-
-
+
   
   
 hadoop.security.authentication
@@ -143,8 +130,7 @@
Set the authentication for the cluster. Valid values are: simple or
kerberos.

-
-
+
   
   
 hadoop.security.authorization
@@ -152,8 +138,7 @@
 
  Enable authorization for differe

[92/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
 
b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
index fce4d9c..b710922 100644
--- 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
@@ -25,50 +25,43 @@
 *.workflow.engine.impl
 org.apache.falcon.workflow.engine.OozieWorkflowEngine
 
-
-
+
   
   
 *.oozie.process.workflow.builder
 org.apache.falcon.workflow.OozieProcessWorkflowBuilder
 
-
-
+
   
   
 *.oozie.feed.workflow.builder
 org.apache.falcon.workflow.OozieFeedWorkflowBuilder
 
-
-
+
   
   
 *.SchedulableEntityManager.impl
 org.apache.falcon.resource.SchedulableEntityManager
 
-
-
+
   
   
 *.ConfigSyncService.impl
 org.apache.falcon.resource.ConfigSyncService
 
-
-
+
   
   
 *.ProcessInstanceManager.impl
 org.apache.falcon.resource.InstanceManager
 
-
-
+
   
   
 *.catalog.service.impl
 org.apache.falcon.catalog.HiveCatalogService
 
-
-
+
   
   
 *.application.services
@@ -83,8 +76,7 @@
 
   multiLine
 
-
-
+
   
   
 *.configstore.listeners
@@ -97,66 +89,57 @@
 
   multiLine
 
-
-
+
   
   
 *.broker.impl.class
 org.apache.activemq.ActiveMQConnectionFactory
 
-
-
+
   
   
 *.shared.libs
 
activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms
 
-
-
+
   
   
   
 *.domain
 ${falcon.app.type}
 
-
-
+
   
   
 *.config.store.uri
 file:///hadoop/falcon/store
 Location to store user entity configurations
-
-
+
   
   
 *.system.lib.location
 ${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib
 Location of libraries that is shipped to Hadoop
-
-
+
   
   
 *.retry.recorder.path
 ${falcon.log.dir}/retry
 
-
-
+
   
   
 *.falcon.cleanup.service.frequency
 days(1)
 
-
-
+
   
   
 *.broker.url
 tcp://localhost:61616
 DONT_ADD_ON_UPGRADE
 Default Active MQ url
-
-
+
   
   
 *.broker.ttlInMins
@@ -165,23 +148,20 @@
 
   int
 
-
-
+
   
   
 *.hive.shared.libs
 
hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\
 hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
-
-
+
   
   
 *.entity.topic
 FALCON.ENTITY.TOPIC
 
-
-
+
   
   
 *.max.retry.failure.count
@@ -190,8 +170,7 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.internal.queue.size
@@ -200,8 +179,7 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.falcon.graph.preserve.history
@@ -210,16 +188,14 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   boolean
 
-
-
+
   
   
   
 *.falcon.http.authentication.cookie.domain
 EXAMPLE.COM
 
-
-
+
   
   
 *.falcon.http.authentication.blacklisted.users
@@ -228,23 +204,20 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   true
 
-
-
+
   
   
   
 *.falcon.authentication.type
 simple
 
-
-
+
   
   
 *.falcon.http.authentication.type
 simple
 
-
-
+
   
   
 *.falcon.http.authentication.token.validity
@@ -253,22 +226,19 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.falcon.http.authentication.signature.secret
 falcon
 
-
-
+
   
   
 *.falcon.http.authentication.simple.anonymous.allowed
 true
 Indicates if anonymous requests are allowed when using 
'simple' authentication
-
-
+
   
   
 *.falcon.http.authentication.kerberos.name.rules
@@ -277,7 +247,6 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   multiLine
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/oozie-site.xml

[91/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
index a2cb615..145d832 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
@@ -22,8 +22,7 @@
 ha.failover-controller.active-standby-elector.zk.op.retries
 120
 ZooKeeper Failover Controller retries setting for your 
environment
-
-
+
   
   
   
@@ -33,24 +32,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.
-
-
+
   
   
 io.serializations
 org.apache.hadoop.io.serializer.WritableSerialization
  A list of comma-delimited serialization classes that can be 
used for obtaining serializers and deserializers.
 
-
-
+
   
   
 io.compression.codecs
 
org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
 A list of the compression codec classes that can be used
  for compression/decompression.
-
-
+
   
   
   
@@ -61,8 +57,7 @@
 The name of the default file system.  Either the
   literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 fs.trash.interval
@@ -73,8 +68,7 @@
 If trash is disabled server side then the client side configuration is 
checked.
 If trash is enabled on the server side then the value configured on 
the server is used and the client configuration value is ignored.
 
-
-
+
   
   
   
@@ -83,8 +77,7 @@
 Defines the threshold number of connections after which
connections will be inspected for idleness.
   
-
-
+
   
   
 ipc.client.connection.maxidletime
@@ -92,15 +85,13 @@
 The maximum time after which a client will bring down the
connection to the server.
   
-
-
+
   
   
 ipc.client.connect.max.retries
 50
 Defines the maximum number of retries for IPC 
connections.
-
-
+
   
   
 ipc.server.tcpnodelay
@@ -111,8 +102,7 @@
   decrease latency
   with a cost of more/smaller packets.
 
-
-
+
   
   
   
@@ -123,8 +113,7 @@
 not be exposed to public. Enable this option if the interfaces
 are only reachable by those who have the right authorization.
   
-
-
+
   
   
 hadoop.security.authentication
@@ -133,8 +122,7 @@
Set the authentication for the cluster. Valid values are: simple or
kerberos.

-
-
+
   
   
 hadoop.security.authorization
@@ -142,8 +130,7 @@
 
  Enable authorization for different protocols.
   
-
-
+
   
   
 hadoop.security.auth_to_local
@@ -189,8 +176,7 @@ DEFAULT
 
   multiLine
 
-
-
+
   
   
 net.topology.script.file.name
@@ -198,7 +184,6 @@ DEFAULT
 
   Location of topology script used by Hadoop to determine the rack 
location of nodes.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index b12400a..967991e 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -29,8 +29,7 @@
   directory
   false
 
-
-
+
   
   
 hadoop_pid_dir_prefix
@@ -42,8 +41,7 @@
   false
   true
 
-
-
+
   
   
 hadoop_root_logger
@@ -53,8 +51,7 @@
 
   false
 
-
-
+
   
   
 hadoop_heapsize
@@ -66,8 +63,7 @@
   MB
   false
 
-
-
+
   
   
 namenode_heapsize
@@ -88,8 +84,7 @@
 dfs.datanode.data.dir
   
 
-
-
+
   
   
 namenode_opt_newsize
@@ -110,8 +105,7 @@
   256
   false
 
-
-
+
   
   
 namenode_opt_maxnewsize
@@ -132,8 +126,7 @@
   256
   false
 
-
-
+
   
   
 namenode_opt_permsize
@@ -148,8 +141,7 @@
   128
   false
 
-
-
+ 

[44/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
 
b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
index fce4d9c..b710922 100644
--- 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
+++ 
b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-startup.properties.xml
@@ -25,50 +25,43 @@
 *.workflow.engine.impl
 org.apache.falcon.workflow.engine.OozieWorkflowEngine
 
-
-
+
   
   
 *.oozie.process.workflow.builder
 org.apache.falcon.workflow.OozieProcessWorkflowBuilder
 
-
-
+
   
   
 *.oozie.feed.workflow.builder
 org.apache.falcon.workflow.OozieFeedWorkflowBuilder
 
-
-
+
   
   
 *.SchedulableEntityManager.impl
 org.apache.falcon.resource.SchedulableEntityManager
 
-
-
+
   
   
 *.ConfigSyncService.impl
 org.apache.falcon.resource.ConfigSyncService
 
-
-
+
   
   
 *.ProcessInstanceManager.impl
 org.apache.falcon.resource.InstanceManager
 
-
-
+
   
   
 *.catalog.service.impl
 org.apache.falcon.catalog.HiveCatalogService
 
-
-
+
   
   
 *.application.services
@@ -83,8 +76,7 @@
 
   multiLine
 
-
-
+
   
   
 *.configstore.listeners
@@ -97,66 +89,57 @@
 
   multiLine
 
-
-
+
   
   
 *.broker.impl.class
 org.apache.activemq.ActiveMQConnectionFactory
 
-
-
+
   
   
 *.shared.libs
 
activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms
 
-
-
+
   
   
   
 *.domain
 ${falcon.app.type}
 
-
-
+
   
   
 *.config.store.uri
 file:///hadoop/falcon/store
 Location to store user entity configurations
-
-
+
   
   
 *.system.lib.location
 ${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib
 Location of libraries that is shipped to Hadoop
-
-
+
   
   
 *.retry.recorder.path
 ${falcon.log.dir}/retry
 
-
-
+
   
   
 *.falcon.cleanup.service.frequency
 days(1)
 
-
-
+
   
   
 *.broker.url
 tcp://localhost:61616
 DONT_ADD_ON_UPGRADE
 Default Active MQ url
-
-
+
   
   
 *.broker.ttlInMins
@@ -165,23 +148,20 @@
 
   int
 
-
-
+
   
   
 *.hive.shared.libs
 
hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\
 hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
-
-
+
   
   
 *.entity.topic
 FALCON.ENTITY.TOPIC
 
-
-
+
   
   
 *.max.retry.failure.count
@@ -190,8 +170,7 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.internal.queue.size
@@ -200,8 +179,7 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.falcon.graph.preserve.history
@@ -210,16 +188,14 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   boolean
 
-
-
+
   
   
   
 *.falcon.http.authentication.cookie.domain
 EXAMPLE.COM
 
-
-
+
   
   
 *.falcon.http.authentication.blacklisted.users
@@ -228,23 +204,20 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   true
 
-
-
+
   
   
   
 *.falcon.authentication.type
 simple
 
-
-
+
   
   
 *.falcon.http.authentication.type
 simple
 
-
-
+
   
   
 *.falcon.http.authentication.token.validity
@@ -253,22 +226,19 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   int
 
-
-
+
   
   
 *.falcon.http.authentication.signature.secret
 falcon
 
-
-
+
   
   
 *.falcon.http.authentication.simple.anonymous.allowed
 true
 Indicates if anonymous requests are allowed when using 
'simple' authentication
-
-
+
   
   
 *.falcon.http.authentication.kerberos.name.rules
@@ -277,7 +247,6 @@ 
hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client
 
   multiLine
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/oozie-site.xml

[66/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
index 27bbbd8..811d593 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/HIVE/configuration/hive-site.xml
@@ -36,8 +36,7 @@ limitations under the License.
   
   1
 
-
-
+
   
   
 hive.zookeeper.quorum
@@ -49,22 +48,19 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 hive.metastore.connect.retries
 24
 Number of retries while opening a connection to 
metastore
-
-
+
   
   
 hive.metastore.failure.retries
 24
 Number of retries upon failure of Thrift metastore 
calls
-
-
+
   
   
 hive.metastore.client.connect.retry.delay
@@ -73,8 +69,7 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   Number of seconds for the client to wait between consecutive connection 
attempts
 
-
-
+
   
   
 hive.metastore.client.socket.timeout
@@ -83,15 +78,13 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   MetaStore Client socket timeout in seconds
 
-
-
+
   
   
 hive.mapjoin.bucket.cache.size
 1
 
-
-
+
   
   
 hive.security.authorization.manager
@@ -106,23 +99,20 @@ limitations under the License.
 hive_security_authorization
   
 
-
-
+
   
   
 hive.cluster.delegation.token.store.class
 org.apache.hadoop.hive.thrift.ZooKeeperTokenStore
 The delegation token store implementation.
   Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for 
load-balanced cluster.
-
-
+
   
   
 hive.cluster.delegation.token.store.zookeeper.connectString
 localhost:2181
 The ZooKeeper token store connect string.
-
-
+
   
   
 hive.server2.support.dynamic.service.discovery
@@ -132,38 +122,33 @@ limitations under the License.
   when it is brought up. JDBC/ODBC clients should use the ZooKeeper 
ensemble: hive.zookeeper.quorum
   in their connection string.
 
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 false
 true
 Disable HDFS filesystem cache.
-
-
+
   
   
 fs.file.impl.disable.cache
 false
 true
 Disable local filesystem cache.
-
-
+
   
   
 hive.exec.scratchdir
 /tmp/hive
 HDFS root scratch dir for Hive jobs which gets created with 
write all (733) permission. For each connecting user, an HDFS scratch dir: 
${hive.exec.scratchdir}/ is created, with 
${hive.scratch.dir.permission}.
-
-
+
   
   
 hive.exec.submitviachild
 false
 
-
-
+
   
   
 hive.exec.submit.local.task.via.child
@@ -173,8 +158,7 @@ limitations under the License.
   separate JVM (true recommended) or not.
   Avoids the overhead of spawning new JVM, but can lead to out-of-memory 
issues.
 
-
-
+
   
   
 hive.exec.compress.output
@@ -183,8 +167,7 @@ limitations under the License.
   This controls whether the final outputs of a query (to a local/HDFS file 
or a Hive table) is compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.compress.intermediate
@@ -193,8 +176,7 @@ limitations under the License.
   This controls whether intermediate files produced by Hive between 
multiple map-reduce jobs are compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.reducers.bytes.per.reducer
@@ -208,8 +190,7 @@ limitations under the License.
   B
   
 
-
-
+
   
   
 hive.exec.reducers.max
@@ -218,8 +199,7 @@ limitations under the License.
   max number of reducers will be used. If the one specified in the 
configuration parameter mapred.reduce.tasks is
   negative, Hive will use this one as the max number of reducers when 
automatically determine number of reducers.
 
-
-
+
   
   
 hive.exec.pre.hooks
@@ -229,8 +209,7 @@ limitations under the License.
   A pre-execution hook is specified as the name of a Java class which 
implements the
   org.apache.hadoop.hive.ql.ho

[42/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
 
b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
index e65c690..a82283e 100644
--- 
a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
@@ -24,43 +24,37 @@ limitations under the License.
 templeton.port
 50111
 The HTTP port for the main server.
-
-
+
   
   
 templeton.hadoop.conf.dir
 /etc/hadoop/conf
 The path to the Hadoop configuration.
-
-
+
   
   
 templeton.jar
 /usr/lib/hcatalog/share/webhcat/svr/webhcat.jar
 The path to the Templeton jar file.
-
-
+
   
   
 templeton.libjars
 /usr/lib/zookeeper/zookeeper.jar
 Jars to add the the classpath.
-
-
+
   
   
 templeton.hadoop
 /usr/bin/hadoop
 The path to the Hadoop executable.
-
-
+
   
   
 templeton.python
 ${env.PYTHON_CMD}
 The path to the Python executable.
-
-
+
   
   
 templeton.pig.archive
@@ -69,22 +63,19 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.pig.path
 pig.tar.gz/pig/bin/pig
 The path to the Pig executable.
-
-
+
   
   
 templeton.hcat
 /usr/bin/hcat
 The path to the hcatalog executable.
-
-
+
   
   
 templeton.hive.archive
@@ -93,36 +84,31 @@ limitations under the License.
 
   true
 
-
-
+
   
   
 templeton.hive.home
 hive.tar.gz/hive
 The path to the Hive home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hcat.home
 hive.tar.gz/hive/hcatalog
 The path to the HCat home within the tar. Has no effect if 
templeton.hive.archive is not set.
-
-
+
   
   
 templeton.hive.path
 hive.tar.gz/hive/bin/hive
 The path to the Hive executable.
-
-
+
   
   
 templeton.hive.properties
 
hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9083,hive.metastore.sasl.enabled=false
 Properties to set when running hive.
-
-
+
   
   
 templeton.zookeeper.hosts
@@ -132,35 +118,30 @@ limitations under the License.
 
   multiLine
 
-
-
+
   
   
 templeton.storage.class
 org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage
 The class to use as storage
-
-
+
   
   
 templeton.override.enabled
 false
 Enable the override path in 
templeton.override.jars
-
-
+
   
   
 templeton.streaming.jar
 hdfs:///apps/webhcat/hadoop-streaming.jar
 The hdfs path to the Hadoop streaming jar file.
-
-
+
   
   
 templeton.exec.timeout
 6
 Time out for templeton api
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index d717619..9e38f1a 100644
--- 
a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ 
b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -30,8 +30,7 @@
 
   directories
 
-
-
+
   
   
 port
@@ -42,8 +41,7 @@
 
   int
 
-
-
+
   
   
 zookeeper.connect
@@ -56,8 +54,7 @@
   path /chroot/path. Note that you must create this path yourself prior to 
starting the broker and consumers must use the
   same connection string.
 
-
-
+
   
   
 message.max.bytes
@@ -67,8 +64,7 @@
   It is important that this property be in sync with the maximum fetch 
size your consumers use or
   else an unruly producer will be able to publish messages too large for 
consumers to consume.
 
-
-
+
   
   
 num.network.threads
@@ -77,8 +73,7 @@
   The number of network threads that the server uses for handling network 
requests.
   You probably don't need to change this.
 
-
-
+
   
   
 num.io.threads
@@ -86,15 +81,13 @@
 
   The number of I/O threads that the server uses for executing requests. 
You should have at least as many threads as you have disks.
 
-
-
+
   
   

[51/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 6e69879..20a73d6 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -25,8 +25,7 @@
 oozie.base.url
 http://localhost:11000/oozie
 Base Oozie URL.
-
-
+
   
   
 oozie.system.id
@@ -34,8 +33,7 @@
 
 The Oozie system ID.
 
-
-
+
   
   
 oozie.systemmode
@@ -43,8 +41,7 @@
 
  System mode for  Oozie at startup.
  
-
-
+
   
   
 oozie.service.AuthorizationService.security.enabled
@@ -53,8 +50,7 @@
  Specifies whether security (user name/admin role) is enabled or not.
  If disabled any user can manage Oozie system and manage any job.
  
-
-
+
   
   
 oozie.service.PurgeService.older.than
@@ -62,8 +58,7 @@
 
  Jobs older than this value, in days, will be purged by the PurgeService.
  
-
-
+
   
   
 oozie.service.PurgeService.purge.interval
@@ -71,22 +66,19 @@
 
  Interval at which the purge service will run, in seconds.
  
-
-
+
   
   
 oozie.service.CallableQueueService.queue.size
 1000
 Max callable queue size
-
-
+
   
   
 oozie.service.CallableQueueService.threads
 10
 Number of threads used for executing callables
-
-
+
   
   
 oozie.service.CallableQueueService.callable.concurrency
@@ -98,16 +90,14 @@
  All commands that use action executors (action-start, action-end, 
action-kill and action-check) use
  the action type as the callable type.
  
-
-
+
   
   
 oozie.service.coord.normal.default.timeout
 120
 Default timeout for a coordinator action input check (in 
minutes) for normal job.
   -1 means infinite timeout
-
-
+
   
   
 oozie.db.schema.name
@@ -115,8 +105,7 @@
 
   Oozie DataBase Name
  
-
-
+
   
   
 oozie.service.HadoopAccessorService.jobTracker.whitelist
@@ -124,24 +113,21 @@
 
   Whitelisted job tracker for Oozie service.
   
-
-
+
   
   
 oozie.authentication.type
 simple
 
   
-
-
+
   
   
 oozie.service.HadoopAccessorService.nameNode.whitelist
  
 
   
-
-
+
   
   
 oozie.service.WorkflowAppService.system.libpath
@@ -151,8 +137,7 @@
   This path is added to workflow application if their job properties sets
   the property 'oozie.use.system.libpath' to true.
   
-
-
+
   
   
 use.system.libpath.for.mapreduce.and.pig.jobs
@@ -163,8 +148,7 @@
   specify where the Pig JAR files are. Instead, the ones from the system
   library path are used.
   
-
-
+
   
   
 oozie.authentication.kerberos.name.rules
@@ -176,8 +160,7 @@
 
 
 The mapping from kerberos principal names to local OS user 
names.
-
-
+
   
   
 oozie.service.HadoopAccessorService.hadoop.configurations
@@ -190,8 +173,7 @@
   the Oozie configuration directory; though the path can be absolute 
(i.e. to point
   to Hadoop client conf/ directories in the local filesystem.
   
-
-
+
   
   
 oozie.service.ActionService.executor.ext.classes
@@ -202,14 +184,12 @@
 org.apache.oozie.action.hadoop.SqoopActionExecutor,
 org.apache.oozie.action.hadoop.DistcpActionExecutor
 
-
-
+
   
   
 oozie.service.SchemaService.wf.ext.schemas
 
shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd
-
-
+
   
   
 oozie.service.JPAService.create.db.schema
@@ -220,8 +200,7 @@
 If set to true, it creates the DB schema if it does not exist. If 
the DB schema exists is a NOP.
 If set to false, it does not create the DB schema. If the DB 
schema does not exist it fails start up.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.driver
@@ -229,8 +208,7 @@
 
 JDBC driver class.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.url
@@ -238,8 +216,7 @@
 
 JDBC URL.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.username
@@ -247,8 +224,7 @@
 
 DB user name.
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.pa

[34/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
index 9019773..6ef189a 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
@@ -21,8 +21,7 @@ limitations under the License.
 hive.heapsize
 1024
 Hive Java heap size
-
-
+
   
   
   
@@ -32,30 +31,26 @@ limitations under the License.
 
   database
 
-
-
+
   
   
 javax.jdo.option.ConnectionURL
 jdbc:postgresql://localhost/hive
 JDBC connect string for a JDBC metastore
-
-
+
   
   
 javax.jdo.option.ConnectionDriverName
 org.postgresql.Driver
 Driver class name for a JDBC metastore
-
-
+
   
   
   
 javax.jdo.option.ConnectionUserName
 hive
 username to use against metastore database
-
-
+
   
   
 javax.jdo.option.ConnectionPassword
@@ -65,73 +60,63 @@ limitations under the License.
 
   password
 
-
-
+
   
   
 hive.metastore.warehouse.dir
 /apps/hive/warehouse
 location of default database for the warehouse
-
-
+
   
   
 hive.metastore.sasl.enabled
 false
 If true, the metastore thrift interface will be secured with 
SASL.
  Clients must authenticate with Kerberos.
-
-
+
   
   
 hive.metastore.cache.pinobjtypes
 Table,Database,Type,FieldSchema,Order
 List of comma separated metastore object types that should be 
pinned in the cache
-
-
+
   
   
 hive.metastore.uris
 thrift://localhost:9083
 URI for client to contact metastore server
-
-
+
   
   
 hive.metastore.client.socket.timeout
 60
 MetaStore Client socket timeout in seconds
-
-
+
   
   
 hive.metastore.execute.setugi
 true
 In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.
-
-
+
   
   
 hive.security.authorization.enabled
 false
 enable or disable the hive client authorization
-
-
+
   
   
 hive.security.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 the hive client authorization manager class name.
 The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  

-
-
+
   
   
 hive.security.metastore.authorization.manager
 
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider
 The authorization manager class name to be used in the 
metastore for authorization. The user-defined authorization class should 
implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
  
-
-
+
   
   
 hive.metastore.pre.event.listeners
@@ -140,8 +125,7 @@ limitations under the License.
   whenever databases, tables, and partitions are created, altered, or 
dropped.
   Set to 
org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
   if metastore-side authorization is desired.
-
-
+
   
   
 hive.metastore.pre.event.listeners
@@ -150,15 +134,13 @@ limitations under the License.
   whenever databases, tables, and partitions are created, altered, or 
dropped.
   Set to 
org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
   if metastore-side authorization is desired.
-
-
+
   
   
 hive.security.authenticator.manager
 org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator
 Hive client authenticator manager class name. The 
user-defined authenticator class should implement interface 
org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  
-
-
+
   
   
 hive.server2.enable.doAs
@@ -167,64 +149,55 @@ limitations under the License.
   submitted the query. But if the parameter is set to false, the query 
will run as the user that the hiveserver2
   process runs as.
 
-
-
+
   
   
 hive.server2.enable.impersonation
 Enable user impersonation for HiveServer2
 true
-
-
+
   
   
 hive.server2.authentication
 Authe

[83/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
index 4b063ee..8b99986 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HBASE/configuration/hbase-site.xml
@@ -32,8 +32,7 @@
 into /tmp.  Change this configuration else all data will be lost
 on machine restart.
 
-
-
+
   
   
 hbase.cluster.distributed
@@ -43,15 +42,13 @@
   false, startup will run all HBase and ZooKeeper daemons together
   in the one JVM.
 
-
-
+
   
   
 hbase.master.port
 6
 The port the HBase Master should bind to.
-
-
+
   
   
 hbase.tmp.dir
@@ -64,38 +61,33 @@
 
   directory
 
-
-
+
   
   
 hbase.local.dir
 ${hbase.tmp.dir}/local
 Directory on the local filesystem to be used as a local 
storage
 
-
-
+
   
   
 hbase.master.info.bindAddress
 0.0.0.0
 The bind address for the HBase Master web UI
 
-
-
+
   
   
 hbase.master.info.port
 60010
 The port for the HBase Master web UI.
-
-
+
   
   
 hbase.regionserver.info.port
 60030
 The port for the HBase RegionServer web UI.
-
-
+
   
   
 hbase.regionserver.global.memstore.upperLimit
@@ -103,8 +95,7 @@
 Maximum size of all memstores in a region server before new
   updates are blocked and flushes are forced. Defaults to 40% of heap
 
-
-
+
   
   
 hbase.regionserver.handler.count
@@ -113,8 +104,7 @@
 Same property is used by the Master for count of master handlers.
 Default is 10.
 
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -123,8 +113,7 @@
 HStoreFiles in a region.  Default: 1 day.
 Set to 0 to disable automated major compactions.
 
-
-
+
   
   
 hbase.regionserver.global.memstore.lowerLimit
@@ -135,8 +124,7 @@
   the minimum possible flushing to occur when updates are blocked due to
   memstore limiting.
 
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -148,8 +136,7 @@
 resultant flush files take a long time to compact or split, or
 worse, we OOME
 
-
-
+
   
   
 hbase.hregion.memstore.flush.size
@@ -159,8 +146,7 @@
 exceeds this number of bytes.  Value is checked by a thread that runs
 every hbase.server.thread.wakefrequency.
 
-
-
+
   
   
 hbase.hregion.memstore.mslab.enabled
@@ -171,8 +157,7 @@
   heavy write loads. This can reduce the frequency of stop-the-world
   GC pauses on large heaps.
 
-
-
+
   
   
 hbase.hregion.max.filesize
@@ -182,8 +167,7 @@
 grown to exceed this value, the hosting HRegion is split in two.
 Default: 1G.
 
-
-
+
   
   
 hbase.client.scanner.caching
@@ -195,8 +179,7 @@
 Do not set this value such that the time between invocations is greater
 than the scanner timeout; i.e. hbase.regionserver.lease.period
 
-
-
+
   
   
 zookeeper.session.timeout
@@ -208,8 +191,7 @@
   "The client sends a requested timeout, the server responds with the
   timeout that it can give the client. " In milliseconds.
 
-
-
+
   
   
 hbase.client.keyvalue.maxsize
@@ -221,8 +203,7 @@
 to set this to a fraction of the maximum region size. Setting it to zero
 or less disables the check.
 
-
-
+
   
   
 hbase.hstore.compactionThreshold
@@ -233,8 +214,7 @@
 is run to rewrite all HStoreFiles files as one.  Larger numbers
 put off compaction but when it runs, it takes longer to complete.
 
-
-
+
   
   
 hbase.hstore.flush.retries.number
@@ -242,8 +222,7 @@
 
 The number of times the region flush operation will be retried.
 
-
-
+
   
   
 hbase.hstore.blockingStoreFiles
@@ -254,8 +233,7 @@
 blocked for this HRegion until a compaction is completed, or
 until hbase.hstore.blockingWaitTime has been exceeded.
 
-
-
+
   
   
 hfile.block.cache.size
@@ -265,8 +243,7 @@
 used by HFile/StoreFile. Default of 0.25 means allocate 25%.
 Set to 0 to disable but it's not recommended.
 
-
-
+
   
   
   
@@ -276,8 +253,7 @@
 full privileges, regardless of stored ACLs, across the cluster.
 Only used when HBase security is enabled.
 
-
-
+
   
   
 hbase.security.authentication
@@ -285,16 +261,14 @@
   Controls

[67/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
index 7285d80..a4e974f 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
@@ -23,8 +23,7 @@
   
 oozie_user
 true
-
-
+
   
   
 oozie_database
@@ -34,8 +33,7 @@
 
   false
 
-
-
+
   
   
 oozie_data_dir
@@ -46,8 +44,7 @@
   true
   false
 
-
-
+
   
   
 oozie_log_dir
@@ -58,8 +55,7 @@
   true
   false
 
-
-
+
   
   
 oozie_pid_dir
@@ -70,8 +66,7 @@
   true
   false
 
-
-
+
   
   
   
@@ -147,7 +142,6 @@ set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
index cdf901a..4c954b4 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
@@ -31,8 +31,7 @@
 
   false
 
-
-
+
   
   
 oozie.service.JPAService.jdbc.url
@@ -44,8 +43,7 @@
 
   false
 
-
-
+
   
   
 oozie.service.HadoopAccessorService.hadoop.configurations
@@ -58,8 +56,7 @@
   the Oozie configuration directory; though the path can be absolute (i.e. 
to point
   to Hadoop client conf/ directories in the local filesystem.
 
-
-
+
   
   
   
@@ -100,8 +97,7 @@
   org.apache.oozie.service.JobsConcurrencyService
 
 List of Oozie services
-
-
+
   
   
 oozie.services.ext
@@ -111,7 +107,6 @@
   To add/replace services defined in 'oozie.services' with custom 
implementations.
   Class names must be separated by commas.
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
index 2313f17..987821a 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
@@ -23,8 +23,7 @@
   
 sqoop_user
 true
-
-
+
   
   
   
@@ -71,7 +70,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
index 439fe35..7caf599 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
@@ -23,20 +23,17 @@
   
 storm_user
 true
-
-
+
   
   
 storm_log_dir
 c:\hadoop\logs\storm
-
-
+
   
   
 storm_pid_dir
 c:\hadoop\run\storm
-
-
+
   
   
   
@@ -45,7 +42,6 @@
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
index b68a6d0..b3c86f2 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/

[39/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/ranger-ugsync-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/ranger-ugsync-site.xml
 
b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/ranger-ugsync-site.xml
index 05c4b23..7f3296e 100644
--- 
a/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/ranger-ugsync-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/RANGER/0.5.0/configuration/ranger-ugsync-site.xml
@@ -20,22 +20,19 @@
 ranger.usersync.port
 5151
 Port for unix authentication service, run within 
usersync
-
-
+
   
   
 ranger.usersync.ssl
 true
 SSL enabled? (ranger admin -> usersync 
communication)
-
-
+
   
   
 ranger.usersync.keystore.file
 /etc/ranger/usersync/conf/unixauthservice.jks
 Keystore file used for usersync
-
-
+
   
   
 ranger.usersync.keystore.password
@@ -45,15 +42,13 @@
 
   password
 
-
-
+
   
   
 ranger.usersync.truststore.file
 /etc/ranger/usersync/conf/mytruststore.jks
 Truststore used for usersync, required if usersync -> 
ranger admin communication is SSL enabled
-
-
+
   
   
 ranger.usersync.truststore.password
@@ -63,15 +58,13 @@
 
   password
 
-
-
+
   
   
 ranger.usersync.passwordvalidator.path
 ./native/credValidator.uexe
 Native program for password validation
-
-
+
   
   
 ranger.usersync.enabled
@@ -94,67 +87,58 @@
   
   1
 
-
-
+
   
   
 ranger.usersync.sink.impl.class
 
org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder
 Class to be used as sink (to sync users into ranger 
admin)
-
-
+
   
   
 ranger.usersync.policymanager.baseURL
 {{ranger_external_url}}
 URL to be used by clients to access ranger admin, use 
FQDN
-
-
+
   
   
 ranger.usersync.policymanager.maxrecordsperapicall
 1000
 How many records to be returned per API call
-
-
+
   
   
 ranger.usersync.policymanager.mockrun
 false
 Is user sync doing mock run?
-
-
+
   
   
 ranger.usersync.unix.minUserId
 Minimum User ID
 500
 Only sync users above this user id (applicable for 
UNIX)
-
-
+
   
   
 ranger.usersync.unix.group.file
 Group File
 /etc/group
 Location of the groups file on the linux server
-
-
+
   
   
 ranger.usersync.unix.password.file
 Password File
 /etc/passwd
 Location of the password file on the linux 
server
-
-
+
   
   
 ranger.usersync.sleeptimeinmillisbetweensynccycle
 6
 Sleeptime interval in milliseconds, if < 6000 then default 
to 1 min
-
-
+
   
   
 ranger.usersync.source.impl.class
@@ -181,40 +165,35 @@
   
   1
 
-
-
+
   
   
 ranger.usersync.filesource.file
 File Name
 /tmp/usergroup.txt
 Path to the file with the users and groups information. 
Example: /tmp/usergroup.json or /tmp/usergroup.csv or 
/tmp/usergroup.txt
-
-
+
   
   
 ranger.usersync.filesource.text.delimiter
 Delimiter
 ,
 Delimiter used in file, if File based user sync is 
used
-
-
+
   
   
 ranger.usersync.ldap.url
 LDAP/AD URL
 
 LDAP server URL. Example: value = ldap://localhost:389 or 
ldaps//localhost:636
-
-
+
   
   
 ranger.usersync.ldap.binddn
 ​Bind User
 
 Full distinguished name (DN), including common name (CN), of 
an LDAP user account that has privileges to search for users. This user is used 
for searching the users. This could be read-only LDAP user. Example: 
cn=admin,dc=example,dc=com
-
-
+
   
   
 ranger.usersync.ldap.ldapbindpassword
@@ -225,15 +204,13 @@
 
   password
 
-
-
+
   
   
 ranger.usersync.ldap.bindalias
 testldapalias
 Set as ranger.usersync.ldap.bindalias (string as 
is)
-
-
+
   
   
 ranger.usersync.ldap.bindkeystore
@@ -242,8 +219,7 @@
 
   true
 
-
-
+
   
   
 ranger.usersync.ldap.searchBase
@@ -253,8 +229,7 @@
 
   true
 
-
-
+
   
   
 ranger.usersync.ldap.user.searchbase
@@ -263,8 +238,7 @@
 "# search base for users
 # sample value would be ou=users,dc=hadoop,dc=apache,dc=org
 # overrides value specified in ranger.usersync.ldap.searchBase"
-
-
+
   
   
 ranger.usersync.ldap.user.searchscope
@@ -273,16 +247,14 @@
 "# search scope for the users, only base, one and sub are 
supported values
 # please customize the value to suit your deployment
 # default value: sub"
-
-
+
   
   

[14/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/common-services/MAPREDUCE/1.0/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are 

[31/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
index 4b236db..647c0f4 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -23,22 +23,19 @@
 yarn.resourcemanager.hostname
 localhost
 The hostname of the RM.
-
-
+
   
   
 yarn.resourcemanager.resource-tracker.address
 localhost:8025
  The address of ResourceManager. 
-
-
+
   
   
 yarn.resourcemanager.scheduler.address
 localhost:8030
 The address of the scheduler interface.
-
-
+
   
   
 yarn.resourcemanager.address
@@ -47,22 +44,19 @@
   The address of the applications manager interface in the
   RM.
 
-
-
+
   
   
 yarn.resourcemanager.admin.address
 localhost:8141
 The address of the RM admin interface.
-
-
+
   
   
 yarn.resourcemanager.scheduler.class
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
 The class to use as the resource scheduler.
-
-
+
   
   
 yarn.scheduler.minimum-allocation-mb
@@ -72,8 +66,7 @@
   in MBs. Memory requests lower than this won't take effect,
   and the specified value will get allocated at minimum.
 
-
-
+
   
   
 yarn.scheduler.maximum-allocation-mb
@@ -83,45 +76,39 @@
   in MBs. Memory requests higher than this won't take effect,
   and will get capped to this value.
 
-
-
+
   
   
 yarn.acl.enable
 false
  Are acls enabled. 
-
-
+
   
   
 yarn.admin.acl
 
  ACL of who can be admin of the YARN cluster. 
-
-
+
   
   
   
 yarn.nodemanager.address
 0.0.0.0:45454
 The address of the container manager in the NM.
-
-
+
   
   
 yarn.nodemanager.resource.memory-mb
 5120
 Amount of physical memory, in MB, that can be allocated
   for containers.
-
-
+
   
   
 yarn.application.classpath
 
/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*
 Classpath for typical applications.
-
-
+
   
   
 yarn.nodemanager.vmem-pmem-ratio
@@ -131,37 +118,32 @@
   expressed in terms of physical memory, and virtual memory usage
   is allowed to exceed this allocation by this ratio.
 
-
-
+
   
   
 yarn.nodemanager.container-executor.class
 
org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor
 ContainerExecutor for launching containers
-
-
+
   
   
 yarn.nodemanager.linux-container-executor.group
 hadoop
 Unix group of the NodeManager
-
-
+
   
   
 yarn.nodemanager.aux-services
 mapreduce_shuffle
 Auxilliary services of NodeManager. A valid service name 
should only contain a-zA-Z0-9_ and can
   not start with numbers
-
-
+
   
   
 yarn.nodemanager.aux-services.mapreduce_shuffle.class
 org.apache.hadoop.mapred.ShuffleHandler
 The auxiliary service class to use 
-
-
+
   
   
 yarn.nodemanager.log-dirs
@@ -173,8 +155,7 @@
   named container_{$contid}. Each container directory will contain the 
files
   stderr, stdin, and syslog generated by that container.
 
-
-
+
   
   
 yarn.nodemanager.local-dirs
@@ -186,8 +167,7 @@
   Individual containers' work directories, called container_${contid}, will
   be subdirectories of this.
 
-
-
+
   
   
 yarn.nodemanager.container-monitor.interval-ms
@@ -196,8 +176,7 @@
   The interval, in milliseconds, for which the node manager
   waits  between two cycles of monitoring its containers' memory usage.
 
-
-
+
   
   
   
@@ -194,8 +177,7 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
   
@@ -210,8 +192,7 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
 stack_root
@@ -222,35 +203,30 @@ gpgcheck=0
   false
   false
 
-
-
+
   
   
 alerts_repeat_tolerance
 1
 The number of consecutive alerts required to transition an 
alert from the SOFT to the HARD state.
-
-
+
   
   
 ignore_bad_mounts
 false
 For properties handled by handle_mounted_dirs this will make 
Ambari not to c

[36/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/configuration/tez-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/configuration/tez-site.xml
 
b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/configuration/tez-site.xml
index 1e6d6d6..7620038 100644
--- 
a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/configuration/tez-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/configuration/tez-site.xml
@@ -21,22 +21,19 @@
 tez.lib.uris
 hdfs:///apps/tez/,hdfs:///apps/tez/lib/
 The location of the Tez libraries which will be localized for 
DAGs
-
-
+
   
   
 tez.am.log.level
 INFO
 Root Logging level passed to the Tez app master
-
-
+
   
   
 tez.staging-dir
 /tmp/${user.name}/staging
 The staging dir used while submitting DAGs
-
-
+
   
   
 tez.am.resource.memory.mb
@@ -45,16 +42,14 @@
 
   int
 
-
-
+
   
   
   
 tez.am.java.opts
 -server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA 
-XX:+UseParallelGC
 Java options for the Tez AppMaster process. The -Xmx 
parameter value is generally 0.8 times tez.am.resource.memory.mb 
config.
-
-
+
   
   
 tez.am.shuffle-vertex-manager.min-src-fraction
@@ -62,8 +57,7 @@
 In case of a ScatterGather connection, the fraction of source 
tasks which should
   complete before tasks for the current vertex are schedule
 
-
-
+
   
   
 tez.am.shuffle-vertex-manager.max-src-fraction
@@ -72,15 +66,13 @@
   completed, all tasks on the current vertex can be scheduled. Number of 
tasks ready for
   scheduling on the current vertex scales linearly between min-fraction 
and max-fraction
 
-
-
+
   
   
 tez.am.am-rm.heartbeat.interval-ms.max
 250
 The maximum heartbeat interval between the AM and RM in 
milliseconds
-
-
+
   
   
 tez.am.grouping.split-waves
@@ -92,8 +84,7 @@
 
   float
 
-
-
+
   
   
 tez.am.grouping.min-size
@@ -104,8 +95,7 @@
 
   int
 
-
-
+
   
   
 tez.am.grouping.max-size
@@ -116,31 +106,27 @@
 
   int
 
-
-
+
   
   
 tez.am.container.reuse.enabled
 true
 Configuration to specify whether container should be 
reused
-
-
+
   
   
 tez.am.container.reuse.rack-fallback.enabled
 true
 Whether to reuse containers for rack local tasks. Active only 
if reuse is enabled
 
-
-
+
   
   
 tez.am.container.reuse.non-local-fallback.enabled
 true
 Whether to reuse containers for non-local tasks. Active only 
if reuse is enabled
 
-
-
+
   
   
 tez.am.container.session.delay-allocation-millis
@@ -150,8 +136,7 @@
   it immediately. Only active when reuse is enabled. Set to -1 to never 
release a container
   in a session
 
-
-
+
   
   
 tez.am.container.reuse.locality.delay-allocation-millis
@@ -159,8 +144,7 @@
 The amount of time to wait before assigning a container to 
the next level of
   locality. NODE -> RACK -> NON_LOCAL
 
-
-
+
   
   
 tez.task.get-task.sleep.interval-ms.max
@@ -168,8 +152,7 @@
 The maximum amount of time, in seconds, to wait before a task 
asks an AM for
   another task
 
-
-
+
   
   
 tez.am.env
@@ -178,8 +161,7 @@
 Additional execution environment entries for tez. This is not an 
additive property. You must preserve the original value if
 you want to have access to native libraries.
 
-
-
+
   
   
   
@@ -188,8 +170,7 @@
 Time (in seconds) to wait for AM to come up when trying to 
submit a DAG from
   the client
 
-
-
+
   
   
 tez.session.am.dag.submit.timeout.secs
@@ -205,8 +186,7 @@
   600
   seconds
 
-
-
+
   
   
   
   
 tez.yarn.ats.enabled
 true
 Whether to send history events to YARN Application Timeline 
Server
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
index 1ab6f9f..eef82c5 100644
--- 
a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
+++ 
b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration-mapred/mapred-env.xml
@@ -29,8 +29,7 @@
   d

[58/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
index 5edac2e..d33788d 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
@@ -24,43 +24,37 @@
 hbasemaster_host
 
 HBase Master Host.
-
-
+
   
   
 regionserver_hosts
 
 Region Server Hosts
-
-
+
   
   
 hbase_log_dir
 /var/log/hbase
 Log Directories for HBase.
-
-
+
   
   
 hbase_pid_dir
 /var/run/hbase
 Log Directories for HBase.
-
-
+
   
   
 hbase_log_dir
 /var/log/hbase
 Log Directories for HBase.
-
-
+
   
   
 hbase_regionserver_heapsize
 1024
 RegionServer heap size.
-
-
+
   
   
 hbase_regionserver_xmn_max
@@ -70,168 +64,144 @@ Sets the upper bound on HBase RegionServers' young 
generation size.
 This value is used in case the young generation size (-Xmn) calculated based 
on the max heapsize (hbase_regionserver_heapsize)
 and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
 
-
-
+
   
   
 hbase_regionserver_xmn_ratio
 0.2
 Percentage of max heap size which used for young generation 
heap (-Xmx).
-
-
+
   
   
 hbase_master_heapsize
 1024
 HBase Master Heap Size
-
-
+
   
   
 hstore_compactionthreshold
 3
 HBase HStore compaction threshold.
-
-
+
   
   
 hfile_blockcache_size
 0.25
 HFile block cache size.
-
-
+
   
   
 hstorefile_maxsize
 1073741824
 Maximum HStoreFile Size
-
-
+
   
   
 regionserver_handlers
 30
 HBase RegionServer Handler
-
-
+
   
   
 hregion_majorcompaction
 8640
 HBase Major Compaction.
-
-
+
   
   
 hregion_blockmultiplier
 2
 HBase Region Block Multiplier
-
-
+
   
   
 hregion_memstoreflushsize
 
 HBase Region MemStore Flush Size.
-
-
+
   
   
 client_scannercaching
 100
 Base Client Scanner Caching
-
-
+
   
   
 zookeeper_sessiontimeout
 6
 ZooKeeper Session Timeout
-
-
+
   
   
 hfile_max_keyvalue_size
 10485760
 HBase Client Maximum key-value Size
-
-
+
   
   
 hbase_hdfs_root_dir
 /apps/hbase/data
 HBase Relative Path to HDFS.
-
-
+
   
   
 hbase_tmp_dir
 /var/log/hbase
 Hbase temp directory
-
-
+
   
   
 hbase_conf_dir
 /etc/hbase
 Config Directory for HBase.
-
-
+
   
   
 hdfs_enable_shortcircuit_read
 true
 HDFS Short Circuit Read
-
-
+
   
   
 hdfs_support_append
 true
 HDFS append support
-
-
+
   
   
 hstore_blockingstorefiles
 7
 HStore blocking storefiles.
-
-
+
   
   
 regionserver_memstore_lab
 true
 Region Server memstore.
-
-
+
   
   
 regionserver_memstore_lowerlimit
 0.35
 Region Server memstore lower limit.
-
-
+
   
   
 regionserver_memstore_upperlimit
 0.4
 Region Server memstore upper limit.
-
-
+
   
   
 hbase_conf_dir
 /etc/hbase
 HBase conf dir.
-
-
+
   
   
 hbase_user
 hbase
 HBase User Name.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
index c938e26..497dd02 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HBASE/configuration/hbase-policy.xml
@@ -28,8 +28,7 @@
 The ACL is a comma-separated list of user and group names. The user and 
 group list is separated by a blank. For e.g. "alice,bob users,wheel". 
 A special value of "*" means all users are allowed.
-
-
+
   
   
 security.admin.protocol.acl
@@ -39,8 +38,7 @@
 The ACL is a comma-separated list of user and group names. The user and 
 group list is separated by a blank. For e.g. "alice,bob users,wheel". 
 A special val

[17/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/STORM/configuration/storm-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/STORM/configuration/storm-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/STORM/configuration/storm-site.xml
index 27308bb..c182945 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/STORM/configuration/storm-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/STORM/configuration/storm-site.xml
@@ -23,28 +23,24 @@
   
 storm.thrift.transport
 {{storm_thrift_transport}}
-
-
+
   
   
 _storm.thrift.nonsecure.transport
 backtype.storm.security.auth.SimpleTransportPlugin
 The transport plug-in that used for non-secure mode for for 
Thrift client/server communication.
-
-
+
   
   
 _storm.thrift.secure.transport
 
backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin
 The transport plug-in that used for secure mode for Thrift 
client/server communication.
-
-
+
   
   
 java.library.path
 
/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib
-
-
+
   
   
 nimbus.childopts
@@ -52,20 +48,17 @@
 
   false
 
-
-
+
   
   
 worker.childopts
 -Xmx768m _JAAS_PLACEHOLDER 
-javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM
-
-
+
   
   
 ui.childopts
 -Xmx768m _JAAS_PLACEHOLDER
-
-
+
   
   
 supervisor.childopts
@@ -73,20 +66,17 @@
 
   false
 
-
-
+
   
   
 logviewer.childopts
 -Xmx128m _JAAS_PLACEHOLDER
-
-
+
   
   
 _storm.min.ruid
 null
 min.user.id is set to the first real user id on the system. 
If value is 'null' than default value will be taken from key UID_MIN of 
/etc/login.defs otherwise the specified value will be used for all 
hosts.
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
index 6e54800..aa53f30 100644
--- 
a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
@@ -24,29 +24,25 @@
   Specifying a single .tar.gz or .tgz assumes that a compressed version of 
the tez libs is being used. This is uncompressed into a tezlibs directory when 
running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after 
. and .*).
   If multiple files are specified - files are localized as regular files, 
contents of directories are localized as regular files (non-recursive).
 
-
-
+
   
   
 tez.cluster.additional.classpath.prefix
 
/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure
 
-
-
+
   
   
 tez.am.log.level
 INFO
 Root Logging level passed to the Tez app master
-
-
+
   
   
 tez.generate.debug.artifacts
 false
 Generate debug artifacts such as a text representation of the 
submitted DAG plan
-
-
+
   
   
 tez.am.resource.memory.mb
@@ -54,8 +50,7 @@
 The amount of memory to be used by the AppMaster.
   Used only if the value is not specified explicitly by the DAG definition.
 
-
-
+
   
   
 tez.am.launch.cmd-opts
@@ -63,15 +58,13 @@
 Java options for the Tez AppMaster process. The Xmx value is 
derived based on tez.am.resource.memory.mb and is 80% of the value by default.
   Used only if the value is not specified explicitly by the DAG definition.
 
-
-
+
   
   
 tez.am.launch.cluster-default.cmd-opts
 -server -Djava.net.preferIPv4Stack=true 
-Dhdp.version=${hdp.version}
 Cluster default Java options for the Tez AppMaster process. 
These will be prepended to the properties specified via 
tez.am.launch.cmd-opts
-
-
+
   
   
 tez.am.launch.env
@@ -81,8 +74,7 @@
 you want to have access to native libraries.
   Used only if the value is not specified explicitly by the DAG definition.
 
-
-
+
   
   
 tez.task.resource.memory.mb
@@ -90,8 +82,7 @@
 The amount of memory to be used by launched tasks.
   

[61/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
--
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
index ea3d6b6..8a4f566 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -28,32 +28,28 @@
   of directories then the name table is replicated in all of the
   directories, for redundancy. 
 true
-
-
+
   
   
 dfs.support.append
 true
 to enable dfs append
 true
-
-
+
   
   
 dfs.webhdfs.enabled
 false
 to enable webhdfs
 true
-
-
+
   
   
 dfs.datanode.failed.volumes.tolerated
 0
 #of failed disks dn would tolerate
 true
-
-
+
   
   
 dfs.block.local-path-access.user
@@ -62,8 +58,7 @@
 circuit reads.
 
 true
-
-
+
   
   
 dfs.data.dir
@@ -75,8 +70,7 @@
   Directories that do not exist are ignored.
   
 true
-
-
+
   
   
 dfs.hosts.exclude
@@ -85,8 +79,7 @@
 not permitted to connect to the namenode.  The full pathname of the
 file must be specified.  If the value is empty, no hosts are
 excluded.
-
-
+
   
   
 dfs.hosts
@@ -95,31 +88,27 @@
 permitted to connect to the namenode. The full pathname of the file
 must be specified.  If the value is empty, all hosts are
 permitted.
-
-
+
   
   
 dfs.replication.max
 50
 Maximal block replication.
   
-
-
+
   
   
 dfs.replication
 3
 Default block replication.
   
-
-
+
   
   
 dfs.heartbeat.interval
 3
 Determines datanode heartbeat interval in 
seconds.
-
-
+
   
   
 dfs.safemode.threshold.pct
@@ -130,8 +119,7 @@
 Values less than or equal to 0 mean not to start in safe mode.
 Values greater than 1 will make safe mode permanent.
 
-
-
+
   
   
 dfs.balance.bandwidthPerSec
@@ -141,29 +129,25 @@
 can utilize for the balancing purpose in term of
 the number of bytes per second.
   
-
-
+
   
   
 dfs.datanode.address
 0.0.0.0:50010
 Address where the datanode binds
-
-
+
   
   
 dfs.datanode.http.address
 0.0.0.0:50075
 HTTP address for the datanode
-
-
+
   
   
 dfs.block.size
 134217728
 The default block size for new files.
-
-
+
   
   
 dfs.http.address
@@ -171,8 +155,7 @@
 The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.
 true
-
-
+
   
   
 dfs.datanode.du.reserved
@@ -180,8 +163,7 @@ literal string "local" or a host:port for 
HDFS.
 1073741824
 Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 
-
-
+
   
   
 dfs.datanode.ipc.address
@@ -190,29 +172,25 @@ literal string "local" or a host:port for 
HDFS.
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 
-
-
+
   
   
 dfs.blockreport.initialDelay
 120
 Delay for first block report in seconds.
-
-
+
   
   
 dfs.namenode.handler.count
 40
 The number of server threads for the namenode.
-
-
+
   
   
 dfs.datanode.max.xcievers
 1024
 PRIVATE CONFIG VARIABLE
-
-
+
   
   
   
@@ -221,8 +199,7 @@ If the port is 0 then the server will start on a free port.
 
 The octal umask used when creating files and directories.
 
-
-
+
   
   
 dfs.web.ugi
@@ -231,8 +208,7 @@ The octal umask used when creating files and directories.
 The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 
-
-
+
   
   
 dfs.permissions
@@ -244,29 +220,25 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 
-
-
+
   
   
 dfs.permissions.supergroup
 hdfs
 The name of the group of super-users.
-
-
+
   
   
 dfs.namenode.handler.count
 100
 Added to grow Queue size so that more client connections are 
allowed
-
-
+
   
   
 ipc.server.max.response.size
 5242880
 The max response size for IPC
-
-
+
   
   
 dfs.block.access.token.enable
@@ -275,8 +247,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", n

[33/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
index c595f3a..4bebc19 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
@@ -24,15 +24,13 @@
 yarn_log_dir_prefix
 /var/log/hadoop-yarn
 YARN Log Dir Prefix
-
-
+
   
   
 yarn_pid_dir_prefix
 /var/run/hadoop-yarn
 YARN PID Dir Prefix
-
-
+
   
   
 yarn_user
@@ -44,43 +42,37 @@
   user
   false
 
-
-
+
   
   
 yarn_heapsize
 1024
 Max heapsize for all YARN components using a numerical value 
in the scale of MB
-
-
+
   
   
 resourcemanager_heapsize
 1024
 Max heapsize for ResourceManager using a numerical value in 
the scale of MB
-
-
+
   
   
 nodemanager_heapsize
 1024
 Max heapsize for NodeManager using a numerical value in the 
scale of MB
-
-
+
   
   
 min_user_id
 1000
 Set to 0 to disallow root from submitting jobs. Set to 1000 
to disallow all superusers from submitting jobs
-
-
+
   
   
 apptimelineserver_heapsize
 1024
 Max heapsize for AppTimelineServer using a numerical value in 
the scale of MB
-
-
+
   
   
   
@@ -201,7 +193,6 @@ YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
index cece269..7299f49 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
@@ -67,7 +67,6 @@ 
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Appl
 
   content
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index fc6708f..cefa82a 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -23,22 +23,19 @@
 yarn.resourcemanager.hostname
 localhost
 The hostname of the RM.
-
-
+
   
   
 yarn.resourcemanager.resource-tracker.address
 localhost:8025
  The address of ResourceManager. 
-
-
+
   
   
 yarn.resourcemanager.scheduler.address
 localhost:8030
 The address of the scheduler interface.
-
-
+
   
   
 yarn.resourcemanager.address
@@ -47,22 +44,19 @@
   The address of the applications manager interface in the
   RM.
 
-
-
+
   
   
 yarn.resourcemanager.admin.address
 localhost:8141
 The address of the RM admin interface.
-
-
+
   
   
 yarn.resourcemanager.scheduler.class
 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
 The class to use as the resource scheduler.
-
-
+
   
   
 yarn.scheduler.minimum-allocation-mb
@@ -72,8 +66,7 @@
   in MBs. Memory requests lower than this won't take effect,
   and the specified value will get allocated at minimum.
 
-
-
+
   
   
 yarn.scheduler.maximum-allocation-mb
@@ -83,45 +76,39 @@
   in MBs. Memory requests higher than this won't take effect,
   and will get capped to this value.
 
-
-
+
   
   
 yarn.acl.enable
 false
  Are acls enabled. 
-
-
+
   
   
 yarn.admin.acl
 
  ACL of who can be admin of the YARN cluster. 
-
-
+
   
   
   
 yarn.nodemanager.address
 0.0.0.0:45454
 The address of the container manager in the NM.
-
-
+
   
   
 yarn.nodemanager.resource.memory-m

[26/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index 18f321c..4af74b1 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -36,8 +36,7 @@ limitations under the License.
   
   1
 
-
-
+
   
   
 hive.zookeeper.quorum
@@ -51,22 +50,19 @@ limitations under the License.
   multiLine
   true
 
-
-
+
   
   
 hive.metastore.connect.retries
 24
 Number of retries while opening a connection to 
metastore
-
-
+
   
   
 hive.metastore.failure.retries
 24
 Number of retries upon failure of Thrift metastore 
calls
-
-
+
   
   
 hive.metastore.client.connect.retry.delay
@@ -75,8 +71,7 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   Number of seconds for the client to wait between consecutive connection 
attempts
 
-
-
+
   
   
 hive.metastore.client.socket.timeout
@@ -85,15 +80,13 @@ limitations under the License.
   Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, 
us/usec, ns/nsec), which is sec if not specified.
   MetaStore Client socket timeout in seconds
 
-
-
+
   
   
 hive.mapjoin.bucket.cache.size
 1
 
-
-
+
   
   
 hive.security.authorization.manager
@@ -108,24 +101,21 @@ limitations under the License.
 hive_security_authorization
   
 
-
-
+
   
   
 hive.cluster.delegation.token.store.class
 org.apache.hadoop.hive.thrift.ZooKeeperTokenStore
 The delegation token store implementation.
   Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for 
load-balanced cluster.
-
-
+
   
   
 hive.cluster.delegation.token.store.zookeeper.connectString
 localhost:2181
 DONT_ADD_ON_UPGRADE
 The ZooKeeper token store connect string.
-
-
+
   
   
 hive.server2.support.dynamic.service.discovery
@@ -138,38 +128,33 @@ limitations under the License.
 
   boolean
 
-
-
+
   
   
 fs.hdfs.impl.disable.cache
 false
 true
 Disable HDFS filesystem cache.
-
-
+
   
   
 fs.file.impl.disable.cache
 false
 true
 Disable local filesystem cache.
-
-
+
   
   
 hive.exec.scratchdir
 /tmp/hive
 HDFS root scratch dir for Hive jobs which gets created with 
write all (733) permission. For each connecting user, an HDFS scratch dir: 
${hive.exec.scratchdir}/ is created, with 
${hive.scratch.dir.permission}.
-
-
+
   
   
 hive.exec.submitviachild
 false
 
-
-
+
   
   
 hive.exec.submit.local.task.via.child
@@ -179,8 +164,7 @@ limitations under the License.
   separate JVM (true recommended) or not.
   Avoids the overhead of spawning new JVM, but can lead to out-of-memory 
issues.
 
-
-
+
   
   
 hive.exec.compress.output
@@ -189,8 +173,7 @@ limitations under the License.
   This controls whether the final outputs of a query (to a local/HDFS file 
or a Hive table) is compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.compress.intermediate
@@ -199,8 +182,7 @@ limitations under the License.
   This controls whether intermediate files produced by Hive between 
multiple map-reduce jobs are compressed.
   The compression codec and other options are determined from Hadoop 
config variables mapred.output.compress*
 
-
-
+
   
   
 hive.exec.reducers.bytes.per.reducer
@@ -214,8 +196,7 @@ limitations under the License.
   B
   
 
-
-
+
   
   
 hive.exec.reducers.max
@@ -224,8 +205,7 @@ limitations under the License.
   max number of reducers will be used. If the one specified in the 
configuration parameter mapred.reduce.tasks is
   negative, Hive will use this one as the max number of reducers when 
automatically determine number of reducers.
 
-
-
+
   
   
 hive.exec.pre.hooks
@@ -241,8 +221,7 @@ limitations under the License.
 hive_timeline_logging_enabled
   
 
-
-
+
   
   
 hive.exec.post.hooks
@@ -266,8 +245,7 @@ limitations under the License.
 atlas.server.https.port
   
 
-
-
+
   

[93/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
 
b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
index 052977f..ba86f900 100644
--- 
a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-site.xml
@@ -39,8 +39,7 @@
 timeline.metrics.service.operation.mode
   
 
-
-
+
   
   
 hbase.tmp.dir
@@ -54,16 +53,14 @@
 
   directory
 
-
-
+
   
   
 hbase.local.dir
 ${hbase.tmp.dir}/local
 Directory on the local filesystem to be used as a local 
storage
 
-
-
+
   
   
 hbase.cluster.distributed
@@ -79,8 +76,7 @@
 timeline.metrics.service.operation.mode
   
 
-
-
+
   
   
 hbase.master.wait.on.regionservers.mintostart
@@ -88,8 +84,7 @@
 
   Ensure that HBase Master waits for # many region server to start.
 
-
-
+
   
   
 hbase.zookeeper.quorum
@@ -102,43 +97,37 @@
   this is the list of servers which we will start/stop ZooKeeper on.
 
 true
-
-
+
   
   
 hbase.master.info.bindAddress
 0.0.0.0
 The bind address for the HBase Master web UI
-
-
+
   
   
 hbase.master.info.port
 61310
 The port for the HBase Master web UI.
-
-
+
   
   
 hbase.regionserver.info.port
 61330
 The port for the HBase RegionServer web UI.
-
-
+
   
   
 hbase.master.port
 61300
 The port for the HBase Master web UI.
-
-
+
   
   
 hbase.regionserver.port
 61320
 The port for the HBase RegionServer web UI.
-
-
+
   
   
 hbase.hregion.majorcompaction
@@ -148,8 +137,7 @@
   HStoreFiles in a region.
   0 to disable automated major compactions.
 
-
-
+
   
   
 phoenix.query.spoolThresholdBytes
@@ -158,8 +146,7 @@
   Threshold size in bytes after which results from parallelly executed
   query results are spooled to disk. Default is 20 mb.
 
-
-
+
   
   
 hbase.zookeeper.property.dataDir
@@ -168,8 +155,7 @@
   Property from ZooKeeper's config zoo.cfg.
   The directory where the snapshot is stored.
 
-
-
+
   
   
 hbase.client.scanner.caching
@@ -178,23 +164,20 @@
   Number of rows that will be fetched when calling next on a scanner
   if it is not served from (local, client) memory.
 
-
-
+
   
   
 hbase.normalizer.enabled
 true
 If set to true, Master will try to keep region size
 within each table approximately the same.
-
-
+
   
   
 hbase.normalizer.period
 60
 Period in ms at which the region normalizer runs in the 
Master.
-
-
+
   
   
 hbase.master.normalizer.class
@@ -204,8 +187,7 @@
   See the class comment for more on how it works
   
http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
 
-
-
+
   
   
 hfile.block.cache.size
@@ -214,8 +196,7 @@
   Percentage of maximum heap (-Xmx setting) to allocate to block cache
   used by a StoreFile. Default of 0.4 means allocate 40%.
 
-
-
+
   
   
 hbase.regionserver.global.memstore.upperLimit
@@ -224,8 +205,7 @@
   Maximum size of all memstores in a region server before new
   updates are blocked and flushes are forced. Defaults to 40% of heap
 
-
-
+
   
   
 hbase.regionserver.global.memstore.lowerLimit
@@ -237,8 +217,7 @@
   the minimum possible flushing to occur when updates are blocked due to
   memstore limiting.
 
-
-
+
   
   
 phoenix.groupby.maxCacheSize
@@ -246,8 +225,7 @@
 
   Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
 
-
-
+
   
   
 hbase.hregion.max.filesize
@@ -256,8 +234,7 @@
   Maximum HFile size. If the sum of the sizes of a region’s HFiles 
has grown
   to exceed this value, the region is split in two. Default is 10Gb.
 
-
-
+
   
   
 hbase.hregion.memstore.block.multiplier
@@ -267,8 +244,7 @@
   times hbase.hregion.memstore.flush.size bytes. Useful preventing runaway
   memstore during spikes in update traffic.
 
-
-
+
   
   
 hbase.hstore.flusher.count
@@ -278,8 +254,7 @@
   will be queued. With more threads, the flushes will be executed in 
parallel,
   increasing the load on HDFS, and potentially causing more compacti

[70/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)

2016-06-09 Thread dmitriusan
http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
index b37c4ec..800216c 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
@@ -23,8 +23,7 @@
 xasecure.audit.is.enabled
 true
 Is Audit enabled?
-
-
+
   
   
 xasecure.audit.destination.db
@@ -40,22 +39,19 @@
 xasecure.audit.destination.db
   
 
-
-
+
   
   
 xasecure.audit.destination.db.jdbc.url
 {{audit_jdbc_url}}
 Audit DB JDBC URL
-
-
+
   
   
 xasecure.audit.destination.db.user
 {{xa_audit_db_user}}
 Audit DB JDBC User
-
-
+
   
   
 xasecure.audit.destination.db.password
@@ -65,29 +61,25 @@
 
   password
 
-
-
+
   
   
 xasecure.audit.destination.db.jdbc.driver
 {{jdbc_driver}}
 Audit DB JDBC Driver
-
-
+
   
   
 xasecure.audit.credential.provider.file
 jceks://file{{credential_file}}
 Credential file store
-
-
+
   
   
 xasecure.audit.destination.db.batch.filespool.dir
 /var/log/hive/audit/db/spool
 /var/log/hive/audit/db/spool
-
-
+
   
   
 xasecure.audit.destination.hdfs
@@ -103,8 +95,7 @@
 xasecure.audit.destination.hdfs
   
 
-
-
+
   
   
 xasecure.audit.destination.hdfs.dir
@@ -116,15 +107,13 @@
 xasecure.audit.destination.hdfs.dir
   
 
-
-
+
   
   
 xasecure.audit.destination.hdfs.batch.filespool.dir
 /var/log/hive/audit/hdfs/spool
 /var/log/hive/audit/hdfs/spool
-
-
+
   
   
 xasecure.audit.destination.solr
@@ -140,8 +129,7 @@
 xasecure.audit.destination.solr
   
 
-
-
+
   
   
 xasecure.audit.destination.solr.urls
@@ -156,8 +144,7 @@
 ranger.audit.solr.urls
   
 
-
-
+
   
   
 xasecure.audit.destination.solr.zookeepers
@@ -169,15 +156,13 @@
 ranger.audit.solr.zookeepers
   
 
-
-
+
   
   
 xasecure.audit.destination.solr.batch.filespool.dir
 /var/log/hive/audit/solr/spool
 /var/log/hive/audit/solr/spool
-
-
+
   
   
 xasecure.audit.provider.summary.enabled
@@ -187,7 +172,6 @@
 
   boolean
 
-
-
+
   
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c5cf30e/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-plugin-properties.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-plugin-properties.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-plugin-properties.xml
index 1b41ec0..6b2caab 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-plugin-properties.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -22,157 +22,131 @@
   
 XAAUDIT.DB.IS_ENABLED
 true
-
-
+
   
   
 XAAUDIT.HDFS.IS_ENABLED
 true
-
-
+
   
   
 XAAUDIT.HDFS.DESTINATION_DIRECTORY
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY
 true
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FILE
 true
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS
 true
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS
 true
-
-
+
   
   
 XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FILE
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS
 true
-
-
+
   
   
 XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT
 true
-
-
+
   
   
 SSL_KEYSTORE_FILE_PATH
 true
-
-
+
   
   
 SSL_KEYSTORE_PASSWORD
 true
-
-
+
   
   
 SSL_TRUSTSTORE_FILE_PATH
 true
-
-
+
   
   
 SSL_TRUSTSTORE_PASSWORD
 true
-
-
+
   
   
 UPDATE_XAPOLICIES_ON_GRANT_REVOKE
 true
-
-
+
   
   
 POLICY_MGR_URL
 true
-
- 

  1   2   3   >