AMBARI-6786. Provide testcases for configs got by /recommendations on stack-version
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fbe8b876
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fbe8b876
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fbe8b876

Branch: refs/heads/branch-alerts-dev
Commit: fbe8b876a818fa9ad8215557c407a2f1a6e47412
Parents: c2a117f
Author: Srimanth Gunturi <sgunt...@hortonworks.com>
Authored: Tue Sep 9 10:42:42 2014 -0700
Committer: Srimanth Gunturi <sgunt...@hortonworks.com>
Committed: Tue Sep 9 13:22:52 2014 -0700

----------------------------------------------------------------------
 .../stacks/HDP/1.3.2/services/stack_advisor.py |  19 ++-
 .../stacks/HDP/2.0.6/services/stack_advisor.py |  37 +++--
 .../stacks/HDP/2.1/services/stack_advisor.py   |   8 +-
 .../stacks/2.0.6/common/test_stack_advisor.py  | 157 ++++++++++++++++++-
 .../stacks/2.1/common/test_stack_advisor.py    | 132 ++++++++++++++++
 5 files changed, 323 insertions(+), 30 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/fbe8b876/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
index c9b119f..ba42075 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.

 import re
 import sys
+from math import ceil

 from stack_advisor import DefaultStackAdvisor

@@ -146,19 +147,21 @@ class HDP132StackAdvisor(DefaultStackAdvisor):
       24 < cluster["ram"]: 2048
     }[1]
+    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
+    if cluster["hBaseInstalled"]:
+      totalAvailableRam -= cluster["hbaseRam"]
+    cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
     '''containers = max(3, min (2*cores,min (1.8*DISKS,(Total available RAM) / MIN_CONTAINER_SIZE))))'''
-    cluster["containers"] = max(3,
-                                min(2 * cluster["cpu"],
-                                    int(min(1.8 * cluster["disk"],
-                                            cluster["ram"] / cluster["minContainerSize"]))))
+    cluster["containers"] = round(max(3,
+                                min(2 * cluster["cpu"],
+                                    min(ceil(1.8 * cluster["disk"]),
+                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))

     '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
-    cluster["ramPerContainer"] = max(2048,
-                                     cluster["ram"] - cluster["reservedRam"] - cluster["hbaseRam"])
-    cluster["ramPerContainer"] /= cluster["containers"]
+    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
     '''If greater than 1GB, value will be in multiples of 512.'''
     if cluster["ramPerContainer"] > 1024:
-      cluster["ramPerContainer"] = ceil(cluster["ramPerContainer"] / 512) * 512
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512

     cluster["mapMemory"] = int(cluster["ramPerContainer"])
     cluster["reduceMemory"] = cluster["ramPerContainer"]
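
A standalone sketch of the revised sizing math above may help when reading the new test expectations further down; it is not part of the patch, the function and argument names here are illustrative only, and hbase_gb stands in for the "subtract hbaseRam only when HBase is installed" branch (pass 0 when it is not):

from math import ceil

def estimate_containers(ram_gb, cpu, disks, reserved_gb, hbase_gb, min_container_mb):
  # RAM left for YARN after the OS and HBase reservations, floored at 2 GB, in MB.
  total_available_mb = max(2048, (ram_gb - reserved_gb - hbase_gb) * 1024)
  # containers = max(3, min(2*cores, min(1.8*disks, totalAvailableRam / minContainerSize)))
  containers = round(max(3,
                         min(2 * cpu,
                             min(ceil(1.8 * disks),
                                 total_available_mb / min_container_mb))))
  ram_per_container = abs(total_available_mb / containers)
  # Above 1 GB, per-container RAM is snapped down to a multiple of 512 MB.
  if ram_per_container > 1024:
    ram_per_container = int(ram_per_container / 512) * 512
  return containers, ram_per_container

print(estimate_containers(6, 8, 8, 2, 1, 512))    # -> (6, 512.0), the 6 GB HBase case in the new tests
print(estimate_containers(48, 6, 6, 6, 8, 2048))  # -> (11, 3072), the 48 GB HBase case in the new tests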

http://git-wip-us.apache.org/repos/asf/ambari/blob/fbe8b876/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 452ccbd..8853e13 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -19,6 +19,7 @@ limitations under the License.

 import re
 import sys
+from math import ceil

 from stack_advisor import DefaultStackAdvisor

@@ -91,19 +92,19 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
   def recommendYARNConfigurations(self, configurations, clusterData):
     putYarnProperty = self.putProperty(configurations, "yarn-site")
-    putYarnProperty('yarn.nodemanager.resource.memory-mb', clusterData['containers'] * clusterData['ramPerContainer'])
-    putYarnProperty('yarn.scheduler.minimum-allocation-mb', clusterData['ramPerContainer'])
-    putYarnProperty('yarn.scheduler.maximum-allocation-mb', clusterData['containers'] * clusterData['ramPerContainer'])
+    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
+    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
+    putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))

   def recommendMapReduce2Configurations(self, configurations, clusterData):
     putMapredProperty = self.putProperty(configurations, "mapred-site")
-    putMapredProperty('yarn.app.mapreduce.am.resource.mb', clusterData['amMemory'])
-    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * clusterData['amMemory'])) + "m")
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
     putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
-    putMapredProperty('mapreduce.reduce.memory.mb', clusterData['reduceMemory'])
-    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(0.8 * clusterData['mapMemory'])) + "m")
-    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(0.8 * clusterData['reduceMemory'])) + "m")
-    putMapredProperty('mapreduce.task.io.sort.mb', int(min(0.4 * clusterData['mapMemory'], 1024)))
+    putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
+    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
+    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
+    putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))

   def getClusterData(self, servicesList, hosts, components):

@@ -161,19 +162,21 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       24 < cluster["ram"]: 2048
     }[1]
+    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
+    if cluster["hBaseInstalled"]:
+      totalAvailableRam -= cluster["hbaseRam"]
+    cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
     '''containers = max(3, min (2*cores,min (1.8*DISKS,(Total available RAM) / MIN_CONTAINER_SIZE))))'''
-    cluster["containers"] = max(3,
+    cluster["containers"] = round(max(3,
                                 min(2 * cluster["cpu"],
-                                    int(min(1.8 * cluster["disk"],
-                                            cluster["ram"] / cluster["minContainerSize"]))))
+                                    min(ceil(1.8 * cluster["disk"]),
+                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))

     '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
-    cluster["ramPerContainer"] = max(2048,
-                                     cluster["ram"] - cluster["reservedRam"] - cluster["hbaseRam"])
-    cluster["ramPerContainer"] /= cluster["containers"]
+    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
     '''If greater than 1GB, value will be in multiples of 512.'''
     if cluster["ramPerContainer"] > 1024:
-      cluster["ramPerContainer"] = ceil(cluster["ramPerContainer"] / 512) * 512
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512

     cluster["mapMemory"] = int(cluster["ramPerContainer"])
     cluster["reduceMemory"] = cluster["ramPerContainer"]

@@ -345,4 +348,4 @@ def formatXmxSizeToBytes(value):
     modifier == 't': 1024 * 1024 * 1024 * 1024,
     modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
     }[1]
-  return to_number(value) * m
\ No newline at end of file
+  return to_number(value) * m
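
The int()/round() casts added above are what produce the integer megabyte strings asserted by the new 2.0.6 tests further down. A quick arithmetic check, using the same sample values as those tests (a sketch, not advisor code):

containers, ramPerContainer = 5, 256
assert int(round(containers * ramPerContainer)) == 1280   # yarn.nodemanager.resource.memory-mb and maximum-allocation-mb

amMemory, mapMemory, reduceMemory = 123.54, 567, 345.6666666666666
assert int(amMemory) == 123                               # yarn.app.mapreduce.am.resource.mb
assert int(round(0.8 * amMemory)) == 99                   # am.command-opts -> -Xmx99m
assert int(round(0.8 * mapMemory)) == 454                 # mapreduce.map.java.opts -> -Xmx454m
assert int(round(0.8 * reduceMemory)) == 277              # mapreduce.reduce.java.opts -> -Xmx277m
assert min(int(round(0.4 * mapMemory)), 1024) == 227      # mapreduce.task.io.sort.mb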
cluster["hbaseRam"]) - cluster["ramPerContainer"] /= cluster["containers"] + cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"]) '''If greater than 1GB, value will be in multiples of 512.''' if cluster["ramPerContainer"] > 1024: - cluster["ramPerContainer"] = ceil(cluster["ramPerContainer"] / 512) * 512 + cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512 cluster["mapMemory"] = int(cluster["ramPerContainer"]) cluster["reduceMemory"] = cluster["ramPerContainer"] @@ -345,4 +348,4 @@ def formatXmxSizeToBytes(value): modifier == 't': 1024 * 1024 * 1024 * 1024, modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024 }[1] - return to_number(value) * m \ No newline at end of file + return to_number(value) * m http://git-wip-us.apache.org/repos/asf/ambari/blob/fbe8b876/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py index bcf3f61..621e74d 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py @@ -39,17 +39,17 @@ class HDP21StackAdvisor(HDP206StackAdvisor): "org.apache.oozie.service.HCatAccessorService") def recommendHiveConfigurations(self, configurations, clusterData): - containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else clusterData['reduceMemory'] + containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory']) containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize) putHiveProperty = self.putProperty(configurations, "hive-site") - putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(containerSize / 3) * 1048576) - putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(0.8 * containerSize)) + putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(containerSize / 3)) * 1048576) + putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round(0.8 * containerSize))) + "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC") putHiveProperty('hive.tez.container.size', containerSize) def recommendTezConfigurations(self, configurations, clusterData): putTezProperty = self.putProperty(configurations, "tez-site") - putTezProperty("tez.am.resource.memory.mb", clusterData['amMemory']) + putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory'])) putTezProperty("tez.am.java.opts", "-server -Xmx" + str(int(0.8 * clusterData["amMemory"])) + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC") http://git-wip-us.apache.org/repos/asf/ambari/blob/fbe8b876/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py index 1577730..ce15609 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py +++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py @@ -174,7 +174,7 @@ class TestHDP206StackAdvisor(TestCase): result = self.stackAdvisor.validateConfigurations(services, hosts) expectedItems = [ 
- {"message": "Value is less than the recommended default of 2046", "level": "WARN"}, + {"message": "Value is less than the recommended default of 2048", "level": "WARN"}, {"message": "Value should be integer", "level": "ERROR"}, {"message": "Value should be set", "level": "ERROR"} ] @@ -235,6 +235,161 @@ class TestHDP206StackAdvisor(TestCase): ] self.assertValidationResult(expectedItems, result) + def test_getClusterData_withHBaseAnd6gbRam(self): + servicesList = ["HBASE"] + components = [] + hosts = { + "items" : [ + { + "Hosts" : { + "cpu_count" : 8, + "total_mem" : 6291456, + "disk_info" : [ + {"mountpoint" : "/"}, + {"mountpoint" : "/dev/shm"}, + {"mountpoint" : "/vagrant"}, + {"mountpoint" : "/"}, + {"mountpoint" : "/dev/shm"}, + {"mountpoint" : "/"}, + {"mountpoint" : "/dev/shm"}, + {"mountpoint" : "/vagrant"} + ] + } + } + ] + } + expected = { + "hBaseInstalled": True, + "components": components, + "cpu": 8, + "disk": 8, + "ram": 6, + "reservedRam": 2, + "hbaseRam": 1, + "minContainerSize": 512, + "totalAvailableRam": 3072, + "containers": 6, + "ramPerContainer": 512, + "mapMemory": 512, + "reduceMemory": 512, + "amMemory": 512 + } + + result = self.stackAdvisor.getClusterData(servicesList, hosts, components) + + self.assertEquals(result, expected) + + def test_getClusterData_withHBaseAnd48gbRam(self): + servicesList = ["HBASE"] + components = [] + hosts = { + "items" : [ + { + "Hosts" : { + "cpu_count" : 6, + "total_mem" : 50331648, + "disk_info" : [ + {"mountpoint" : "/"}, + {"mountpoint" : "/dev/shm"}, + {"mountpoint" : "/vagrant"}, + {"mountpoint" : "/"}, + {"mountpoint" : "/dev/shm"}, + {"mountpoint" : "/vagrant"} + ] + } + } + ] + } + expected = { + "hBaseInstalled": True, + "components": components, + "cpu": 6, + "disk": 6, + "ram": 48, + "reservedRam": 6, + "hbaseRam": 8, + "minContainerSize": 2048, + "totalAvailableRam": 34816, + "containers": 11, + "ramPerContainer": 3072, + "mapMemory": 3072, + "reduceMemory": 3072, + "amMemory": 3072 + } + + result = self.stackAdvisor.getClusterData(servicesList, hosts, components) + + self.assertEquals(result, expected) + + def test_recommendYARNConfigurations(self): + configurations = {} + clusterData = { + "containers" : 5, + "ramPerContainer": 256 + } + expected = { + "yarn-site": { + "properties": { + "yarn.nodemanager.resource.memory-mb": "1280", + "yarn.scheduler.minimum-allocation-mb": "256", + "yarn.scheduler.maximum-allocation-mb": "1280" + } + } + } + + self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData) + self.assertEquals(configurations, expected) + + def test_recommendMapReduce2Configurations_mapMemoryLessThan2560(self): + configurations = {} + clusterData = { + "mapMemory": 567, + "reduceMemory": 345.6666666666666, + "amMemory": 123.54 + } + expected = { + "mapred-site": { + "properties": { + "yarn.app.mapreduce.am.resource.mb": "123", + "yarn.app.mapreduce.am.command-opts": "-Xmx99m", + "mapreduce.map.memory.mb": "567", + "mapreduce.reduce.memory.mb": "345", + "mapreduce.map.java.opts": "-Xmx454m", + "mapreduce.reduce.java.opts": "-Xmx277m", + "mapreduce.task.io.sort.mb": "227" + } + } + } + + self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData) + self.assertEquals(configurations, expected) + + def test_getClusterData_noHostsWithoutHBase(self): + servicesList = [] + components = [] + hosts = { + "items" : [] + } + result = self.stackAdvisor.getClusterData(servicesList, hosts, components) + + expected = { + "hBaseInstalled": False, + "components": components, + "cpu": 0, 
+ "disk": 0, + "ram": 0, + "reservedRam": 1, + "hbaseRam": 1, + "minContainerSize": 256, + "totalAvailableRam": 2048, + "containers": 3, + "ramPerContainer": 682.6666666666666, + "mapMemory": 682, + "reduceMemory": 682.6666666666666, + "amMemory": 682.6666666666666 + } + + self.assertEquals(result, expected) def prepareHosts(self, hostsNames): hosts = { "items": [] } http://git-wip-us.apache.org/repos/asf/ambari/blob/fbe8b876/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py new file mode 100644 index 0000000..5f39c73 --- /dev/null +++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py @@ -0,0 +1,132 @@ +''' +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +''' + +import socket +from unittest import TestCase + +class TestHDP21StackAdvisor(TestCase): + + def setUp(self): + import imp + import os + + testDirectory = os.path.dirname(os.path.abspath(__file__)) + stackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py') + hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py') + hdp21StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py') + hdp21StackAdvisorClassName = 'HDP21StackAdvisor' + with open(stackAdvisorPath, 'rb') as fp: + imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE)) + with open(hdp206StackAdvisorPath, 'rb') as fp: + imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE)) + with open(hdp21StackAdvisorPath, 'rb') as fp: + stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE)) + clazz = getattr(stack_advisor_impl, hdp21StackAdvisorClassName) + self.stackAdvisor = clazz() + + def test_recommendOozieConfigurations_noFalconServer(self): + configurations = {} + clusterData = { + "components" : [] + } + expected = { + } + + self.stackAdvisor.recommendOozieConfigurations(configurations, clusterData) + self.assertEquals(configurations, expected) + + def test_recommendOozieConfigurations_withFalconServer(self): + configurations = {} + clusterData = { + "components" : ["FALCON_SERVER"] + } + expected = { + "oozie-site": { + "properties": { + "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService," + + "org.apache.oozie.service.PartitionDependencyManagerService," + + "org.apache.oozie.service.HCatAccessorService" + } + } + } + + self.stackAdvisor.recommendOozieConfigurations(configurations, 
+
+  def test_recommendHiveConfigurations_mapMemoryLessThan2048(self):
+    configurations = {}
+    clusterData = {
+      "mapMemory": 567,
+      "reduceMemory": 2056,
+      "containers": 3,
+      "ramPerContainer": 1024
+    }
+    expected = {
+      "hive-site": {
+        "properties": {
+          "hive.auto.convert.join.noconditionaltask.size": "718274560",
+          "hive.tez.java.opts": "-server -Xmx1645m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
+          "hive.tez.container.size": "2056"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendHiveConfigurations_mapMemoryMoreThan2048(self):
+    configurations = {}
+    clusterData = {
+      "mapMemory": 3000,
+      "reduceMemory": 2056,
+      "containers": 3,
+      "ramPerContainer": 1024
+    }
+    expected = {
+      "hive-site": {
+        "properties": {
+          "hive.auto.convert.join.noconditionaltask.size": "1048576000",
+          "hive.tez.java.opts": "-server -Xmx2400m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
+          "hive.tez.container.size": "3000"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendHiveConfigurations_containersRamIsLess(self):
+    configurations = {}
+    clusterData = {
+      "mapMemory": 3000,
+      "reduceMemory": 2056,
+      "containers": 3,
+      "ramPerContainer": 256
+    }
+    expected = {
+      "hive-site": {
+        "properties": {
+          "hive.auto.convert.join.noconditionaltask.size": "268435456",
+          "hive.tez.java.opts": "-server -Xmx614m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
+          "hive.tez.container.size": "768"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendHiveConfigurations(configurations, clusterData)
+    self.assertEquals(configurations, expected)
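
For reference, the large constants asserted above can be reproduced by hand with the arithmetic from the 2.1 hunk earlier in this commit (a sketch, not test code):

# First case: mapMemory=567 is not > 2048, so int(reduceMemory)=2056 is used, capped by 3 * 1024.
containerSize = min(3 * 1024, int(2056))                      # -> 2056
assert int(round(containerSize / 3)) * 1048576 == 718274560   # hive.auto.convert.join.noconditionaltask.size
assert int(round(0.8 * containerSize)) == 1645                # -Xmx1645m

# Second case: mapMemory=3000 is used and min(3 * 1024, 3000) leaves it at 3000.
assert int(round(3000 / 3)) * 1048576 == 1048576000
assert int(round(0.8 * 3000)) == 2400                         # -Xmx2400m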