AMBARI-7478. All Region Servers are stopped after Decommission of Region Server 
(dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e9f2b4a8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e9f2b4a8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e9f2b4a8

Branch: refs/heads/branch-alerts-dev
Commit: e9f2b4a8535036488e2dbca7445d72d725af6377
Parents: 9073879
Author: Lisnichenko Dmitro <dlysniche...@hortonworks.com>
Authored: Thu Oct 2 18:33:22 2014 +0300
Committer: Lisnichenko Dmitro <dlysniche...@hortonworks.com>
Committed: Thu Oct 2 18:33:22 2014 +0300

----------------------------------------------------------------------
 .../HBASE/package/scripts/hbase_decommission.py | 62 ++++++-------
 .../services/HBASE/package/scripts/params.py    |  2 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  6 +-
 ambari-web/app/controllers/main/host/details.js | 91 +++++++++++++++++++-
 4 files changed, 120 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e9f2b4a8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
index 4f7ed9e..a623927 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
@@ -33,42 +33,42 @@ def hbase_decommission(env):
   )
   
   if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
 
-    if params.hbase_drain_only == 'true':
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} remove {host}")
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-          pass
-      pass
-
-    else:
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
 
-      hosts = params.hbase_excluded_hosts.split(",")
-      for host in hosts:
-        if host:
-          regiondrainer_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} add {host}")
-          regionmover_cmd = format(
-            "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_mover} unload {host}")
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main 
{region_mover} unload {host}")
 
-          Execute(regiondrainer_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
 
-          Execute(regionmover_cmd,
-                  user=params.hbase_user,
-                  logoutput=True
-          )
-        pass
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
       pass
     pass
-
+  pass
+  
 
   pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/e9f2b4a8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
index 04153bf..1395ebb 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
@@ -46,7 +46,7 @@ else:
 hadoop_conf_dir = "/etc/hadoop/conf"
 hbase_conf_dir = "/etc/hbase/conf"
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = config['commandParams']['mark_draining_only']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
 hbase_included_hosts = config['commandParams']['included_hosts']
 
 hbase_user = status_params.hbase_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/e9f2b4a8/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py 
b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index a2261fb..856dd9a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -103,11 +103,7 @@ class TestHBaseMaster(RMFTestCase):
                               content = StaticFile('draining_servers.rb'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config 
/etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add 
host1',
-                              logoutput = True,
-                              user = 'hbase',
-                              )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config 
/etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config 
/etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove 
host1',
                               logoutput = True,
                               user = 'hbase',
                               )

http://git-wip-us.apache.org/repos/asf/ambari/blob/e9f2b4a8/ambari-web/app/controllers/main/host/details.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/details.js 
b/ambari-web/app/controllers/main/host/details.js
index 1f1bf8e..50c2a12 100644
--- a/ambari-web/app/controllers/main/host/details.js
+++ b/ambari-web/app/controllers/main/host/details.js
@@ -882,7 +882,7 @@ App.MainHostDetailsController = Em.Controller.extend({
         this.doRecommissionAndRestart(hostName, svcName, "JOBTRACKER", 
"TASKTRACKER");
         break;
       case 'HBASE':
-        this.doRecommissionAndStart(hostName, svcName, "HBASE_MASTER", 
"HBASE_REGIONSERVER");
+        this.doRecommissionAndStartRegionServer(hostName, svcName, 
"HBASE_MASTER", "HBASE_REGIONSERVER");
     }
     this.showBackgroundOperationsPopup();
   },
@@ -971,6 +971,7 @@ App.MainHostDetailsController = Em.Controller.extend({
               "RequestInfo": {
                 "context": 
Em.I18n.t('hosts.host.regionserver.decommission.batch1'),
                 "command": "DECOMMISSION",
+                "exclusive" :"true",
                 "parameters": {
                   "slave_type": slaveType,
                   "excluded_hosts": hostNames
@@ -990,10 +991,11 @@ App.MainHostDetailsController = Em.Controller.extend({
           {
             "order_id": 2,
             "type": "PUT",
-            "uri": App.get('apiPrefix') + "/clusters/" + 
App.get('clusterName') + "/host_components/" + slaveType,
+            "uri": App.get('apiPrefix') + "/clusters/" + 
App.get('clusterName') +"/hosts/" + hostNames + "/host_components/" + slaveType,
             "RequestBodyInfo": {
               "RequestInfo": {
                 context: 
Em.I18n.t('hosts.host.regionserver.decommission.batch2'),
+                exclusive:true,
                 operation_level: {
                   level: "HOST_COMPONENT",
                   cluster_name: App.get('clusterName'),
@@ -1018,10 +1020,11 @@ App.MainHostDetailsController = Em.Controller.extend({
                 "command": "DECOMMISSION",
                 "service_name": serviceName,
                 "component_name": componentName,
+                "exclusive" :"true",
                 "parameters": {
                   "slave_type": slaveType,
                   "excluded_hosts": hostNames,
-                  "mark_draining_only": "true"
+                  "mark_draining_only": true
                 },
                 'operation_level': {
                   level: "HOST_COMPONENT",
@@ -1068,7 +1071,7 @@ App.MainHostDetailsController = Em.Controller.extend({
       return false;
     }
   },
-
+  
   /**
    * Performs Recommission and Start
    * @param {string} hostNames
@@ -1097,6 +1100,7 @@ App.MainHostDetailsController = Em.Controller.extend({
               "RequestInfo": {
                 "context": context_1,
                 "command": "DECOMMISSION",
+                "exclusive": true,
                 "parameters": {
                   "slave_type": slaveType,
                   "included_hosts": hostNames
@@ -1120,6 +1124,83 @@ App.MainHostDetailsController = Em.Controller.extend({
             "RequestBodyInfo": {
               "RequestInfo": {
                 context: startContext,
+                exclusive:true,
+                operation_level: {
+                  level: "HOST_COMPONENT",
+                  cluster_name: App.get('clusterName'),
+                  host_name: hostNames,
+                  service_name: serviceName || null
+                }
+              },
+              "Body": {
+                HostRoles: {
+                  state: "STARTED"
+                }
+              }
+            }
+          }
+        ]
+      },
+      success: 'decommissionSuccessCallback',
+      error: 'decommissionErrorCallback'
+    });
+  },
+  
+  
+  /**
+   * Performs Recommission and Start for Region Server (drain-only recommission)
+   * @param {string} hostNames
+   * @param {string} serviceName
+   * @param {string} componentName
+   * @param {string} slaveType
+   * @method doRecommissionAndStartRegionServer
+   */
+  doRecommissionAndStartRegionServer: function (hostNames, serviceName, 
componentName, slaveType) {
+    var contextNameString_1 = 'hosts.host.' + slaveType.toLowerCase() + 
'.recommission';
+    var context_1 = Em.I18n.t(contextNameString_1);
+    var contextNameString_2 = 'requestInfo.startHostComponent.' + 
slaveType.toLowerCase();
+    var startContext = Em.I18n.t(contextNameString_2);
+    App.ajax.send({
+      name: 'host.host_component.recommission_and_restart',
+      sender: this,
+      data: {
+        intervalTimeSeconds: 1,
+        tolerateSize: 1,
+        batches: [
+          {
+            "order_id": 1,
+            "type": "POST",
+            "uri": App.apiPrefix + "/clusters/" + App.get('clusterName') + 
"/requests",
+            "RequestBodyInfo": {
+              "RequestInfo": {
+                "context": context_1,
+                "command": "DECOMMISSION",
+                "exclusive":"true",
+                "parameters": {
+                  "slave_type": slaveType,
+                  "mark_draining_only": true,
+                  "included_hosts": hostNames
+                },
+                'operation_level': {
+                  level: "HOST_COMPONENT",
+                  cluster_name: App.get('clusterName'),
+                  host_name: hostNames,
+                  service_name: serviceName
+                }
+              },
+              "Requests/resource_filters": [
+                {"service_name": serviceName, "component_name": componentName}
+              ]
+            }
+          },
+          {
+            "order_id": 2,
+            "type": "PUT",
+            "uri": App.get('apiPrefix') + "/clusters/" + 
App.get('clusterName') +"/hosts/" + hostNames + "/host_components/" + slaveType,
+            "RequestBodyInfo": {
+              "RequestInfo": {
+                context: startContext,
+                exclusive:true,
                 operation_level: {
                   level: "HOST_COMPONENT",
                   cluster_name: App.get('clusterName'),
@@ -1169,6 +1250,7 @@ App.MainHostDetailsController = Em.Controller.extend({
               "RequestInfo": {
                 "context": context_1,
                 "command": "DECOMMISSION",
+                "exclusive":"true",
                 "parameters": {
                   "slave_type": slaveType,
                   "included_hosts": hostNames
@@ -1195,6 +1277,7 @@ App.MainHostDetailsController = Em.Controller.extend({
                 "command": "RESTART",
                 "service_name": serviceName,
                 "component_name": slaveType,
+                "exclusive":"true",
                 "hosts": hostNames
               }
             }

Reply via email to