Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 c17e206f8 -> 2596e648f
  refs/heads/trunk 99b800c59 -> d5d07e767


AMBARI-14660. HistoryServer upgrade times out when /app-logs is too large (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d5d07e76
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d5d07e76
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d5d07e76

Branch: refs/heads/trunk
Commit: d5d07e767f51b694bd068d82e1f1c52796b12303
Parents: 99b800c
Author: Andrew Onishuk <aonis...@hortonworks.com>
Authored: Thu Jan 14 12:44:44 2016 +0200
Committer: Andrew Onishuk <aonis...@hortonworks.com>
Committed: Thu Jan 14 12:44:44 2016 +0200

----------------------------------------------------------------------
 .../libraries/providers/hdfs_resource.py          | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d5d07e76/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index 731bce7..71c4d5a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -218,6 +218,12 @@ class HdfsResourceWebHDFS:
  Since it's not available on non-hdfs FS and also can be disabled in scope of HDFS.
  We should still have the other implementations for such cases.
   """
+  
+  # If we have more than this many files to recursively chmod/chown,
+  # WebHDFS won't be used; 'hadoop fs -chmod (or -chown) -R ..' will be run
+  # instead, since per-file WebHDFS calls can be really slow
+  # (only ~17 files can be chmoded per second).
+  MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS = 1000
+  
   def action_execute(self, main_resource):
     pass
   
@@ -344,6 +350,12 @@ class HdfsResourceWebHDFS:
     
     if self.main_resource.resource.recursive_chown:
       self._fill_directories_list(self.main_resource.resource.target, results)
+      
+      # If we don't do this, we can end up waiting a really long time when the result list is big.
+      if len(results) > HdfsResourceWebHDFS.MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS:
+        shell.checked_call(["hadoop", "fs", "-chown", "-R", format("{owner}:{group}"), self.main_resource.resource.target], user=self.main_resource.resource.user)
+        results = []
+
     if self.main_resource.resource.change_permissions_for_parents:
      self._fill_in_parent_directories(self.main_resource.resource.target, results)
       
@@ -361,6 +373,12 @@ class HdfsResourceWebHDFS:
     
     if self.main_resource.resource.recursive_chmod:
       self._fill_directories_list(self.main_resource.resource.target, results)
+      
+      # If we don't do this, we can end up waiting a really long time when the result list is big.
+      if len(results) > HdfsResourceWebHDFS.MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS:
+        shell.checked_call(["hadoop", "fs", "-chmod", "-R", self.mode, self.main_resource.resource.target], user=self.main_resource.resource.user)
+        results = []
+      
     if self.main_resource.resource.change_permissions_for_parents:
      self._fill_in_parent_directories(self.main_resource.resource.target, results)
       

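----------------------------------------------------------------------

For context, a minimal standalone sketch of the fallback pattern this patch
introduces. This is not Ambari's actual resource_management API: the names
recursive_chown, set_owner_via_webhdfs, and entries are illustrative
stand-ins for the provider's _fill_directories_list / shell.checked_call
machinery, and the threshold mirrors the constant added above.

    import subprocess

    # At the ~17 files/second quoted in the patch comment, 1000 entries
    # already take about a minute of per-file WebHDFS calls; a large
    # /app-logs tree with, say, 100,000 entries would need ~1.6 hours,
    # which is what made the HistoryServer upgrade time out.
    MAX_FILES_FOR_RECURSIVE_WEBHDFS = 1000

    def set_owner_via_webhdfs(path, owner, group):
        # Stand-in for the provider's per-entry WebHDFS SETOWNER request.
        print("PUT %s?op=SETOWNER&owner=%s&group=%s" % (path, owner, group))

    def recursive_chown(target, owner, group, run_user, entries):
        if len(entries) > MAX_FILES_FOR_RECURSIVE_WEBHDFS:
            # A single 'hadoop fs -chown -R' call: the recursion happens
            # inside the hadoop client instead of one HTTP round-trip
            # per file.
            subprocess.check_call(["sudo", "-u", run_user,
                                   "hadoop", "fs", "-chown", "-R",
                                   "%s:%s" % (owner, group), target])
        else:
            for path in entries:
                set_owner_via_webhdfs(path, owner, group)

Note that in the actual patch the shell fallback also empties the results
list, so the per-file WebHDFS loop that follows has nothing left to do,
while change_permissions_for_parents can still append the parent
directories afterwards.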