ambari-commits mailing list archives

From nc...@apache.org
Subject [49/50] [abbrv] ambari git commit: AMBARI-14660. HistoryServer upgrade times out when /app-logs is too large (aonishuk)
Date Fri, 15 Jan 2016 17:34:13 GMT
AMBARI-14660. HistoryServer upgrade times out when /app-logs is too large (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dfcea581
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dfcea581
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dfcea581

Branch: refs/heads/branch-dev-patch-upgrade
Commit: dfcea581e87f391199bc7bddf68a5675fc678303
Parents: 8796f5e
Author: Andrew Onishuk <aonishuk@hortonworks.com>
Authored: Thu Jan 14 12:44:44 2016 +0200
Committer: Nate Cole <ncole@hortonworks.com>
Committed: Thu Jan 14 11:43:30 2016 -0500

----------------------------------------------------------------------
 .../libraries/providers/hdfs_resource.py          | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dfcea581/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index 731bce7..71c4d5a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -218,6 +218,12 @@ class HdfsResourceWebHDFS:
   Since it's not available on non-HDFS filesystems and can also be disabled within HDFS,
   we should still keep the other implementations for such cases.
   """
+  
+  # If we have more than this many files to recursively chmod/chown, WebHDFS
+  # won't be used; 'hadoop fs -chmod (or -chown) -R ...' is run instead, since
+  # per-file WebHDFS calls are slow (roughly 17 files per second).
+  MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS = 1000 
+  
   def action_execute(self, main_resource):
     pass
   
@@ -344,6 +350,12 @@ class HdfsResourceWebHDFS:
     
     if self.main_resource.resource.recursive_chown:
       self._fill_directories_list(self.main_resource.resource.target, results)
+      
+      # Without this fallback, a large result list can leave us waiting a very long time.
+      if len(results) > HdfsResourceWebHDFS.MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS:
+        shell.checked_call(["hadoop", "fs", "-chown", "-R", format("{owner}:{group}"), self.main_resource.resource.target],
user=self.main_resource.resource.user)
+        results = []
+
     if self.main_resource.resource.change_permissions_for_parents:
       self._fill_in_parent_directories(self.main_resource.resource.target, results)
       
@@ -361,6 +373,12 @@ class HdfsResourceWebHDFS:
     
     if self.main_resource.resource.recursive_chmod:
       self._fill_directories_list(self.main_resource.resource.target, results)
+      
+      # Without this fallback, a large result list can leave us waiting a very long time.
+      if len(results) > HdfsResourceWebHDFS.MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS:
+        shell.checked_call(["hadoop", "fs", "-chmod", "-R", self.mode, self.main_resource.resource.target],
user=self.main_resource.resource.user)
+        results = []
+      
     if self.main_resource.resource.change_permissions_for_parents:
       self._fill_in_parent_directories(self.main_resource.resource.target, results)
       

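----------------------------------------------------------------------

For readers outside the Ambari codebase, the change is a simple threshold
fallback: if a recursive chown/chmod would touch more than 1000 paths, the
provider skips the per-file WebHDFS requests (at roughly 17 files per second,
1000 files already costs about a minute) and issues a single recursive
'hadoop fs -chown -R' / '-chmod -R' instead. Below is a minimal standalone
sketch of that pattern, not the provider itself: it substitutes plain
subprocess for resource_management's shell.checked_call (which also runs the
command as the resource's user) and a hypothetical collected_paths list for
the output of _fill_directories_list().

import subprocess

# Threshold taken from the patch: beyond this many paths, per-file WebHDFS
# calls (~17 files per second, so ~59 seconds per 1000 files) lose to one
# recursive 'hadoop fs' invocation.
MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS = 1000

def apply_recursive_chown(target, owner, group, collected_paths):
    """Hypothetical helper mirroring the recursive_chown branch above.

    collected_paths stands in for the list that _fill_directories_list()
    builds in the real provider. Returns the (possibly emptied) list so the
    caller can continue with per-file WebHDFS SETOWNER requests.
    """
    if len(collected_paths) > MAX_FILES_FOR_RECURSIVE_ACTION_VIA_WEBHDFS:
        # Too many files: hand the whole subtree to the hadoop CLI in one call.
        subprocess.check_call(
            ["hadoop", "fs", "-chown", "-R",
             "{0}:{1}".format(owner, group), target])
        return []  # nothing left for WebHDFS to touch
    return collected_paths

The chmod branch in the patch is identical except that it runs
'hadoop fs -chmod -R <mode> <target>'.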
