Repository: ambari Updated Branches: refs/heads/branch-2.2 fa9bed061 -> dd846600f refs/heads/trunk ba2c77f15 -> 9ff6b8762
AMBARI-15468. file_system get_mount_point_for_dir works incorrect (aonishuk) Project: http://git-wip-us.apache.org/repos/asf/ambari/repo Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9ff6b876 Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9ff6b876 Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9ff6b876 Branch: refs/heads/trunk Commit: 9ff6b8762f35871ca1c9f537adda06711993e562 Parents: ba2c77f Author: Andrew Onishuk <aonis...@hortonworks.com> Authored: Fri Mar 18 14:58:35 2016 +0200 Committer: Andrew Onishuk <aonis...@hortonworks.com> Committed: Fri Mar 18 14:58:35 2016 +0200 ---------------------------------------------------------------------- .../resource_management/TestFileSystem.py | 47 +++++++++++++++++++- .../libraries/functions/file_system.py | 6 ++- .../stacks/HDP/2.0.6/services/stack_advisor.py | 6 ++- 3 files changed, 54 insertions(+), 5 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/9ff6b876/ambari-agent/src/test/python/resource_management/TestFileSystem.py ---------------------------------------------------------------------- diff --git a/ambari-agent/src/test/python/resource_management/TestFileSystem.py b/ambari-agent/src/test/python/resource_management/TestFileSystem.py index 4e0eb63..925758c 100644 --- a/ambari-agent/src/test/python/resource_management/TestFileSystem.py +++ b/ambari-agent/src/test/python/resource_management/TestFileSystem.py @@ -34,6 +34,8 @@ class TestFileSystem(TestCase): SINGLE_ROOT = 1 MULT_DRIVE_CONFLICT = 2 MULT_DRIVE_DISTINCT = 3 + ONE_SEGMENT_MOUNT = 4 + SAME_PREFIX_MOUNTS = 5 def _get_mount(self, type): """ @@ -64,6 +66,13 @@ class TestFileSystem(TestCase): out += os.linesep + \ "/dev/sda1 on /hadoop/hdfs/data/1 type ext4 (rw)" + os.linesep + \ "/dev/sda2 on /hadoop/hdfs/data/2 type ext4 (rw)" + elif type == self.MOUNT_TYPE.ONE_SEGMENT_MOUNT: + out += os.linesep + \ + "/dev/sda1 on /hadoop type ext4 (rw)" 
+ elif type == self.MOUNT_TYPE.SAME_PREFIX_MOUNTS: + out += os.linesep + \ + "/dev/sda1 on /hadoop/hdfs/data type ext4 (rw)" + os.linesep + \ + "/dev/sda2 on /hadoop/hdfs/data1 type ext4 (rw)" out_array = [x.split(' ') for x in out.strip().split('\n')] mount_val = [] @@ -103,6 +112,9 @@ class TestFileSystem(TestCase): """ mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.SINGLE_ROOT) + # refresh cached mounts + file_system.get_and_cache_mount_points(True) + mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data") self.assertEqual(mount_point, "/") @@ -115,8 +127,41 @@ class TestFileSystem(TestCase): """ mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.MULT_DRIVE_DISTINCT) + # refresh cached mounts + file_system.get_and_cache_mount_points(True) + mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/1") self.assertEqual(mount_point, "/hadoop/hdfs/data/1") mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/2") - self.assertEqual(mount_point, "/hadoop/hdfs/data/2") \ No newline at end of file + self.assertEqual(mount_point, "/hadoop/hdfs/data/2") + + @patch.object(Logger, "info") + @patch.object(Logger, "error") + @patch('resource_management.core.providers.mount.get_mounted') + def test_one_segment_mount(self, mounted_mock, log_error, log_info): + """ + Testing when the path has one segment. + """ + mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.ONE_SEGMENT_MOUNT) + + # refresh cached mounts + file_system.get_and_cache_mount_points(True) + + mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data/1") + self.assertEqual(mount_point, "/hadoop") + + @patch.object(Logger, "info") + @patch.object(Logger, "error") + @patch('resource_management.core.providers.mount.get_mounted') + def test_same_prefix(self, mounted_mock, log_error, log_info): + """ + Testing when two mount points have the same prefix. 
+ """ + mounted_mock.return_value = self._get_mount(self.MOUNT_TYPE.SAME_PREFIX_MOUNTS) + + # refresh cached mounts + file_system.get_and_cache_mount_points(True) + + mount_point = file_system.get_mount_point_for_dir("/hadoop/hdfs/data1") + self.assertEqual(mount_point, "/hadoop/hdfs/data1") http://git-wip-us.apache.org/repos/asf/ambari/blob/9ff6b876/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py ---------------------------------------------------------------------- diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py b/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py index 39b86dd..2a859ed 100644 --- a/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py +++ b/ambari-common/src/main/python/resource_management/libraries/functions/file_system.py @@ -63,10 +63,12 @@ def get_mount_point_for_dir(dir): # "/", "/hadoop/hdfs", and "/hadoop/hdfs/data". # So take the one with the greatest number of segments. 
for m in cached_mounts: - if dir.startswith(m['mount_point']): + # Ensure that the mount path and the dir path end with "/" + # The mount point "/hadoop" should not match the path "/hadoop1" + if os.path.join(dir, "").startswith(os.path.join(m['mount_point'], "")): if best_mount_found is None: best_mount_found = m["mount_point"] - elif best_mount_found.count(os.path.sep) < os.path.join(m["mount_point"]).count(os.path.sep): + elif os.path.join(best_mount_found, "").count(os.path.sep) < os.path.join(m["mount_point"], "").count(os.path.sep): best_mount_found = m["mount_point"] Logger.info("Mount point for directory %s is %s" % (str(dir), str(best_mount_found))) http://git-wip-us.apache.org/repos/asf/ambari/blob/9ff6b876/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py index fa1a0d8..ef82bce 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py @@ -1591,10 +1591,12 @@ def getMountPointForDir(dir, mountPoints): # "/", "/hadoop/hdfs", and "/hadoop/hdfs/data". # So take the one with the greatest number of segments. for mountPoint in mountPoints: - if dir.startswith(mountPoint): + # Ensure that the mount path and the dir path end with "/" + # The mount point "/hadoop" should not match the path "/hadoop1" + if os.path.join(dir, "").startswith(os.path.join(mountPoint, "")): if bestMountFound is None: bestMountFound = mountPoint - elif bestMountFound.count(os.path.sep) < os.path.join(mountPoint, "").count(os.path.sep): + elif os.path.join(bestMountFound, "").count(os.path.sep) < os.path.join(mountPoint, "").count(os.path.sep): bestMountFound = mountPoint return bestMountFound