This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit 4c86601cba532865271c60ee319c997fb1f3a195
Author: ZanderXu <zande...@apache.org>
AuthorDate: Fri Mar 29 18:18:14 2024 +0800

    HDFS-17389. [FGL] Client RPCs involving read process supports fine-grained lock (#6590)
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 50 +++++++++++-----------
 .../hadoop/hdfs/server/namenode/LeaseManager.java  |  3 +-
 2 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6f032811b88..a1f5d46ebef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2053,7 +2053,7 @@ BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
     BatchedListEntries<OpenFileEntry> batchedListEntries;
     String normalizedPath = new Path(path).toString(); // normalize path.
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.READ);
         if (openFilesTypes.contains(OpenFilesType.ALL_OPEN_FILES)) {
@@ -2068,7 +2068,7 @@ BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
           }
         }
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(null));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(null));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, null);
@@ -2080,7 +2080,7 @@ BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,

   public BatchedListEntries<OpenFileEntry> getFilesBlockingDecom(long prevId,
       String path) {
-    assert hasReadLock();
+    assert hasReadLock(FSNamesystemLockMode.FS);
     final List<OpenFileEntry> openFileEntries = Lists.newArrayList();
     LightWeightHashSet<Long> openFileIds = new LightWeightHashSet<>();
     for (DatanodeDescriptor dataNode :
@@ -2220,7 +2220,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
     FSPermissionChecker.setOperationType(operationName);
     final INode inode;
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.GLOBAL);
       try {
         checkOperation(OperationCategory.READ);
         res = FSDirStatAndListingOp.getBlockLocations(
@@ -2245,7 +2245,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
           checkBlockLocationsWhenObserver(res.blocks, srcArg);
         }
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(srcArg));
+        readUnlock(FSNamesystemLockMode.GLOBAL, operationName, getLockReportInfoSupplier(srcArg));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, srcArg);
@@ -2258,7 +2258,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
       String src = srcArg;
       checkOperation(OperationCategory.WRITE);
       try {
-        writeLock();
+        writeLock(FSNamesystemLockMode.FS);
         final long now = now();
         try {
           checkOperation(OperationCategory.WRITE);
@@ -2275,7 +2275,7 @@ LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
            }
          }
        } finally {
-          writeUnlock(operationName, getLockReportInfoSupplier(srcArg));
+          writeUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(srcArg));
        }
      } catch (Throwable e) {
        LOG.warn("Failed to update the access time of " + src, e);
@@ -2679,12 +2679,12 @@ long getPreferredBlockSize(String src) throws IOException {
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = getPermissionChecker();
     FSPermissionChecker.setOperationType(operationName);
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       checkOperation(OperationCategory.READ);
       return FSDirAttrOp.getPreferredBlockSize(dir, pc, src);
     } finally {
-      readUnlock("getPreferredBlockSize");
+      readUnlock(FSNamesystemLockMode.FS, operationName);
     }
   }

@@ -3503,13 +3503,13 @@ HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
     final FSPermissionChecker pc = getPermissionChecker();
     FSPermissionChecker.setOperationType(operationName);
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.READ);
         stat = FSDirStatAndListingOp.getFileInfo(
             dir, pc, src, resolveLink, needLocation, needBlockToken);
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(src));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(src));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
@@ -3533,12 +3533,12 @@ boolean isFileClosed(final String src) throws IOException {
     FSPermissionChecker.setOperationType(operationName);
     boolean success = false;
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.READ);
         success = FSDirStatAndListingOp.isFileClosed(dir, pc, src);
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(src));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(src));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
@@ -3601,12 +3601,12 @@ ContentSummary getContentSummary(final String src) throws IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     FSPermissionChecker.setOperationType(operationName);
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.READ);
         cs = FSDirStatAndListingOp.getContentSummary(dir, pc, src);
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(src));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(src));
       }
     } catch (AccessControlException ace) {
       logAuditEvent(false, operationName, src);
@@ -4235,12 +4235,12 @@ DirectoryListing getListing(String src, byte[] startAfter,
     final FSPermissionChecker pc = getPermissionChecker();
     FSPermissionChecker.setOperationType(operationName);
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(NameNode.OperationCategory.READ);
         dl = getListingInt(dir, pc, src, startAfter, needLocation);
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(src));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(src));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
@@ -4306,7 +4306,7 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
     BatchedDirectoryListing bdl;

     checkOperation(OperationCategory.READ);
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       checkOperation(NameNode.OperationCategory.READ);

@@ -4393,7 +4393,7 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
             returnedStartAfter);
       }
     } finally {
-      readUnlock(operationName,
+      readUnlock(FSNamesystemLockMode.FS, operationName,
           getLockReportInfoSupplier(Arrays.toString(srcs)));
     }
     for (int i = startSrcsIndex; i < srcsIndex; i++) {
@@ -6256,7 +6256,7 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
       return corruptFiles;
     }

-    readLock();
+    readLock(FSNamesystemLockMode.GLOBAL);
     try {
       checkOperation(OperationCategory.READ);
       if (!blockManager.isPopulatingReplQueues()) {
@@ -6305,7 +6305,7 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
       LOG.debug("list corrupt file blocks returned: {}", count);
       return corruptFiles;
     } finally {
-      readUnlock("listCorruptFileBlocks");
+      readUnlock(FSNamesystemLockMode.GLOBAL, "listCorruptFileBlocks");
     }
   }

@@ -8187,14 +8187,14 @@ void createEncryptionZone(final String src, final String keyName,
           keyName, src);
       final FSPermissionChecker pc = getPermissionChecker();
       checkOperation(OperationCategory.WRITE);
-      writeLock();
+      writeLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.WRITE);
         checkNameNodeSafeMode("Cannot create encryption zone on " + src);
         resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(dir, src,
             pc, metadata.getCipher(), keyName, logRetryCache);
       } finally {
-        writeUnlock(operationName,
+        writeUnlock(FSNamesystemLockMode.FS, operationName,
             getLockReportInfoSupplier(src, null, resultingStat));
       }
     } catch (AccessControlException e) {
@@ -8792,7 +8792,7 @@ void checkAccess(String src, FsAction mode) throws IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     FSPermissionChecker.setOperationType(operationName);
     try {
-      readLock();
+      readLock(FSNamesystemLockMode.FS);
       try {
         checkOperation(OperationCategory.READ);
         final INodesInPath iip = dir.resolvePath(pc, src, DirOp.READ);
@@ -8805,7 +8805,7 @@ void checkAccess(String src, FsAction mode) throws IOException {
           dir.checkPathAccess(pc, iip, mode);
         }
       } finally {
-        readUnlock(operationName, getLockReportInfoSupplier(src));
+        readUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(src));
       }
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index b21a34a932a..c2e01ffe266 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
@@ -284,7 +285,7 @@ public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
    */
   public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
       final long prevId, final String path) throws IOException {
-    assert fsnamesystem.hasReadLock();
+    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
     SortedMap<Long, Lease> remainingLeases;
     synchronized (this) {
       remainingLeases = leasesById.tailMap(prevId, false);
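For readers skimming the diff: the change is mechanical across the read-path RPCs above. Each lock call now names the scope it needs, FS for namespace-only reads (getFileInfo, getListing, isFileClosed, checkAccess, ...) and GLOBAL where block-manager state is consulted as well (getBlockLocations, listCorruptFileBlocks). The sketch below is a minimal, self-contained model of that dispatch, assuming a simple two-lock split; the class and field names are hypothetical, only the mode names mirror FSNamesystemLockMode, and Hadoop's actual FGL lock manager is more involved than this.

import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Illustrative model only: the namesystem lock is split so that callers name
 * the scope they need. FS guards namespace state, BM guards block-manager
 * state, GLOBAL covers both. Not Hadoop's actual implementation.
 */
public class FglSketch {

  public enum LockMode { GLOBAL, FS, BM }

  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
  private final ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(true);

  public void readLock(LockMode mode) {
    // GLOBAL acquires both locks in a fixed FS-then-BM order, so GLOBAL
    // callers cannot deadlock against one another.
    if (mode != LockMode.BM) {
      fsLock.readLock().lock();
    }
    if (mode != LockMode.FS) {
      bmLock.readLock().lock();
    }
  }

  public void readUnlock(LockMode mode) {
    // Release in reverse order of acquisition.
    if (mode != LockMode.FS) {
      bmLock.readLock().unlock();
    }
    if (mode != LockMode.BM) {
      fsLock.readLock().unlock();
    }
  }

  public boolean hasReadLock(LockMode mode) {
    // Holding the write lock implies read access, matching the usual
    // hasReadLock() idiom.
    boolean fs = fsLock.getReadHoldCount() > 0 || fsLock.isWriteLockedByCurrentThread();
    boolean bm = bmLock.getReadHoldCount() > 0 || bmLock.isWriteLockedByCurrentThread();
    switch (mode) {
      case FS:
        return fs;
      case BM:
        return bm;
      default:
        return fs && bm;
    }
  }

  public static void main(String[] args) {
    FglSketch lock = new FglSketch();
    // The pattern used throughout the patch: take the narrowest mode that
    // covers the state the RPC reads (FS-only for getFileInfo-style calls).
    lock.readLock(LockMode.FS);
    try {
      System.out.println("FS read lock held: " + lock.hasReadLock(LockMode.FS));
    } finally {
      lock.readUnlock(LockMode.FS);
    }
  }
}

Acquiring GLOBAL in a fixed order is one standard way to keep the combined mode deadlock-free; whatever the real lock manager does internally, the caller-visible contract in this patch is simply paired readLock(mode)/readUnlock(mode) calls in try/finally blocks.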