Github user jackylk commented on a diff in the pull request: https://github.com/apache/carbondata/pull/2045#discussion_r175009993 --- Diff: core/src/main/java/org/apache/carbondata/core/locks/CarbonLockUtil.java --- @@ -107,4 +114,36 @@ public static int getLockProperty(String property, int defaultValue) { } } + /** + * Currently the segment lock files are not deleted immediately when unlock, + * so it needs to delete expired lock files before delete loads. + */ + public static void deleteExpiredSegmentLockFiles(CarbonTable carbonTable) { + LoadMetadataDetails[] details = + SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath()); + if (details != null && details.length > 0) { + AbsoluteTableIdentifier absoluteTableIdentifier = carbonTable.getAbsoluteTableIdentifier(); + long segmentLockFilesPreservTime = + CarbonProperties.getInstance().getSegmentLockFilesPreserveHours(); + long currTime = System.currentTimeMillis(); + for (LoadMetadataDetails oneRow : details) { + if (oneRow.getVisibility().equalsIgnoreCase("false") || + SegmentStatus.SUCCESS == oneRow.getSegmentStatus() || + SegmentStatus.LOAD_FAILURE == oneRow.getSegmentStatus() || + SegmentStatus.LOAD_PARTIAL_SUCCESS == oneRow.getSegmentStatus() || + SegmentStatus.COMPACTED == oneRow.getSegmentStatus()) { + String location = CarbonTablePath + .getSegmentLockFilePath(absoluteTableIdentifier.getTablePath(), + oneRow.getLoadName()); + CarbonFile carbonFile = + FileFactory.getCarbonFile(location, FileFactory.getFileType(location)); + if (carbonFile.exists()) { --- End diff -- This operation will call the HDFS NameNode, so if there are 1000 lock files, this operation will be performed once for each lock file, making it very time consuming. A better way is to do a CarbonFile.listFiles on the lock folder and get all CarbonFiles in one NameNode call. That way you do not need to read the table status (the code from line 122 to line 137 is not required).
---