jzuber4 commented on a change in pull request #2038:
URL: https://github.com/apache/hadoop/pull/2038#discussion_r446300821
##########
File path: hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
##########

@@ -4181,79 +4181,114 @@ public LocatedFileStatus next() throws IOException {
     Path path = qualify(f);
     LOG.debug("listFiles({}, {})", path, recursive);
     try {
-      // if a status was given, that is used, otherwise
-      // call getFileStatus, which triggers an existence check
-      final S3AFileStatus fileStatus = status != null
-          ? status
-          : (S3AFileStatus) getFileStatus(path);
-      if (fileStatus.isFile()) {
+      // if a status was given and it is a file.
+      if (status != null && status.isFile()) {
         // simple case: File
         LOG.debug("Path is a file");
         return new Listing.SingleStatusRemoteIterator(
-            toLocatedFileStatus(fileStatus));
-      } else {
-        // directory: do a bulk operation
-        String key = maybeAddTrailingSlash(pathToKey(path));
-        String delimiter = recursive ? null : "/";
-        LOG.debug("Requesting all entries under {} with delimiter '{}'",
-            key, delimiter);
-        final RemoteIterator<S3AFileStatus> cachedFilesIterator;
-        final Set<Path> tombstones;
-        boolean allowAuthoritative = allowAuthoritative(f);
-        if (recursive) {
-          final PathMetadata pm = metadataStore.get(path, true);
-          // shouldn't need to check pm.isDeleted() because that will have
-          // been caught by getFileStatus above.
-          MetadataStoreListFilesIterator metadataStoreListFilesIterator =
-              new MetadataStoreListFilesIterator(metadataStore, pm,
-                  allowAuthoritative);
-          tombstones = metadataStoreListFilesIterator.listTombstones();
-          // if all of the below is true
-          //  - authoritative access is allowed for this metadatastore for this directory,
-          //  - all the directory listings are authoritative on the client
-          //  - the caller does not force non-authoritative access
-          // return the listing without any further s3 access
-          if (!forceNonAuthoritativeMS &&
-              allowAuthoritative &&
-              metadataStoreListFilesIterator.isRecursivelyAuthoritative()) {
-            S3AFileStatus[] statuses = S3Guard.iteratorToStatuses(
-                metadataStoreListFilesIterator, tombstones);
-            cachedFilesIterator = listing.createProvidedFileStatusIterator(
-                statuses, ACCEPT_ALL, acceptor);
-            return listing.createLocatedFileStatusIterator(cachedFilesIterator);
-          }
-          cachedFilesIterator = metadataStoreListFilesIterator;
-        } else {
-          DirListingMetadata meta =
-              S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider,
-                  allowAuthoritative);
-          if (meta != null) {
-            tombstones = meta.listTombstones();
-          } else {
-            tombstones = null;
-          }
-          cachedFilesIterator = listing.createProvidedFileStatusIterator(
-              S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor);
-          if (allowAuthoritative && meta != null && meta.isAuthoritative()) {
-            // metadata listing is authoritative, so return it directly
-            return listing.createLocatedFileStatusIterator(cachedFilesIterator);
-          }
+            toLocatedFileStatus(status));
+      }
+      // Assuming the path to be a directory
+      // do a bulk operation.
+      RemoteIterator<S3ALocatedFileStatus> listFilesAssumingDir =
+          getListFilesAssumingDir(path,
+              recursive,
+              acceptor,
+              collectTombstones,
+              forceNonAuthoritativeMS);
+      // If there are no list entries present, we
+      // fallback to file existence check as the path
+      // can be a file or empty directory.
+      if (!listFilesAssumingDir.hasNext()) {
+        final S3AFileStatus fileStatus = (S3AFileStatus) getFileStatus(path);
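For readers skimming the hunk, here is a condensed, standalone model of the new control flow. It is only a sketch under assumptions: `StatusStub`, `bulkListing`, and `probedStatus` are hypothetical stand-ins for `S3AFileStatus`, `getListFilesAssumingDir()`, and `getFileStatus()`; none of this is Hadoop API.

```java
import java.io.FileNotFoundException;
import java.util.Iterator;
import java.util.List;

public class ListFilesFlowSketch {

  /** Minimal stand-in for S3AFileStatus: the one fact the flow needs. */
  static final class StatusStub {
    final boolean isFile;
    StatusStub(boolean isFile) {
      this.isFile = isFile;
    }
  }

  /**
   * Models the patched flow: file fast-path, bulk listing assuming a
   * directory, then a fallback existence probe when the listing is empty.
   *
   * @param knownStatus  status supplied by the caller, may be null
   * @param bulkListing  what getListFilesAssumingDir() would return
   * @param probedStatus what getFileStatus() would return; null = not found
   */
  static Iterator<String> listFiles(String path, StatusStub knownStatus,
      List<String> bulkListing, StatusStub probedStatus)
      throws FileNotFoundException {
    // 1. File fast-path: a caller-supplied status that is a file
    //    needs no listing at all.
    if (knownStatus != null && knownStatus.isFile) {
      return List.of(path).iterator();
    }
    // 2. Otherwise assume a directory and do the bulk listing.
    Iterator<String> assumingDir = bulkListing.iterator();
    if (assumingDir.hasNext()) {
      return assumingDir;
    }
    // 3. Empty listing: the path may still be a file or an empty
    //    directory, so fall back to a per-path existence check.
    if (probedStatus == null) {
      throw new FileNotFoundException(path); // nothing there at all
    }
    if (probedStatus.isFile) {
      return List.of(path).iterator(); // it was a file after all
    }
    return List.<String>of().iterator(); // empty directory: no entries
  }
}
```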
Review comment:
> we will need a list for a directory which is empty. Else we will get FNFE.

If the directory is empty, there is no directory marker, and the iterator above returns no results, doesn't that mean that there is no such directory? In that case, isn't throwing FileNotFoundException (FNFE) the right behavior?
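To make the two outcomes behind this question concrete, a small demo against the stand-in model above (hypothetical inputs, not a Hadoop test): an empty directory whose marker survives comes back as an empty iterator, while a path with neither marker nor children falls through to FNFE.

```java
import java.io.FileNotFoundException;
import java.util.Iterator;
import java.util.List;

public class EmptyDirFallbackDemo {
  public static void main(String[] args) throws Exception {
    // Case 1: empty directory whose marker object still exists.
    // The bulk listing is empty, but the fallback probe finds a
    // directory, so listFiles() yields an empty iterator, not FNFE.
    Iterator<String> emptyDir = ListFilesFlowSketch.listFiles(
        "s3a://bucket/empty-dir/", null,
        List.of(), new ListFilesFlowSketch.StatusStub(false));
    System.out.println("empty dir has entries: " + emptyDir.hasNext()); // false

    // Case 2: neither marker nor children: the probe finds nothing and
    // the fallback throws, which is the FNFE outcome defended above.
    try {
      ListFilesFlowSketch.listFiles(
          "s3a://bucket/missing/", null, List.of(), null);
    } catch (FileNotFoundException expected) {
      System.out.println("missing path -> FNFE: " + expected.getMessage());
    }
  }
}
```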