rakeshadr commented on a change in pull request #1404: URL: https://github.com/apache/hadoop-ozone/pull/1404#discussion_r491969762
########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -129,6 +133,123 @@ public static OMPathInfo verifyFilesInPath( return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); } + /** + * Verify any dir/key exist in the given path in the specified + * volume/bucket by iterating through directory table. + * + * @param omMetadataManager OM Metadata manager + * @param volumeName volume name + * @param bucketName bucket name + * @param keyName key name + * @param keyPath path + * @return OMPathInfoV1 path info object + * @throws IOException on DB failure + */ + public static OMPathInfoV1 verifyDirectoryKeysInPath( + @Nonnull OMMetadataManager omMetadataManager, + @Nonnull String volumeName, + @Nonnull String bucketName, @Nonnull String keyName, + @Nonnull Path keyPath) throws IOException { + + String leafNodeName = OzoneFSUtils.getFileName(keyName); + List<String> missing = new ArrayList<>(); + List<OzoneAcl> inheritAcls = new ArrayList<>(); + OMDirectoryResult result = OMDirectoryResult.NONE; + + Iterator<Path> elements = keyPath.iterator(); + // TODO: volume id and bucket id generation logic. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = + omMetadataManager.getBucketTable().get(bucketKey).getObjectID(); + long lastKnownParentId = bucketId; + OmDirectoryInfo parentPrefixInfo = null; + String dbDirName = ""; // absolute path for trace logs + while (elements.hasNext()) { + String fileName = elements.next().toString(); + if (missing.size() > 0) { + // Add all the sub-dirs to the missing list except the leaf element. + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt. + // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list. + if(elements.hasNext()){ + // skips leaf node. + missing.add(fileName); + } + continue; + } + + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt + // 1. Do lookup on directoryTable. If not exists goto next step. + // 2. Do look on keyTable. If not exists goto next step. + // 3. Add 'sub-dir' to missing parents list + String dbNodeName = omMetadataManager.getOzoneLeafNodeKey( + lastKnownParentId, fileName); + OmDirectoryInfo omPrefixInfo = omMetadataManager.getDirectoryTable(). + get(dbNodeName); + if (omPrefixInfo != null) { + dbDirName += omPrefixInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER; + if (elements.hasNext()) { Review comment: As the 'OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH' comment says, it means some part of the parent path already exists. Assume the user-given path is "vol1/buck1/a/b/c": if there is a directory named "a/b", this enum value is returned. Please refer to: https://github.com/apache/hadoop-ozone/blob/master/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java#L202 ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -129,6 +133,123 @@ public static OMPathInfo verifyFilesInPath( return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); } + /** + * Verify any dir/key exist in the given path in the specified + * volume/bucket by iterating through directory table.
+ * + * @param omMetadataManager OM Metadata manager + * @param volumeName volume name + * @param bucketName bucket name + * @param keyName key name + * @param keyPath path + * @return OMPathInfoV1 path info object + * @throws IOException on DB failure + */ + public static OMPathInfoV1 verifyDirectoryKeysInPath( + @Nonnull OMMetadataManager omMetadataManager, + @Nonnull String volumeName, + @Nonnull String bucketName, @Nonnull String keyName, + @Nonnull Path keyPath) throws IOException { + + String leafNodeName = OzoneFSUtils.getFileName(keyName); + List<String> missing = new ArrayList<>(); + List<OzoneAcl> inheritAcls = new ArrayList<>(); + OMDirectoryResult result = OMDirectoryResult.NONE; + + Iterator<Path> elements = keyPath.iterator(); + // TODO: volume id and bucket id generation logic. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = + omMetadataManager.getBucketTable().get(bucketKey).getObjectID(); + long lastKnownParentId = bucketId; + OmDirectoryInfo parentPrefixInfo = null; + String dbDirName = ""; // absolute path for trace logs + while (elements.hasNext()) { + String fileName = elements.next().toString(); + if (missing.size() > 0) { + // Add all the sub-dirs to the missing list except the leaf element. + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt. + // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list. + if(elements.hasNext()){ + // skips leaf node. + missing.add(fileName); + } + continue; + } + + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt + // 1. Do lookup on directoryTable. If not exists goto next step. + // 2. Do look on keyTable. If not exists goto next step. + // 3. Add 'sub-dir' to missing parents list + String dbNodeName = omMetadataManager.getOzoneLeafNodeKey( + lastKnownParentId, fileName); + OmDirectoryInfo omPrefixInfo = omMetadataManager.getDirectoryTable(). + get(dbNodeName); + if (omPrefixInfo != null) { + dbDirName += omPrefixInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER; + if (elements.hasNext()) { + lastKnownParentId = omPrefixInfo.getObjectID(); + parentPrefixInfo = omPrefixInfo; + continue; + } else { + // Checked all the sub-dirs till the leaf node. + // Found a directory in the given path. + result = OMDirectoryResult.DIRECTORY_EXISTS; + } + } else { + if (parentPrefixInfo != null) { Review comment: Good point, I will add logic to handle it. ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -129,6 +133,123 @@ public static OMPathInfo verifyFilesInPath( return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); } + /** + * Verify any dir/key exist in the given path in the specified + * volume/bucket by iterating through directory table. 
+ * + * @param omMetadataManager OM Metadata manager + * @param volumeName volume name + * @param bucketName bucket name + * @param keyName key name + * @param keyPath path + * @return OMPathInfoV1 path info object + * @throws IOException on DB failure + */ + public static OMPathInfoV1 verifyDirectoryKeysInPath( + @Nonnull OMMetadataManager omMetadataManager, + @Nonnull String volumeName, + @Nonnull String bucketName, @Nonnull String keyName, + @Nonnull Path keyPath) throws IOException { + + String leafNodeName = OzoneFSUtils.getFileName(keyName); + List<String> missing = new ArrayList<>(); + List<OzoneAcl> inheritAcls = new ArrayList<>(); + OMDirectoryResult result = OMDirectoryResult.NONE; + + Iterator<Path> elements = keyPath.iterator(); + // TODO: volume id and bucket id generation logic. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = + omMetadataManager.getBucketTable().get(bucketKey).getObjectID(); + long lastKnownParentId = bucketId; + OmDirectoryInfo parentPrefixInfo = null; Review comment: Sure, will take care ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -129,6 +133,123 @@ public static OMPathInfo verifyFilesInPath( return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); } + /** + * Verify any dir/key exist in the given path in the specified + * volume/bucket by iterating through directory table. + * + * @param omMetadataManager OM Metadata manager + * @param volumeName volume name + * @param bucketName bucket name + * @param keyName key name + * @param keyPath path + * @return OMPathInfoV1 path info object + * @throws IOException on DB failure + */ + public static OMPathInfoV1 verifyDirectoryKeysInPath( + @Nonnull OMMetadataManager omMetadataManager, + @Nonnull String volumeName, + @Nonnull String bucketName, @Nonnull String keyName, + @Nonnull Path keyPath) throws IOException { + + String leafNodeName = OzoneFSUtils.getFileName(keyName); + List<String> missing = new ArrayList<>(); + List<OzoneAcl> inheritAcls = new ArrayList<>(); + OMDirectoryResult result = OMDirectoryResult.NONE; + + Iterator<Path> elements = keyPath.iterator(); + // TODO: volume id and bucket id generation logic. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = + omMetadataManager.getBucketTable().get(bucketKey).getObjectID(); + long lastKnownParentId = bucketId; + OmDirectoryInfo parentPrefixInfo = null; + String dbDirName = ""; // absolute path for trace logs + while (elements.hasNext()) { + String fileName = elements.next().toString(); + if (missing.size() > 0) { + // Add all the sub-dirs to the missing list except the leaf element. + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt. + // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list. + if(elements.hasNext()){ + // skips leaf node. + missing.add(fileName); + } + continue; + } + + // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt + // 1. Do lookup on directoryTable. If not exists goto next step. + // 2. Do look on keyTable. If not exists goto next step. + // 3. Add 'sub-dir' to missing parents list + String dbNodeName = omMetadataManager.getOzoneLeafNodeKey( + lastKnownParentId, fileName); + OmDirectoryInfo omPrefixInfo = omMetadataManager.getDirectoryTable(). 
Review comment: Sure, will take care ########## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java ########## @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.*; + +/** + * This class represents the directory information by keeping each component + * in the user given path and a pointer to its parent directory element in the + * path. Also, it stores directory node related metdata details. + */ +public class OmDirectoryInfo extends WithObjectID { + private long parentObjectID; // pointer to parent directory + + private String name; // directory name + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + + public OmDirectoryInfo(Builder builder) { + this.name = builder.name; + this.acls = builder.acls; + this.metadata = builder.metadata; + this.objectID = builder.objectID; + this.updateID = builder.updateID; + this.parentObjectID = builder.parentObjectID; + this.creationTime = builder.creationTime; + this.modificationTime = builder.modificationTime; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmDirectoryInfo.Builder newBuilder() { + return new OmDirectoryInfo.Builder(); + } + + /** + * Builder for Directory Info. 
+ */ + public static class Builder { + private long parentObjectID; // pointer to parent directory + + private long objectID; + private long updateID; + + private String name; + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + private Map<String, String> metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + public Builder setObjectID(long objectId) { + this.objectID = objectId; + return this; + } + + public Builder setUpdateID(long updateId) { + this.updateID = updateId; + return this; + } + + public Builder setName(String dirName) { + this.name = dirName; + return this; + } + + public Builder setCreationTime(long newCreationTime) { + this.creationTime = newCreationTime; + return this; + } + + public Builder setModificationTime(long newModificationTime) { + this.modificationTime = newModificationTime; + return this; + } + + public Builder setAcls(List<OzoneAcl> newAcls) { + this.acls = newAcls; + return this; + } + + public Builder addAcl(OzoneAcl ozoneAcl) { + if (ozoneAcl != null) { + this.acls.add(ozoneAcl); + } + return this; + } + + public Builder setMetadata(Map<String, String> newMetadata) { + this.metadata = newMetadata; + return this; + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map<String, String> additionalMetadata) { Review comment: Sure, will take care ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java ########## @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.codec; + +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo; + +import java.io.IOException; + +/** + * Codec to encode OmDirectoryInfo as byte array. 
+ */ +public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> { + + @Override + public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException { + Preconditions + .checkNotNull(object, "Null object can't be converted " + + "to byte array."); + return object.getProtobuf().toByteArray(); + } + + @Override + public OmDirectoryInfo fromPersistedFormat(byte[] rawData) + throws IOException { + Preconditions + .checkNotNull(rawData, + "Null byte array can't converted to real object."); + try { + return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData)); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException( + "Can't encode the the raw data from the byte array", e); + } + } + + @Override + public OmDirectoryInfo copyObject(OmDirectoryInfo object) { + return object.copyObject(); + } +} Review comment: Sure, will take care ########## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java ########## @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.*; + +/** + * This class represents the directory information by keeping each component + * in the user given path and a pointer to its parent directory element in the + * path. Also, it stores directory node related metdata details. + */ +public class OmDirectoryInfo extends WithObjectID { + private long parentObjectID; // pointer to parent directory + + private String name; // directory name + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + + public OmDirectoryInfo(Builder builder) { + this.name = builder.name; + this.acls = builder.acls; + this.metadata = builder.metadata; + this.objectID = builder.objectID; + this.updateID = builder.updateID; + this.parentObjectID = builder.parentObjectID; + this.creationTime = builder.creationTime; + this.modificationTime = builder.modificationTime; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmDirectoryInfo.Builder newBuilder() { + return new OmDirectoryInfo.Builder(); + } + + /** + * Builder for Directory Info. 
+ */ + public static class Builder { + private long parentObjectID; // pointer to parent directory + + private long objectID; + private long updateID; + + private String name; + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + private Map<String, String> metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + public Builder setObjectID(long objectId) { + this.objectID = objectId; + return this; + } + + public Builder setUpdateID(long updateId) { + this.updateID = updateId; + return this; + } + + public Builder setName(String dirName) { + this.name = dirName; + return this; + } + + public Builder setCreationTime(long newCreationTime) { + this.creationTime = newCreationTime; + return this; + } + + public Builder setModificationTime(long newModificationTime) { + this.modificationTime = newModificationTime; + return this; + } + + public Builder setAcls(List<OzoneAcl> newAcls) { + this.acls = newAcls; + return this; + } + + public Builder addAcl(OzoneAcl ozoneAcl) { + if (ozoneAcl != null) { + this.acls.add(ozoneAcl); + } + return this; + } + + public Builder setMetadata(Map<String, String> newMetadata) { + this.metadata = newMetadata; + return this; + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map<String, String> additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + public OmDirectoryInfo build() { + return new OmDirectoryInfo(this); + } + } + + @Override + public String toString() { + return getObjectID() + ""; + } + + public long getParentObjectID() { + return parentObjectID; + } + + public String getPath() { + return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName(); + } + + public String getName() { + return name; + } + + public long getCreationTime() { + return creationTime; + } + + public long getModificationTime() { + return modificationTime; + } + + public List<OzoneAcl> getAcls() { + return acls; + } + + /** + * Creates DirectoryInfo protobuf from OmDirectoryInfo. + */ + public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() { + OzoneManagerProtocolProtos.DirectoryInfo.Builder pib = + OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name) + .setCreationTime(creationTime) + .setModificationTime(modificationTime) + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .setObjectID(objectID) + .setUpdateID(updateID) + .setParentID(parentObjectID); + if (acls != null) { + pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); + } + return pib.build(); + } + + /** + * Parses DirectoryInfo protobuf and creates OmPrefixInfo. 
+ * @param dirInfo + * @return instance of OmDirectoryInfo + */ + public static OmDirectoryInfo getFromProtobuf( + OzoneManagerProtocolProtos.DirectoryInfo dirInfo) { + OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder() + .setName(dirInfo.getName()) + .setCreationTime(dirInfo.getCreationTime()) + .setModificationTime(dirInfo.getModificationTime()) + .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList())); + if (dirInfo.getMetadataList() != null) { + opib.addAllMetadata(KeyValueUtil + .getFromProtobuf(dirInfo.getMetadataList())); + } + if (dirInfo.hasObjectID()) { + opib.setObjectID(dirInfo.getObjectID()); + } + if (dirInfo.hasParentID()) { + opib.setParentObjectID(dirInfo.getParentID()); + } + if (dirInfo.hasUpdateID()) { + opib.setUpdateID(dirInfo.getUpdateID()); + } + return opib.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o; + return creationTime == omDirInfo.creationTime && + modificationTime == omDirInfo.modificationTime && + name.equals(omDirInfo.name) && + Objects.equals(metadata, omDirInfo.metadata) && + Objects.equals(acls, omDirInfo.acls) && + objectID == omDirInfo.objectID && + updateID == omDirInfo.updateID && + parentObjectID == omDirInfo.parentObjectID; + } + + @Override + public int hashCode() { + return Objects.hash(objectID, parentObjectID, name); + } + + /** + * Return a new copy of the object. + */ + public OmDirectoryInfo copyObject() { + OmDirectoryInfo.Builder builder = new Builder() + .setName(name) + .setCreationTime(creationTime) + .setModificationTime(modificationTime) + .setParentObjectID(parentObjectID) + .setObjectID(objectID) + .setUpdateID(updateID); + + acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), + acl.getName(), (BitSet) acl.getAclBitSet().clone(), + acl.getAclScope()))); + + if (metadata != null) { + metadata.forEach((k, v) -> builder.addMetadata(k, v)); + } + + return builder.build(); + } +} Review comment: Sure, will take care. One doubt, can't checkstyle detect this? ########## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java ########## @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.*; + +/** + * This class represents the directory information by keeping each component + * in the user given path and a pointer to its parent directory element in the + * path. 
Also, it stores directory node related metdata details. + */ +public class OmDirectoryInfo extends WithObjectID { + private long parentObjectID; // pointer to parent directory + + private String name; // directory name + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + + public OmDirectoryInfo(Builder builder) { + this.name = builder.name; + this.acls = builder.acls; + this.metadata = builder.metadata; + this.objectID = builder.objectID; + this.updateID = builder.updateID; + this.parentObjectID = builder.parentObjectID; + this.creationTime = builder.creationTime; + this.modificationTime = builder.modificationTime; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmDirectoryInfo.Builder newBuilder() { + return new OmDirectoryInfo.Builder(); + } + + /** + * Builder for Directory Info. + */ + public static class Builder { + private long parentObjectID; // pointer to parent directory + + private long objectID; + private long updateID; + + private String name; + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + private Map<String, String> metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + public Builder setObjectID(long objectId) { + this.objectID = objectId; + return this; + } + + public Builder setUpdateID(long updateId) { + this.updateID = updateId; + return this; + } + + public Builder setName(String dirName) { + this.name = dirName; + return this; + } + + public Builder setCreationTime(long newCreationTime) { + this.creationTime = newCreationTime; + return this; + } + + public Builder setModificationTime(long newModificationTime) { + this.modificationTime = newModificationTime; + return this; + } + + public Builder setAcls(List<OzoneAcl> newAcls) { + this.acls = newAcls; + return this; + } + + public Builder addAcl(OzoneAcl ozoneAcl) { + if (ozoneAcl != null) { + this.acls.add(ozoneAcl); + } + return this; + } + + public Builder setMetadata(Map<String, String> newMetadata) { + this.metadata = newMetadata; + return this; + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map<String, String> additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + public OmDirectoryInfo build() { + return new OmDirectoryInfo(this); + } + } + + @Override + public String toString() { + return getObjectID() + ""; Review comment: Sure, will take care ########## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java ########## @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.*; + +/** + * This class represents the directory information by keeping each component + * in the user given path and a pointer to its parent directory element in the + * path. Also, it stores directory node related metdata details. + */ +public class OmDirectoryInfo extends WithObjectID { + private long parentObjectID; // pointer to parent directory + + private String name; // directory name + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + + public OmDirectoryInfo(Builder builder) { + this.name = builder.name; + this.acls = builder.acls; + this.metadata = builder.metadata; + this.objectID = builder.objectID; + this.updateID = builder.updateID; + this.parentObjectID = builder.parentObjectID; + this.creationTime = builder.creationTime; + this.modificationTime = builder.modificationTime; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmDirectoryInfo.Builder newBuilder() { + return new OmDirectoryInfo.Builder(); + } + + /** + * Builder for Directory Info. + */ + public static class Builder { + private long parentObjectID; // pointer to parent directory + + private long objectID; + private long updateID; + + private String name; + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + private Map<String, String> metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + public Builder setObjectID(long objectId) { + this.objectID = objectId; + return this; + } + + public Builder setUpdateID(long updateId) { + this.updateID = updateId; + return this; + } + + public Builder setName(String dirName) { + this.name = dirName; + return this; + } + + public Builder setCreationTime(long newCreationTime) { + this.creationTime = newCreationTime; + return this; + } + + public Builder setModificationTime(long newModificationTime) { + this.modificationTime = newModificationTime; + return this; + } + + public Builder setAcls(List<OzoneAcl> newAcls) { + this.acls = newAcls; + return this; + } + + public Builder addAcl(OzoneAcl ozoneAcl) { + if (ozoneAcl != null) { + this.acls.add(ozoneAcl); + } + return this; + } + + public Builder setMetadata(Map<String, String> newMetadata) { + this.metadata = newMetadata; Review comment: Sure, will take care ########## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java ########## @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.util.*; + +/** + * This class represents the directory information by keeping each component + * in the user given path and a pointer to its parent directory element in the + * path. Also, it stores directory node related metdata details. + */ +public class OmDirectoryInfo extends WithObjectID { + private long parentObjectID; // pointer to parent directory + + private String name; // directory name + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + + public OmDirectoryInfo(Builder builder) { + this.name = builder.name; + this.acls = builder.acls; + this.metadata = builder.metadata; + this.objectID = builder.objectID; + this.updateID = builder.updateID; + this.parentObjectID = builder.parentObjectID; + this.creationTime = builder.creationTime; + this.modificationTime = builder.modificationTime; + } + + /** + * Returns new builder class that builds a OmPrefixInfo. + * + * @return Builder + */ + public static OmDirectoryInfo.Builder newBuilder() { + return new OmDirectoryInfo.Builder(); + } + + /** + * Builder for Directory Info. + */ + public static class Builder { + private long parentObjectID; // pointer to parent directory + + private long objectID; + private long updateID; + + private String name; + + private long creationTime; + private long modificationTime; + + private List<OzoneAcl> acls; + private Map<String, String> metadata; + + public Builder() { + //Default values + this.acls = new LinkedList<>(); + this.metadata = new HashMap<>(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + public Builder setObjectID(long objectId) { + this.objectID = objectId; + return this; + } + + public Builder setUpdateID(long updateId) { + this.updateID = updateId; + return this; + } + + public Builder setName(String dirName) { + this.name = dirName; + return this; + } + + public Builder setCreationTime(long newCreationTime) { + this.creationTime = newCreationTime; + return this; + } + + public Builder setModificationTime(long newModificationTime) { + this.modificationTime = newModificationTime; + return this; + } + + public Builder setAcls(List<OzoneAcl> newAcls) { + this.acls = newAcls; Review comment: Sure, will take care ########## File path: hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java ########## @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.Assert.fail; + +/** + * Test verifies the entries and operations in directory table. + */ +public class TestOzoneDirectory { + + @Rule + public Timeout timeout = new Timeout(300000); + Review comment: Good point, will add more UTs. ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java ########## @@ -0,0 +1,312 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateDirectoryRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateDirectoryResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .Status; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handle create directory request. It will add path components to the directory + * table and maintains file system semantics. 
+ */ +public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class); + + public OMDirectoryCreateRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + + CreateDirectoryRequest createDirectoryRequest = getOmRequest() + .getCreateDirectoryRequest(); + KeyArgs keyArgs = createDirectoryRequest.getKeyArgs(); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + omResponse.setCreateDirectoryResponse(CreateDirectoryResponse.newBuilder()); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumCreateDirectory(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + IOException exception = null; + OMClientResponse omClientResponse = null; + Result result = Result.FAILURE; + List<OmDirectoryInfo> missingParentInfos; + + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); + + // Check if this is the root of the filesystem. + if (keyName.length() == 0) { + throw new OMException("Directory create failed. Cannot create " + + "directory at root of the filesystem", + OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT); + } + // acquire lock + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + Path keyPath = Paths.get(keyName); + + // Need to check if any files exist in the given path, if they exist we + // cannot create a directory with the given key. 
+ // Verify the path against directory table + OMFileRequest.OMPathInfoV1 omPathInfo = + OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName, + bucketName, keyName, keyPath); + OMFileRequest.OMDirectoryResult omDirectoryResult = + omPathInfo.getDirectoryResult(); + + if (omDirectoryResult == FILE_EXISTS || + omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { + throw new OMException("Unable to create directory: " +keyName + + " in volume/bucket: " + volumeName + "/" + bucketName, + FILE_ALREADY_EXISTS); + } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH || + omDirectoryResult == NONE) { + + // prepare all missing parents + missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo( + ozoneManager, keyArgs, omPathInfo, trxnLogIndex); + // prepare leafNode dir + OmDirectoryInfo dirInfo = createDirectoryInfoWithACL( + omPathInfo.getLeafNodeName(), + keyArgs, omPathInfo.getLeafNodeObjectId(), + omPathInfo.getLastKnownParentId(), trxnLogIndex, + OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); + OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, + Optional.of(dirInfo), Optional.of(missingParentInfos), + trxnLogIndex); + result = OMDirectoryCreateRequest.Result.SUCCESS; + omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(), + dirInfo, missingParentInfos, result); + } else { + result = Result.DIRECTORY_ALREADY_EXISTS; + omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); + omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(), + result); + } + } catch (IOException ex) { + exception = ex; + omClientResponse = new OMDirectoryCreateResponseV1( + createErrorOMResponse(omResponse, exception), result); + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, + auditMap, exception, userInfo)); + + logResult(createDirectoryRequest, keyArgs, omMetrics, result, exception); + + return omClientResponse; + } + + private void logResult(CreateDirectoryRequest createDirectoryRequest, + KeyArgs keyArgs, OMMetrics omMetrics, Result result, + IOException exception) { + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + switch (result) { + case SUCCESS: + omMetrics.incNumKeys(); + if (LOG.isDebugEnabled()) { + LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}", + volumeName, bucketName, keyName); + } + break; + case DIRECTORY_ALREADY_EXISTS: + if (LOG.isDebugEnabled()) { + LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}", + volumeName, bucketName, keyName, exception); + } + break; + case FAILURE: + omMetrics.incNumCreateDirectoryFails(); + LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " + + "Exception:{}", volumeName, bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}", + createDirectoryRequest); + } + } + + /** + * Construct OmDirectoryInfo for every parent directory in missing list. 
+ * @param ozoneManager + * @param keyArgs + * @param pathInfo list of parent directories to be created and its ACLs + * @param trxnLogIndex + * @return + * @throws IOException + */ + public static List<OmDirectoryInfo> getAllParentDirInfo( + OzoneManager ozoneManager, KeyArgs keyArgs, + OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex) + throws IOException { + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + List<OmDirectoryInfo> missingParentInfos = new ArrayList<>(); + + ImmutablePair<Long, Long> objIdRange = OMFileRequest + .getObjIdRangeFromTxId(trxnLogIndex); + long baseObjId = objIdRange.getLeft(); + long maxObjId = objIdRange.getRight(); + long maxLevels = maxObjId - baseObjId; + long objectCount = 1; // baseObjID is used by the leaf directory + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + long lastKnownParentId = pathInfo.getLastKnownParentId(); + List<String> missingParents = pathInfo.getMissingParents(); + List<OzoneAcl> inheritAcls = pathInfo.getAcls(); + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + maxLevels + ". Unable to create directory: " + keyName + + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to KeyTable", missingKey); + // what about keyArgs for parent directories? TODO Review comment: I just copy pasted from original file OMDirectoryCreateRequestV.java. I don't know more about this. Please refer: https://github.com/apache/hadoop-ozone/blob/master/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java#L270 ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java ########## @@ -0,0 +1,312 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateDirectoryRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .CreateDirectoryResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .Status; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handle create directory request. It will add path components to the directory + * table and maintains file system semantics. 
+ */ +public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class); + + public OMDirectoryCreateRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + + CreateDirectoryRequest createDirectoryRequest = getOmRequest() + .getCreateDirectoryRequest(); + KeyArgs keyArgs = createDirectoryRequest.getKeyArgs(); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + omResponse.setCreateDirectoryResponse(CreateDirectoryResponse.newBuilder()); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumCreateDirectory(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + IOException exception = null; + OMClientResponse omClientResponse = null; + Result result = Result.FAILURE; + List<OmDirectoryInfo> missingParentInfos; + + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY); + + // Check if this is the root of the filesystem. + if (keyName.length() == 0) { + throw new OMException("Directory create failed. Cannot create " + + "directory at root of the filesystem", + OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT); + } + // acquire lock + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + Path keyPath = Paths.get(keyName); + + // Need to check if any files exist in the given path, if they exist we + // cannot create a directory with the given key. 
+ // Verify the path against directory table + OMFileRequest.OMPathInfoV1 omPathInfo = + OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName, + bucketName, keyName, keyPath); + OMFileRequest.OMDirectoryResult omDirectoryResult = + omPathInfo.getDirectoryResult(); + + if (omDirectoryResult == FILE_EXISTS || + omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { + throw new OMException("Unable to create directory: " +keyName + + " in volume/bucket: " + volumeName + "/" + bucketName, + FILE_ALREADY_EXISTS); + } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH || + omDirectoryResult == NONE) { + + // prepare all missing parents + missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo( + ozoneManager, keyArgs, omPathInfo, trxnLogIndex); + // prepare leafNode dir + OmDirectoryInfo dirInfo = createDirectoryInfoWithACL( + omPathInfo.getLeafNodeName(), + keyArgs, omPathInfo.getLeafNodeObjectId(), + omPathInfo.getLastKnownParentId(), trxnLogIndex, + OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); + OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, + Optional.of(dirInfo), Optional.of(missingParentInfos), + trxnLogIndex); + result = OMDirectoryCreateRequest.Result.SUCCESS; + omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(), + dirInfo, missingParentInfos, result); + } else { + result = Result.DIRECTORY_ALREADY_EXISTS; + omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS); + omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(), + result); + } + } catch (IOException ex) { + exception = ex; + omClientResponse = new OMDirectoryCreateResponseV1( + createErrorOMResponse(omResponse, exception), result); + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, + auditMap, exception, userInfo)); + + logResult(createDirectoryRequest, keyArgs, omMetrics, result, exception); + + return omClientResponse; + } + + private void logResult(CreateDirectoryRequest createDirectoryRequest, + KeyArgs keyArgs, OMMetrics omMetrics, Result result, + IOException exception) { + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + switch (result) { + case SUCCESS: + omMetrics.incNumKeys(); Review comment: Good catch. Yes, will incr it ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -254,4 +440,34 @@ public static void addKeyTableCacheEntries( new CacheValue<>(keyInfo, index)); } } + + /** + * Adding directory info to the Table cache. + * + * @param omMetadataManager OM Metdata Manager + * @param dirInfo directory info + * @param missingParentInfos list of the parents to be added to DB + * @param trxnLogIndex transaction log index + */ + public static void addDirectoryTableCacheEntries( + OMMetadataManager omMetadataManager, + Optional<OmDirectoryInfo> dirInfo, + Optional<List<OmDirectoryInfo>> missingParentInfos, + long trxnLogIndex) { + for (OmDirectoryInfo parentInfo : missingParentInfos.get()) { + omMetadataManager.getDirectoryTable().addCacheEntry( + new CacheKey<>(omMetadataManager.getOzoneLeafNodeKey( Review comment: Yes, it is in the new format: "parentInfo.getParentObjectID(), parentInfo.getName()". 
I suspect the variable name is what's confusing. I think renaming the variable 'parentInfo' to 'subDirInfo' will make it clearer: "subDirInfo.getParentObjectID(), subDirInfo.getName()". Does it make sense to you? ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -129,6 +133,123 @@ public static OMPathInfo verifyFilesInPath( return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls); } + /** + * Verify any dir/key exist in the given path in the specified + * volume/bucket by iterating through directory table. + * + * @param omMetadataManager OM Metadata manager + * @param volumeName volume name + * @param bucketName bucket name + * @param keyName key name + * @param keyPath path + * @return OMPathInfoV1 path info object + * @throws IOException on DB failure + */ + public static OMPathInfoV1 verifyDirectoryKeysInPath( Review comment: Thanks for the help. Will incorporate it. ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -156,6 +277,71 @@ public static long getObjIDFromTxId(long id) { return new ImmutablePair<>(baseId, maxAvailableId); } + + /** + * Class to return the results from verifyDirectoryKeysInPath. + * Includes the list of missing intermediate directories and + * the directory search result code. + */ + public static class OMPathInfoV1 { + private OMDirectoryResult directoryResult; + private String leafNodeName; + private long lastKnownParentId; + private long leafNodeObjectId; + private List<String> missingParents; + private List<OzoneAcl> acls; + + public OMPathInfoV1(String leafNodeName, long lastKnownParentId, + List missingParents, OMDirectoryResult result, + List<OzoneAcl> aclList) { + this.leafNodeName = leafNodeName; + this.lastKnownParentId = lastKnownParentId; + this.missingParents = missingParents; + this.directoryResult = result; + this.acls = aclList; Review comment: 'keyName' is already available as a local variable and I am making use of that. How about adding it to 'OMPathInfoV1' later, based on requirements? ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ########## @@ -156,6 +277,71 @@ public static long getObjIDFromTxId(long id) { return new ImmutablePair<>(baseId, maxAvailableId); } + + /** + * Class to return the results from verifyDirectoryKeysInPath. + * Includes the list of missing intermediate directories and + * the directory search result code. + */ + public static class OMPathInfoV1 { + private OMDirectoryResult directoryResult; + private String leafNodeName; + private long lastKnownParentId; + private long leafNodeObjectId; + private List<String> missingParents; + private List<OzoneAcl> acls; + + public OMPathInfoV1(String leafNodeName, long lastKnownParentId, + List missingParents, OMDirectoryResult result, + List<OzoneAcl> aclList) { + this.leafNodeName = leafNodeName; + this.lastKnownParentId = lastKnownParentId; + this.missingParents = missingParents; + this.directoryResult = result; + this.acls = aclList; Review comment: Thanks for the explanation. Yes, will add it.
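To make the directory-table walk discussed in these comments easier to follow, here is a minimal, self-contained sketch of the kind of lookup verifyDirectoryKeysInPath performs and of the OMDirectoryResult values mentioned in the first reply. It is only a sketch under assumptions: a HashMap stands in for the RocksDB-backed directory table, DirEntry/Result/lookupPath are illustrative names rather than the PR's API, the "<parentObjectId>/<name>" key shape mirrors getOzoneLeafNodeKey and the "parentInfo.getParentObjectID(), parentInfo.getName()" format mentioned above, and the key-table/file checks are omitted.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Illustrative sketch of the parentId-based directory walk; not the PR's code. */
public class DirectoryWalkSketch {

  /** Simplified stand-in for OmDirectoryInfo: object id, parent id, name. */
  static class DirEntry {
    final long objectId;
    final long parentObjectId;
    final String name;

    DirEntry(long objectId, long parentObjectId, String name) {
      this.objectId = objectId;
      this.parentObjectId = parentObjectId;
      this.name = name;
    }
  }

  /** Subset of OMDirectoryResult relevant to a directory-only walk. */
  enum Result { DIRECTORY_EXISTS, DIRECTORY_EXISTS_IN_GIVENPATH, NONE }

  /** Directory-table key in the new layout: "<parentObjectId>/<name>". */
  static String dbKey(long parentObjectId, String name) {
    return parentObjectId + "/" + name;
  }

  /**
   * Walks the path components, resolving each one against the directory table
   * keyed by (last known parent object id, component name). Intermediate
   * components that cannot be resolved are collected as missing parents.
   */
  static Result lookupPath(Map<String, DirEntry> dirTable, long bucketId,
      String[] components, List<String> missing) {
    long lastKnownParentId = bucketId;
    boolean foundAncestor = false;
    for (int i = 0; i < components.length; i++) {
      boolean isLeaf = (i == components.length - 1);
      DirEntry entry = dirTable.get(dbKey(lastKnownParentId, components[i]));
      if (entry != null) {
        if (isLeaf) {
          return Result.DIRECTORY_EXISTS;    // the full path already exists
        }
        foundAncestor = true;
        lastKnownParentId = entry.objectId;  // descend using the object id
        continue;
      }
      // This component is absent, so every deeper component is absent too;
      // record all of them except the leaf as parents to be created.
      for (int j = i; j < components.length - 1; j++) {
        missing.add(components[j]);
      }
      break;
    }
    return foundAncestor ? Result.DIRECTORY_EXISTS_IN_GIVENPATH : Result.NONE;
  }

  public static void main(String[] args) {
    Map<String, DirEntry> dirTable = new HashMap<>();
    long bucketId = 1024L;
    // Pre-existing directories "a" and "a/b" under the bucket.
    dirTable.put(dbKey(bucketId, "a"), new DirEntry(1025L, bucketId, "a"));
    dirTable.put(dbKey(1025L, "b"), new DirEntry(1026L, 1025L, "b"));

    List<String> missing = new ArrayList<>();
    Result r = lookupPath(dirTable, bucketId,
        new String[]{"a", "b", "c"}, missing);
    System.out.println(r + " missing=" + missing);
  }
}
```

For the "a/b exists, a/b/c requested" case from the reply, this prints DIRECTORY_EXISTS_IN_GIVENPATH with an empty missing list; an empty table would give NONE, and a pre-existing "a/b/c" would give DIRECTORY_EXISTS.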
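The earlier reply about getAllParentDirInfo (copying the object-id handling from the original OMDirectoryCreateRequest) can likewise be illustrated. The sketch below only shows the id bookkeeping: ids come from a bounded per-transaction range, baseObjId is reserved for the leaf directory, and each missing parent points at the previously created one. Again hedged: MissingParentSketch, DirEntry and buildMissingParents are illustrative names, the range is passed in rather than derived from the transaction log index, and the parent-id chaining is an assumption consistent with the quoted diff, not a verbatim copy of it.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch of chaining missing parent directories; not the PR's code. */
public class MissingParentSketch {

  /** Simplified stand-in for OmDirectoryInfo. */
  static class DirEntry {
    final long objectId;
    final long parentObjectId;
    final String name;

    DirEntry(long objectId, long parentObjectId, String name) {
      this.objectId = objectId;
      this.parentObjectId = parentObjectId;
      this.name = name;
    }

    @Override
    public String toString() {
      return name + "{id=" + objectId + ", parent=" + parentObjectId + "}";
    }
  }

  /**
   * Builds one entry per missing path component. Object ids are handed out
   * sequentially above baseObjId (baseObjId itself is left for the leaf), and
   * exceeding maxObjId fails, mirroring the "Too many directories in path"
   * check in the quoted diff.
   */
  static List<DirEntry> buildMissingParents(long lastKnownParentId,
      List<String> missingParents, long baseObjId, long maxObjId) {
    List<DirEntry> result = new ArrayList<>();
    long objectCount = 1;
    long parentId = lastKnownParentId;
    for (String name : missingParents) {
      long nextObjId = baseObjId + objectCount;
      if (nextObjId > maxObjId) {
        throw new IllegalStateException("Too many directories in path");
      }
      DirEntry dir = new DirEntry(nextObjId, parentId, name);
      result.add(dir);
      parentId = nextObjId;   // the next missing directory hangs off this one
      objectCount++;
    }
    return result;
  }

  public static void main(String[] args) {
    // Existing prefix ".../a/b" resolved to object id 1026; "d", "e", "f"
    // still have to be created before the leaf.
    List<DirEntry> dirs = buildMissingParents(1026L,
        List.of("d", "e", "f"), 5000L, 5007L);
    dirs.forEach(System.out::println);
    // d{id=5001, parent=1026}
    // e{id=5002, parent=5001}
    // f{id=5003, parent=5002}
  }
}
```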