Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Wed Aug 20 18:39:03 2014
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
+    .EncryptionZoneWithIdProto;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -51,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -58,7 +61,9 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1178,7 +1183,9 @@ public class PBHelper {
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
         lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
-        lb.getIsLastBlockComplete());
+        lb.getIsLastBlockComplete(),
+        lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) :
+            null);
   }
 
   public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1190,6 +1197,9 @@ public class PBHelper {
     if (lb.getLastLocatedBlock() != null) {
       builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
     }
+    if (lb.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
+    }
     return builder.setFileLength(lb.getFileLength())
         .setUnderConstruction(lb.isUnderConstruction())
         .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
@@ -1315,7 +1325,9 @@ public class PBHelper {
         fs.getPath().toByteArray(),
         fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
         fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
-        fs.hasChildrenNum() ? fs.getChildrenNum() : -1);
+        fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
+        fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) :
+            null);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1365,6 +1377,9 @@ public class PBHelper {
     if (fs.isSymlink()) {
       builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
     }
+    if (fs.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
+    }
     if (fs instanceof HdfsLocatedFileStatus) {
       LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
       if (locations != null) {
@@ -2253,7 +2268,7 @@ public class PBHelper {
     }
     return xAttrs;
   }
-  
+
   public static List<XAttr> convert(GetXAttrsResponseProto a) {
     List<XAttrProto> xAttrs = a.getXAttrsList();
     return convertXAttrs(xAttrs);
@@ -2284,6 +2299,18 @@ public class PBHelper {
     return builder.build();
   }
 
+  public static EncryptionZoneWithIdProto convert(EncryptionZoneWithId zone) {
+    return EncryptionZoneWithIdProto.newBuilder()
+        .setId(zone.getId())
+        .setKeyName(zone.getKeyName())
+        .setPath(zone.getPath()).build();
+  }
+
+  public static EncryptionZoneWithId convert(EncryptionZoneWithIdProto proto) {
+    return new EncryptionZoneWithId(proto.getPath(), proto.getKeyName(),
+        proto.getId());
+  }
+
   public static ShortCircuitShmSlotProto convert(SlotId slotId) {
     return ShortCircuitShmSlotProto.newBuilder().
         setShmId(convert(slotId.getShmId())).
@@ -2307,5 +2334,75 @@ public class PBHelper {
   public static ShmId convert(ShortCircuitShmIdProto shmId) {
     return new ShmId(shmId.getHi(), shmId.getLo());
   }
-}
 
+  public static HdfsProtos.CipherSuite convert(CipherSuite suite) {
+    switch (suite) {
+    case UNKNOWN:
+      return HdfsProtos.CipherSuite.UNKNOWN;
+    case AES_CTR_NOPADDING:
+      return HdfsProtos.CipherSuite.AES_CTR_NOPADDING;
+    default:
+      return null;
+    }
+  }
+
+  public static CipherSuite convert(HdfsProtos.CipherSuite proto) {
+    switch (proto) {
+    case AES_CTR_NOPADDING:
+      return CipherSuite.AES_CTR_NOPADDING;
+    default:
+      // Set to UNKNOWN and stash the unknown enum value
+      CipherSuite suite = CipherSuite.UNKNOWN;
+      suite.setUnknownValue(proto.getNumber());
+      return suite;
+    }
+  }
+
+  public static List<HdfsProtos.CipherSuite> convertCipherSuites
+      (List<CipherSuite> suites) {
+    if (suites == null) {
+      return null;
+    }
+    List<HdfsProtos.CipherSuite> protos =
+        Lists.newArrayListWithCapacity(suites.size());
+    for (CipherSuite suite : suites) {
+      protos.add(convert(suite));
+    }
+    return protos;
+  }
+
+  public static List<CipherSuite> convertCipherSuiteProtos(
+      List<HdfsProtos.CipherSuite> protos) {
+    List<CipherSuite> suites = Lists.newArrayListWithCapacity(protos.size());
+    for (HdfsProtos.CipherSuite proto : protos) {
+      suites.add(convert(proto));
+    }
+    return suites;
+  }
+
+  public static HdfsProtos.FileEncryptionInfoProto convert(
+      FileEncryptionInfo info) {
+    if (info == null) {
+      return null;
+    }
+    return HdfsProtos.FileEncryptionInfoProto.newBuilder()
+        .setSuite(convert(info.getCipherSuite()))
+        .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
+        .setIv(getByteString(info.getIV()))
+        .setEzKeyVersionName(info.getEzKeyVersionName())
+        .build();
+  }
+
+  public static FileEncryptionInfo convert(
+      HdfsProtos.FileEncryptionInfoProto proto) {
+    if (proto == null) {
+      return null;
+    }
+    CipherSuite suite = convert(proto.getSuite());
+    byte[] key = proto.getKey().toByteArray();
+    byte[] iv = proto.getIv().toByteArray();
+    String ezKeyVersionName = proto.getEzKeyVersionName();
+    return new FileEncryptionInfo(suite, key, iv, ezKeyVersionName);
+  }
+
+}
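The new FileEncryptionInfoProto converters above are symmetric and null-preserving, and the CipherSuite converter stashes unrecognized enum numbers in CipherSuite.UNKNOWN so newer suites degrade gracefully on old clients. A minimal round-trip sketch, assuming the hadoop-hdfs classes from this revision are on the classpath (the EDEK, IV, and key-version values are arbitrary placeholders):

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class FeInfoRoundTrip {
      public static void main(String[] args) {
        // Arbitrary placeholder EDEK and IV (16 bytes each suits AES/CTR).
        FileEncryptionInfo feInfo = new FileEncryptionInfo(
            CipherSuite.AES_CTR_NOPADDING, new byte[16], new byte[16], "key@0");

        // Serialize to the wire form and back with the new overloads.
        FileEncryptionInfoProto proto = PBHelper.convert(feInfo);
        FileEncryptionInfo back = PBHelper.convert(proto);
        System.out.println(back.getEzKeyVersionName());              // key@0

        // Both directions pass null through, so absent info stays absent.
        System.out.println(PBHelper.convert((FileEncryptionInfo) null)); // null
      }
    }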
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Aug 20 18:39:03 2014
@@ -52,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -839,14 +841,15 @@ public class BlockManager {
   public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
       final long fileSizeExcludeBlocksUnderConstruction,
       final boolean isFileUnderConstruction, final long offset,
-      final long length, final boolean needBlockToken, final boolean inSnapshot)
+      final long length, final boolean needBlockToken,
+      final boolean inSnapshot, FileEncryptionInfo feInfo)
       throws IOException {
     assert namesystem.hasReadLock();
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
       return new LocatedBlocks(0, isFileUnderConstruction,
-          Collections.<LocatedBlock>emptyList(), null, false);
+          Collections.<LocatedBlock>emptyList(), null, false, feInfo);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
@@ -871,7 +874,7 @@ public class BlockManager {
       }
       return new LocatedBlocks(
           fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction,
-          locatedblocks, lastlb, isComplete);
+          locatedblocks, lastlb, isComplete, feInfo);
     }
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java Wed Aug 20 18:39:03 2014
@@ -294,5 +294,10 @@ public final class HdfsServerConstants {
 
   public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
   public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
+
+  public static final String CRYPTO_XATTR_ENCRYPTION_ZONE =
+      "raw.hdfs.crypto.encryption.zone";
+  public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
+      "raw.hdfs.crypto.file.encryption.info";
 }
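With the extra FileEncryptionInfo parameter, createLocatedBlocks() attaches the file's encryption info to every LocatedBlocks it hands back, including the zero-block case, so a client opening an empty file in an encryption zone still learns how to decrypt it. A sketch of that invariant using the widened LocatedBlocks constructor from this change (placeholder key material again; not part of the commit):

    import java.util.Collections;
    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    public class EmptyFileStillEncrypted {
      public static void main(String[] args) {
        FileEncryptionInfo feInfo = new FileEncryptionInfo(
            CipherSuite.AES_CTR_NOPADDING, new byte[16], new byte[16], "key@0");

        // Mirrors the blocks.length == 0 branch above: no blocks, but the
        // encryption info still rides along for the client.
        LocatedBlocks lbs = new LocatedBlocks(0, false,
            Collections.<LocatedBlock>emptyList(), null, false, feInfo);

        System.out.println(lbs.getFileEncryptionInfo().getEzKeyVersionName());
      }
    }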
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 20 18:39:03 2014
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.Closeable;
@@ -29,11 +32,13 @@ import java.util.List;
 import java.util.ListIterator;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
@@ -49,10 +54,12 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@@ -64,6 +71,8 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -112,9 +121,14 @@ public class FSDirectory implements Clos
       + DOT_RESERVED_STRING;
   public final static byte[] DOT_RESERVED = 
       DFSUtil.string2Bytes(DOT_RESERVED_STRING);
+  private final static String RAW_STRING = "raw";
+  private final static byte[] RAW = DFSUtil.string2Bytes(RAW_STRING);
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
+  private final XAttr KEYID_XATTR =
+      XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null);
+
   INodeDirectory rootDir;
   private final FSNamesystem namesystem;
   private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -151,7 +165,7 @@ public class FSDirectory implements Clos
   }
 
   boolean hasReadLock() {
-    return this.dirLock.getReadHoldCount() > 0;
+    return this.dirLock.getReadHoldCount() > 0 || hasWriteLock();
   }
 
   public int getReadHoldCount() {
@@ -162,6 +176,9 @@ public class FSDirectory implements Clos
     return this.dirLock.getWriteHoldCount();
   }
 
+  @VisibleForTesting
+  public final EncryptionZoneManager ezManager;
+
   /**
    * Caches frequently used file names used in {@link INode} to reuse 
    * byte[] objects and reduce heap usage.
@@ -190,6 +207,7 @@ public class FSDirectory implements Clos
     this.inodeXAttrsLimit = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+
     Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
         "Cannot set a negative limit on the number of xattrs per inode (%s).",
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
@@ -209,6 +227,8 @@ public class FSDirectory implements Clos
         + " times");
     nameCache = new NameCache<ByteArray>(threshold);
     namesystem = ns;
+
+    ezManager = new EncryptionZoneManager(this, conf);
   }
 
   private FSNamesystem getFSNamesystem() {
@@ -506,6 +526,7 @@ public class FSDirectory implements Clos
       return false;
     }
 
+    ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(srcIIP, dstIIP);
     verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
@@ -584,6 +605,7 @@ public class FSDirectory implements Clos
       throw new IOException(error);
     }
 
+    ezManager.checkMoveValidity(srcIIP, dstIIP, src);
    final INode dstInode = dstIIP.getLastINode();
    List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
    if (dstInode != null) { // Destination exists
@@ -1294,6 +1316,7 @@ public class FSDirectory implements Clos
   DirectoryListing getListing(String src, byte[] startAfter,
       boolean needLocation) throws UnresolvedLinkException, IOException {
     String srcs = normalizePath(src);
+    final boolean isRawPath = isReservedRawName(src);
 
     readLock();
     try {
@@ -1309,7 +1332,7 @@ public class FSDirectory implements Clos
       if (!targetNode.isDirectory()) {
         return new DirectoryListing(
             new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
-                targetNode, needLocation, snapshot)}, 0);
+                targetNode, needLocation, snapshot, isRawPath)}, 0);
       }
 
       final INodeDirectory dirInode = targetNode.asDirectory();
@@ -1323,7 +1346,7 @@ public class FSDirectory implements Clos
       for (int i=0; i<numOfListing && locationBudget>0; i++) {
         INode cur = contents.get(startChild+i);
         listing[i] = createFileStatus(cur.getLocalNameBytes(), cur,
-            needLocation, snapshot);
+            needLocation, snapshot, isRawPath);
         listingCnt++;
         if (needLocation) {
             // Once we hit lsLimit locations, stop.
@@ -1374,7 +1397,7 @@ public class FSDirectory implements Clos
       for (int i = 0; i < numOfListing; i++) {
         Root sRoot = snapshots.get(i + skipSize).getRoot();
         listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
-            Snapshot.CURRENT_STATE_ID);
+            Snapshot.CURRENT_STATE_ID, false);
       }
       return new DirectoryListing(
           listing, snapshots.size() - skipSize - numOfListing);
@@ -1382,12 +1405,13 @@ public class FSDirectory implements Clos
 
   /** Get the file info for a specific file.
   * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException 
+   * @param resolveLink whether to throw UnresolvedLinkException
+   * @param isRawPath true if a /.reserved/raw pathname was passed by the user
   * @return object containing information regarding the file
   *         or null if file not found
   */
-  HdfsFileStatus getFileInfo(String src, boolean resolveLink) 
-      throws UnresolvedLinkException {
+  HdfsFileStatus getFileInfo(String src, boolean resolveLink, boolean isRawPath)
+    throws IOException {
     String srcs = normalizePath(src);
     readLock();
     try {
@@ -1396,8 +1420,9 @@ public class FSDirectory implements Clos
       }
       final INodesInPath inodesInPath = getLastINodeInPath(srcs, resolveLink);
       final INode i = inodesInPath.getINode(0);
+
       return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i,
-          inodesInPath.getPathSnapshotId());
+          inodesInPath.getPathSnapshotId(), isRawPath);
     } finally {
       readUnlock();
     }
@@ -1414,7 +1439,7 @@ public class FSDirectory implements Clos
       throws UnresolvedLinkException {
     if (getINode4DotSnapshot(src) != null) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
-          HdfsFileStatus.EMPTY_NAME, -1L, 0);
+          HdfsFileStatus.EMPTY_NAME, -1L, 0, null);
     }
     return null;
   }
@@ -2042,6 +2067,19 @@ public class FSDirectory implements Clos
   public final void addToInodeMap(INode inode) {
     if (inode instanceof INodeWithAdditionalFields) {
       inodeMap.put(inode);
+      if (!inode.isSymlink()) {
+        final XAttrFeature xaf = inode.getXAttrFeature();
+        if (xaf != null) {
+          final List<XAttr> xattrs = xaf.getXAttrs();
+          for (XAttr xattr : xattrs) {
+            final String xaName = XAttrHelper.getPrefixName(xattr);
+            if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
+              ezManager.addEncryptionZone(inode.getId(),
+                  new String(xattr.getValue()));
+            }
+          }
+        }
+      }
     }
   }
 
@@ -2053,6 +2091,7 @@ public class FSDirectory implements Clos
     for (INode inode : inodes) {
      if (inode != null && inode instanceof INodeWithAdditionalFields) {
        inodeMap.remove(inode);
+        ezManager.removeEncryptionZone(inode.getId());
      }
    }
  }
@@ -2222,22 +2261,25 @@ public class FSDirectory implements Clos
   * @param path the local name
   * @param node inode
   * @param needLocation if block locations need to be included or not
+   * @param isRawPath true if this is being called on behalf of a path in
+   *                  /.reserved/raw
   * @return a file status
   * @throws IOException if any error occurs
   */
  private HdfsFileStatus createFileStatus(byte[] path, INode node,
-      boolean needLocation, int snapshot) throws IOException {
+      boolean needLocation, int snapshot, boolean isRawPath)
+      throws IOException {
    if (needLocation) {
-      return createLocatedFileStatus(path, node, snapshot);
+      return createLocatedFileStatus(path, node, snapshot, isRawPath);
    } else {
-      return createFileStatus(path, node, snapshot);
+      return createFileStatus(path, node, snapshot, isRawPath);
    }
  }

  /**
   * Create FileStatus by file INode
   */
  HdfsFileStatus createFileStatus(byte[] path, INode node,
-       int snapshot) {
+      int snapshot, boolean isRawPath) throws IOException {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
@@ -2249,7 +2291,10 @@ public class FSDirectory implements Clos
     }
     int childrenNum = node.isDirectory() ? 
         node.asDirectory().getChildrenNum(snapshot) : 0;
-        
+
+    FileEncryptionInfo feInfo = isRawPath ? null :
+        getFileEncryptionInfo(node, snapshot);
+
     return new HdfsFileStatus(
        size, 
        node.isDirectory(), 
@@ -2263,19 +2308,22 @@ public class FSDirectory implements Clos
        node.isSymlink() ? node.asSymlink().getSymlink() : null,
        path,
        node.getId(),
-        childrenNum);
+        childrenNum,
+        feInfo);
  }

  /**
   * Create FileStatus with location info by file INode
   */
  private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
-      INode node, int snapshot) throws IOException {
+      INode node, int snapshot, boolean isRawPath) throws IOException {
    assert hasReadLock();
    long size = 0; // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
+    final FileEncryptionInfo feInfo = isRawPath ? null :
+        getFileEncryptionInfo(node, snapshot);
    if (node.isFile()) {
      final INodeFile fileNode = node.asFile();
      size = fileNode.computeFileSize(snapshot);
@@ -2286,16 +2334,17 @@ public class FSDirectory implements Clos
      final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
      final long fileSize = !inSnapshot && isUc ? 
          fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
+
      loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
          fileNode.getBlocks(), fileSize, isUc, 0L, size, false,
-          inSnapshot);
+          inSnapshot, feInfo);
      if (loc == null) {
        loc = new LocatedBlocks();
      }
    }
    int childrenNum = node.isDirectory() ? 
        node.asDirectory().getChildrenNum(snapshot) : 0;
-        
+
    HdfsLocatedFileStatus status =
        new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
          blocksize, node.getModificationTime(snapshot),
@@ -2303,7 +2352,7 @@ public class FSDirectory implements Clos
          getPermissionForFileStatus(node, snapshot),
          node.getUserName(snapshot), node.getGroupName(snapshot),
          node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-          node.getId(), loc, childrenNum);
+          node.getId(), loc, childrenNum, feInfo);
    // Set caching information for the located blocks.
    if (loc != null) {
      CacheManager cacheManager = namesystem.getCacheManager();
@@ -2550,6 +2599,8 @@ public class FSDirectory implements Clos
    for (ListIterator<XAttr> it = toFilter.listIterator(); it.hasNext()
        ;) {
      XAttr filter = it.next();
+      Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter),
+          "The encryption zone xattr should never be deleted.");
      if (a.equalsIgnoreValue(filter)) {
        add = false;
        it.remove();
@@ -2564,7 +2615,111 @@ public class FSDirectory implements Clos
    
    return newXAttrs;
  }
-  
+
+  boolean isInAnEZ(INodesInPath iip)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    readLock();
+    try {
+      return ezManager.isInAnEZ(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  String getKeyName(INodesInPath iip) {
+    readLock();
+    try {
+      return ezManager.getKeyName(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  XAttr createEncryptionZone(String src, String keyName)
+    throws IOException {
+    writeLock();
+    try {
+      return ezManager.createEncryptionZone(src, keyName);
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  EncryptionZoneWithId getEZForPath(INodesInPath iip) {
+    readLock();
+    try {
+      return ezManager.getEZINodeForPath(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+      throws IOException {
+    readLock();
+    try {
+      return ezManager.listEncryptionZones(prevId);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  /**
+   * Set the FileEncryptionInfo for an INode.
+   */
+  void setFileEncryptionInfo(String src, FileEncryptionInfo info)
+      throws IOException {
+    // Make the PB for the xattr
+    final HdfsProtos.FileEncryptionInfoProto proto = PBHelper.convert(info);
+    final byte[] protoBytes = proto.toByteArray();
+    final XAttr fileEncryptionAttr =
+        XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
+    final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+    xAttrs.add(fileEncryptionAttr);
+
+    writeLock();
+    try {
+      unprotectedSetXAttrs(src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  /**
+   * Return the FileEncryptionInfo for an INode, or null if the INode is not
+   * an encrypted file.
+   */
+  FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId)
+      throws IOException {
+    if (!inode.isFile()) {
+      return null;
+    }
+    readLock();
+    try {
+      List<XAttr> xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId);
+      if (xAttrs == null) {
+        return null;
+      }
+      for (XAttr x : xAttrs) {
+        if (XAttrHelper.getPrefixName(x)
+            .equals(CRYPTO_XATTR_FILE_ENCRYPTION_INFO)) {
+          try {
+            HdfsProtos.FileEncryptionInfoProto proto =
+                HdfsProtos.FileEncryptionInfoProto.parseFrom(x.getValue());
+            FileEncryptionInfo feInfo = PBHelper.convert(proto);
+            return feInfo;
+          } catch (InvalidProtocolBufferException e) {
+            throw new IOException("Could not parse file encryption info for " +
+                "inode " + inode, e);
+          }
+        }
+      }
+      return null;
+    } finally {
+      readUnlock();
+    }
+  }
+
  void setXAttrs(final String src, final List<XAttr> xAttrs,
      final EnumSet<XAttrSetFlag> flag) throws IOException {
    writeLock();
@@ -2575,7 +2730,7 @@ public class FSDirectory implements Clos
    }
  }
  
-  void unprotectedSetXAttrs(final String src, final List<XAttr> xAttrs,
+  INode unprotectedSetXAttrs(final String src, final List<XAttr> xAttrs,
      final EnumSet<XAttrSetFlag> flag)
      throws QuotaExceededException, IOException {
    assert hasWriteLock();
@@ -2584,7 +2739,20 @@ public class FSDirectory implements Clos
    int snapshotId = iip.getLatestSnapshotId();
    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
    List<XAttr> newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag);
+
+    /*
+     * If we're adding the encryption zone xattr, then add src to the list
+     * of encryption zones.
+     */
+    for (XAttr xattr : newXAttrs) {
+      final String xaName = XAttrHelper.getPrefixName(xattr);
+      if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
+        ezManager.addEncryptionZone(inode.getId(), new String(xattr.getValue()));
+      }
+    }
+
    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+    return inode;
  }
  
  List<XAttr> setINodeXAttrs(final List<XAttr> existingXAttrs,
@@ -2741,27 +2909,73 @@ public class FSDirectory implements Clos
    return src.startsWith(DOT_RESERVED_PATH_PREFIX);
  }

+  static boolean isReservedRawName(String src) {
+    return src.startsWith(DOT_RESERVED_PATH_PREFIX +
+        Path.SEPARATOR + RAW_STRING);
+  }
+
  /**
-   * Resolve the path of /.reserved/.inodes/<inodeid>/... to a regular path
+   * Resolve a /.reserved/... path to a non-reserved path.
+   * <p/>
+   * There are two special hierarchies under /.reserved/:
+   * <p/>
+   * /.reserved/.inodes/<inodeid> performs a path lookup by inodeid,
+   * <p/>
+   * /.reserved/raw/... returns the encrypted (raw) bytes of a file in an
+   * encryption zone. For instance, if /ezone is an encryption zone, then
+   * /ezone/a refers to the decrypted file and /.reserved/raw/ezone/a refers to
+   * the encrypted (raw) bytes of /ezone/a.
+   * <p/>
+   * Pathnames in the /.reserved/raw directory that resolve to files not in
+   * an encryption zone are equivalent to the corresponding non-raw path. Hence,
+   * if /a/b/c refers to a file that is not in an encryption zone, then
+   * /.reserved/raw/a/b/c is equivalent (they both refer to the same
+   * unencrypted file).
   *
   * @param src path that is being processed
   * @param pathComponents path components corresponding to the path
   * @param fsd FSDirectory
-   * @return if the path indicates an inode, return path after replacing upto
+   * @return if the path indicates an inode, return path after replacing up to
   *         <inodeid> with the corresponding path of the inode, else the path
-   *         in {@code src} as is.
+   *         in {@code src} as is. If the path refers to a path in the "raw"
+   *         directory, return the non-raw pathname.
   * @throws FileNotFoundException if inodeid is invalid
   */
-  static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd)
-      throws FileNotFoundException {
-    if (pathComponents == null || pathComponents.length <= 3) {
+  static String resolvePath(String src, byte[][] pathComponents,
+      FSDirectory fsd) throws FileNotFoundException {
+    final int nComponents = (pathComponents == null) ?
+        0 : pathComponents.length;
+    if (nComponents <= 2) {
      return src;
    }
-    // Not /.reserved/.inodes
-    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])
-        || !Arrays.equals(DOT_INODES, pathComponents[2])) { // Not .inodes path
+    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) {
+      /* This is not a /.reserved/ path so do nothing. */
      return src;
    }
+
+    if (Arrays.equals(DOT_INODES, pathComponents[2])) {
+      /* It's a /.reserved/.inodes path. */
+      if (nComponents > 3) {
+        return resolveDotInodesPath(src, pathComponents, fsd);
+      } else {
+        return src;
+      }
+    } else if (Arrays.equals(RAW, pathComponents[2])) {
+      /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
+      if (nComponents == 3) {
+        return Path.SEPARATOR;
+      } else {
+        return constructRemainingPath("", pathComponents, 3);
+      }
+    } else {
+      /* It's some sort of /.reserved/<unknown> path. Ignore it. */
+      return src;
+    }
+  }
+
+  private static String resolveDotInodesPath(String src,
+      byte[][] pathComponents, FSDirectory fsd)
+      throws FileNotFoundException {
    final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
    final long id;
    try {
@@ -2790,10 +3004,20 @@ public class FSDirectory implements Clos
      }
    }

-    StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder()
-      : new StringBuilder(inode.getFullPathName());
-    for (int i = 4; i < pathComponents.length; i++) {
-      path.append(Path.SEPARATOR).append(DFSUtil.bytes2String(pathComponents[i]));
+    String path = "";
+    if (id != INodeId.ROOT_INODE_ID) {
+      path = inode.getFullPathName();
+    }
+    return constructRemainingPath(path, pathComponents, 4);
+  }
+
+  private static String constructRemainingPath(String pathPrefix,
+      byte[][] pathComponents, int startAt) {
+
+    StringBuilder path = new StringBuilder(pathPrefix);
+    for (int i = startAt; i < pathComponents.length; i++) {
+      path.append(Path.SEPARATOR).append(
+          DFSUtil.bytes2String(pathComponents[i]));
    }
    if (NameNode.LOG.isDebugEnabled()) {
      NameNode.LOG.debug("Resolved path is " + path);
@@ -2838,7 +3062,7 @@ public class FSDirectory implements Clos
   * @throws UnresolvedLinkException if symlink can't be resolved
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
-  private INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
+  INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
      throws UnresolvedLinkException, SnapshotAccessControlException {
    final byte[][] components = INode.getPathComponents(src);
    INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
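setFileEncryptionInfo() and getFileEncryptionInfo() above persist the per-file encryption info as a protobuf blob inside the raw.hdfs.crypto.file.encryption.info xattr. The encode/decode pair, pulled out of the locking and INode machinery as an illustrative sketch (placeholder values; assumes the hadoop-hdfs classes from this revision):

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.fs.XAttr;
    import org.apache.hadoop.hdfs.XAttrHelper;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class CryptoXAttrRoundTrip {
      static final String NAME = "raw.hdfs.crypto.file.encryption.info";

      public static void main(String[] args) throws Exception {
        FileEncryptionInfo info = new FileEncryptionInfo(
            CipherSuite.AES_CTR_NOPADDING, new byte[16], new byte[16], "key@0");

        // setFileEncryptionInfo(): PB-encode and wrap in the crypto xattr.
        XAttr xattr = XAttrHelper.buildXAttr(NAME,
            PBHelper.convert(info).toByteArray());

        // getFileEncryptionInfo(): match on the prefixed name, then decode.
        if (NAME.equals(XAttrHelper.getPrefixName(xattr))) {
          HdfsProtos.FileEncryptionInfoProto proto =
              HdfsProtos.FileEncryptionInfoProto.parseFrom(xattr.getValue());
          System.out.println(
              PBHelper.convert(proto).getEzKeyVersionName());   // key@0
        }
      }
    }

The companion zone xattr is guarded by the new Preconditions check in filterINodeXAttrs(), so it cannot be removed once a zone is created, and addToInodeMap() rebuilds the zone list from that xattr when inodes are loaded.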
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Aug 20 18:39:03 2014
@@ -364,7 +364,8 @@ public class FSEditLogLoader {
       // add the op into retry cache if necessary
       if (toAddRetryCache) {
         HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
-            HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID);
+            HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID,
+            false);
         fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
             addCloseOp.rpcCallId, stat);
       }
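The expanded resolvePath() in FSDirectory treats /.reserved/raw as a pure prefix: stripping it yields the underlying path, with /.reserved/raw itself mapping to the root. Since resolvePath() is package-private, here is a self-contained sketch of just that mapping (stripReservedRaw is a hypothetical stand-in with the semantics described in the javadoc above):

    public class RawPathDemo {
      /** Hypothetical stand-in for the /.reserved/raw handling in resolvePath(). */
      static String stripReservedRaw(String src) {
        final String prefix = "/.reserved/raw";
        if (src.equals(prefix)) {
          return "/";                              // bare /.reserved/raw maps to the root
        } else if (src.startsWith(prefix + "/")) {
          return src.substring(prefix.length());   // drop the prefix, keep the rest
        }
        return src;                                // not a raw path: unchanged
      }

      public static void main(String[] args) {
        System.out.println(stripReservedRaw("/.reserved/raw/ezone/a")); // /ezone/a
        System.out.println(stripReservedRaw("/.reserved/raw"));         // /
        System.out.println(stripReservedRaw("/a/b/c"));                 // /a/b/c
      }
    }

Raw paths that land outside any encryption zone simply behave like their non-raw equivalents, which is why getListing() and getFileInfo() only need the single isRawPath flag to suppress FileEncryptionInfo in the returned statuses.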