Author: curino
Date: Thu Aug 21 21:55:57 2014
New Revision: 1619608
URL: http://svn.apache.org/r1619608
Log:
Merge with trunk to pick up YARN-2436
Added:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/
- copied from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
- copied unchanged from r1619607,
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs:r1594376-1619194
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1619018-1619607
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Thu Aug 21 21:55:57 2014
@@ -1643,6 +1643,7 @@ public class RpcProgramNfs3 extends RpcP
DirectoryListing dlisting = null;
Nfs3FileAttributes postOpDirAttr = null;
long dotdotFileId = 0;
+ HdfsFileStatus dotdotStatus = null;
try {
String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
dirStatus = dfsClient.getFileInfo(dirFileIdPath);
@@ -1678,7 +1679,7 @@ public class RpcProgramNfs3 extends RpcP
if (cookie == 0) {
// Get dotdot fileId
String dotdotFileIdPath = dirFileIdPath + "/..";
- HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
+ dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
if (dotdotStatus == null) {
// This should not happen
@@ -1723,7 +1724,8 @@ public class RpcProgramNfs3 extends RpcP
postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle(
postOpDirAttr.getFileId()));
entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..",
- dotdotFileId, postOpDirAttr, new FileHandle(dotdotFileId));
+ dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus,
+ iug), new FileHandle(dotdotFileId));
for (int i = 2; i < n + 2; i++) {
long fileId = fstatus[i - 2].getFileId();
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Aug 21 21:55:57 2014
@@ -255,6 +255,97 @@ Trunk (Unreleased)
HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
(Vinayakumar B via wheat9)
+ BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+ HDFS-6387. HDFS CLI admin tool for creating & deleting an
+ encryption zone. (clamb)
+
+ HDFS-6386. HDFS Encryption Zones (clamb)
+
+ HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+ HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+ HDFS-6392. Wire crypto streams for encrypted files in
+ DFSClient. (clamb and yliu)
+
+ HDFS-6476. Print out the KeyProvider after finding KP successfully on
+ startup. (Juan Yu via wang)
+
+ HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+ DFSClient. (Charles Lamb and wang)
+
+ HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+ HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+ HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+ HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+ HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+ HDFS-6635. Refactor encryption zone functionality into new
+ EncryptionZoneManager class. (wang)
+
+ HDFS-6474. Namenode needs to get the actual keys and iv from the
+ KeyProvider. (wang)
+
+ HDFS-6619. Clean up encryption-related tests. (wang)
+
+ HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+ HDFS-6490. Fix the keyid format for generated keys in
+ FSNamesystem.createEncryptionZone (clamb)
+
+ HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+ (wang)
+
+ HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+ HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+ HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+ EZManager#createEncryptionZone. (clamb)
+
+ HDFS-6724. Decrypt EDEK before creating
+ CryptoInputStream/CryptoOutputStream. (wang)
+
+ HDFS-6509. Create a special /.reserved/raw directory for raw access to
+ encrypted data. (clamb via wang)
+
+ HDFS-6771. Require specification of an encryption key when creating
+ an encryption zone. (wang)
+
+ HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+ HDFS-6692. Add more HDFS encryption tests. (wang)
+
+ HDFS-6780. Batch the encryption zones listing API. (wang)
+
+ HDFS-6394. HDFS encryption documentation. (wang)
+
+ HDFS-6834. Improve the configuration guidance in DFSClient when there
+ are no Codec classes found in configs. (umamahesh)
+
+ HDFS-6546. Add non-superuser capability to get the encryption zone
+ for a specific path. (clamb)
+
+ HDFS-6733. Creating encryption zone results in NPE when
+ KeyProvider is null. (clamb)
+
+ HDFS-6785. Should not be able to create encryption zone using path
+ to a non-directory file. (clamb)
+
+ HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+ HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+ as boolean. (umamahesh)
+
+ HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+ HDFS-6839. Fix TestCLI to expect new output. (clamb)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -405,6 +496,15 @@ Release 2.6.0 - UNRELEASED
HDFS-6188. An ip whitelist based implementation of TrustedChannelResolver.
(Benoy Antony via Arpit Agarwal)
+ HDFS-6858. Allow dfs.data.transfer.saslproperties.resolver.class default to
+ hadoop.security.saslproperties.resolver.class. (Benoy Antony via cnauroth)
+
+ HDFS-6878. Change MiniDFSCluster to support StorageType configuration
+ for individual directories. (Arpit Agarwal)
+
+ HDFS-6758. block writer should pass the expected block size to
+ DataXceiverServer. (Arpit Agarwal)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -523,7 +623,12 @@ Release 2.6.0 - UNRELEASED
HDFS-6868. portmap and nfs3 are documented as hadoop commands instead of hdfs
(brandonli)
-Release 2.5.0 - UNRELEASED
+ HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
+ Liu via jing9)
+
+ HDFS-6890. NFS readdirplus doesn't return dotdot attributes (brandonli)
+
+Release 2.5.0 - 2014-08-11
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Aug 21 21:55:57 2014
@@ -304,6 +304,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
<include>datatransfer.proto</include>
<include>fsimage.proto</include>
<include>hdfs.proto</include>
+ <include>encryption.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Thu Aug 21 21:55:57 2014
@@ -46,6 +46,7 @@ function hadoop_usage
echo " snapshotDiff diff two snapshots of a directory or diff the"
echo " current directory contents with a snapshot"
echo " zkfc run the ZK Failover Controller daemon"
+ echo " crypto configure HDFS encryption zones"
echo ""
echo "Most commands print help when invoked w/o parameters."
}
@@ -89,6 +90,9 @@ case ${COMMAND} in
echo "${CLASSPATH}"
exit
;;
+ crypto)
+ CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+ ;;
datanode)
daemon="true"
# Determine if we're starting a secure datanode, and
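The new crypto subcommand dispatches to org.apache.hadoop.hdfs.tools.CryptoAdmin, added above. A minimal usage sketch in the shell of the surrounding hunk, assuming a KeyProvider is configured and using illustrative key and path names; the documented flow lives in the TransparentEncryption.apt.vm page added by this merge:

hadoop key create myKey
hdfs dfs -mkdir /secure
hdfs crypto -createZone -keyName myKey -path /secure
hdfs crypto -listZones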
Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1594376-1619194
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1619018-1619607
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Thu Aug 21 21:55:57 2014
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.fs;
-
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
@@ -31,6 +30,7 @@ import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -38,6 +38,8 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -59,6 +61,7 @@ import org.apache.hadoop.util.Progressab
public class Hdfs extends AbstractFileSystem {
DFSClient dfs;
+ final CryptoCodec factory;
private boolean verifyChecksum = true;
static {
@@ -85,6 +88,7 @@ public class Hdfs extends AbstractFileSy
}
this.dfs = new DFSClient(theUri, conf, getStatistics());
+ this.factory = CryptoCodec.getInstance(conf);
}
@Override
@@ -97,9 +101,12 @@ public class Hdfs extends AbstractFileSy
EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
int bufferSize, short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt, boolean createParent) throws IOException {
- return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
- absolutePermission, createFlag, createParent, replication, blockSize,
- progress, bufferSize, checksumOpt), getStatistics());
+
+ final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
+ absolutePermission, createFlag, createParent, replication, blockSize,
+ progress, bufferSize, checksumOpt);
+ return dfs.createWrappedOutputStream(dfsos, statistics,
+ dfsos.getInitialLen());
}
@Override
@@ -308,8 +315,9 @@ public class Hdfs extends AbstractFileSy
@Override
public HdfsDataInputStream open(Path f, int bufferSize)
throws IOException, UnresolvedLinkException {
- return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
- bufferSize, verifyChecksum));
+ final DFSInputStream dfsis = dfs.open(getUriPath(f),
+ bufferSize, verifyChecksum);
+ return dfs.createWrappedInputStream(dfsis);
}
@Override
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java Thu Aug 21 21:55:57 2014
@@ -26,8 +26,8 @@ import org.apache.hadoop.classification.
/**
* XAttr is the POSIX Extended Attribute model similar to that found in
* traditional Operating Systems. Extended Attributes consist of one
- * or more name/value pairs associated with a file or directory. Four
- * namespaces are defined: user, trusted, security and system.
+ * or more name/value pairs associated with a file or directory. Five
+ * namespaces are defined: user, trusted, security, system and raw.
* 1) USER namespace attributes may be used by any user to store
* arbitrary information. Access permissions in this namespace are
* defined by a file directory's permission bits. For sticky directories,
@@ -43,6 +43,12 @@ import org.apache.hadoop.classification.
* <br>
* 4) SECURITY namespace attributes are used by the fs kernel for
* security features. It is not visible to users.
+ * <br>
+ * 5) RAW namespace attributes are used for internal system attributes that
+ * sometimes need to be exposed. Like SYSTEM namespace attributes they are
+ * not visible to the user except when getXAttr/getXAttrs is called on a file
+ * or directory in the /.reserved/raw HDFS directory hierarchy. These
+ * attributes can only be accessed by the superuser.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -55,7 +61,8 @@ public class XAttr {
USER,
TRUSTED,
SECURITY,
- SYSTEM;
+ SYSTEM,
+ RAW;
}
private final NameSpace ns;
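With the RAW value added to the enum, client code can address the fifth namespace through the existing Builder. A minimal sketch; the attribute name is hypothetical, for illustration only (real raw.* attributes are defined by HDFS internals and visible only under /.reserved/raw to the superuser):

import org.apache.hadoop.fs.XAttr;

public class RawXAttrSketch {
  public static void main(String[] args) {
    // Hypothetical attribute name, for illustration only.
    XAttr attr = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.RAW)
        .setName("hdfs.crypto.example")
        .setValue(new byte[] { 0x01 })
        .build();
    // Prints "RAW.hdfs.crypto.example".
    System.out.println(attr.getNameSpace() + "." + attr.getName());
  }
}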
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Aug 21 21:55:57 2014
@@ -17,6 +17,11 @@
*/
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
+ .EncryptedKeyVersion;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
@@ -76,6 +81,7 @@ import java.net.Socket;
import java.net.SocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
+import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
@@ -95,6 +101,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.CacheFlag;
@@ -102,6 +113,7 @@ import org.apache.hadoop.fs.CommonConfig
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
@@ -140,6 +152,9 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -249,7 +264,11 @@ public class DFSClient implements java.i
private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
new DFSHedgedReadMetrics();
private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
-
+ private final CryptoCodec codec;
+ @VisibleForTesting
+ List<CipherSuite> cipherSuites;
+ @VisibleForTesting
+ KeyProviderCryptoExtension provider;
/**
* DFSClient configuration
*/
@@ -581,7 +600,17 @@ public class DFSClient implements java.i
this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();
-
+ this.codec = CryptoCodec.getInstance(conf);
+ this.cipherSuites = Lists.newArrayListWithCapacity(1);
+ if (codec != null) {
+ cipherSuites.add(codec.getCipherSuite());
+ }
+ provider = DFSUtil.createKeyProviderCryptoExtension(conf);
+ if (provider == null) {
+ LOG.info("No KeyProvider found.");
+ } else {
+ LOG.info("Found KeyProvider: " + provider.toString());
+ }
int numResponseToDrop = conf.getInt(
DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
@@ -1280,7 +1309,93 @@ public class DFSClient implements java.i
return volumeBlockLocations;
}
-
+
+ /**
+ * Decrypts a EDEK by consulting the KeyProvider.
+ */
+ private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
+ feInfo) throws IOException {
+ if (provider == null) {
+ throw new IOException("No KeyProvider is configured, cannot access" +
+ " an encrypted file");
+ }
+ EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
+ feInfo.getEzKeyVersionName(), feInfo.getIV(),
+ feInfo.getEncryptedDataEncryptionKey());
+ try {
+ return provider.decryptEncryptedKey(ekv);
+ } catch (GeneralSecurityException e) {
+ throw new IOException(e);
+ }
+ }
+
+ /**
+ * Wraps the stream in a CryptoInputStream if the underlying file is
+ * encrypted.
+ */
+ public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
+ throws IOException {
+ final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
+ if (feInfo != null) {
+ // File is encrypted, wrap the stream in a crypto stream.
+ KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+ CryptoCodec codec = CryptoCodec
+ .getInstance(conf, feInfo.getCipherSuite());
+ if (codec == null) {
+ throw new IOException("No configuration found for the cipher suite "
+ + feInfo.getCipherSuite().getConfigSuffix() + " prefixed with "
+ + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+ + ". Please see the example configuration "
+ + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+ + "at core-default.xml for details.");
+ }
+ final CryptoInputStream cryptoIn =
+ new CryptoInputStream(dfsis, codec, decrypted.getMaterial(),
+ feInfo.getIV());
+ return new HdfsDataInputStream(cryptoIn);
+ } else {
+ // No FileEncryptionInfo so no encryption.
+ return new HdfsDataInputStream(dfsis);
+ }
+ }
+
+ /**
+ * Wraps the stream in a CryptoOutputStream if the underlying file is
+ * encrypted.
+ */
+ public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+ FileSystem.Statistics statistics) throws IOException {
+ return createWrappedOutputStream(dfsos, statistics, 0);
+ }
+
+ /**
+ * Wraps the stream in a CryptoOutputStream if the underlying file is
+ * encrypted.
+ */
+ public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+ FileSystem.Statistics statistics, long startPos) throws IOException {
+ final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
+ if (feInfo != null) {
+ if (codec == null) {
+ throw new IOException("No configuration found for the cipher suite "
+ + HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY + " value prefixed with "
+ + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+ + ". Please see the example configuration "
+ + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+ + "at core-default.xml for details.");
+ }
+ // File is encrypted, wrap the stream in a crypto stream.
+ KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+ final CryptoOutputStream cryptoOut =
+ new CryptoOutputStream(dfsos, codec,
+ decrypted.getMaterial(), feInfo.getIV(), startPos);
+ return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
+ } else {
+ // No FileEncryptionInfo present so no encryption.
+ return new HdfsDataOutputStream(dfsos, statistics, startPos);
+ }
+ }
+
public DFSInputStream open(String src)
throws IOException, UnresolvedLinkException {
return open(src, dfsClientConf.ioBufferSize, true, null);
@@ -1483,7 +1598,8 @@ public class DFSClient implements java.i
}
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
- buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
+ buffersize, dfsClientConf.createChecksum(checksumOpt),
+ favoredNodeStrs, cipherSuites);
beginFileLease(result.getFileId(), result);
return result;
}
@@ -1530,7 +1646,7 @@ public class DFSClient implements java.i
DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
flag, createParent, replication, blockSize, progress, buffersize,
- checksum);
+ checksum, null, cipherSuites);
}
beginFileLease(result.getFileId(), result);
return result;
@@ -1608,7 +1724,7 @@ public class DFSClient implements java.i
final Progressable progress, final FileSystem.Statistics statistics
) throws IOException {
final DFSOutputStream out = append(src, buffersize, progress);
- return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
+ return createWrappedOutputStream(out, statistics, out.getInitialLen());
}
private DFSOutputStream append(String src, int buffersize, Progressable progress)
@@ -2753,6 +2869,36 @@ public class DFSClient implements java.i
}
}
+ public void createEncryptionZone(String src, String keyName)
+ throws IOException {
+ checkOpen();
+ try {
+ namenode.createEncryptionZone(src, keyName);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ SafeModeException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public EncryptionZone getEZForPath(String src)
+ throws IOException {
+ checkOpen();
+ try {
+ final EncryptionZoneWithId ezi = namenode.getEZForPath(src);
+ return (ezi.getId() < 0) ? null : ezi;
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public RemoteIterator<EncryptionZone> listEncryptionZones()
+ throws IOException {
+ checkOpen();
+ return new EncryptionZoneIterator(namenode);
+ }
+
public void setXAttr(String src, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
checkOpen();
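The encryption-zone calls added above (createEncryptionZone, getEZForPath, listEncryptionZones), together with the wrapped-stream helpers, complete the client-side API in this merge. A hedged usage sketch, assuming an already-constructed DFSClient and illustrative path/key names:

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class DfsClientEzSketch {
  // "/zone" and "myKey" are illustrative; the key must exist at the KeyProvider.
  static void demo(DFSClient client) throws IOException {
    client.createEncryptionZone("/zone", "myKey");
    // Returns null when the path is not inside an encryption zone.
    EncryptionZone ez = client.getEZForPath("/zone");
    // Batched iteration; batches are fetched lazily from the NameNode.
    RemoteIterator<EncryptionZone> it = client.listEncryptionZones();
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}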
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Aug 21 21:55:57 2014
@@ -567,7 +567,9 @@ public class DFSConfigKeys extends Commo
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
-
+ public static final int DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
+ public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
+
// Journal-node related configs. These are read on the JN side.
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
public static final String DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu Aug 21 21:55:57 2014
@@ -56,6 +56,7 @@ import org.apache.hadoop.fs.UnresolvedLi
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -92,6 +93,7 @@ implements ByteBufferReadable, CanSetDro
private final boolean verifyChecksum;
private LocatedBlocks locatedBlocks = null;
private long lastBlockBeingWrittenLength = 0;
+ private FileEncryptionInfo fileEncryptionInfo = null;
private DatanodeInfo currentNode = null;
private LocatedBlock currentLocatedBlock = null;
private long pos = 0;
@@ -301,6 +303,8 @@ implements ByteBufferReadable, CanSetDro
}
}
+ fileEncryptionInfo = locatedBlocks.getFileEncryptionInfo();
+
currentNode = null;
return lastBlockBeingWrittenLength;
}
@@ -1525,6 +1529,10 @@ implements ByteBufferReadable, CanSetDro
return new ReadStatistics(readStatistics);
}
+ public synchronized FileEncryptionInfo getFileEncryptionInfo() {
+ return fileEncryptionInfo;
+ }
+
private synchronized void closeCurrentBlockReader() {
if (blockReader == null) return;
// Close the current block reader so that the new caching settings can
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Thu Aug 21 21:55:57 2014
@@ -42,10 +42,12 @@ import java.util.concurrent.atomic.Atomi
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -153,7 +155,8 @@ public class DFSOutputStream extends FSO
private boolean shouldSyncBlock = false; // force blocks to disk upon close
private final AtomicReference<CachingStrategy> cachingStrategy;
private boolean failPacket = false;
-
+ private FileEncryptionInfo fileEncryptionInfo;
+
private static class Packet {
private static final long HEART_BEAT_SEQNO = -1L;
final long seqno; // sequencenumber of buffer in block
@@ -1339,8 +1342,14 @@ public class DFSOutputStream extends FSO
//
BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage():
stage;
+
+ // We cannot change the block length in 'block' as it counts the number
+ // of bytes ack'ed.
+ ExtendedBlock blockCopy = new ExtendedBlock(block);
+ blockCopy.setNumBytes(blockSize);
+
// send the request
- new Sender(out).writeBlock(block, nodeStorageTypes[0], accessToken,
+ new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
dfsClient.clientName, nodes, nodeStorageTypes, null, bcs,
nodes.length, block.getNumBytes(), bytesSent, newGS, checksum,
cachingStrategy.get());
@@ -1560,6 +1569,7 @@ public class DFSOutputStream extends FSO
this.fileId = stat.getFileId();
this.blockSize = stat.getBlockSize();
this.blockReplication = stat.getReplication();
+ this.fileEncryptionInfo = stat.getFileEncryptionInfo();
this.progress = progress;
this.cachingStrategy = new AtomicReference<CachingStrategy>(
dfsClient.getDefaultWriteCachingStrategy());
@@ -1600,12 +1610,13 @@ public class DFSOutputStream extends FSO
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress, int buffersize,
- DataChecksum checksum, String[] favoredNodes) throws IOException {
+ DataChecksum checksum, String[] favoredNodes,
+ List<CipherSuite> cipherSuites) throws IOException {
final HdfsFileStatus stat;
try {
stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
new EnumSetWritable<CreateFlag>(flag), createParent, replication,
- blockSize);
+ blockSize, cipherSuites);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
DSQuotaExceededException.class,
@@ -1615,7 +1626,8 @@ public class DFSOutputStream extends FSO
NSQuotaExceededException.class,
SafeModeException.class,
UnresolvedPathException.class,
- SnapshotAccessControlException.class);
+ SnapshotAccessControlException.class,
+ UnknownCipherSuiteException.class);
}
final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
flag, progress, checksum, favoredNodes);
@@ -1623,14 +1635,6 @@ public class DFSOutputStream extends FSO
return out;
}
- static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
- FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
- short replication, long blockSize, Progressable progress, int buffersize,
- DataChecksum checksum) throws IOException {
- return newStreamForCreate(dfsClient, src, masked, flag, createParent, replication,
- blockSize, progress, buffersize, checksum, null);
- }
-
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
@@ -1648,6 +1652,7 @@ public class DFSOutputStream extends FSO
checksum.getBytesPerChecksum());
streamer = new DataStreamer();
}
+ this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
@@ -2172,11 +2177,18 @@ public class DFSOutputStream extends FSO
/**
* Returns the size of a file as it was when this stream was opened
*/
- long getInitialLen() {
+ public long getInitialLen() {
return initialFileSize;
}
/**
+ * @return the FileEncryptionInfo for this stream, or null if not encrypted.
+ */
+ public FileEncryptionInfo getFileEncryptionInfo() {
+ return fileEncryptionInfo;
+ }
+
+ /**
* Returns the access token currently used by streamer, for testing only
*/
synchronized Token<BlockTokenIdentifier> getBlockToken() {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Aug 21 21:55:57 2014
@@ -71,6 +71,9 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -1722,4 +1725,39 @@ public class DFSUtil {
}
}
}
+
+ /**
+ * Creates a new KeyProviderCryptoExtension by wrapping the
+ * KeyProvider specified in the given Configuration.
+ *
+ * @param conf Configuration specifying a single, non-transient KeyProvider.
+ * @return new KeyProviderCryptoExtension, or null if no provider was found.
+ * @throws IOException if the KeyProvider is improperly specified in
+ * the Configuration
+ */
+ public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
+ final Configuration conf) throws IOException {
+ final List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
+ if (providers == null || providers.size() == 0) {
+ return null;
+ }
+ if (providers.size() > 1) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("Found multiple KeyProviders but only one is permitted [");
+ String prefix = " ";
+ for (KeyProvider kp: providers) {
+ builder.append(prefix + kp.toString());
+ prefix = ", ";
+ }
+ builder.append("]");
+ throw new IOException(builder.toString());
+ }
+ KeyProviderCryptoExtension provider = KeyProviderCryptoExtension
+ .createKeyProviderCryptoExtension(providers.get(0));
+ if (provider.isTransient()) {
+ throw new IOException("KeyProvider " + provider.toString()
+ + " was found but it is a transient provider.");
+ }
+ return provider;
+ }
}
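A hedged sketch of resolving the provider this helper returns; the configuration key consulted by KeyProviderFactory and the KMS URI shown are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.hdfs.DFSUtil;

public class KeyProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed config key and illustrative KMS address.
    conf.set("hadoop.security.key.provider.path",
        "kms://http@kms.example.com:16000/kms");
    // Null when no provider is configured; IOException when multiple providers
    // are found or the single provider is transient, per the javadoc above.
    KeyProviderCryptoExtension provider =
        DFSUtil.createKeyProviderCryptoExtension(conf);
    System.out.println(provider == null ? "No KeyProvider found." : provider.toString());
  }
}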
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Aug 21 21:55:57 2014
@@ -61,7 +61,6 @@ import org.apache.hadoop.fs.permission.A
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -69,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -291,8 +291,9 @@ public class DistributedFileSystem exten
@Override
public FSDataInputStream doCall(final Path p)
throws IOException, UnresolvedLinkException {
- return new HdfsDataInputStream(
- dfs.open(getPathName(p), bufferSize, verifyChecksum));
+ final DFSInputStream dfsis =
+ dfs.open(getPathName(p), bufferSize, verifyChecksum);
+ return dfs.createWrappedInputStream(dfsis);
}
@Override
public FSDataInputStream next(final FileSystem fs, final Path p)
@@ -357,7 +358,7 @@ public class DistributedFileSystem exten
: EnumSet.of(CreateFlag.CREATE),
true, replication, blockSize, progress, bufferSize, null,
favoredNodes);
- return new HdfsDataOutputStream(out, statistics);
+ return dfs.createWrappedOutputStream(out, statistics);
}
@Override
public HdfsDataOutputStream next(final FileSystem fs, final Path p)
@@ -385,9 +386,10 @@ public class DistributedFileSystem exten
@Override
public FSDataOutputStream doCall(final Path p)
throws IOException, UnresolvedLinkException {
- return new HdfsDataOutputStream(dfs.create(getPathName(p), permission,
- cflags, replication, blockSize, progress, bufferSize, checksumOpt),
- statistics);
+ final DFSOutputStream dfsos = dfs.create(getPathName(p), permission,
+ cflags, replication, blockSize, progress, bufferSize,
+ checksumOpt);
+ return dfs.createWrappedOutputStream(dfsos, statistics);
}
@Override
public FSDataOutputStream next(final FileSystem fs, final Path p)
@@ -404,11 +406,12 @@ public class DistributedFileSystem exten
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
statistics.incrementWriteOps(1);
- return new HdfsDataOutputStream(dfs.primitiveCreate(
- getPathName(fixRelativePart(f)),
- absolutePermission, flag, true, replication, blockSize,
- progress, bufferSize, checksumOpt),statistics);
- }
+ final DFSOutputStream dfsos = dfs.primitiveCreate(
+ getPathName(fixRelativePart(f)),
+ absolutePermission, flag, true, replication, blockSize,
+ progress, bufferSize, checksumOpt);
+ return dfs.createWrappedOutputStream(dfsos, statistics);
+ }
/**
* Same as create(), except fails if parent directory doesn't already exist.
@@ -428,9 +431,9 @@ public class DistributedFileSystem exten
@Override
public FSDataOutputStream doCall(final Path p) throws IOException,
UnresolvedLinkException {
- return new HdfsDataOutputStream(dfs.create(getPathName(p), permission,
- flag, false, replication, blockSize, progress, bufferSize, null),
- statistics);
+ final DFSOutputStream dfsos = dfs.create(getPathName(p), permission,
+ flag, false, replication, blockSize, progress, bufferSize, null);
+ return dfs.createWrappedOutputStream(dfsos, statistics);
}
@Override
@@ -1796,6 +1799,25 @@ public class DistributedFileSystem exten
}.resolve(this, absF);
}
+ /* HDFS only */
+ public void createEncryptionZone(Path path, String keyName)
+ throws IOException {
+ dfs.createEncryptionZone(getPathName(path), keyName);
+ }
+
+ /* HDFS only */
+ public EncryptionZone getEZForPath(Path path)
+ throws IOException {
+ Preconditions.checkNotNull(path);
+ return dfs.getEZForPath(getPathName(path));
+ }
+
+ /* HDFS only */
+ public RemoteIterator<EncryptionZone> listEncryptionZones()
+ throws IOException {
+ return dfs.listEncryptionZones();
+ }
+
@Override
public void setXAttr(Path path, final String name, final byte[] value,
final EnumSet<XAttrSetFlag> flag) throws IOException {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java Thu Aug 21 21:55:57 2014
@@ -49,9 +49,9 @@ public class XAttrHelper {
Preconditions.checkNotNull(name, "XAttr name cannot be null.");
final int prefixIndex = name.indexOf(".");
- if (prefixIndex < 4) {// Prefix length is at least 4.
+ if (prefixIndex < 3) {// Prefix length is at least 3.
throw new HadoopIllegalArgumentException("An XAttr name must be " +
- "prefixed with user/trusted/security/system, followed by a '.'");
+ "prefixed with user/trusted/security/system/raw, followed by a '.'");
} else if (prefixIndex == name.length() - 1) {
throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
}
@@ -66,9 +66,11 @@ public class XAttrHelper {
ns = NameSpace.SYSTEM;
} else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) {
ns = NameSpace.SECURITY;
+ } else if (prefix.equals(NameSpace.RAW.toString().toLowerCase())) {
+ ns = NameSpace.RAW;
} else {
throw new HadoopIllegalArgumentException("An XAttr name must be " +
- "prefixed with user/trusted/security/system, followed by a '.'");
+ "prefixed with user/trusted/security/system/raw, followed by a '.'");
}
XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name.
substring(prefixIndex + 1)).setValue(value).build();
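With RAW added, the shortest legal prefix shrinks from four characters ("user") to three ("raw"), which is what the relaxed prefixIndex check above accounts for. A sketch of what now parses and what still fails; buildXAttr is my reading of the enclosing method, whose name is not visible in the hunk:
    import org.apache.hadoop.HadoopIllegalArgumentException;
    import org.apache.hadoop.fs.XAttr;
    import org.apache.hadoop.hdfs.XAttrHelper;

    public class XAttrPrefixSketch {
      public static void main(String[] args) {
        // "raw" is now accepted alongside user/trusted/security/system.
        XAttr raw = XAttrHelper.buildXAttr("raw.myAttr", new byte[0]);
        XAttr usr = XAttrHelper.buildXAttr("user.myAttr", new byte[0]);
        try {
          // Two-character prefix: rejected by the prefixIndex < 3 check.
          XAttrHelper.buildXAttr("ra.myAttr", new byte[0]);
        } catch (HadoopIllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }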
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
Thu Aug 21 21:55:57 2014
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.client;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
@@ -33,7 +34,9 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
/**
@@ -225,4 +228,51 @@ public class HdfsAdmin {
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools();
}
+
+ /**
+ * Create an encryption zone rooted at an empty existing directory, using the
+ * specified encryption key. An encryption zone has an associated encryption
+ * key used when reading and writing files within the zone.
+ *
+ * @param path The path of the root of the encryption zone. Must refer to
+ * an empty, existing directory.
+ * @param keyName Name of key available at the KeyProvider.
+ * @throws IOException if there was a general IO exception
+ * @throws AccessControlException if the caller does not have access to path
+ * @throws FileNotFoundException if the path does not exist
+ */
+ public void createEncryptionZone(Path path, String keyName)
+ throws IOException, AccessControlException, FileNotFoundException {
+ dfs.createEncryptionZone(path, keyName);
+ }
+
+ /**
+ * Get the path of the encryption zone for a given file or directory.
+ *
+ * @param path The path to get the encryption zone for.
+ *
+ * @return The EncryptionZone of the path, or null if path is not in an ez.
+ * @throws IOException if there was a general IO exception
+ * @throws AccessControlException if the caller does not have access to path
+ * @throws FileNotFoundException if the path does not exist
+ */
+ public EncryptionZone getEncryptionZoneForPath(Path path)
+ throws IOException, AccessControlException, FileNotFoundException {
+ return dfs.getEZForPath(path);
+ }
+
+ /**
+ * Returns a RemoteIterator which can be used to list the encryption zones
+ * in HDFS. For large numbers of encryption zones, the iterator will fetch
+ * the list of zones in a number of small batches.
+ * <p/>
+ * Since the list is fetched in batches, it does not represent a
+ * consistent snapshot of the entire list of encryption zones.
+ * <p/>
+ * This method can only be called by HDFS superusers.
+ */
+ public RemoteIterator<EncryptionZone> listEncryptionZones()
+ throws IOException {
+ return dfs.listEncryptionZones();
+ }
}
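Together the three methods give a create/inspect/list workflow. A sketch under the same placeholder URI and key name as above, assuming EncryptionZone exposes its root path via getPath():
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class HdfsAdminEzSketch {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nn:8020"),
            new Configuration());
        admin.createEncryptionZone(new Path("/secure"), "myKey");
        // Batches are fetched lazily; the listing is not a consistent snapshot.
        RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }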
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
Thu Aug 21 21:55:57 2014
@@ -17,17 +17,21 @@
*/
package org.apache.hadoop.hdfs.client;
+import java.io.InputStream;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import com.google.common.base.Preconditions;
+
/**
* The Hdfs implementation of {@link FSDataInputStream}.
*/
@@ -38,25 +42,49 @@ public class HdfsDataInputStream extends
super(in);
}
+ public HdfsDataInputStream(CryptoInputStream in) throws IOException {
+ super(in);
+ Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
+ "CryptoInputStream should wrap a DFSInputStream");
+ }
+
+ private DFSInputStream getDFSInputStream() {
+ if (in instanceof CryptoInputStream) {
+ return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream();
+ }
+ return (DFSInputStream) in;
+ }
+
+ /**
+ * Get a reference to the wrapped input stream. We always want to return the
+ * actual underlying InputStream, even when we're using a CryptoStream, e.g.
+ * in the delegated methods below.
+ *
+ * @return the underlying input stream
+ */
+ public InputStream getWrappedStream() {
+ return in;
+ }
+
/**
* Get the datanode from which the stream is currently reading.
*/
public DatanodeInfo getCurrentDatanode() {
- return ((DFSInputStream) in).getCurrentDatanode();
+ return getDFSInputStream().getCurrentDatanode();
}
/**
* Get the block containing the target position.
*/
public ExtendedBlock getCurrentBlock() {
- return ((DFSInputStream) in).getCurrentBlock();
+ return getDFSInputStream().getCurrentBlock();
}
/**
* Get the collection of blocks that has already been located.
*/
public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
- return ((DFSInputStream) in).getAllBlocks();
+ return getDFSInputStream().getAllBlocks();
}
/**
@@ -66,7 +94,7 @@ public class HdfsDataInputStream extends
* @return The visible length of the file.
*/
public long getVisibleLength() throws IOException {
- return ((DFSInputStream) in).getFileLength();
+ return getDFSInputStream().getFileLength();
}
/**
@@ -76,6 +104,6 @@ public class HdfsDataInputStream extends
* bytes read through HdfsDataInputStream.
*/
public synchronized DFSInputStream.ReadStatistics getReadStatistics() {
- return ((DFSInputStream) in).getReadStatistics();
+ return getDFSInputStream().getReadStatistics();
}
}
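Routing every delegated accessor through getDFSInputStream() means callers see identical behavior for encrypted and plain files. A small sketch:
    import org.apache.hadoop.hdfs.DFSInputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public final class ReadStatsSketch {
      // Reports bytes read from the underlying DFSInputStream whether or not
      // a CryptoInputStream sits in between.
      public static long bytesRead(HdfsDataInputStream in) {
        DFSInputStream.ReadStatistics stats = in.getReadStatistics();
        return stats.getTotalBytesRead();
      }
    }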
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
Thu Aug 21 21:55:57 2014
@@ -18,14 +18,18 @@
package org.apache.hadoop.hdfs.client;
import java.io.IOException;
+import java.io.OutputStream;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSOutputStream;
+import com.google.common.base.Preconditions;
+
/**
* The Hdfs implementation of {@link FSDataOutputStream}.
*/
@@ -42,6 +46,18 @@ public class HdfsDataOutputStream extend
this(out, stats, 0L);
}
+ public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
+ long startPosition) throws IOException {
+ super(out, stats, startPosition);
+ Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
+ "CryptoOutputStream should wrap a DFSOutputStream");
+ }
+
+ public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats)
+ throws IOException {
+ this(out, stats, 0L);
+ }
+
/**
* Get the actual number of replicas of the current block.
*
@@ -55,7 +71,11 @@ public class HdfsDataOutputStream extend
* @return the number of valid replicas of the current block
*/
public synchronized int getCurrentBlockReplication() throws IOException {
- return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
+ OutputStream wrappedStream = getWrappedStream();
+ if (wrappedStream instanceof CryptoOutputStream) {
+ wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
+ }
+ return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication();
}
/**
@@ -67,14 +87,20 @@ public class HdfsDataOutputStream extend
* @see FSDataOutputStream#hsync()
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
- ((DFSOutputStream) getWrappedStream()).hsync(syncFlags);
+ OutputStream wrappedStream = getWrappedStream();
+ if (wrappedStream instanceof CryptoOutputStream) {
+ ((CryptoOutputStream) wrappedStream).flush();
+ wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
+ }
+ ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
public static enum SyncFlag {
+
/**
- * When doing sync to DataNodes, also update the metadata (block
- * length) in the NameNode
+ * When doing sync to DataNodes, also update the metadata (block length) in
+ * the NameNode.
*/
UPDATE_LENGTH;
}
-}
\ No newline at end of file
+}
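The ordering in hsync() matters: a CryptoOutputStream buffers ciphertext, so it is flushed before the wrapped DFSOutputStream syncs; otherwise the tail of the buffer would be left out of the sync. Caller code is unchanged, as in this sketch:
    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    public final class DurableWriteSketch {
      public static void durableAppend(HdfsDataOutputStream out, byte[] data)
          throws IOException {
        out.write(data);
        // Flushes any CryptoOutputStream buffer first, then syncs to the
        // DataNodes and updates the block length on the NameNode.
        out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      }
    }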
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
Thu Aug 21 21:55:57 2014
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
@@ -188,7 +189,8 @@ public interface ClientProtocol {
@AtMostOnce
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
- boolean createParent, short replication, long blockSize)
+ boolean createParent, short replication, long blockSize,
+ List<CipherSuite> cipherSuites)
throws AccessControlException, AlreadyBeingCreatedException,
DSQuotaExceededException, FileAlreadyExistsException,
FileNotFoundException, NSQuotaExceededException,
@@ -1267,6 +1269,31 @@ public interface ClientProtocol {
public AclStatus getAclStatus(String src) throws IOException;
/**
+ * Create an encryption zone
+ */
+ @AtMostOnce
+ public void createEncryptionZone(String src, String keyName)
+ throws IOException;
+
+ /**
+ * Get the encryption zone for a path.
+ */
+ @Idempotent
+ public EncryptionZoneWithId getEZForPath(String src)
+ throws IOException;
+
+ /**
+ * Used to implement cursor-based batched listing of {@link EncryptionZone}s.
+ *
+ * @param prevId ID of the last item in the previous batch. If there is no
+ * previous batch, a negative value can be used.
+ * @return Batch of encryption zones.
+ */
+ @Idempotent
+ public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+ long prevId) throws IOException;
+
+ /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
@@ -1307,7 +1334,6 @@ public interface ClientProtocol {
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
- * @param xAttrs xAttrs to get
* @return List<XAttr> <code>XAttr</code> list
* @throws IOException
*/
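The prevId cursor makes the batched listing resumable without server-side state: each call returns entries whose ids follow prevId. A client-side pagination sketch, assuming EncryptionZoneWithId exposes getId() and getPath():
    import java.io.IOException;

    import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;

    public final class ListZonesSketch {
      public static void printAllZones(ClientProtocol namenode)
          throws IOException {
        long prevId = -1; // negative: no previous batch
        BatchedEntries<EncryptionZoneWithId> batch;
        do {
          batch = namenode.listEncryptionZones(prevId);
          for (int i = 0; i < batch.size(); i++) {
            EncryptionZoneWithId zone = batch.get(i);
            System.out.println(zone.getPath());
            prevId = zone.getId(); // resume after the last entry seen
          }
        } while (batch.hasMore());
      }
    }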
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
Thu Aug 21 21:55:57 2014
@@ -21,6 +21,7 @@ import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -44,6 +45,8 @@ public class HdfsFileStatus {
private final String owner;
private final String group;
private final long fileId;
+
+ private final FileEncryptionInfo feInfo;
// Used by dir, not including dot and dotdot. Always zero for a regular file.
private final int childrenNum;
@@ -63,11 +66,12 @@ public class HdfsFileStatus {
* @param group the group of the path
* @param path the local name in java UTF8 encoding the same as that in-memory
* @param fileId the file id
+ * @param feInfo the file's encryption info
*/
public HdfsFileStatus(long length, boolean isdir, int block_replication,
- long blocksize, long modification_time, long access_time,
- FsPermission permission, String owner, String group,
- byte[] symlink, byte[] path, long fileId, int childrenNum) {
+ long blocksize, long modification_time, long access_time,
+ FsPermission permission, String owner, String group, byte[] symlink,
+ byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo) {
this.length = length;
this.isdir = isdir;
this.block_replication = (short)block_replication;
@@ -85,6 +89,7 @@ public class HdfsFileStatus {
this.path = path;
this.fileId = fileId;
this.childrenNum = childrenNum;
+ this.feInfo = feInfo;
}
/**
@@ -238,6 +243,10 @@ public class HdfsFileStatus {
return fileId;
}
+ public final FileEncryptionInfo getFileEncryptionInfo() {
+ return feInfo;
+ }
+
public final int getChildrenNum() {
return childrenNum;
}
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
Thu Aug 21 21:55:57 2014
@@ -21,6 +21,7 @@ import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -51,15 +52,16 @@ public class HdfsLocatedFileStatus exten
* @param path local path name in java UTF8 format
* @param fileId the file id
* @param locations block locations
+ * @param feInfo file encryption info
*/
public HdfsLocatedFileStatus(long length, boolean isdir,
int block_replication, long blocksize, long modification_time,
long access_time, FsPermission permission, String owner, String group,
byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
- int childrenNum) {
+ int childrenNum, FileEncryptionInfo feInfo) {
super(length, isdir, block_replication, blocksize, modification_time,
- access_time, permission, owner, group, symlink, path, fileId,
- childrenNum);
+ access_time, permission, owner, group, symlink, path, fileId,
+ childrenNum, feInfo);
this.locations = locations;
}
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
Thu Aug 21 21:55:57 2014
@@ -23,6 +23,7 @@ import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
/**
* Collection of blocks with their locations and the file length.
@@ -35,22 +36,23 @@ public class LocatedBlocks {
private final boolean underConstruction;
private LocatedBlock lastLocatedBlock = null;
private boolean isLastBlockComplete = false;
+ private FileEncryptionInfo fileEncryptionInfo = null;
public LocatedBlocks() {
fileLength = 0;
blocks = null;
underConstruction = false;
}
-
- /** public Constructor */
+
public LocatedBlocks(long flength, boolean isUnderConstuction,
- List<LocatedBlock> blks,
- LocatedBlock lastBlock, boolean isLastBlockCompleted) {
+ List<LocatedBlock> blks, LocatedBlock lastBlock,
+ boolean isLastBlockCompleted, FileEncryptionInfo feInfo) {
fileLength = flength;
blocks = blks;
underConstruction = isUnderConstuction;
this.lastLocatedBlock = lastBlock;
this.isLastBlockComplete = isLastBlockCompleted;
+ this.fileEncryptionInfo = feInfo;
}
/**
@@ -92,13 +94,20 @@ public class LocatedBlocks {
}
/**
- * Return ture if file was under construction when
- * this LocatedBlocks was constructed, false otherwise.
+ * Return true if file was under construction when this LocatedBlocks was
+ * constructed, false otherwise.
*/
public boolean isUnderConstruction() {
return underConstruction;
}
-
+
+ /**
+ * @return the FileEncryptionInfo for the LocatedBlocks
+ */
+ public FileEncryptionInfo getFileEncryptionInfo() {
+ return fileEncryptionInfo;
+ }
+
/**
* Find block containing specified offset.
*
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
Thu Aug 21 21:55:57 2014
@@ -61,7 +61,7 @@ public class SnapshottableDirectoryStatu
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, owner, group, null, localName, inodeId,
- childrenNum);
+ childrenNum, null);
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
Thu Aug 21 21:55:57 2014
@@ -162,8 +162,10 @@ public final class DataTransferSaslUtil
Configuration saslPropsResolverConf = new Configuration(conf);
saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
- DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
+ HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
SaslPropertiesResolver.class, SaslPropertiesResolver.class);
+ resolverClass = conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
+ resolverClass, SaslPropertiesResolver.class);
saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
resolverClass, SaslPropertiesResolver.class);
SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
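The two getClass() calls establish precedence: the transfer-specific key wins when set, otherwise the common security key (or the plain SaslPropertiesResolver default) applies. The same pattern in isolation; the import locations for the two key constants are my assumption:
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.SaslPropertiesResolver;

    public final class ResolverPrecedenceSketch {
      public static Class<? extends SaslPropertiesResolver> pick(
          Configuration conf) {
        // General key first, defaulting to SaslPropertiesResolver itself ...
        Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
            HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
            SaslPropertiesResolver.class, SaslPropertiesResolver.class);
        // ... then the transfer-specific key overrides it when set.
        return conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
            resolverClass, SaslPropertiesResolver.class);
      }
    }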
Modified:
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1619608&r1=1619607&r2=1619608&view=diff
==============================================================================
---
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
(original)
+++
hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
Thu Aug 21 21:55:57 2014
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -176,6 +177,12 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -376,7 +383,8 @@ public class ClientNamenodeProtocolServe
HdfsFileStatus result = server.create(req.getSrc(),
PBHelper.convert(req.getMasked()), req.getClientName(),
PBHelper.convertCreateFlag(req.getCreateFlag()),
req.getCreateParent(),
- (short) req.getReplication(), req.getBlockSize());
+ (short) req.getReplication(), req.getBlockSize(),
+ PBHelper.convertCipherSuiteProtos(req.getCipherSuitesList()));
if (result != null) {
return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
@@ -1301,6 +1309,52 @@ public class ClientNamenodeProtocolServe
}
@Override
+ public CreateEncryptionZoneResponseProto createEncryptionZone(
+ RpcController controller, CreateEncryptionZoneRequestProto req)
+ throws ServiceException {
+ try {
+ server.createEncryptionZone(req.getSrc(), req.getKeyName());
+ return CreateEncryptionZoneResponseProto.newBuilder().build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public GetEZForPathResponseProto getEZForPath(
+ RpcController controller, GetEZForPathRequestProto req)
+ throws ServiceException {
+ try {
+ GetEZForPathResponseProto.Builder builder =
+ GetEZForPathResponseProto.newBuilder();
+ final EncryptionZoneWithId ret = server.getEZForPath(req.getSrc());
+ builder.setZone(PBHelper.convert(ret));
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public ListEncryptionZonesResponseProto listEncryptionZones(
+ RpcController controller, ListEncryptionZonesRequestProto req)
+ throws ServiceException {
+ try {
+ BatchedEntries<EncryptionZoneWithId> entries = server
+ .listEncryptionZones(req.getId());
+ ListEncryptionZonesResponseProto.Builder builder =
+ ListEncryptionZonesResponseProto.newBuilder();
+ builder.setHasMore(entries.hasMore());
+ for (int i=0; i<entries.size(); i++) {
+ builder.addZones(PBHelper.convert(entries.get(i)));
+ }
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
public SetXAttrResponseProto setXAttr(RpcController controller,
SetXAttrRequestProto req) throws ServiceException {
try {