Author: todd
Date: Thu Jul 24 06:23:41 2014
New Revision: 1613007

URL: http://svn.apache.org/r1613007
Log:
Merge trunk into branch
Removed:
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
Modified:
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
    hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

Propchange: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1612741-1613006
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Thu Jul 24 06:23:41 2014
@@ -104,6 +104,10 @@ public class RpcProgramMountd extends Rp
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends Rp
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
         RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
         out);
-    }
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Thu Jul 24 06:23:41 2014
@@ -2123,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcP
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-
+    // Check export table
+    if (exports == null) {
+      return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {
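Both NFS gateway hunks above apply the same defensive pattern: when nfs.exports.allowed.hosts fails to parse (the HDFS-6455 case listed in CHANGES.txt below), the export table (hostsMatcher / exports) stays null, and requests are now refused cleanly instead of triggering a NullPointerException. A minimal sketch of the idea, using simplified stand-in types rather than the real NFS classes:

    import java.net.InetAddress;

    /** Illustration only: simplified stand-ins for the NFS gateway types. */
    class ExportsGuardSketch {
      enum AccessPrivilege { NONE, READ_ONLY, READ_WRITE }

      interface NfsExports {
        AccessPrivilege getAccessPrivilege(InetAddress client);
      }

      // May legitimately be null when nfs.exports.allowed.hosts did not parse.
      private final NfsExports exports;

      ExportsGuardSketch(NfsExports exports) {
        this.exports = exports;
      }

      /** Refuse outright when there is no valid export table, instead of NPE-ing. */
      boolean checkClientAccess(InetAddress client) {
        if (exports == null) {
          return false;
        }
        return exports.getAccessPrivilege(client) != AccessPrivilege.NONE;
      }
    }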
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 24 06:23:41 2014
@@ -357,6 +357,13 @@ Release 2.6.0 - UNRELEASED
     HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will
     cause a null pointer exception. (Masatake Iwasaki via brandonli)

+    HDFS-6114. Block Scan log rolling will never happen if blocks written
+    continuously leading to huge size of dncp_block_verification.log.curr
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
 Release 2.5.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -892,6 +899,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6703. NFS: Files can be deleted from a read-only mount
     (Srikanth Upputuri via brandonli)

+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

Propchange: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1612741-1613006

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Thu Jul 24 06:23:41 2014
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;

 import com.google.common.annotations.VisibleForTesting;

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java Thu Jul 24 06:23:41 2014
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Jul 24 06:23:41 2014
@@ -1337,6 +1337,6 @@ public interface ClientProtocol {
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }
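This annotation flip drives most of the xattr changes in this merge: a retried removeXAttr is not idempotent (after the first attempt succeeds, the retry would find nothing to remove and fail), so the call must run at most once per client request, with the outcome replayed from a cache keyed by RPC client id and call id. The FSNamesystem, FSEditLog, and FSEditLogOp hunks below wire that up with the NameNode retry cache. A toy illustration of the contract itself — not Hadoop's RetryCache, just the shape of the guarantee:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    /**
     * Toy at-most-once guard keyed by (clientId, callId). A duplicate of a
     * successful call returns quietly instead of re-executing and failing.
     * Illustration only; Hadoop's real implementation is o.a.h.ipc.RetryCache.
     */
    class AtMostOnceGuard {
      private final Map<String, Boolean> outcomes = new ConcurrentHashMap<>();

      interface Op { void run() throws Exception; }

      void execute(String clientId, int callId, Op op) throws Exception {
        String key = clientId + ":" + callId;
        Boolean prev = outcomes.get(key);
        if (prev != null && prev) {
          return;       // duplicate of an already-applied call: replay success
        }
        boolean success = false;
        try {
          op.run();     // first (or previously failed) attempt
          success = true;
        } finally {
          outcomes.put(key, success);   // record outcome for future retries
        }
      }
    }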
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java Thu Jul 24 06:23:41 2014
@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteCha
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;

 import com.google.common.base.Preconditions;
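These three import swaps (together with the two file removals listed at the top) track the relocation of DirectBufferPool from org.apache.hadoop.hdfs.util to the common org.apache.hadoop.util package, so the pool can be shared outside HDFS. Callers only change the import; the borrow/return API is unchanged. A minimal usage sketch, assuming the relocated class:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.util.DirectBufferPool;

    public class DirectBufferPoolExample {
      public static void main(String[] args) {
        DirectBufferPool pool = new DirectBufferPool();
        // Returns a direct buffer of at least the requested size,
        // recycled from the pool when one is available.
        ByteBuffer buf = pool.getBuffer(4096);
        try {
          buf.putInt(42);             // ... use the buffer ...
        } finally {
          buf.clear();
          pool.returnBuffer(buf);     // make it available for reuse
        }
      }
    }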
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Thu Jul 24 06:23:41 2014
@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+  private final SortedSet<BlockScanInfo> newBlockInfoSet =
+      new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
   private final GSet<Block, BlockScanInfo> blockMap
       = new LightWeightGSet<Block, BlockScanInfo>(
           LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ class BlockPoolSliceScanner {
       BlockScanInfo info = new BlockScanInfo( block );
       info.lastScanTime = scanTime--;
       //still keep 'info.lastScanType' to NONE.
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }

     RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ class BlockPoolSliceScanner {
     // Should we change throttler bandwidth every time bytesLeft changes?
     // not really required.
   }
-
-  private synchronized void addBlockInfo(BlockScanInfo info) {
-    boolean added = blockInfoSet.add(info);
+
+  /**
+   * Add the BlockScanInfo to sorted set of blockScanInfo
+   * @param info BlockScanInfo to be added
+   * @param isNewBlock true if the block is the new Block, false if
+   *          BlockScanInfo is being updated with new scanTime
+   */
+  private synchronized void addBlockInfo(BlockScanInfo info,
+      boolean isNewBlock) {
+    boolean added = false;
+    if (isNewBlock) {
+      // check whether the block already present
+      boolean exists = blockInfoSet.contains(info);
+      added = !exists && newBlockInfoSet.add(info);
+    } else {
+      added = blockInfoSet.add(info);
+    }
     blockMap.put(info);
     if (added) {
       updateBytesToScan(info.getNumBytes(), info.lastScanTime);
     }
   }
-
+
   private synchronized void delBlockInfo(BlockScanInfo info) {
     boolean exists = blockInfoSet.remove(info);
+    if (!exists){
+      exists = newBlockInfoSet.remove(info);
+    }
     blockMap.remove(info);
     if (exists) {
       updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
     }
   }
-
+
   /** Update blockMap by the given LogEntry */
   private synchronized void updateBlockInfo(LogEntry e) {
     BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ class BlockPoolSliceScanner {
       delBlockInfo(info);
       info.lastScanTime = e.verificationTime;
       info.lastScanType = ScanType.VERIFICATION_SCAN;
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
   }
@@ -275,14 +296,14 @@ class BlockPoolSliceScanner {
     info = new BlockScanInfo(block.getLocalBlock());
     info.lastScanTime = getNewBlockScanTime();
-    addBlockInfo(info);
+    addBlockInfo(info, true);
     adjustThrottler();
   }

   /** Deletes the block from internal structures */
   synchronized void deleteBlock(Block block) {
     BlockScanInfo info = blockMap.get(block);
-    if ( info != null ) {
+    if (info != null) {
       delBlockInfo(info);
     }
   }
@@ -319,7 +340,7 @@ class BlockPoolSliceScanner {
     info.lastScanType = type;
     info.lastScanTime = now;
     info.lastScanOk = scanOk;
-    addBlockInfo(info);
+    addBlockInfo(info, false);

     // Don't update meta data if the verification failed.
     if (!scanOk) {
@@ -578,7 +599,7 @@ class BlockPoolSliceScanner {
           delBlockInfo(info);
           info.lastScanTime = lastScanTime;
           lastScanTime += verifyInterval;
-          addBlockInfo(info);
+          addBlockInfo(info, false);
         }
       }
     }
@@ -674,12 +695,21 @@ class BlockPoolSliceScanner {
       throw e;
     } finally {
       rollVerificationLogs();
+      rollNewBlocksInfo();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Done scanning block pool: " + blockPoolId);
       }
     }
   }
-
+
+  // add new blocks to scan in next iteration
+  private synchronized void rollNewBlocksInfo() {
+    for (BlockScanInfo newBlock : newBlockInfoSet) {
+      blockInfoSet.add(newBlock);
+    }
+    newBlockInfoSet.clear();
+  }
+
   private synchronized void rollVerificationLogs() {
     if (verificationLog != null) {
       try {
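This is the HDFS-6114 fix noted in CHANGES.txt: new blocks used to be added straight into blockInfoSet, so on a DataNode receiving continuous writes the scan pass never drained and the verification log never rolled, letting dncp_block_verification.log.curr grow without bound. Newly written blocks are now staged in newBlockInfoSet and merged into the working set by rollNewBlocksInfo() only after the current pass completes. A toy version of the staging idea (illustrative names, not the HDFS classes):

    import java.util.TreeSet;

    /** Toy work queue that defers newly arriving items to the next pass. */
    class TwoPhaseScanQueue<T extends Comparable<T>> {
      private final TreeSet<T> current = new TreeSet<>(); // scanned this pass
      private final TreeSet<T> staged = new TreeSet<>();  // arrivals during pass

      synchronized void add(T item, boolean isNew) {
        if (isNew) {
          if (!current.contains(item)) {
            staged.add(item);  // new arrivals must not extend the current pass
          }
        } else {
          current.add(item);   // re-queue an existing item with a new scan time
        }
      }

      synchronized boolean remove(T item) {
        return current.remove(item) || staged.remove(item);
      }

      /** Call once per completed pass, mirroring rollNewBlocksInfo(). */
      synchronized void rollover() {
        current.addAll(staged);
        staged.clear();
      }
    }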
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Jul 24 06:23:41 2014
@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPu
     logEdit(op);
   }

-  void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
     final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
     op.src = src;
     op.xAttrs = xAttrs;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Jul 24 06:23:41 2014
@@ -821,6 +821,10 @@ public class FSEditLogLoader {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
       fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
           removeXAttrOp.xAttrs);
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+            removeXAttrOp.rpcCallId);
+      }
       break;
     }
     default:

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Thu Jul 24 06:23:41 2014
@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+      readRpcIds(in, logVersion);
     }

     @Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
       }
       b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }

     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       appendXAttrsToXml(contentHandler, xAttrs);
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }

     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
       xAttrs = readXAttrsFromXml(st);
+      readRpcIdsFromXml(st);
     }
   }
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul 24 06:23:41 2014
@@ -8279,11 +8279,12 @@ public class FSNamesystem implements Nam
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
-    List<XAttr> filteredXAttrs = null;
     if (!getAll) {
-      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
-      if (filteredXAttrs.isEmpty()) {
-        return filteredXAttrs;
+      try {
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+      } catch (AccessControlException e) {
+        logAuditEvent(false, "getXAttrs", src);
+        throw e;
       }
     }
     checkOperation(OperationCategory.READ);
@@ -8302,15 +8303,21 @@ public class FSNamesystem implements Nam
         if (filteredAll == null || filteredAll.isEmpty()) {
           return null;
         }
-        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
-        for (XAttr xAttr : filteredXAttrs) {
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+        for (XAttr xAttr : xAttrs) {
+          boolean foundIt = false;
           for (XAttr a : filteredAll) {
             if (xAttr.getNameSpace() == a.getNameSpace()
                 && xAttr.getName().equals(a.getName())) {
               toGet.add(a);
+              foundIt = true;
               break;
             }
           }
+          if (!foundIt) {
+            throw new IOException(
+                "At least one of the attributes provided was not found.");
+          }
         }
         return toGet;
       }
@@ -8344,17 +8351,42 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
-
+
+  /**
+   * Remove an xattr for a file or directory.
+   *
+   * @param src
+   *          - path to remove the xattr from
+   * @param xAttr
+   *          - xAttr to remove
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
   void removeXAttr(String src, XAttr xAttr) throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      removeXAttrInt(src, xAttr, cacheEntry != null);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+
+  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -8368,12 +8400,12 @@ public class FSNamesystem implements Nam
       xAttrs.add(xAttr);
       List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
-        getEditLog().logRemoveXAttrs(src, removedXAttrs);
+        getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+      } else {
+        throw new IOException(
+            "No matching attributes found for remove operation");
       }
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "removeXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }
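The getXAttrs change implements HDFS-6422: requesting an xattr that does not exist now raises an IOException instead of silently returning null or a partial map. Roughly, from a client's point of view (a sketch only; fs is assumed to be any HDFS FileSystem handle and /foo an existing path):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class GetXAttrContractSketch {
      static void demo(FileSystem fs) throws IOException {
        Path p = new Path("/foo");
        fs.setXAttr(p, "user.a1", "1234".getBytes());
        fs.getXAttr(p, "user.a1");       // returns the value, as before
        try {
          fs.getXAttr(p, "user.nope");   // previously returned null
        } catch (IOException e) {
          // now: "At least one of the attributes provided was not found."
        }
      }
    }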
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Thu Jul 24 06:23:41 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelpe
 import org.apache.hadoop.security.AccessControlException;

 import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;

 /**
  * There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
     throw new AccessControlException("User doesn't have permission for xattr: "
         + XAttrHelper.getPrefixName(xAttr));
   }
-
-  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
+
+  static void checkPermissionForApi(FSPermissionChecker pc,
+      List<XAttr> xAttrs) throws AccessControlException {
+    Preconditions.checkArgument(xAttrs != null);
+    if (xAttrs.isEmpty()) {
+      return;
+    }
+
+    for (XAttr xAttr : xAttrs) {
+      checkPermissionForApi(pc, xAttr);
+    }
+  }
+
+  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java Thu Jul 24 06:23:41 2014
@@ -25,8 +25,8 @@ public class XAttrNameParam extends Stri
   /** Default parameter value. **/
   public static final String DEFAULT = "";

-  private static Domain DOMAIN = new Domain(NAME,
-      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+  private static Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(".*"));

   public XAttrNameParam(final String str) {
     super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
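With the WebHDFS-side pattern relaxed to ".*" (and the corresponding rejection test removed from TestParam below), xattr name validation happens in one place, on the NameNode, so RPC and WebHDFS callers see the same HadoopIllegalArgumentException; the FSXAttrBaseTest changes below assert exactly that. A sketch of the namespace-prefix rule being relied on — the regex here is illustrative, adapted from the old client-side pattern, not the NameNode's actual implementation:

    import java.util.regex.Pattern;

    class XAttrNameCheckSketch {
      // Names must look like "user.foo", "trusted.foo", "system.foo",
      // or "security.foo".
      private static final Pattern PREFIX =
          Pattern.compile("^(user|trusted|system|security)\\..+");

      static void validate(String name) {
        if (!PREFIX.matcher(name).matches()) {
          throw new IllegalArgumentException(
              "An XAttr name must be prefixed with user/trusted/security/system, "
                  + "followed by a '.'");
        }
      }
    }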
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Thu Jul 24 06:23:41 2014
@@ -2653,6 +2653,75 @@ public class TestDFSShell {
     }
   }

+  /*
+   * 1. Test that CLI throws an exception and returns non-0 when user does
+   * not have permission to read an xattr.
+   * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+   * xattr is requested.
+   */
+  @Test (timeout = 120000)
+  public void testGetFAttrErrors() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bakErr = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      final FileSystem fs = cluster.getFileSystem();
+      final Path p = new Path("/foo");
+      fs.mkdirs(p);
+      bakErr = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+
+      // No permission for "other".
+      fs.setPermission(p, new FsPermission((short) 0700));
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[] {
+            "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+        assertEquals("Returned should be 0", 0, ret);
+        out.reset();
+      }
+
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          int ret = ToolRunner.run(fshell, new String[] {
+              "-getfattr", "-n", "user.a1", "/foo"});
+          String str = out.toString();
+          assertTrue("xattr value was incorrectly returned",
+              str.indexOf("1234") == -1);
+          out.reset();
+          return null;
+        }
+      });
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[]{
+            "-getfattr", "-n", "user.nonexistent", "/foo"});
+        String str = out.toString();
+        assertTrue("xattr value was incorrectly returned",
+            str.indexOf(
+                "getfattr: At least one of the attributes provided was not found")
+                >= 0);
+        out.reset();
+      }
+    } finally {
+      if (bakErr != null) {
+        System.setErr(bakErr);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test that the server trash configuration is respected when
    * the client configuration is not set.
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Thu Jul 24 06:23:41 2014
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permi
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
       fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with empty name should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " +
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }

     // Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with invalid name prefix or without " +
           "name prefix should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " +
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }

     // Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
   }

   /**
-   * Tests for getting xattr
-   * 1. To get xattr which does not exist.
-   * 2. To get multiple xattrs.
+   * getxattr tests. Test that getxattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have search access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only search access to the owning directory and
+   * execute/search access to the actual entity
+   * the caller does not have search access to the owning directory and read
+   * access to the actual entity
    */
   @Test(timeout = 120000)
   public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
     fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

-    // XAttr does not exist.
-    byte[] value = fs.getXAttr(path, name3);
-    Assert.assertEquals(value, null);
-
-    List<String> names = Lists.newArrayList();
-    names.add(name1);
-    names.add(name2);
-    names.add(name3);
-    Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    /* An XAttr that was requested does not exist. */
+    try {
+      final byte[] value = fs.getXAttr(path, name3);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "At least one of the attributes provided was not found.", e);
+    }
+
+    /* Throw an exception if an xattr that was requested does not exist. */
+    {
+      final List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      try {
+        final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+        Assert.fail("expected IOException");
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(
+            "At least one of the attributes provided was not found.", e);
+      }
+    }

     fs.removeXAttr(path, name1);
     fs.removeXAttr(path, name2);
+
+    /* Unknown namespace should throw an exception. */
+    try {
+      final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains
+          ("An XAttr name must be prefixed with user/trusted/security/system, " +
+           "followed by a '.'", e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    }
+
+    fs.setXAttr(path, name1, "1234".getBytes());
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(path, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have search access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and search/execute
+     * access to the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and read access to
+     * the actual entity with the extended attribute is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0704));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final FileSystem userFs = dfsCluster.getFileSystem();
+        final byte[] xattr = userFs.getXAttr(childDir, name1);
+        return null;
+      }
+    });
   }

   /**
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
     fs.removeXAttr(path, name3);
   }

+  /**
+   * removexattr tests. Test that removexattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have "execute" (scan) access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only execute access to the owning directory and execute
+   * access to the actual entity
+   * the caller does not have execute access to the owning directory and write
+   * access to the actual entity
+   */
+  @Test(timeout = 120000)
+  public void testRemoveXAttrPermissions() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+    try {
+      fs.removeXAttr(path, name2);
+      fs.removeXAttr(path, name2);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+    }
+
+    /* Unknown namespace should throw an exception. */
+    final String expectedExceptionString = "An XAttr name must be prefixed " +
+        "with user/trusted/security/system, followed by a '.'";
+    try {
+      fs.removeXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    } catch (HadoopIllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(path, "trusted.foo");
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    } finally {
+      fs.removeXAttr(path, "trusted.foo");
+    }
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(path, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have "execute" (scan) access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and scan access to
+     * the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and write access to
+     * the actual entity with extended attributes is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0706));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final FileSystem userFs = dfsCluster.getFileSystem();
+        userFs.removeXAttr(childDir, name1);
+        return null;
+      }
+    });
+  }
+
   @Test(timeout = 120000)
   public void testRenameFileWithXAttr() throws Exception {
     FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Thu Jul 24 06:23:41 2014
@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
     LightWeightCache<CacheEntry, CacheEntry> cacheSet =
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());

     Map<CacheEntry, CacheEntry> oldEntries =
         new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Thu Jul 24 06:23:41 2014
@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet =
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());

     Map<CacheEntry, CacheEntry> oldEntries =
         new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -1047,6 +1047,49 @@ public class TestRetryCacheWithHA {
     }
   }

+  /** removeXAttr */
+  class RemoveXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    RemoveXAttrOp(DFSClient client, String src) {
+      super("removeXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+        client.setXAttr(src, "user.key", "value".getBytes(),
+            EnumSet.of(XAttrSetFlag.CREATE));
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.removeXAttr(src, "user.key");
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (!keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
+
   @Test (timeout=60000)
   public void testCreateSnapshot() throws Exception {
     final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
     testClientRetryWithFailover(op);
   }

+  @Test (timeout=60000)
+  public void testRemoveXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr");
+    testClientRetryWithFailover(op);
+  }
+
   /**
    * When NN failover happens, if the client did not receive the response and
    * send a retry request to the other NN, the same response should be recieved

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Thu Jul 24 06:23:41 2014
@@ -355,12 +355,6 @@ public class TestParam {
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
     Assert.assertEquals(p.getXAttrName(), "user.a1");
-    try {
-      new XAttrNameParam("a1");
-      Assert.fail();
-    } catch (IllegalArgumentException e) {
-      LOG.info("EXPECTED: " + e);
-    }
   }

   @Test

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1613007&r1=1613006&r2=1613007&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Thu Jul 24 06:23:41 2014
@@ -986,6 +986,8 @@
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
       </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>82</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>