[2/2] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Merge branch 'trunk' into hdfs-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312d

Branch: refs/heads/hdfs-7240
Commit: 312deac6781bd15a5e1a46e2007243bf5186
Parents: b14a70e 5667129
Author: Anu Engineer 
Authored: Wed Nov 4 14:19:34 2015 -0800
Committer: Anu Engineer 
Committed: Wed Nov 4 14:19:34 2015 -0800

--
 .gitignore  |1 +
 LICENSE.txt |   59 +
 dev-support/docker/Dockerfile   |7 +-
 dev-support/test-patch.sh   |   10 +-
 .../main/resources/assemblies/hadoop-dist.xml   |4 +-
 .../assemblies/hadoop-hdfs-nfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-httpfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-kms-dist.xml|4 +-
 .../assemblies/hadoop-mapreduce-dist.xml|4 +-
 .../resources/assemblies/hadoop-nfs-dist.xml|4 +-
 .../main/resources/assemblies/hadoop-sls.xml|4 +-
 .../main/resources/assemblies/hadoop-src.xml|4 +-
 .../main/resources/assemblies/hadoop-tools.xml  |4 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |4 +-
 hadoop-client/pom.xml   |6 +-
 .../JWTRedirectAuthenticationHandler.java   |7 +-
 .../server/KerberosAuthenticationHandler.java   |4 +-
 .../TestJWTRedirectAuthentictionHandler.java|   42 +-
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   74 -
 hadoop-common-project/hadoop-common/CHANGES.txt |  270 +-
 hadoop-common-project/hadoop-common/pom.xml |5 +
 .../hadoop-common/src/main/bin/hadoop   |   15 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|  109 +-
 .../src/main/bin/hadoop-layout.sh.example   |   16 +-
 .../hadoop-common/src/main/bin/rcc  |4 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |6 +-
 .../main/conf/hadoop-user-functions.sh.example  |   10 +-
 .../org/apache/hadoop/conf/Configuration.java   |2 +-
 .../fs/CommonConfigurationKeysPublic.java   |   11 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   26 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   29 -
 .../org/apache/hadoop/fs/FilterFileSystem.java  |8 +-
 .../java/org/apache/hadoop/fs/GlobFilter.java   |2 +-
 .../java/org/apache/hadoop/fs/GlobPattern.java  |7 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |2 +-
 .../org/apache/hadoop/fs/HarFileSystem.java |6 +
 .../java/org/apache/hadoop/fs/HardLink.java |8 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java |6 +-
 .../apache/hadoop/fs/shell/CopyCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Delete.java |2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |   53 +-
 .../org/apache/hadoop/ha/HAServiceTarget.java   |   50 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |2 +-
 .../org/apache/hadoop/http/HttpServer2.java |2 +
 .../java/org/apache/hadoop/io/SequenceFile.java |   15 +-
 .../org/apache/hadoop/io/WritableUtils.java |8 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java |3 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   |3 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  |5 +
 .../erasurecode/codec/AbstractErasureCodec.java |2 +
 .../io/erasurecode/codec/ErasureCodec.java  |2 +
 .../io/erasurecode/codec/RSErasureCodec.java|2 +
 .../io/erasurecode/codec/XORErasureCodec.java   |2 +
 .../erasurecode/coder/AbstractErasureCoder.java |2 +
 .../coder/AbstractErasureCodingStep.java|2 +
 .../coder/AbstractErasureDecoder.java   |   25 +-
 .../coder/AbstractErasureEncoder.java   |2 +
 .../io/erasurecode/coder/ErasureCoder.java  |2 +
 .../io/erasurecode/coder/ErasureCodingStep.java |2 +
 .../erasurecode/coder/ErasureDecodingStep.java  |2 +
 .../erasurecode/coder/ErasureEncodingStep.java  |2 +
 .../io/erasurecode/coder/RSErasureDecoder.java  |2 +
 .../io/erasurecode/coder/RSErasureEncoder.java  |2 +
 .../io/erasurecode/coder/XORErasureDecoder.java |2 +
 .../io/erasurecode/coder/XORErasureEncoder.java |2 +
 .../io/erasurecode/grouper/BlockGrouper.java|2 +
 .../rawcoder/AbstractRawErasureCoder.java   |  114 +-
 .../rawcoder/AbstractRawErasureDecoder.java |   10 +-
 

[1/2] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/hdfs-7240 [created] 312de


http://git-wip-us.apache.org/repos/asf/hadoop/blob/312d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index da09b0e,29bcd79..c93a362
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2816,30 -2633,14 +2831,30 @@@ public class DataNode extends Reconfigu
}
  
/**
-* Convenience method, which unwraps RemoteException.
-* @throws IOException not a RemoteException.
-*/
 -   * Update replica with the new generation stamp and length.  
++  * Convenience method, which unwraps RemoteException.
++  * @throws IOException not a RemoteException.
++  */
 +  private static ReplicaRecoveryInfo callInitReplicaRecovery(
 +  InterDatanodeProtocol datanode,
 +  RecoveringBlock rBlock) throws IOException {
 +try {
 +  return datanode.initReplicaRecovery(rBlock);
- } catch(RemoteException re) {
++} catch (RemoteException re) {
 +  throw re.unwrapRemoteException();
 +}
 +  }
 +
 +  /**
-* Update replica with the new generation stamp and length.  
++   * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
--  final long recoveryId, final long newBlockId, final long newLength)
++   final long recoveryId, final long newBlockId, final long newLength)
throws IOException {
 -final String storageID = data.updateReplicaUnderRecovery(oldBlock,
 -recoveryId, newBlockId, newLength);
 +final FsDatasetSpi dataset =
 +(FsDatasetSpi) getDataset(oldBlock.getBlockPoolId());
 +final String storageID = dataset.updateReplicaUnderRecovery(
 +oldBlock, recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
@@@ -2851,234 -2652,6 +2866,244 @@@
  return storageID;
}
  
-   /** A convenient class used in block recovery */
-   static class BlockRecord { 
++  /**
++   * A convenient class used in block recovery
++   */
++  static class BlockRecord {
 +final DatanodeID id;
 +final InterDatanodeProtocol datanode;
 +final ReplicaRecoveryInfo rInfo;
- 
 +private String storageID;
 +
 +BlockRecord(DatanodeID id,
 +InterDatanodeProtocol datanode,
 +ReplicaRecoveryInfo rInfo) {
 +  this.id = id;
 +  this.datanode = datanode;
 +  this.rInfo = rInfo;
 +}
 +
 +void updateReplicaUnderRecovery(String bpid, long recoveryId,
 +long newBlockId, long newLength)
 +throws IOException {
 +  final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
 +  storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, newBlockId,
 +  newLength);
 +}
 +
 +@Override
 +public String toString() {
 +  return "block:" + rInfo + " node:" + id;
 +}
 +  }
 +
-   /** Recover a block */
++
++  /**
++   * Recover a block
++   */
 +  private void recoverBlock(RecoveringBlock rBlock) throws IOException {
 +ExtendedBlock block = rBlock.getBlock();
 +String blookPoolId = block.getBlockPoolId();
 +DatanodeID[] datanodeids = rBlock.getLocations();
 +List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
 +int errorCount = 0;
 +
 +//check generation stamps
- for(DatanodeID id : datanodeids) {
++for (DatanodeID id : datanodeids) {
 +  try {
 +BPOfferService bpos = blockPoolManager.get(blookPoolId);
 +DatanodeRegistration bpReg = bpos.bpRegistration;
- InterDatanodeProtocol datanode = bpReg.equals(id)?
- this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
- dnConf.socketTimeout, dnConf.connectToDnViaHostname);
++InterDatanodeProtocol datanode = bpReg.equals(id) ?
++this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
++dnConf.socketTimeout, dnConf.connectToDnViaHostname);
 +ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
 +if (info != null &&
 +info.getGenerationStamp() >= block.getGenerationStamp() &&
 +info.getNumBytes() > 0) {
 +  syncList.add(new BlockRecord(id, datanode, info));
 +}
 +  } catch (RecoveryInProgressException ripE) {
 +InterDatanodeProtocol.LOG.warn(
 +"Recovery 

hadoop git commit: MAPREDUCE-6519 Avoid unsafe split and append on fields that might be IPv6 literals

2015-11-04 Thread eclark
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-11890 009e67021 -> 9fe251d15


MAPREDUCE-6519 Avoid unsafe split and append on fields that might be IPv6 literals


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fe251d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fe251d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fe251d1

Branch: refs/heads/HADOOP-11890
Commit: 9fe251d154e49b1bf27464465d0523c2880afe6c
Parents: 009e670
Author: Elliott Clark 
Authored: Wed Nov 4 13:01:27 2015 -0800
Committer: Elliott Clark 
Committed: Wed Nov 4 13:01:27 2015 -0800

--
 .../apache/hadoop/mapred/FileInputFormat.java   | 23 ++--
 .../apache/hadoop/mapreduce/util/HostUtil.java  |  6 ++---
 .../mapreduce/v2/hs/HistoryClientService.java   | 11 ++
 .../hadoop/ipc/TestMRCJCSocketFactory.java  |  8 ---
 .../apache/hadoop/mapred/ReliabilityTest.java   | 13 ++-
 .../hadoop/mapred/TestClientRedirect.java   |  8 ---
 .../org/apache/hadoop/mapred/UtilsForTests.java | 16 --
 .../mapreduce/MiniHadoopClusterManager.java |  6 +++--
 8 files changed, 37 insertions(+), 54 deletions(-)
--
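
The patch replaces naive "host:port" parsing such as split(":")[0] with
NetUtils.getHostFromHostPort(). A minimal sketch of why splitting on the
first ':' breaks IPv6 literals; the hostOf() helper below is illustrative
only, not the Hadoop implementation:

    // Illustrative: why split(":")[0] is unsafe for IPv6 literals.
    public class HostPortExample {
      // Hypothetical helper handling bracketed IPv6 ("[2001:db8::1]:50010")
      // and plain "host:port" inputs.
      static String hostOf(String hostPort) {
        if (hostPort.startsWith("[")) {            // bracketed IPv6 literal
          return hostPort.substring(1, hostPort.indexOf(']'));
        }
        int colon = hostPort.lastIndexOf(':');     // last ':' precedes the port
        return colon >= 0 ? hostPort.substring(0, colon) : hostPort;
      }

      public static void main(String[] args) {
        String v6 = "[2001:db8::1]:50010";
        System.out.println(v6.split(":")[0]);      // "[2001" -- truncated host
        System.out.println(hostOf(v6));            // "2001:db8::1"
      }
    }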


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe251d1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 2c58ebe..65dbdd2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StopWatch;
@@ -685,19 +686,19 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
   
  private String[] identifyHosts(int replicationFactor, Map<Node, NodeInfo> racksMap) {
-
+
 String [] retVal = new String[replicationFactor];
-   
-List<NodeInfo> rackList = new LinkedList<NodeInfo>(); 
+
+List<NodeInfo> rackList = new LinkedList<NodeInfo>();
 
 rackList.addAll(racksMap.values());
-
+
 // Sort the racks based on their contribution to this split
 sortInDescendingOrder(rackList);
-
+
 boolean done = false;
 int index = 0;
-
+
 // Get the host list for all our aggregated items, sort
 // them and return the top entries
 for (NodeInfo ni: rackList) {
@@ -706,27 +707,27 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
 
  List<NodeInfo> hostList = new LinkedList<NodeInfo>();
   hostList.addAll(hostSet);
-
+
   // Sort the hosts in this rack based on their contribution
   sortInDescendingOrder(hostList);
 
   for (NodeInfo host: hostList) {
 // Strip out the port number from the host name
-retVal[index++] = host.node.getName().split(":")[0];
+retVal[index++] = NetUtils.getHostFromHostPort(host.node.getName());
 if (index == replicationFactor) {
   done = true;
   break;
 }
   }
-  
+
   if (done == true) {
 break;
   }
 }
 return retVal;
   }
-  
-  private String[] fakeRacks(BlockLocation[] blkLocations, int index) 
+
+  private String[] fakeRacks(BlockLocation[] blkLocations, int index)
   throws IOException {
 String[] allHosts = blkLocations[index].getHosts();
 String[] allTopos = new String[allHosts.length];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe251d1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
index 

[20/50] [abbrv] hadoop git commit: Fix CHANGES.txt

2015-11-04 Thread aengineer
Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2529464f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2529464f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2529464f

Branch: refs/heads/HDFS-7240
Commit: 2529464f0841732792343d515cd1be1dccb3c453
Parents: 6e4f8a4
Author: Kihwal Lee 
Authored: Mon Nov 2 09:09:33 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 2 09:09:33 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2529464f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8e6634a..0bbc60d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1529,9 +1529,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
-HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-values() since it creates a temporary array. (Staffan Friberg via yliu)
-
 HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
 BlockManager#excessReplicateMap. (yliu)
 
@@ -2220,6 +2217,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8099. Change "DFSInputStream has been closed already" message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



[37/50] [abbrv] hadoop git commit: HDFS-9351. checkNNStartup() need to be called when fsck calls FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)

2015-11-04 Thread aengineer
HDFS-9351. checkNNStartup() need to be called when fsck calls FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/194251c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/194251c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/194251c8

Branch: refs/heads/HDFS-7240
Commit: 194251c85250fcbe80a6ffee88b2cd4689334be3
Parents: dac0463
Author: Yongjun Zhang 
Authored: Tue Nov 3 17:16:17 2015 -0800
Committer: Yongjun Zhang 
Committed: Tue Nov 3 17:16:17 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/namenode/FSNamesystem.java  | 20 
 .../hdfs/server/namenode/NamenodeFsck.java  |  9 -
 3 files changed, 11 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 13c4094..2def995 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2228,6 +2228,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
 commitBlock. (Chang Li via zhz)
 
+HDFS-9351. checkNNStartup() need to be called when fsck calls
+FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 65b40c8..734e3ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6364,26 +6364,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 return list;
   }
 
-  /**
-   * Get the list of snapshottable directories.
-   * @return The list of all the current snapshottable directories
-   * @see #getSnapshottableDirListing()
-   * @throws IOException
-   */
-  List<String> getSnapshottableDirs() throws IOException {
-List<String> snapshottableDirs = new ArrayList<String>();
-final FSPermissionChecker pc = getFSDirectory().getPermissionChecker();
-final String user = pc.isSuperUser() ? null : pc.getUser();
-final SnapshottableDirectoryStatus[] snapDirs =
-snapshotManager.getSnapshottableDirListing(user);
-if (snapDirs != null) {
-  for (SnapshottableDirectoryStatus sds : snapDirs) {
-snapshottableDirs.add(sds.getFullPath().toString());
-  }
-}
-return snapshottableDirs;
-  }
-
   @Override  //NameNodeMXBean
   public int getDistinctVersionCount() {
 return blockManager.getDatanodeManager().getDatanodesSoftwareVersions()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/194251c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0b2a53b..9d4edb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -345,7 +346,13 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   namenode.getNamesystem().logFsckEvent(path, remoteAddress);
 
   if (snapshottableDirs != null) {
-   

[36/50] [abbrv] hadoop git commit: HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in commitBlock. Contributed by Chang Li.

2015-11-04 Thread aengineer
HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in commitBlock. Contributed by Chang Li.

Change-Id: If5ce1b2d212bb0726bce52ad12a3de401bcec02d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dac0463a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dac0463a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dac0463a

Branch: refs/heads/HDFS-7240
Commit: dac0463a4e20dfb3a802355919fc22b8e017a4e1
Parents: 7e28296
Author: Zhe Zhang 
Authored: Tue Nov 3 13:34:05 2015 -0800
Committer: Zhe Zhang 
Committed: Tue Nov 3 13:34:24 2015 -0800

--
 .../org/apache/hadoop/hdfs/DataStreamer.java|   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/blockmanagement/BlockInfo.java  |   2 +-
 .../server/blockmanagement/BlockManager.java|   4 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  27 +++--
 .../TestCommitBlockWithInvalidGenStamp.java | 100 +++
 .../namenode/TestQuotaWithStripedBlocks.java|   4 +-
 7 files changed, 128 insertions(+), 14 deletions(-)
--
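
The client-side half of the fix marks DataStreamer#block volatile so a
reference written by the streamer thread is visible to other threads. A
minimal sketch of the visibility hazard, using illustrative names rather
than Hadoop's:

    // Without volatile, a reader thread may never observe the writer's update.
    class SharedRef {
      // volatile guarantees safe publication: a write by one thread is seen
      // by subsequent reads from any other thread.
      private volatile String block = "blk_initial";

      void update(String b) { block = b; }  // writer (streamer thread)
      String read() { return block; }       // reader (e.g. close/recovery path)
    }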


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 03c2c52..7cb89c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -351,7 +351,7 @@ class DataStreamer extends Daemon {
   }
 
   private volatile boolean streamerClosed = false;
-  protected ExtendedBlock block; // its length is number of bytes acked
+  protected volatile ExtendedBlock block; // its length is number of bytes acked
   protected Token<BlockTokenIdentifier> accessToken;
   private DataOutputStream blockStream;
   private DataInputStream blockReplyStream;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fbf211f..13c4094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,6 +2225,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index e15b5ee..e9fa123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -411,7 +411,7 @@ public abstract class BlockInfo extends Block
 }
 Preconditions.checkState(!isComplete());
 uc.commit();
-this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+this.setNumBytes(block.getNumBytes());
 // Sort out invalid replicas.
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dac0463a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dbe0726..3c6c4d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -653,6 +653,10 @@ public class BlockManager 

[46/50] [abbrv] hadoop git commit: HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for filesystem entirely allocated for DFS use. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for filesystem entirely allocated for DFS use. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2a5441b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2a5441b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2a5441b

Branch: refs/heads/HDFS-7240
Commit: e2a5441b062fd0758138079d24a2740fc5e5e350
Parents: ec41460
Author: Lei Xu 
Authored: Wed Nov 4 10:27:35 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:27:35 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd560d1..5f3ff11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1620,6 +1620,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)
 
+HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
+filesystem entirely allocated for DFS use. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 5c865e1..2219aa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -136,7 +136,7 @@ public class TestNameNodeMXBean {
   assertTrue(liveNodes.size() == 2);
   for (Map<String, Object> liveNode : liveNodes.values()) {
 assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
-assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) >= 0);
 assertTrue(liveNode.containsKey("capacity"));
 assertTrue(((Long)liveNode.get("capacity")) > 0);
 assertTrue(liveNode.containsKey("numBlocks"));



[25/50] [abbrv] hadoop git commit: HADOOP-12508. delete fails with exception when lease is held on blob. Contributed by Gaurav Kanade.

2015-11-04 Thread aengineer
HADOOP-12508. delete fails with exception when lease is held on blob. Contributed by Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e7dcab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e7dcab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e7dcab1

Branch: refs/heads/HDFS-7240
Commit: 9e7dcab185abf2fdabb28f2799b9952b5664a4b0
Parents: 3ce0a65
Author: cnauroth 
Authored: Mon Nov 2 10:21:39 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 10:21:39 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../fs/azure/AzureNativeFileSystemStore.java| 32 +++-
 .../hadoop/fs/azure/SelfRenewingLease.java  |  5 +-
 .../fs/azure/TestNativeAzureFileSystemLive.java | 86 
 4 files changed, 124 insertions(+), 2 deletions(-)
--
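
The fix catches the storage error raised when a lease is held, acquires the
lease itself, retries the delete, and always frees the lease. A generic
sketch of that acquire/retry/release shape; Lease, acquireLease(),
deleteOnce() and deleteUnderLease() are stand-ins, not the WASB API (the
real patch also checks that the cause is a StorageException with error code
"LeaseIdMissing" before retrying):

    void deleteWithLeaseRetry(String key) throws IOException {
      try {
        deleteOnce(key);                    // fast path: no lease held
      } catch (IOException e) {
        Lease lease = null;
        try {
          lease = acquireLease(key);        // take the lease ourselves
          deleteUnderLease(key, lease);     // retry under the lease
        } finally {
          if (lease != null) {
            lease.free();                   // release even if the retry fails
          }
        }
      }
    }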


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c8d60b0..1a9c93c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1307,6 +1307,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API.
 (Dushyanth via cnauroth)
 
+HADOOP-12508. delete fails with exception when lease is held on blob.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 6412714..69ece4a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2370,7 +2370,37 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   @Override
   public void delete(String key) throws IOException {
-delete(key, null);
+try {
+  delete(key, null);
+} catch (IOException e) {
+  Throwable t = e.getCause();
+  if(t != null && t instanceof StorageException) {
+StorageException se = (StorageException) t;
+if(se.getErrorCode().equals(("LeaseIdMissing"))){
+  SelfRenewingLease lease = null;
+  try {
+lease = acquireLease(key);
+delete(key, lease);
+  } catch (AzureException e3) {
+LOG.warn("Got unexpected exception trying to acquire lease on "
++ key + "." + e3.getMessage());
+throw e3;
+  } finally {
+try {
+  if(lease != null){
+lease.free();
+  }
+} catch (Exception e4){
+  LOG.error("Unable to free lease on " + key, e4);
+}
+  }
+} else {
+  throw e;
+}
+  } else {
+throw e;
+  }
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e7dcab1/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 06f32ce..900d730 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -22,6 +22,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
@@ -61,7 +63,8 @@ public class SelfRenewingLease {
 
 
   // Time to wait to retry getting the lease in milliseconds
-  private static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;
+  @VisibleForTesting
+  static final int 

[44/50] [abbrv] hadoop git commit: Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. Contributed by Xiaobing Zhou."

2015-11-04 Thread aengineer
Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. Contributed by Xiaobing Zhou."

This reverts commit 84cbd72afda6344e220526fac5c560f00f84e374.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88beb46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88beb46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88beb46c

Branch: refs/heads/HDFS-7240
Commit: 88beb46cf6e6fd3e51f73a411a2750de7595e326
Parents: 3fb1ece
Author: Haohui Mai 
Authored: Wed Nov 4 10:21:13 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 10:21:13 2015 -0800

--
 .../org/apache/hadoop/security/token/Token.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 -
 .../web/webhdfs/DataNodeUGIProvider.java| 106 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java|   2 +-
 .../src/main/resources/hdfs-default.xml |   8 -
 .../web/webhdfs/TestDataNodeUGIProvider.java| 231 ---
 7 files changed, 19 insertions(+), 346 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index f189a96..2420155 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-import com.google.common.primitives.Bytes;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,11 +29,9 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.ServiceLoader;
-import java.util.UUID;
 
 /**
  * The client-side form of the token.
@@ -341,12 +337,7 @@ public class Token<T extends TokenIdentifier> implements Writable {
 identifierToString(buffer);
 return buffer.toString();
   }
-
-  public String buildCacheKey() {
-return UUID.nameUUIDFromBytes(
-Bytes.concat(kind.getBytes(), identifier, password)).toString();
-  }
-
+  
   private static ServiceLoader<TokenRenewer> renewers =
   ServiceLoader.load(TokenRenewer.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 500dc92..f2d8296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2152,9 +2152,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9160. [OIV-Doc] : Missing details of 'delimited' for processor options
 (nijel via vinayakumarb)
 
-HDFS-8855. Webhdfs client leaks active NameNode connections.
-(Xiaobing Zhou via jitendra) 
-
 HDFS-9235. hdfs-native-client build getting errors when built with cmake
 2.6. (Eric Payne via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 424f963..c14ce20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,10 +70,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_NETTY_HIGH_WATERMARK =
   "dfs.webhdfs.netty.high.watermark";
   public static final int  DFS_WEBHDFS_NETTY_HIGH_WATERMARK_DEFAULT = 65535;
-  public static final String  DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY =
-  "dfs.webhdfs.ugi.expire.after.access";
-  public static final int DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
-  10*60*1000; //10 minutes
 
   // HA related configuration
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = 

[48/50] [abbrv] hadoop git commit: HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56671292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56671292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56671292

Branch: refs/heads/HDFS-7240
Commit: 5667129276c3123ecb0a96b78d5897431c47a9d5
Parents: 0fb1867
Author: Lei Xu 
Authored: Wed Nov 4 10:46:19 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:49:28 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPipelines.java   | 6 ++
 .../hadoop/hdfs/server/datanode/FsDatasetTestUtils.java   | 7 +++
 .../datanode/fsdataset/impl/FsDatasetImplTestUtils.java   | 5 +
 .../datanode/fsdataset/impl/TestInterDatanodeProtocol.java| 5 +++--
 5 files changed, 20 insertions(+), 6 deletions(-)
--
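
The new method gives tests a FsDataset-agnostic way to look up a replica,
replacing direct use of DataNodeTestUtils.fetchReplicaInfo(). A condensed
usage sketch based on the TestPipelines hunk below; the MiniDFSCluster
setup and the located block lb0 are assumed, not shown:

    // Assumes a running MiniDFSCluster `cluster`, a DataNode `dn`, and a
    // LocatedBlock `lb0` for the file under test.
    Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(lb0.getBlock());
    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);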


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f3ff11..ef1152e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1623,6 +1623,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
 filesystem entirely allocated for DFS use. (Tony Wu via lei)
 
+HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic
+replica. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index e4fea60..c9831b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public class TestPipelines {
 List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
   filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-String bpid = cluster.getNamesystem().getBlockPoolId();
 for (DataNode dn : cluster.getDataNodes()) {
-  Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
-  .getBlock().getBlockId());
+  Replica r =
+  cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
   assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
   assertEquals("Should be RBW replica on " + dn

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 40c4438..02af467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -206,4 +206,11 @@ public interface FsDatasetTestUtils {
* @throws IOException on I/O error.
*/
   void injectCorruptReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the replica of a block. Returns null if it does not exist.
+   * @param block the block whose replica will be returned.
+   * @return Replica for the block.
+   */
+  Replica fetchReplica(ExtendedBlock block);
 }


[47/50] [abbrv] hadoop git commit: HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails intermittently due to assumption that a lease error will be thrown. Contributed by Gaur

2015-11-04 Thread aengineer
HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails intermittently due to assumption that a lease error will be thrown. Contributed by Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb1867f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb1867f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb1867f

Branch: refs/heads/HDFS-7240
Commit: 0fb1867fd62b5df664ad66386d6067db8fbf2317
Parents: e2a5441
Author: cnauroth 
Authored: Wed Nov 4 10:19:04 2015 -0800
Committer: cnauroth 
Committed: Wed Nov 4 10:28:44 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../TestAzureFileSystemInstrumentation.java | 25 +---
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index efb73f4..dd70947 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1320,6 +1320,10 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
 
+HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics
+fails intermittently due to assumption that a lease error will be thrown.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
--
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
index 896ec1b..0c9126c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.hamcrest.BaseMatcher;
@@ -405,22 +406,30 @@ public class TestAzureFileSystemInstrumentation {
 
   @Test
   public void testClientErrorMetrics() throws Exception {
-String directoryName = "metricsTestDirectory_ClientError";
-Path directoryPath = new Path("/" + directoryName);
-assertTrue(fs.mkdirs(directoryPath));
-String leaseID = testAccount.acquireShortLease(directoryName);
+String fileName = "metricsTestFile_ClientError";
+Path filePath = new Path("/"+fileName);
+final int FILE_SIZE = 100;
+OutputStream outputStream = null;
+String leaseID = null;
 try {
+  // Create a file
+  outputStream = fs.create(filePath);
+  leaseID = testAccount.acquireShortLease(fileName);
   try {
-fs.delete(directoryPath, true);
-assertTrue("Should've thrown.", false);
+outputStream.write(new byte[FILE_SIZE]);
+outputStream.close();
+assertTrue("Should've thrown", false);
   } catch (AzureException ex) {
 assertTrue("Unexpected exception: " + ex,
-ex.getMessage().contains("lease"));
+  ex.getMessage().contains("lease"));
   }
   assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
   assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
 } finally {
-  testAccount.releaseLease(leaseID, directoryName);
+  if(leaseID != null){
+testAccount.releaseLease(leaseID, fileName);
+  }
+  IOUtils.closeStream(outputStream);
 }
   }
 



[33/50] [abbrv] hadoop git commit: YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

2015-11-04 Thread aengineer
YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0783184f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0783184f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0783184f

Branch: refs/heads/HDFS-7240
Commit: 0783184f4b3f669f7211e42b395b62d63144100d
Parents: 957f031
Author: Wangda Tan 
Authored: Tue Nov 3 11:18:34 2015 -0800
Committer: Wangda Tan 
Committed: Tue Nov 3 11:18:34 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../applications/distributedshell/TestDistributedShell.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--
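
Because the AHS in MiniYarnCluster now binds an ephemeral port, the test
must discover the port actually bound instead of assuming 8188. A hedged,
self-contained illustration of the bind-to-port-0 pattern using a plain
ServerSocket (the MiniYARNCluster wiring is omitted):

    import java.io.IOException;
    import java.net.ServerSocket;

    public class EphemeralPortExample {
      public static void main(String[] args) throws IOException {
        try (ServerSocket server = new ServerSocket(0)) { // 0 = any free port
          int actualPort = server.getLocalPort();         // port really bound
          // Clients must be pointed at the discovered address, as the patch
          // does via YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS.
          System.out.println("configure clients with localhost:" + actualPort);
        }
      }
    }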


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0783184f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1040f45..d6ad672 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1039,6 +1039,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4127. RM fail with noAuth error if switched from failover to non-failover.
 (Varun Saxena via jianhe)
 
+YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer
+binds to default port 8188. (Meng Ding via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0783184f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index dcb6e72..3197875 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -92,9 +92,14 @@ public class TestDistributedShell {
   yarnCluster.init(conf);
   
   yarnCluster.start();
-  
+
+  conf.set(
+  YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":"
+  + yarnCluster.getApplicationHistoryServer().getPort());
+
   waitForNMsToRegister();
-  
+
   URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
   if (url == null) {
 throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");



[15/50] [abbrv] hadoop git commit: Updated the 2.6.2 final release date.

2015-11-04 Thread aengineer
Updated the 2.6.2 final release date.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4a6b5b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4a6b5b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4a6b5b4

Branch: refs/heads/HDFS-7240
Commit: a4a6b5b4b470b1e7a3c5e2d38433429c455bc709
Parents: b24fe06
Author: Sangjin Lee 
Authored: Fri Oct 30 18:47:16 2015 -0700
Committer: Sangjin Lee 
Committed: Fri Oct 30 18:47:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a1409f8..2560fe5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2170,7 +2170,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17df171..5a61eed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3310,7 +3310,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e999659..4d6dcb8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -913,7 +913,7 @@ Release 2.6.3 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a6b5b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cc8f5f3..1040f45 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1887,7 +1887,7 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.2 - 2015-10-21
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 



[39/50] [abbrv] hadoop git commit: HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. Contributed by Allen Wittenauer.

2015-11-04 Thread aengineer
HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73b9c7b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73b9c7b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73b9c7b8

Branch: refs/heads/HDFS-7240
Commit: 73b9c7b82b0f607a5328ad7dc4170da3ac0c1af3
Parents: 3e1745d
Author: Varun Vasudev 
Authored: Wed Nov 4 15:56:17 2015 +0530
Committer: Varun Vasudev 
Committed: Wed Nov 4 15:56:17 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop-common/src/main/bin/hadoop   | 15 ++--
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |  6 +-
 .../src/main/bin/hadoop-daemons.sh  |  6 +-
 .../src/main/bin/hadoop-functions.sh| 66 +
 .../src/main/bin/hadoop-layout.sh.example   | 16 ++---
 .../hadoop-common/src/main/bin/rcc  |  4 +-
 .../hadoop-common/src/main/bin/slaves.sh|  6 +-
 .../hadoop-common/src/main/bin/start-all.sh |  6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |  6 +-
 .../main/conf/hadoop-user-functions.sh.example  | 10 +--
 .../scripts/hadoop_add_common_to_classpath.bats |  4 +-
 .../hadoop_add_to_classpath_toolspath.bats  | 74 
 .../src/test/scripts/hadoop_basic_init.bats |  2 +-
 .../hadoop-kms/src/main/sbin/kms.sh |  6 +-
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  6 +-
 .../src/main/bin/distribute-exclude.sh  |  4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |  9 ++-
 .../src/main/bin/refresh-namenodes.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  6 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |  6 +-
 .../src/main/bin/start-secure-dns.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh |  6 +-
 hadoop-mapreduce-project/bin/mapred | 15 ++--
 .../bin/mr-jobhistory-daemon.sh |  6 +-
 .../hadoop-sls/src/main/bin/rumen2sls.sh|  9 ++-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  | 12 ++--
 .../hadoop-yarn/bin/start-yarn.sh   |  6 +-
 .../hadoop-yarn/bin/stop-yarn.sh|  6 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  6 +-
 .../hadoop-yarn/bin/yarn-daemon.sh  |  6 +-
 .../hadoop-yarn/bin/yarn-daemons.sh |  6 +-
 34 files changed, 235 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 453efe6..dbf9700 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -32,6 +32,9 @@ Trunk (Unreleased)
 HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
 (Li Lu via wheat9)
 
+HADOOP-10787 Rename/remove non-HADOOP_*, etc from the shell scripts.
+(aw via vvasudev)
+
   NEW FEATURES
 
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index ef67cc5..513b0f1 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -47,13 +47,13 @@ function hadoop_usage
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then
-  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
   bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
 if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
@@ -113,8 +113,7 @@ case ${COMMAND} in
   ;;
   archive)
 CLASS=org.apache.hadoop.tools.HadoopArchives
-hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
-hadoop_add_classpath "${TOOL_PATH}"
+hadoop_add_to_classpath_toolspath
   ;;
   checknative)
 

[19/50] [abbrv] hadoop git commit: HADOOP-12047. Indicate preference not to affect input buffers during coding in erasure coder. (Contributed by Kai Zheng)

2015-11-04 Thread aengineer
HADOOP-12047. Indicate preference not to affect input buffers during coding in erasure coder. (Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e4f8a46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e4f8a46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e4f8a46

Branch: refs/heads/HDFS-7240
Commit: 6e4f8a46c5ce983493cb0ac2234fceafdb3a5613
Parents: 3cde693
Author: Walter Su 
Authored: Mon Nov 2 10:40:14 2015 +0800
Committer: Walter Su 
Committed: Mon Nov 2 10:40:14 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../rawcoder/AbstractRawErasureCoder.java   | 60 ++--
 .../io/erasurecode/rawcoder/CoderOption.java| 43 ++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 48 +---
 .../erasurecode/rawcoder/RawErasureCoder.java   | 23 +---
 .../erasurecode/rawcoder/RawErasureDecoder.java | 32 ++-
 .../erasurecode/rawcoder/RawErasureEncoder.java | 35 ++--
 .../hadoop/io/erasurecode/TestCoderBase.java| 34 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 37 ++--
 .../erasurecode/rawcoder/TestXORRawCoder.java   |  1 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  1 -
 11 files changed, 253 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4f8a46/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2560fe5..5c8daad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -608,6 +608,9 @@ Trunk (Unreleased)
   HADOOP-12327. Initialize output buffers with ZERO bytes in erasure coder.
   (Kai Zheng via waltersu4549)
 
+  HADOOP-12047. Indicate preference not to affect input buffers during
+  coding in erasure coder. (Kai Zheng via waltersu4549)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4f8a46/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index d8a57eb..b195216 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configured;
 
 import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -36,11 +38,39 @@ public abstract class AbstractRawErasureCoder
   private final int numDataUnits;
   private final int numParityUnits;
   private final int numAllUnits;
+  private final Map<CoderOption, Object> coderOptions;
 
   public AbstractRawErasureCoder(int numDataUnits, int numParityUnits) {
 this.numDataUnits = numDataUnits;
 this.numParityUnits = numParityUnits;
 this.numAllUnits = numDataUnits + numParityUnits;
+this.coderOptions = new HashMap<>(3);
+
+coderOptions.put(CoderOption.PREFER_DIRECT_BUFFER, preferDirectBuffer());
+coderOptions.put(CoderOption.ALLOW_CHANGE_INPUTS, false);
+coderOptions.put(CoderOption.ALLOW_VERBOSE_DUMP, false);
+  }
+
+  @Override
+  public Object getCoderOption(CoderOption option) {
+if (option == null) {
+  throw new HadoopIllegalArgumentException("Invalid option");
+}
+return coderOptions.get(option);
+  }
+
+  @Override
+  public void setCoderOption(CoderOption option, Object value) {
+if (option == null || value == null) {
+  throw new HadoopIllegalArgumentException(
+  "Invalid option or option value");
+}
+if (option.isReadOnly()) {
+  throw new HadoopIllegalArgumentException(
+  "The option is read-only: " + option.name());
+}
+
+coderOptions.put(option, value);
   }
 
   /**
@@ -75,13 +105,35 @@ public abstract class AbstractRawErasureCoder
   }
 
   @Override
-  public boolean preferDirectBuffer() {
+  public void release() {
+// Nothing to do by default
+  }
+
+  /**
+   * Tell if direct buffer is 
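(The diff is truncated here by the archive.) For readers tracking HADOOP-12047, a minimal sketch of how a caller might drive the coder-option API added above; CoderOption and the get/set methods come straight from the diff, while the RSRawEncoder constructor arity is an assumption:

import org.apache.hadoop.io.erasurecode.rawcoder.CoderOption;
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;

public class CoderOptionExample {
  public static void main(String[] args) {
    // A 6+3 Reed-Solomon raw encoder (constructor signature assumed).
    RSRawEncoder encoder = new RSRawEncoder(6, 3);

    // ALLOW_CHANGE_INPUTS is initialized to false in the constructor
    // above, i.e. the coder must not clobber caller-supplied buffers.
    Object allowChange =
        encoder.getCoderOption(CoderOption.ALLOW_CHANGE_INPUTS);
    System.out.println("allow change inputs: " + allowChange); // false

    // A caller that does not reuse its input buffers can opt in and
    // let the coder skip defensive copies.
    encoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, true);
  }
}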

[17/50] [abbrv] hadoop git commit: Revert "HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

2015-11-04 Thread aengineer
Revert "HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee."

This reverts commit 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd64167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd64167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd64167

Branch: refs/heads/HDFS-7240
Commit: 7fd6416759cbb202ed21b47d28c1587e04a5cdc6
Parents: 2ea4413
Author: yliu 
Authored: Sat Oct 31 16:20:48 2015 +0800
Committer: yliu 
Committed: Sat Oct 31 16:20:48 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ---
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 211e7fc..30cdfee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd64167/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f610574..d9b8d60 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,7 +659,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
-int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -709,17 +708,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
-  // Refresh the node count. If the live node count became smaller,
-  // but it is not reflected in this loop, it may loop forever in case
-  // the replicas/rack cannot be satisfied.
-  if (--refreshCounter == 0) {
-refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
-excludedNodes);
-// It has already gone through enough number of nodes.
-if (refreshCounter <= excludedNodes.size()) {
-  break;
-}
-  }
 }
   
 if (numOfReplicas>0) {



[13/50] [abbrv] hadoop git commit: MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic. Contributed by Kuhu Shukla.

2015-11-04 Thread aengineer
MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when 
strategy is dynamic. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2868ca03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2868ca03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2868ca03

Branch: refs/heads/HDFS-7240
Commit: 2868ca0328d908056745223fb38d9a90fd2811ba
Parents: 18727c6
Author: Kihwal Lee 
Authored: Fri Oct 30 14:56:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 14:56:41 2015 -0500

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../tools/mapred/lib/DynamicInputChunk.java | 137 +++
 .../tools/mapred/lib/DynamicInputFormat.java|  31 +++--
 .../tools/mapred/lib/DynamicRecordReader.java   |  13 +-
 .../org/apache/hadoop/tools/StubContext.java|   4 +
 .../mapred/lib/TestDynamicInputFormat.java  |  33 -
 6 files changed, 83 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 32be987..e999659 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
+MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@ package org.apache.hadoop.tools.mapred.lib;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@ import java.io.IOException;
  */
 class DynamicInputChunk {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
  private SequenceFileRecordReader<Text, CopyListingFileStatus> reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-  throws IOException {
-configuration = config;
-Path listingFilePath = new Path(getListingFilePath(configuration));
-chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-fs = chunkRootPath.getFileSystem(configuration);
-chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-final String listingFileString = configuration.get(
-DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-assert !listingFileString.equals("") : "Listing file not found.";
-return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(String chunkId, DynamicInputChunkContext chunkContext)
   throws IOException {
-if 

[42/50] [abbrv] hadoop git commit: HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by Surendra Singh Lilhore.

2015-11-04 Thread aengineer
HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eed886a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eed886a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eed886a

Branch: refs/heads/HDFS-7240
Commit: 0eed886a165f5a0850ddbfb1d5f98c7b5e379fb3
Parents: b9d25c3
Author: Haohui Mai 
Authored: Wed Nov 4 09:16:43 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 09:16:43 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 ++--
 .../hadoop-hdfs/src/main/webapps/static/hadoop.css   | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 530ed2d..bdcc1fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+HDFS-9357. NN UI renders icons of decommissioned DN incorrectly.
+(Surendra Singh Lilhore via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index e46ce7f..08199fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -285,8 +285,8 @@
   
 In service
 Down
-Decommisioned
-Decommissioned  dead
+Decommissioned
+Decommissioned  dead
   
 
 In operation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 58c3cb5..2ed5f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -235,7 +235,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e013";
 }
 
-.dfshealth-node-decommisioned:before {
+.dfshealth-node-decommissioned:before {
 color: #eea236;
 content: "\e136";
 }
@@ -245,7 +245,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e101";
 }
 
-.dfshealth-node-down-decommisioned:before {
+.dfshealth-node-down-decommissioned:before {
 color: #2e6da6;
 content: "\e017";
 }



[18/50] [abbrv] hadoop git commit: HDFS-9343. Empty caller context considered invalid. (Contributed by Mingliang Liu)

2015-11-04 Thread aengineer
HDFS-9343. Empty caller context considered invalid. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cde6931
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cde6931
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cde6931

Branch: refs/heads/HDFS-7240
Commit: 3cde6931cb5055a9d92503f4ecefa35571e7b07f
Parents: 7fd6416
Author: Arpit Agarwal 
Authored: Sun Nov 1 15:35:02 2015 -0800
Committer: Arpit Agarwal 
Committed: Sun Nov 1 15:35:02 2015 -0800

--
 .../java/org/apache/hadoop/ipc/CallerContext.java| 13 -
 .../main/java/org/apache/hadoop/util/ProtoUtil.java  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop/hdfs/server/namenode/FSNamesystem.java|  8 
 .../hadoop/hdfs/server/namenode/TestAuditLogger.java | 15 ---
 5 files changed, 28 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index 8be7e35..b197575 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -44,6 +44,7 @@ public class CallerContext {
* {@link 
org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT}
*/
   private final String context;
+
   /** The caller's signature for validation.
*
* The signature is optional. The null or empty signature will be abandoned.
@@ -58,10 +59,6 @@ public class CallerContext {
 this.signature = builder.signature;
   }
 
-  public boolean isValid() {
-return context != null;
-  }
-
   public String getContext() {
 return context;
   }
@@ -71,6 +68,11 @@ public class CallerContext {
 null : Arrays.copyOf(signature, signature.length);
   }
 
+  @InterfaceAudience.Private
+  public boolean isContextValid() {
+return context != null && !context.isEmpty();
+  }
+
   @Override
   public int hashCode() {
 return new HashCodeBuilder().append(context).toHashCode();
@@ -92,9 +94,10 @@ public class CallerContext {
   .isEquals();
 }
   }
+
   @Override
   public String toString() {
-if (!isValid()) {
+if (!isContextValid()) {
   return "";
 }
 String str = context;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
index 4bfcd66..1a5acba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
@@ -180,7 +180,7 @@ public abstract class ProtoUtil {
 
 // Add caller context if it is not null
 CallerContext callerContext = CallerContext.getCurrent();
-if (callerContext != null && callerContext.isValid()) {
+if (callerContext != null && callerContext.isContextValid()) {
   RPCCallerContextProto.Builder contextBuilder = RPCCallerContextProto
   .newBuilder().setContext(callerContext.getContext());
   if (callerContext.getSignature() != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 30cdfee..8e6634a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
+Arpit Agarwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cde6931/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff 
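(Truncated by the archive.) To make the rename concrete, a minimal sketch of client-side usage; Builder and isContextValid() appear in the diff above, while setCurrent() and the exact signatures are assumptions from the surrounding source of this era:

import org.apache.hadoop.ipc.CallerContext;

public class CallerContextExample {
  public static void main(String[] args) {
    // After HDFS-9343 an empty context is invalid, not just a null one.
    CallerContext empty = new CallerContext.Builder("").build();
    System.out.println(empty.isContextValid()); // false

    CallerContext ctx =
        new CallerContext.Builder("mr_job_201511040001").build();
    System.out.println(ctx.isContextValid()); // true

    // Thread-local propagation; ProtoUtil attaches the current context
    // to the RPC request header only when isContextValid() holds.
    CallerContext.setCurrent(ctx);
  }
}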

[04/50] [abbrv] hadoop git commit: Add an entry of YARN-4312 to CHANGES.txt

2015-11-04 Thread aengineer
Add an entry of YARN-4312 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d21214ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d21214ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d21214ce

Branch: refs/heads/HDFS-7240
Commit: d21214ce33cb176926aa3ae5a9f4efe00f66480b
Parents: f072eb5
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 17:56:59 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 17:56:59 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d21214ce/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 78c18d5..2151136 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1147,6 +1147,9 @@ Release 2.7.2 - UNRELEASED
 YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
 via junping_du)
 
+YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
+as some of the test cases time out. (Varun Saxena via ozawa)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[03/50] [abbrv] hadoop git commit: HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests.

2015-11-04 Thread aengineer
HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f072eb5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f072eb5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f072eb5a

Branch: refs/heads/HDFS-7240
Commit: f072eb5a206d34d8af39d65c3ef1f39faaebfdd0
Parents: d2e01f4
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Oct 30 15:38:38 2015 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Oct 30 15:41:03 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../TestDFSStripedOutputStreamWithFailure.java  | 33 +---
 ...estDFSStripedOutputStreamWithFailure020.java | 22 +
 ...estDFSStripedOutputStreamWithFailure030.java | 22 +
 ...estDFSStripedOutputStreamWithFailure040.java | 22 +
 ...estDFSStripedOutputStreamWithFailure050.java | 22 +
 ...estDFSStripedOutputStreamWithFailure060.java | 22 +
 ...estDFSStripedOutputStreamWithFailure070.java | 22 +
 ...estDFSStripedOutputStreamWithFailure080.java | 22 +
 ...estDFSStripedOutputStreamWithFailure090.java | 22 +
 ...estDFSStripedOutputStreamWithFailure100.java | 22 +
 ...estDFSStripedOutputStreamWithFailure110.java | 22 +
 ...estDFSStripedOutputStreamWithFailure120.java | 22 +
 ...estDFSStripedOutputStreamWithFailure130.java | 22 +
 ...estDFSStripedOutputStreamWithFailure140.java | 22 +
 ...estDFSStripedOutputStreamWithFailure150.java | 22 +
 ...estDFSStripedOutputStreamWithFailure160.java | 22 +
 ...estDFSStripedOutputStreamWithFailure170.java | 22 +
 ...estDFSStripedOutputStreamWithFailure180.java | 22 +
 ...estDFSStripedOutputStreamWithFailure190.java | 22 +
 ...estDFSStripedOutputStreamWithFailure200.java | 22 +
 ...estDFSStripedOutputStreamWithFailure210.java | 23 ++
 22 files changed, 458 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f072eb5a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd1d6de..38b9e55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -187,6 +187,8 @@ Trunk (Unreleased)
 HDFS-9261. Erasure Coding: Skip encoding the data cells if all the parity 
data 
 streamers are failed for the current block group. (Rakesh R via umamahesh)
 
+HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. 
(szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f072eb5a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 7bd976f..b60d0f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -131,15 +131,16 @@ public class TestDFSStripedOutputStreamWithFailure {
 
  private static final List<Integer> LENGTHS = newLengths();
 
-  static int getLength(int i) {
-return LENGTHS.get(i);
+  static Integer getLength(int i) {
+return i >= 0 && i < LENGTHS.size()? LENGTHS.get(i): null;
   }
 
+  private static final Random RANDOM = new Random();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
   private final Path dir = new Path("/"
   + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
-  private final Random random = new Random();
 
   private void setup(Configuration conf) throws IOException {
 final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
@@ -167,19 +168,6 @@ public class TestDFSStripedOutputStreamWithFailure {
   }
 
   @Test(timeout=24)
-  public void testDatanodeFailure56() throws Exception {
-runTest(getLength(56));
-  }
-
-  @Test(timeout=24)
-  public void testDatanodeFailureRandomLength() throws Exception {
-int lenIndex = random.nextInt(LENGTHS.size());
-LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
-+ lenIndex);
-runTest(getLength(lenIndex));

[29/50] [abbrv] hadoop git commit: HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via lei)

2015-11-04 Thread aengineer
HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via 
lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e05dbf2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e05dbf2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e05dbf2

Branch: refs/heads/HDFS-7240
Commit: 8e05dbf2bddce95d5f5a5bae5df61acabf0ba7c5
Parents: 5ba2b98
Author: Lei Xu 
Authored: Mon Nov 2 18:05:43 2015 -0800
Committer: Lei Xu 
Committed: Mon Nov 2 18:05:43 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  22 
 .../apache/hadoop/hdfs/TestCrcCorruption.java   | 120 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  35 --
 4 files changed, 76 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c60549..19ea5c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1661,6 +1661,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
 
+HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu 
via lei)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7ebf333..c81f154 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2117,6 +2117,28 @@ public class MiniDFSCluster {
 getMaterializedReplica(i, blk).corruptMeta();
   }
 
+  /**
+   * Corrupt the metadata of a block by deleting it.
+   * @param i index of the datanode
+   * @param blk name of the block.
+   */
+  public void deleteMeta(int i, ExtendedBlock blk)
+  throws IOException {
+getMaterializedReplica(i, blk).deleteMeta();
+  }
+
+  /**
+   * Corrupt the metadata of a block by truncating it to a new size.
+   * @param i index of the datanode.
+   * @param blk name of the block.
+   * @param newSize the new size of the metadata file.
+   * @throws IOException if any I/O errors.
+   */
+  public void truncateMeta(int i, ExtendedBlock blk, int newSize)
+  throws IOException {
+getMaterializedReplica(i, blk).truncateMeta(newSize);
+  }
+
   public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
   long newGenStamp) throws IOException {
 File blockFile = getBlockFile(dnIndex, blk);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e05dbf2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 3850ff2..398bcc2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -22,11 +22,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -35,12 +32,15 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;

[07/50] [abbrv] hadoop git commit: fix CHANGES.txt

2015-11-04 Thread aengineer
fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0204a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0204a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0204a5

Branch: refs/heads/HDFS-7240
Commit: 3c0204a5866520e74917b26b6ac2061650a5bb6d
Parents: 43539b5
Author: Kihwal Lee 
Authored: Fri Oct 30 09:40:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:40:41 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0204a5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f6a22a1..c5846b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2276,6 +2273,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[14/50] [abbrv] hadoop git commit: Addendum to MAPREDUCE-6451

2015-11-04 Thread aengineer
Addendum to MAPREDUCE-6451


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24fe064
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24fe064
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24fe064

Branch: refs/heads/HDFS-7240
Commit: b24fe0648348d325d14931f80cee8a170fb3358a
Parents: 2868ca0
Author: Kihwal Lee 
Authored: Fri Oct 30 16:05:23 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 16:05:23 2015 -0500

--
 .../mapred/lib/DynamicInputChunkContext.java| 113 +++
 1 file changed, 113 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24fe064/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 000..043ff1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class to initialize the DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+  throws IOException {
+this.configuration = config;
+Path listingFilePath = new Path(getListingFilePath(configuration));
+chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+fs = chunkRootPath.getFileSystem(configuration);
+chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+return configuration;
+  }
+
+  public Path getChunkRootPath() {
+return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+final String listingFileString = configuration.get(
+DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+assert !listingFileString.equals("") : "Listing file not found.";
+return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+  throws IOException, InterruptedException {
+
+String taskId
+= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+if (fs.exists(acquiredFilePath)) {
+  LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+  return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+}
+
+for (FileStatus chunkFile : getListOfChunkFiles()) {
+  if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+LOG.info(taskId + " acquired " + chunkFile.getPath());
+return new DynamicInputChunk(acquiredFilePath, taskAttemptContext,
+this);
+  }
+}
+return null;
+  }
+
+  public 
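(Truncated.) A hedged sketch of how the per-job context replaces the old static invariants; DynamicInputChunkContext and acquire() come from the diffs above, while the surrounding input-format plumbing is an assumption:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

class DynamicInputFormatSketch {
  private DynamicInputChunkContext chunkContext;

  // One context per input-format instance, so concurrent DistCp jobs
  // no longer share chunkRootPath/chunkFilePrefix via static state.
  private synchronized DynamicInputChunkContext getContext(
      Configuration conf) throws IOException {
    if (chunkContext == null) {
      chunkContext = new DynamicInputChunkContext(conf);
    }
    return chunkContext;
  }

  DynamicInputChunk nextChunk(TaskAttemptContext taskContext)
      throws IOException, InterruptedException {
    // acquire() prefers a pre-assigned chunk for this task, then races
    // to rename an unclaimed chunk file, as in the body above.
    return getContext(taskContext.getConfiguration())
        .acquire(taskContext);
  }
}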

[09/50] [abbrv] hadoop git commit: Update CHANGES.txt to reflect commit of MR-6273 to branch-2.6

2015-11-04 Thread aengineer
Update CHANGES.txt to reflect commit of MR-6273 to branch-2.6


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ae9efaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ae9efaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ae9efaf

Branch: refs/heads/HDFS-7240
Commit: 6ae9efaf5949bd5a5f4fd99b5777ce8f6d7f3a2c
Parents: eadf7b3
Author: Jason Lowe 
Authored: Fri Oct 30 15:18:53 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 15:18:53 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae9efaf/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8594e1e..22f9e89 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -900,6 +900,10 @@ Release 2.6.3 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists 
to 
+avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
+(zhihai xu via devaraj)
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES



[02/50] [abbrv] hadoop git commit: Move YARN-3580 in CHANGES.txt from 2.8 to 2.7.2.

2015-11-04 Thread aengineer
Move YARN-3580 in CHANGES.txt from 2.8 to 2.7.2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2e01f4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2e01f4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2e01f4e

Branch: refs/heads/HDFS-7240
Commit: d2e01f4ed87c3c41156ec9a68855f923f8c0adf9
Parents: 7412ff4
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 15:49:06 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 15:49:06 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2e01f4e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d0fa27d..78c18d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -689,9 +689,6 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3582. NPE in WebAppProxyServlet. (jian he via xgong)
 
-YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
-via junping_du)
-
 YARN-3577. Misspelling of threshold in log4j.properties for tests.
 (Brahma Reddy Battula via aajisaka)
 
@@ -1147,6 +1144,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4313. Race condition in MiniMRYarnCluster when getting history server
 address. (Jian He via xgong)
 
+YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert 
Kanter
+via junping_du)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[30/50] [abbrv] hadoop git commit: HDFS-9313. Possible NullPointerException in BlockManager if no excess replica can be chosen. (mingma)

2015-11-04 Thread aengineer
HDFS-9313. Possible NullPointerException in BlockManager if no excess replica 
can be chosen. (mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d565480d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d565480d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d565480d

Branch: refs/heads/HDFS-7240
Commit: d565480da2f646b40c3180e1ccb2935c9863dfef
Parents: 8e05dbf
Author: Ming Ma 
Authored: Mon Nov 2 19:36:37 2015 -0800
Committer: Ming Ma 
Committed: Mon Nov 2 19:36:37 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/BlockPlacementPolicy.java   |  8 +++--
 .../BlockPlacementPolicyDefault.java|  6 
 .../blockmanagement/TestReplicationPolicy.java  | 31 
 4 files changed, 45 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19ea5c1..879c015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2216,6 +2216,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
 size is smaller than IO buffer size. (zhz)
 
+HDFS-9313. Possible NullPointerException in BlockManager if no excess
+replica can be chosen. (mingma)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index be169c3..526a5d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -23,8 +23,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -33,13 +31,17 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /** 
  * This interface is used for choosing the desired number of targets
  * for placing block replicas.
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
+  static final Logger LOG = LoggerFactory.getLogger(
+  BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d565480d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..2723ed9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -981,6 +981,12 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 excessTypes);
   }
   firstOne = false;
+  if (cur == null) {
+LOG.warn("No excess replica can be found. excessTypes: {}." +
+" moreThanOne: {}. exactlyOne: {}.", excessTypes, moreThanOne,
+exactlyOne);
+break;
+  }
 
   // adjust rackmap, moreThanOne, and exactlyOne
   adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);


[28/50] [abbrv] hadoop git commit: HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another one. (Walter Su via yliu)

2015-11-04 Thread aengineer
HDFS-9275. Wait previous ErasureCodingWork to finish before schedule another 
one. (Walter Su via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ba2b98d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ba2b98d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ba2b98d

Branch: refs/heads/HDFS-7240
Commit: 5ba2b98d0fe29603e136fc43a14f853e820cf7e2
Parents: 7632409
Author: yliu 
Authored: Tue Nov 3 09:14:32 2015 +0800
Committer: yliu 
Committed: Tue Nov 3 09:14:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockManager.java|   5 +
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |   8 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +
 .../TestReadStripedFileWithMissingBlocks.java   |   6 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java | 143 ++-
 .../hdfs/TestSafeModeWithStripedFile.java   |   5 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |   8 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |   6 +-
 .../TestBlockTokenWithDFSStriped.java   |   4 +-
 .../namenode/TestRecoverStripedBlocks.java  |  70 +
 11 files changed, 145 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c13a725..3c60549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -849,6 +849,9 @@ Trunk (Unreleased)
   HDFS-8438. Erasure Coding: Allow concat striped files if they have the 
same
   ErasureCodingPolicy. (Walter Su via jing9)
 
+  HDFS-9275. Wait previous ErasureCodingWork to finish before schedule
+  another one. (Walter Su via yliu)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 897df1e..dbe0726 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1586,6 +1586,10 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 if (block.isStriped()) {
+  if (pendingNum > 0) {
+// Wait the previous recovery to finish.
+return null;
+  }
   short[] indices = new short[liveBlockIndices.size()];
   for (int i = 0 ; i < liveBlockIndices.size(); i++) {
 indices[i] = liveBlockIndices.get(i);
@@ -1641,6 +1645,7 @@ public class BlockManager implements BlockStatsMXBean {
 if (block.isStriped()) {
   assert rw instanceof ErasureCodingWork;
   assert rw.getTargets().length > 0;
+  assert pendingNum == 0: "Should wait the previous recovery to finish";
   String src = getBlockCollection(block).getName();
   ErasureCodingPolicy ecPolicy = null;
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ba2b98d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index cc6e7d3..9942a2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -61,10 +61,10 @@ public class StripedFileTestUtil {
   public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
   public static final int BLOCK_STRIPE_SIZE = BLOCK_STRIPED_CELL_SIZE * 
NUM_DATA_BLOCKS;
 
-  static final int stripesPerBlock = 4;
-  static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
+  public static final int stripesPerBlock = 4;
+  public static final int blockSize = BLOCK_STRIPED_CELL_SIZE * 

[32/50] [abbrv] hadoop git commit: HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. Contributed by Chris Nauroth.

2015-11-04 Thread aengineer
HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/957f0311
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/957f0311
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/957f0311

Branch: refs/heads/HDFS-7240
Commit: 957f0311a160afb40dbb0619f455445b4f5d1e32
Parents: 6e0d353
Author: cnauroth 
Authored: Mon Nov 2 22:25:05 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 22:25:05 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../src/test/java/org/apache/hadoop/net/TestDNS.java| 12 +---
 2 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/957f0311/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b055069..0d1bce2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1312,6 +1312,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12508. delete fails with exception when lease is held on blob.
 (Gaurav Kanade via cnauroth)
 
+HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/957f0311/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
index b26c7ca..a0bfe73 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
@@ -30,6 +30,7 @@ import javax.naming.NameNotFoundException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
 
 import org.junit.Test;
@@ -37,6 +38,7 @@ import org.junit.Test;
 import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 /**
  * Test host name and IP resolution and caching.
@@ -185,13 +187,17 @@ public class TestDNS {
*
* This test may fail on some misconfigured test machines that don't have
* an entry for "localhost" in their hosts file. This entry is correctly
-   * configured out of the box on common Linux distributions, OS X and
-   * Windows.
+   * configured out of the box on common Linux distributions and OS X.
+   *
+   * Windows refuses to resolve 127.0.0.1 to "localhost" despite the presence 
of
+   * this entry in the hosts file.  We skip the test on Windows to avoid
+   * reporting a spurious failure.
*
* @throws Exception
*/
   @Test (timeout=6)
   public void testLookupWithHostsFallback() throws Exception {
+assumeTrue(!Shell.WINDOWS);
 final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
 
 try {
@@ -231,7 +237,7 @@ public class TestDNS {
 
   private String getLoopbackInterface() throws SocketException {
 return NetworkInterface.getByInetAddress(
-InetAddress.getLoopbackAddress()).getDisplayName();
+InetAddress.getLoopbackAddress()).getName();
   }
 
   /**
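(Truncated.) The skip-on-Windows idiom used above, as a self-contained sketch; Shell.WINDOWS is the Hadoop helper, mirrored here with a plain os.name check so the example stands alone:

import static org.junit.Assume.assumeTrue;
import org.junit.Test;

public class PlatformAssumptionSketch {
  private static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  @Test
  public void testHostsFileLookup() {
    // On Windows the assumption fails and JUnit reports the test as
    // skipped rather than failed, matching the TestDNS change above.
    assumeTrue(!WINDOWS);
    // ... body relying on hosts-file reverse resolution ...
  }
}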



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
Merge branch 'trunk' into hdfs-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312d

Branch: refs/heads/HDFS-7240
Commit: 312deac6781bd15a5e1a46e2007243bf5186
Parents: b14a70e 5667129
Author: Anu Engineer 
Authored: Wed Nov 4 14:19:34 2015 -0800
Committer: Anu Engineer 
Committed: Wed Nov 4 14:19:34 2015 -0800

--
 .gitignore  |1 +
 LICENSE.txt |   59 +
 dev-support/docker/Dockerfile   |7 +-
 dev-support/test-patch.sh   |   10 +-
 .../main/resources/assemblies/hadoop-dist.xml   |4 +-
 .../assemblies/hadoop-hdfs-nfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-httpfs-dist.xml |4 +-
 .../resources/assemblies/hadoop-kms-dist.xml|4 +-
 .../assemblies/hadoop-mapreduce-dist.xml|4 +-
 .../resources/assemblies/hadoop-nfs-dist.xml|4 +-
 .../main/resources/assemblies/hadoop-sls.xml|4 +-
 .../main/resources/assemblies/hadoop-src.xml|4 +-
 .../main/resources/assemblies/hadoop-tools.xml  |4 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   |4 +-
 hadoop-client/pom.xml   |6 +-
 .../JWTRedirectAuthenticationHandler.java   |7 +-
 .../server/KerberosAuthenticationHandler.java   |4 +-
 .../TestJWTRedirectAuthentictionHandler.java|   42 +-
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   74 -
 hadoop-common-project/hadoop-common/CHANGES.txt |  270 +-
 hadoop-common-project/hadoop-common/pom.xml |5 +
 .../hadoop-common/src/main/bin/hadoop   |   15 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|  109 +-
 .../src/main/bin/hadoop-layout.sh.example   |   16 +-
 .../hadoop-common/src/main/bin/rcc  |4 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |6 +-
 .../main/conf/hadoop-user-functions.sh.example  |   10 +-
 .../org/apache/hadoop/conf/Configuration.java   |2 +-
 .../fs/CommonConfigurationKeysPublic.java   |   11 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |   26 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |   29 -
 .../org/apache/hadoop/fs/FilterFileSystem.java  |8 +-
 .../java/org/apache/hadoop/fs/GlobFilter.java   |2 +-
 .../java/org/apache/hadoop/fs/GlobPattern.java  |7 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |2 +-
 .../org/apache/hadoop/fs/HarFileSystem.java |6 +
 .../java/org/apache/hadoop/fs/HardLink.java |8 +
 .../org/apache/hadoop/fs/LocalDirAllocator.java |6 +-
 .../apache/hadoop/fs/shell/CopyCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Delete.java |2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |   53 +-
 .../org/apache/hadoop/ha/HAServiceTarget.java   |   50 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |2 +-
 .../org/apache/hadoop/http/HttpServer2.java |2 +
 .../java/org/apache/hadoop/io/SequenceFile.java |   15 +-
 .../org/apache/hadoop/io/WritableUtils.java |8 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java |3 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   |3 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  |5 +
 .../erasurecode/codec/AbstractErasureCodec.java |2 +
 .../io/erasurecode/codec/ErasureCodec.java  |2 +
 .../io/erasurecode/codec/RSErasureCodec.java|2 +
 .../io/erasurecode/codec/XORErasureCodec.java   |2 +
 .../erasurecode/coder/AbstractErasureCoder.java |2 +
 .../coder/AbstractErasureCodingStep.java|2 +
 .../coder/AbstractErasureDecoder.java   |   25 +-
 .../coder/AbstractErasureEncoder.java   |2 +
 .../io/erasurecode/coder/ErasureCoder.java  |2 +
 .../io/erasurecode/coder/ErasureCodingStep.java |2 +
 .../erasurecode/coder/ErasureDecodingStep.java  |2 +
 .../erasurecode/coder/ErasureEncodingStep.java  |2 +
 .../io/erasurecode/coder/RSErasureDecoder.java  |2 +
 .../io/erasurecode/coder/RSErasureEncoder.java  |2 +
 .../io/erasurecode/coder/XORErasureDecoder.java |2 +
 .../io/erasurecode/coder/XORErasureEncoder.java |2 +
 .../io/erasurecode/grouper/BlockGrouper.java|2 +
 .../rawcoder/AbstractRawErasureCoder.java   |  114 +-
 .../rawcoder/AbstractRawErasureDecoder.java |   10 +-
 

[27/50] [abbrv] hadoop git commit: HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)

2015-11-04 Thread aengineer
HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76324094
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76324094
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76324094

Branch: refs/heads/HDFS-7240
Commit: 7632409482aaf06ecc6fe370a9f519afb969ad30
Parents: 78d6890
Author: Lei Xu 
Authored: Mon Nov 2 17:09:39 2015 -0800
Committer: Lei Xu 
Committed: Mon Nov 2 17:09:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/TestReplication.java | 115 +--
 .../server/datanode/FsDatasetTestUtils.java |   7 ++
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |  25 
 4 files changed, 64 insertions(+), 85 deletions(-)
--
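
For context: the point of this change is that TestReplication stops poking
FsDatasetImpl's on-disk block files directly and instead manipulates replicas
through the FsDatasetTestUtils abstraction, so the same test also works against
SimulatedFSDataset and other dataset implementations. A minimal sketch of the
pattern (method names mirror the diff where visible; the mutators are
assumptions based on how the test uses them):

    // Sketch only: a dataset-specific handle to a materialized replica, so a
    // test can corrupt or delete replica data without knowing whether it is
    // backed by block files on disk or by an in-memory simulation.
    public interface FsDatasetTestUtils {
      /** Handle to the replica of a block as materialized by this dataset. */
      MaterializedReplica getMaterializedReplica(ExtendedBlock block)
          throws IOException;

      interface MaterializedReplica {
        void corruptData() throws IOException;  // assumed mutator
        void deleteData() throws IOException;   // assumed mutator
      }
    }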


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fea4106..c13a725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1656,6 +1656,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
 
+HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76324094/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 6424bc3..d9c96ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -20,22 +20,14 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -50,7 +42,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -62,6 +53,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import 
org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -367,7 +359,7 @@ public class TestReplication {
+  List<MaterializedReplica> replicas = new ArrayList<>();
+  for (int dnIndex=0; dnIndex<3; dnIndex++) {
+    replicas.add(cluster.getMaterializedReplica(dnIndex, block));
   }
-  
+  assertEquals(3, replicas.size());
+

[35/50] [abbrv] hadoop git commit: HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line endings, fails on Windows. Contributed by Chris Nauroth.

2015-11-04 Thread aengineer
HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line 
endings, fails on Windows. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e282966
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e282966
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e282966

Branch: refs/heads/HDFS-7240
Commit: 7e2829662b4c4bf33ebaf2fa09312d0bed3d6f92
Parents: 095ac83
Author: cnauroth 
Authored: Tue Nov 3 11:54:57 2015 -0800
Committer: cnauroth 
Committed: Tue Nov 3 11:54:57 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/namenode/TestAuditLogger.java   | 24 
 2 files changed, 18 insertions(+), 9 deletions(-)
--
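
The portability issue behind this fix: "\n" hard-codes the Unix line
separator, while %n in String.format expands to the platform separator. A
minimal illustration (not taken from the patch itself):

    // "\n" is always LF, so on Windows (CRLF line endings) an endsWith
    // assertion against it fails. %n expands to System.lineSeparator().
    String hardcoded = "callerContext=setTimes\n";                   // Unix-only
    String portable = String.format("callerContext=setTimes%n");    // portable
    System.out.println(portable.endsWith(System.lineSeparator()));  // true everywhere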


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e282966/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1729b73..fbf211f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2222,6 +2222,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
+endings, fails on Windows. (cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e282966/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 252f7af..d637abc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -243,7 +243,8 @@ public class TestAuditLogger {
   CallerContext.setCurrent(context);
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
-  assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=setTimes%n")));
   auditlog.clearOutput();
 
   // context with signature
@@ -254,7 +255,7 @@ public class TestAuditLogger {
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=setTimes:L\n"));
+  String.format("callerContext=setTimes:L%n")));
   auditlog.clearOutput();
 
   // long context is truncated
@@ -266,7 +267,7 @@ public class TestAuditLogger {
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.setTimes(p, time, time);
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=" + longContext.substring(0, 128) + ":L\n"));
+  String.format("callerContext=%s:L%n", longContext.substring(0, 128)));
   auditlog.clearOutput();
 
   // empty context is ignored
@@ -302,7 +303,8 @@ public class TestAuditLogger {
   } catch (InterruptedException ignored) {
 // Ignore
   }
-  assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes:L\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=setTimes:L%n")));
   auditlog.clearOutput();
 
   // caller context is overridden in child thread
@@ -330,7 +332,7 @@ public class TestAuditLogger {
 // Ignore
   }
   assertTrue(auditlog.getOutput().endsWith(
-  "callerContext=setPermission:L\n"));
+  String.format("callerContext=setPermission:L%n")));
   auditlog.clearOutput();
 
   // reuse the current context's signature
@@ -339,7 +341,8 @@ public class TestAuditLogger {
   CallerContext.setCurrent(context);
   LOG.info("Set current caller context as {}", CallerContext.getCurrent());
   fs.mkdirs(new Path("/reuse-context-signature"));
-  assertTrue(auditlog.getOutput().endsWith("callerContext=mkdirs:L\n"));
+  assertTrue(auditlog.getOutput().endsWith(
+  String.format("callerContext=mkdirs:L%n")));
   auditlog.clearOutput();
 
   // too long signature is ignored
@@ -349,7 +352,8 @@ public class 

[34/50] [abbrv] hadoop git commit: HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows. Contributed by Xiaoyu Yao.

2015-11-04 Thread aengineer
HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/095ac834
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/095ac834
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/095ac834

Branch: refs/heads/HDFS-7240
Commit: 095ac834022df6136b42961c507ec745c6cf8f97
Parents: 0783184
Author: cnauroth 
Authored: Tue Nov 3 10:51:21 2015 -0800
Committer: cnauroth 
Committed: Tue Nov 3 11:21:08 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/balancer/TestBalancer.java  | 572 +--
 2 files changed, 277 insertions(+), 298 deletions(-)
--
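
The shape of the fix is visible in the diff below: MiniDFSCluster teardown
moves into a JUnit @After method so it runs even when an assertion fails
mid-test. That matters on Windows because a leaked cluster keeps file handles
open, open files cannot be deleted, and the failure cascades into later
tests. The pattern in isolation (a sketch, not the full patch):

    // Guarantee cluster shutdown after every test, pass or fail.
    private MiniDFSCluster cluster;

    @After
    public void shutdown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
        cluster = null;  // guard against double shutdown
      }
    }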


http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ac834/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 879c015..1729b73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2219,6 +2219,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9313. Possible NullPointerException in BlockManager if no excess
 replica can be chosen. (mingma)
 
+HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
+(Xiaoyu Yao via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/095ac834/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 332ae15..dd54345 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.Test;
 
 /**
@@ -106,6 +107,14 @@ public class TestBalancer {
   final static Path filePath = new Path(fileName);
   private MiniDFSCluster cluster;
 
+  @After
+  public void shutdown() throws Exception {
+if (cluster != null) {
+  cluster.shutdown();
+  cluster = null;
+}
+  }
+
   ClientProtocol client;
 
   static final long TIMEOUT = 40000L; //msec
@@ -367,44 +376,38 @@ public class TestBalancer {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
 .hosts(hosts).racks(racks).simulatedCapacities(capacities).build();
 
-try {
-  cluster.waitActive();
-  client = NameNodeProxies.createProxy(conf,
-  cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
-  
-  // fill up the cluster to be 80% full
-  long totalCapacity = sum(capacities);
-  long totalUsedSpace = totalCapacity * 8 / 10;
-  InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
-  for (int i = 0; i < favoredNodes.length; i++) {
-// DFSClient will attempt reverse lookup. In case it resolves
-// "127.0.0.1" to "localhost", we manually specify the hostname.
-int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
-favoredNodes[i] = new InetSocketAddress(hosts[i], port);
-  }
+cluster.waitActive();
+client = NameNodeProxies.createProxy(conf,
+cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+
+// fill up the cluster to be 80% full
+long totalCapacity = sum(capacities);
+long totalUsedSpace = totalCapacity * 8 / 10;
+InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
+for (int i = 0; i < favoredNodes.length; i++) {
+  // DFSClient will attempt reverse lookup. In case it resolves
+  // "127.0.0.1" to "localhost", we manually specify the hostname.
+  int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
+  favoredNodes[i] = new InetSocketAddress(hosts[i], port);
+}
 
-  DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
-  totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
-  (short) numOfDatanodes, 0, false, favoredNodes);
-  
-  // start up an empty node with the same capacity
-  cluster.startDataNodes(conf, 1, true, null, new 

[45/50] [abbrv] hadoop git commit: HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)

2015-11-04 Thread aengineer
HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec414600
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec414600
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec414600

Branch: refs/heads/HDFS-7240
Commit: ec414600ede8e305c584818565b50e055ea5d2b5
Parents: 88beb46
Author: Lei Xu 
Authored: Tue Nov 3 14:17:11 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:22:17 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  65 ++-
 .../blockmanagement/BlockPlacementPolicy.java   |  53 --
 .../BlockPlacementPolicyDefault.java|  57 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 ++--
 .../BlockPlacementPolicyWithUpgradeDomain.java  |  84 +++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   9 +-
 .../hdfs/server/balancer/TestBalancer.java  | 103 ++-
 .../blockmanagement/TestBlockManager.java   |  13 +-
 .../blockmanagement/TestReplicationPolicy.java  |  93 +++---
 .../TestReplicationPolicyWithNodeGroup.java |   6 +-
 .../TestReplicationPolicyWithUpgradeDomain.java | 171 +++
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  10 +-
 13 files changed, 503 insertions(+), 198 deletions(-)
--
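
Conceptually, the change replaces the Balancer's hand-rolled rack arithmetic
with a query to the configured block placement policy, so any pluggable
policy, including the upgrade-domain one, is honored when deciding whether a
move is safe. A rough sketch of the delegation (the field and method names
come from the diff below; the isMovable() call is this sketch's assumption
about the new policy API):

    // Ask the placement policy whether moving a replica from source to
    // target keeps the block's placement valid.
    private boolean isGoodBlockCandidateForPlacementPolicy(StorageGroup source,
        StorageGroup target, DBlock block) {
      List<DatanodeInfo> locations = new ArrayList<>();
      for (StorageGroup loc : block.getLocations()) {  // assumed accessor
        locations.add(loc.getDatanodeInfo());
      }
      return placementPolicies.getPolicy(false)        // false: contiguous blocks
          .isMovable(locations, source.getDatanodeInfo(),
              target.getDatanodeInfo());
    }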


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f2d8296..fd560d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1618,6 +1618,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
 
+HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via 
lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 5b3eb36..9f9cdc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicies;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import 
org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -124,6 +125,7 @@ public class Dispatcher {
   private final int ioFileBufferSize;
 
   private final boolean connectToDnViaHostname;
+  private BlockPlacementPolicies placementPolicies;
 
   static class Allocator {
 private final int max;
@@ -949,6 +951,7 @@ public class Dispatcher {
 this.connectToDnViaHostname = conf.getBoolean(
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
+placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
   }
 
   public DistributedFileSystem getDistributedFileSystem() {
@@ -1166,66 +1169,24 @@ public class Dispatcher {
   }
 }
 
-if (cluster.isNodeGroupAware()
-&& isOnSameNodeGroupWithReplicas(source, target, block)) {
-  return false;
-}
-if (reduceNumOfRacks(source, target, block)) {
+if (!isGoodBlockCandidateForPlacementPolicy(source, target, block)) {
   return false;
 }
 return true;
   }
 
-  /**
-   * Determine whether moving the given block replica from source to target
-   * would reduce the number of racks of the block replicas.
-   */
-  private boolean reduceNumOfRacks(StorageGroup source, StorageGroup target,
-  DBlock block) {
-final DatanodeInfo sourceDn = source.getDatanodeInfo();
-if 

[40/50] [abbrv] hadoop git commit: Add 2.7.3 release to CHANGES.txt

2015-11-04 Thread aengineer
Add 2.7.3 release to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0383a397
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0383a397
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0383a397

Branch: refs/heads/HDFS-7240
Commit: 0383a3973b3b734fb23c331a2256dc92cff05365
Parents: 73b9c7b
Author: Jason Lowe 
Authored: Wed Nov 4 16:26:14 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:26:14 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dbf9700..4114bbd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1404,6 +1404,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12484. Single File Rename Throws Incorrectly In Potential Race
 Condition Scenarios. (Gaurav Kanade via cnauroth)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2def995..530ed2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 23bef37..f30f0ef 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -622,6 +622,18 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6515. Update Application priority in AM side from AM-RM heartbeat
(Sunil G via jlowe)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d6ad672..1784d6e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1042,6 +1042,18 @@ Release 2.8.0 - UNRELEASED
 YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no 
longer 
 binds to default port 8188. (Meng Ding via wangda)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[31/50] [abbrv] hadoop git commit: HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)

2015-11-04 Thread aengineer
HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e0d3532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e0d3532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e0d3532

Branch: refs/heads/HDFS-7240
Commit: 6e0d35323505cc68dbd963b8628b89ee04af2f2b
Parents: d565480
Author: Allen Wittenauer 
Authored: Mon Nov 2 20:39:46 2015 -0800
Committer: Allen Wittenauer 
Committed: Mon Nov 2 20:39:46 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-common-project/hadoop-common/pom.xml | 1 -
 hadoop-project/pom.xml  | 5 +
 3 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1a9c93c..b055069 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -251,6 +251,8 @@ Trunk (Unreleased)
 
 HADOOP-12133. Add schemas to Maven Assembly XMLs (Gábor Lipták via aw)
 
+HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 4e47a3f..4735c6b 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -194,7 +194,6 @@
     <dependency>
       <groupId>com.google.re2j</groupId>
       <artifactId>re2j</artifactId>
-      <version>${re2j.version}</version>
       <scope>compile</scope>
     </dependency>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0d3532/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c974a61..efc3a7d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -800,6 +800,11 @@
         <version>1.8.1</version>
       </dependency>
       <dependency>
+        <groupId>com.google.re2j</groupId>
+        <artifactId>re2j</artifactId>
+        <version>${re2j.version}</version>
+      </dependency>
+      <dependency>
         <groupId>com.google.protobuf</groupId>
         <artifactId>protobuf-java</artifactId>
         <version>${protobuf.version}</version>



[23/50] [abbrv] hadoop git commit: HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage size is smaller than IO buffer size. Contributed by Zhe Zhang.

2015-11-04 Thread aengineer
HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage 
size is smaller than IO buffer size. Contributed by Zhe Zhang.

Change-Id: I09896c46e9ee0718b67c64fac5acfb3f7decf0b9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/259bea3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/259bea3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/259bea3b

Branch: refs/heads/HDFS-7240
Commit: 259bea3b48de7469a500831efb3306e8464a2dc9
Parents: 04d97f8
Author: Zhe Zhang 
Authored: Mon Nov 2 10:03:39 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 10:03:39 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../namenode/ha/TestBootstrapStandby.java   | 79 +++-
 2 files changed, 62 insertions(+), 20 deletions(-)
--
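
Why the test was flaky: the image is streamed and throttled in IO-buffer-sized
chunks, so when the fsimage is shorter than one buffer the transfer finishes
before the throttler can slow anything down, and the assertion races. The fix
pads the namespace until the image is longer than the buffer. A sketch of that
sizing idea (the bytes-per-inode figure is a rough assumption, not the patch's
exact code):

    // Create enough directories to push the fsimage past one IO buffer, so
    // the rate throttler has at least two chunks to act on.
    int bufferSize = DFSUtilClient.getIoFileBufferSize(conf);
    final int assumedBytesPerInode = 50;  // rough sizing assumption
    int dirsNeeded = bufferSize / assumedBytesPerInode + 1;
    for (int i = 0; i < dirsNeeded; i++) {
      fs.mkdirs(new Path("/pad-" + i));
    }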


http://git-wip-us.apache.org/repos/asf/hadoop/blob/259bea3b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3b2d997..a2e4824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2204,6 +2204,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
 Arpit Agarwal)
 
+HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
+size is smaller than IO buffer size. (zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/259bea3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index fd45816..9f0d95b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -25,13 +25,16 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -109,12 +112,16 @@ public class TestBootstrapStandby {
 "storage directory does not exist or is not accessible", ioe);
   }
 
+  int expectedCheckpointTxId = (int)NameNodeAdapter.getNamesystem(nn0)
+  .getFSImage().getMostRecentCheckpointTxId();
+
   int rc = BootstrapStandby.run(new String[] { "-nonInteractive" },
   cluster.getConfiguration(index));
   assertEquals(0, rc);
 
   // Should have copied over the namespace from the active
-  FSImageTestUtil.assertNNHasCheckpoints(cluster, index, 
ImmutableList.of(0));
+  FSImageTestUtil.assertNNHasCheckpoints(cluster, index,
+  ImmutableList.of(expectedCheckpointTxId));
 }
 
 // We should now be able to start the standbys successfully.
@@ -221,7 +228,7 @@ public class TestBootstrapStandby {
* {@link DFSConfigKeys#DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY}
* created by HDFS-8808.
*/
-  @Test
+  @Test(timeout=30000)
   public void testRateThrottling() throws Exception {
 cluster.getConfiguration(0).setLong(
 DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 1);
@@ -229,23 +236,46 @@ public class TestBootstrapStandby {
 cluster.waitActive();
 nn0 = cluster.getNameNode(0);
 cluster.transitionToActive(0);
-// Each edit has at least 1 byte. So the lowRate definitely should cause
-// a timeout, if enforced. If lowRate is not enforced, any reasonable test
-// machine should at least download an image with 5 edits in 5 seconds.
-for (int i = 0; i < 5; i++) {
+// Any reasonable test machine should be able to transfer 1 byte per MS
+// (which is ~1K/s)
+final int minXferRatePerMS = 1;
+int imageXferBufferSize = DFSUtilClient.getIoFileBufferSize(
+

[41/50] [abbrv] hadoop git commit: HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. Contributed by Chang Li

2015-11-04 Thread aengineer
HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. 
Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9d25c3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9d25c3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9d25c3e

Branch: refs/heads/HDFS-7240
Commit: b9d25c3ee2d20166d6a786c5a16cc001e249f61c
Parents: 0383a39
Author: Jason Lowe 
Authored: Wed Nov 4 16:34:01 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:34:01 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../security/JniBasedUnixGroupsNetgroupMapping.c  | 18 +-
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4114bbd..efb73f4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1416,6 +1416,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12296. when setnetgrent returns 0 in linux, exception should be
+thrown (Chang Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index de73a8a..4ae1051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -57,6 +57,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   int setnetgrentCalledFlag = 0;
 
   // if not NULL then THROW exception
+  char *errorType = NULL;
   char *errorMessage = NULL;
 
   cgroup = (*env)->GetStringUTFChars(env, jgroup, NULL);
@@ -94,7 +95,14 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   }
 }
   }
-
+#if defined(__linux__)
+  else {
+errorType = "java/io/IOException";
+errorMessage =
+"no netgroup of this name is known or some other error occurred";
+goto END;
+  }
+#endif
   //--
   // build return data (java array)
 
@@ -103,7 +111,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 (*env)->FindClass(env, "java/lang/String"),
 NULL);
   if (jusers == NULL) {
-errorMessage = "java/lang/OutOfMemoryError";
+errorType = "java/lang/OutOfMemoryError";
 goto END;
   }
 
@@ -114,7 +122,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   for(current = userListHead; current != NULL; current = current->next) {
 jstring juser = (*env)->NewStringUTF(env, current->string);
 if (juser == NULL) {
-  errorMessage = "java/lang/OutOfMemoryError";
+  errorType = "java/lang/OutOfMemoryError";
   goto END;
 }
 (*env)->SetObjectArrayElement(env, jusers, i++, juser);
@@ -134,8 +142,8 @@ END:
   }
 
   // return results or THROW
-  if(errorMessage) {
-THROW(env, errorMessage, NULL);
+  if(errorType) {
+THROW(env, errorType, errorMessage);
 return NULL;
   } else {
 return jusers;



[12/50] [abbrv] hadoop git commit: HADOOP-12133 Add schemas to Maven Assembly XMLs

2015-11-04 Thread aengineer
HADOOP-12133 Add schemas to Maven Assembly XMLs

Signed-off-by: Allen Wittenauer 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18727c63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18727c63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18727c63

Branch: refs/heads/HDFS-7240
Commit: 18727c63da721da9d29932378818d8742f705808
Parents: 45d3967
Author: Gábor Lipták 
Authored: Sat Jun 27 11:11:20 2015 -0400
Committer: Allen Wittenauer 
Committed: Fri Oct 30 11:36:52 2015 -0700

--
 hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml  | 4 +++-
 .../src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml   | 4 +++-
 .../src/main/resources/assemblies/hadoop-httpfs-dist.xml | 4 +++-
 .../src/main/resources/assemblies/hadoop-kms-dist.xml| 4 +++-
 .../src/main/resources/assemblies/hadoop-mapreduce-dist.xml  | 4 ++--
 .../src/main/resources/assemblies/hadoop-nfs-dist.xml| 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml   | 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml   | 4 ++--
 hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml | 4 ++--
 .../src/main/resources/assemblies/hadoop-yarn-dist.xml   | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 2 ++
 hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml  | 4 +++-
 12 files changed, 31 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
--
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
index 1a5d7d0..85899e5 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
@@ -14,7 +14,9 @@
See the License for the specific language governing permissions and
limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-distro</id>
   <formats>
     <format>dir</format>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
index 89e8771..0edfdeb 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-hdfs-nfs-dist</id>
   <formats>
     <format>dir</format>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
index 6468a8a..4d508ee 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-httpfs-dist</id>
   <formats>
     <format>dir</format>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18727c63/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

[06/50] [abbrv] hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.

2015-11-04 Thread aengineer
HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43539b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43539b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43539b5f

Branch: refs/heads/HDFS-7240
Commit: 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f
Parents: ce31b22
Author: Kihwal Lee 
Authored: Fri Oct 30 09:27:21 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:29:13 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 insertions(+)
--
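
The guard added below bounds ReplicationMonitor's random probing: the
available-node count is snapshotted once before the loop, so if nodes die
mid-loop the placement constraints may become unsatisfiable and the loop can
spin forever. Refreshing the count every refreshCounter probes detects that
case. The same guard in stripped-down form (helper names are illustrative):

    // Bounded random probing with a periodic refresh of the live-node count.
    int refreshCounter = clusterMap.countNumOfAvailableNodes(scope, excludedNodes);
    while (numOfReplicas > 0) {
      if (probeOneRandomNode()) {  // illustrative: true when a target is chosen
        numOfReplicas--;
      }
      if (--refreshCounter == 0) {
        refreshCounter = clusterMap.countNumOfAvailableNodes(scope, excludedNodes);
        if (refreshCounter <= excludedNodes.size()) {
          break;  // every remaining node is excluded; no probe can succeed
        }
      }
    }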


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 38b9e55..f6a22a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..f610574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,17 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+// It has already gone through enough number of nodes.
+if (refreshCounter <= excludedNodes.size()) {
+  break;
+}
+  }
 }
   
 if (numOfReplicas>0) {



[38/50] [abbrv] hadoop git commit: HADOOP-12544. Erasure Coding: create dummy raw coder to isolate performance issues in testing. Contributed by Rui Li.

2015-11-04 Thread aengineer
HADOOP-12544. Erasure Coding: create dummy raw coder to isolate performance 
issues in testing. Contributed by Rui Li.

Change-Id: I9856456b59ed881c5ba2acce51e4d9bd01dc6f48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e1745d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e1745d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e1745d8

Branch: refs/heads/HDFS-7240
Commit: 3e1745d8e8e5a44f7c8eab9a8234edaf389828c7
Parents: 194251c
Author: Zhe Zhang 
Authored: Tue Nov 3 22:26:27 2015 -0800
Committer: Zhe Zhang 
Committed: Tue Nov 3 22:26:27 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../erasurecode/rawcoder/DummyRawDecoder.java   | 47 +++
 .../erasurecode/rawcoder/DummyRawEncoder.java   | 46 +++
 .../rawcoder/DummyRawErasureCoderFactory.java   | 36 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +
 .../erasurecode/rawcoder/TestDummyRawCoder.java | 83 
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 10 +--
 7 files changed, 224 insertions(+), 5 deletions(-)
--
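
The dummy coder's value is in benchmarking: run the same HDFS erasure-coding
workload once with a real codec and once with the no-op codec, and the
difference is the time spent in the codec itself, while the dummy run exposes
the HDFS-side overhead in isolation. Direct use could look like this (the
constructor comes from the diff below; the buffer conventions are this
sketch's assumptions):

    import java.nio.ByteBuffer;

    // Decode with the no-op coder: outputs stay zeroed, but all surrounding
    // buffer management still executes, which is what we want to measure.
    int cellSize = 64 * 1024;
    DummyRawDecoder decoder = new DummyRawDecoder(6, 3);  // RS(6,3) layout
    ByteBuffer[] inputs = new ByteBuffer[9];              // 6 data + 3 parity
    for (int i = 1; i < inputs.length; i++) {
      inputs[i] = ByteBuffer.allocate(cellSize);
    }
    inputs[0] = null;  // assumed convention: null marks the erased unit
    ByteBuffer[] outputs = { ByteBuffer.allocate(cellSize) };
    decoder.decode(inputs, new int[] { 0 }, outputs);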


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0d1bce2..453efe6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -613,6 +613,9 @@ Trunk (Unreleased)
   HADOOP-12047. Indicate preference not to affect input buffers during
   coding in erasure coder. (Kai Zheng via waltersu4549)
 
+  HADOOP-12544. Erasure Coding: create dummy raw coder to isolate 
performance
+  issues in testing. (Rui Li via zhz)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
new file mode 100644
index 000..25dfa57
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A dummy raw decoder that does no real computation.
+ * Instead, it just returns zero bytes.
+ * This decoder can be used to isolate the performance issue to HDFS side logic
+ * instead of codec, and is intended for test only.
+ */
+@InterfaceAudience.Private
+public class DummyRawDecoder extends AbstractRawErasureDecoder {
+  public DummyRawDecoder(int numDataUnits, int numParityUnits) {
+super(numDataUnits, numParityUnits);
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+// Nothing to do. Output buffers have already been reset
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] inputOffsets, int dataLen,
+  int[] erasedIndexes, byte[][] outputs, int[] outputOffsets) {
+// Nothing to do. Output buffers have already been reset
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e1745d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawEncoder.java
--
diff --git 

[08/50] [abbrv] hadoop git commit: Creating 2.6.3 entries in CHANGES.txt files.

2015-11-04 Thread aengineer
Creating 2.6.3 entries in CHANGES.txt files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eadf7b30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eadf7b30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eadf7b30

Branch: refs/heads/HDFS-7240
Commit: eadf7b3096cb010eb7f0afd9afd4ae0d67b2645f
Parents: 3c0204a
Author: Jason Lowe 
Authored: Fri Oct 30 14:50:50 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 14:50:50 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6b33a2c..ddd0796 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2156,6 +2156,18 @@ Release 2.7.0 - 2015-04-20
 HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
 Tomcat deployments. (Bowen Zhang via wheat9)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c5846b3..17df171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3298,6 +3298,18 @@ Release 2.7.0 - 2015-04-20
   HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
   Arpit Agarwal)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index af5c3f6..8594e1e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -888,6 +888,18 @@ Release 2.7.0 - 2015-04-20
 MAPREDUCE-6285. ClientServiceDelegate should not retry upon
 AuthenticationException. (Jonathan Eagles via ozawa)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eadf7b30/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 874397d..cc8f5f3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1875,6 +1875,18 @@ Release 2.7.0 - 2015-04-20
 YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
 and node-label column (Jason Lowe via wangda)
 
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES



[16/50] [abbrv] hadoop git commit: Revert "fix CHANGES.txt"

2015-11-04 Thread aengineer
Revert "fix CHANGES.txt"

This reverts commit 3c0204a5866520e74917b26b6ac2061650a5bb6d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ea4413b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ea4413b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ea4413b

Branch: refs/heads/HDFS-7240
Commit: 2ea4413b15f82a032d6dbd2532861d82a299461a
Parents: a4a6b5b
Author: yliu 
Authored: Sat Oct 31 16:20:37 2015 +0800
Committer: yliu 
Committed: Sat Oct 31 16:20:37 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea4413b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a61eed..211e7fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2273,9 +2276,6 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[24/50] [abbrv] hadoop git commit: HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API. Contributed by Dushyanth.

2015-11-04 Thread aengineer
HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API. 
Contributed by Dushyanth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ce0a650
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ce0a650
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ce0a650

Branch: refs/heads/HDFS-7240
Commit: 3ce0a6502e78240f551c29bb27a2324ce359cd70
Parents: 259bea3
Author: cnauroth 
Authored: Mon Nov 2 09:38:37 2015 -0800
Committer: cnauroth 
Committed: Mon Nov 2 10:17:41 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 125 ++---
 ...estFileSystemOperationExceptionHandling.java | 131 +
 ...perationsExceptionHandlingMultiThreaded.java | 185 +++
 4 files changed, 422 insertions(+), 22 deletions(-)
--
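
The pattern here: WASB's read and seek paths now translate the Azure SDK's
"blob not found" error into the FileNotFoundException that FileSystem
contracts promise, instead of leaking a raw StorageException to callers. A
hedged sketch of that translation (the imports match the diff; the helper
itself is assumed, not the patch's exact code):

    // Rethrow an IOException caused by a missing blob as FileNotFoundException.
    private static IOException translateForRead(String key, IOException e) {
      Throwable cause = e.getCause();
      if (cause instanceof StorageException
          && StorageErrorCodeStrings.BLOB_NOT_FOUND.equals(
              ((StorageException) cause).getErrorCode())) {
        return new FileNotFoundException("File " + key + " does not exist.");
      }
      return e;
    }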


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c8daad..c8d60b0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1304,6 +1304,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12519. hadoop-azure tests should avoid creating a metrics
 configuration file in the module root directory. (cnauroth)
 
+HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek 
API.
+(Dushyanth via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 7c5a504..73bc6b3 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.azure;
 
 import java.io.DataInputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -49,6 +50,7 @@ import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -62,7 +64,6 @@ import 
org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.JsonNode;
@@ -74,9 +75,11 @@ import org.codehaus.jackson.map.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
-import com.microsoft.azure.storage.core.*;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import org.apache.hadoop.io.IOUtils;
 
 /**
 * A {@link FileSystem} for reading and writing files stored on

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ce0a650/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
new file mode 100644
index 000..35a1f50
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for 

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into hdfs-7240

2015-11-04 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/312d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index da09b0e,29bcd79..c93a362
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2816,30 -2633,14 +2831,30 @@@ public class DataNode extends Reconfigu
}
  
/**
-* Convenience method, which unwraps RemoteException.
-* @throws IOException not a RemoteException.
-*/
 -   * Update replica with the new generation stamp and length.  
++  * Convenience method, which unwraps RemoteException.
++  * @throws IOException not a RemoteException.
++  */
 +  private static ReplicaRecoveryInfo callInitReplicaRecovery(
 +  InterDatanodeProtocol datanode,
 +  RecoveringBlock rBlock) throws IOException {
 +try {
 +  return datanode.initReplicaRecovery(rBlock);
- } catch(RemoteException re) {
++} catch (RemoteException re) {
 +  throw re.unwrapRemoteException();
 +}
 +  }
 +
 +  /**
-* Update replica with the new generation stamp and length.  
++   * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
--  final long recoveryId, final long newBlockId, final long newLength)
++   final long recoveryId, final long 
newBlockId, final long newLength)
throws IOException {
 -final String storageID = data.updateReplicaUnderRecovery(oldBlock,
 -recoveryId, newBlockId, newLength);
 +final FsDatasetSpi<?> dataset =
 +(FsDatasetSpi<?>) getDataset(oldBlock.getBlockPoolId());
 +final String storageID = dataset.updateReplicaUnderRecovery(
 +oldBlock, recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
@@@ -2851,234 -2652,6 +2866,244 @@@
  return storageID;
}
  
-   /** A convenient class used in block recovery */
-   static class BlockRecord { 
++  /**
++   * A convenient class used in block recovery
++   */
++  static class BlockRecord {
 +final DatanodeID id;
 +final InterDatanodeProtocol datanode;
 +final ReplicaRecoveryInfo rInfo;
- 
 +private String storageID;
 +
 +BlockRecord(DatanodeID id,
 +InterDatanodeProtocol datanode,
 +ReplicaRecoveryInfo rInfo) {
 +  this.id = id;
 +  this.datanode = datanode;
 +  this.rInfo = rInfo;
 +}
 +
 +void updateReplicaUnderRecovery(String bpid, long recoveryId,
 +long newBlockId, long newLength)
 +throws IOException {
 +  final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
 +  storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, 
newBlockId,
 +  newLength);
 +}
 +
 +@Override
 +public String toString() {
 +  return "block:" + rInfo + " node:" + id;
 +}
 +  }
 +
-   /** Recover a block */
++
++  /**
++   * Recover a block
++   */
 +  private void recoverBlock(RecoveringBlock rBlock) throws IOException {
 +ExtendedBlock block = rBlock.getBlock();
 +String blockPoolId = block.getBlockPoolId();
 +DatanodeID[] datanodeids = rBlock.getLocations();
 +List<BlockRecord> syncList = new 
ArrayList<BlockRecord>(datanodeids.length);
 +int errorCount = 0;
 +
 +//check generation stamps
- for(DatanodeID id : datanodeids) {
++for (DatanodeID id : datanodeids) {
 +  try {
 +BPOfferService bpos = blockPoolManager.get(blockPoolId);
 +DatanodeRegistration bpReg = bpos.bpRegistration;
- InterDatanodeProtocol datanode = bpReg.equals(id)?
- this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
- dnConf.socketTimeout, dnConf.connectToDnViaHostname);
++InterDatanodeProtocol datanode = bpReg.equals(id) ?
++this : DataNode.createInterDataNodeProtocolProxy(id, getConf(),
++dnConf.socketTimeout, dnConf.connectToDnViaHostname);
 +ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
 +if (info != null &&
 +info.getGenerationStamp() >= block.getGenerationStamp() &&
 +info.getNumBytes() > 0) {
 +  syncList.add(new BlockRecord(id, datanode, info));
 +}
 +  } catch (RecoveryInProgressException ripE) {
 +InterDatanodeProtocol.LOG.warn(
 +"Recovery for replica " + block + " on data-node " + id
- + " is already in 

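The recoverBlock() hunk above builds its sync list by keeping only replicas
whose generation stamp is at least the block's and that actually hold data. A
minimal JDK-only sketch of that selection rule (the Replica holder is
illustrative, not Hadoop's type):

    import java.util.ArrayList;
    import java.util.List;

    public class SyncListFilter {
      static class Replica {
        final long genStamp;
        final long numBytes;
        Replica(long genStamp, long numBytes) {
          this.genStamp = genStamp;
          this.numBytes = numBytes;
        }
      }

      // Mirror of the recoverBlock() condition: a replica joins the sync list
      // only if its generation stamp is current enough and it holds bytes.
      static List<Replica> syncList(long blockGenStamp, List<Replica> replicas) {
        List<Replica> out = new ArrayList<Replica>(replicas.size());
        for (Replica r : replicas) {
          if (r.genStamp >= blockGenStamp && r.numBytes > 0) {
            out.add(r);
          }
        }
        return out;
      }
    }
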
[05/50] [abbrv] hadoop git commit: YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188. Contributed by Varun Saxena.

2015-11-04 Thread aengineer
YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer 
binds to default port 8188. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce31b227
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce31b227
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce31b227

Branch: refs/heads/HDFS-7240
Commit: ce31b22739512804da38cf87e0ce1059e3128da3
Parents: d21214c
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 17:51:39 2015 +0900
Committer: Tsuyoshi Ozawa 
Committed: Fri Oct 30 18:00:20 2015 +0900

--
 .../mapreduce/jobhistory/TestJobHistoryEventHandler.java  | 10 +++---
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../ApplicationHistoryServer.java |  2 +-
 3 files changed, 11 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 2b07efb..f213b32 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -489,9 +489,6 @@ public class TestJobHistoryEventHandler {
 TestParams t = new TestParams(false);
 Configuration conf = new YarnConfiguration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
-JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
-jheh.init(conf);
 MiniYARNCluster yarnCluster = null;
 long currentTime = System.currentTimeMillis();
 try {
@@ -499,6 +496,13 @@ public class TestJobHistoryEventHandler {
 TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
   yarnCluster.init(conf);
   yarnCluster.start();
+  Configuration confJHEH = new YarnConfiguration(conf);
+  confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+  confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":" +
+  yarnCluster.getApplicationHistoryServer().getPort());
+  JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 
0);
+  jheh.init(confJHEH);
   jheh.start();
   TimelineStore ts = yarnCluster.getApplicationHistoryServer()
   .getTimelineStore();

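The fix works because the AHS in MiniYARNCluster now binds an ephemeral port,
so the handler's configuration must be built after the cluster is up, reading
the real address back rather than assuming port 8188. A JDK-only sketch of
that bind-then-discover pattern (a ServerSocket stands in for the AHS web app;
that substitution is an assumption for illustration):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortExample {
      public static void main(String[] args) throws IOException {
        ServerSocket server = new ServerSocket();
        try {
          // Port 0 asks the OS for any free port, as the AHS now does.
          server.bind(new InetSocketAddress("localhost", 0));
          // Read the bound port back; never hard-code 8188 in the test.
          System.out.println("timeline webapp address = localhost:"
              + server.getLocalPort());
        } finally {
          server.close();
        }
      }
    }
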
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2151136..874397d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1150,6 +1150,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
 as some of the test cases time out. (Varun Saxena via ozawa)
 
+YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no 
longer
+binds to default port 8188. (Varun Saxena via ozawa)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce31b227/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 111a85f..21e1c1a 100644
--- 

[43/50] [abbrv] hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread aengineer
fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb1ece4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb1ece4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb1ece4

Branch: refs/heads/HDFS-7240
Commit: 3fb1ece4e9b290ad4a0b6357a519b20f59561911
Parents: 0eed886
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb1ece4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bdcc1fc..500dc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,9 +2225,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -2246,6 +2243,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[01/50] [abbrv] hadoop git commit: YARN-4313. Race condition in MiniMRYarnCluster when getting history server address. Contributed by Jian He

2015-11-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b14a70e79 -> 312de


YARN-4313. Race condition in MiniMRYarnCluster when getting history
server address. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7412ff48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7412ff48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7412ff48

Branch: refs/heads/HDFS-7240
Commit: 7412ff48eeb967c972c19c1370c77a41c5b3b81f
Parents: e5b1733
Author: Xuan 
Authored: Thu Oct 29 17:36:36 2015 -0700
Committer: Xuan 
Committed: Thu Oct 29 17:36:36 2015 -0700

--
 .../java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java  | 5 -
 hadoop-yarn-project/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7412ff48/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 1dd6fca..3521834 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -190,6 +190,7 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 public JobHistoryServerWrapper() {
   super(JobHistoryServerWrapper.class.getName());
 }
+private volatile boolean jhsStarted = false;
 
 @Override
 public synchronized void serviceStart() throws Exception {
@@ -211,9 +212,11 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 new Thread() {
   public void run() {
 historyServer.start();
+jhsStarted = true;
   };
 }.start();
-while (historyServer.getServiceState() == STATE.INITED) {
+
+while (!jhsStarted) {
   LOG.info("Waiting for HistoryServer to start...");
   Thread.sleep(1500);
 }

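Polling the service state raced with the thread that was still starting the
history server; the patch instead publishes completion through a volatile flag
set only after start() returns. A self-contained sketch of the same handshake
(plain threads standing in for the JHS wrapper):

    public class StartupFlagExample {
      private static volatile boolean started = false;

      public static void main(String[] args) throws InterruptedException {
        new Thread(new Runnable() {
          public void run() {
            // ... bring the server fully up here ...
            started = true; // published only once startup has finished
          }
        }).start();
        while (!started) { // volatile guarantees the waiter sees the write
          Thread.sleep(100);
        }
        System.out.println("server is up");
      }
    }
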
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7412ff48/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0da15bd..d0fa27d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1144,6 +1144,9 @@ Release 2.7.2 - UNRELEASED
 YARN-4183. Enabling generic application history forces every job to get a
 timeline service delegation token (Mit Desai via jeagles)
 
+YARN-4313. Race condition in MiniMRYarnCluster when getting history server
+address. (Jian He via xgong)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



[22/50] [abbrv] hadoop git commit: MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart. Contributed by Masatake Iwasaki.

2015-11-04 Thread aengineer
MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart. 
Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04d97f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04d97f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04d97f8a

Branch: refs/heads/HDFS-7240
Commit: 04d97f8abb7fcc7b635b9499a48ddaa1fe0ac7e3
Parents: 90e1405
Author: Akira Ajisaka 
Authored: Tue Nov 3 01:48:45 2015 +0900
Committer: Akira Ajisaka 
Committed: Tue Nov 3 01:50:07 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapreduce/v2/MiniMRYarnCluster.java  | 36 
 2 files changed, 24 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04d97f8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4d6dcb8..23bef37 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -287,6 +287,9 @@ Trunk (Unreleased)
 MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
 (Steven Wong via aw)
 
+MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart.
+(Masatake Iwasaki via aajisaka)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04d97f8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 3521834..cad6f3a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -186,6 +186,27 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
 super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+super.serviceStart();
+
+//need to do this because historyServer.init creates a new Configuration
+getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
+
historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
+MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
+
+LOG.info("MiniMRYARN ResourceManager address: " +
+getConfig().get(YarnConfiguration.RM_ADDRESS));
+LOG.info("MiniMRYARN ResourceManager web address: " +
+WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
+LOG.info("MiniMRYARN HistoryServer address: " +
+getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+LOG.info("MiniMRYARN HistoryServer web address: " +
+getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
+MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
+  }
+
   private class JobHistoryServerWrapper extends AbstractService {
 public JobHistoryServerWrapper() {
   super(JobHistoryServerWrapper.class.getName());
@@ -228,21 +249,6 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
   } catch (Throwable t) {
 throw new YarnRuntimeException(t);
   }
-  //need to do this because historyServer.init creates a new Configuration
-  getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
-  
historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-  MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
-  
MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
-
-  LOG.info("MiniMRYARN ResourceManager address: " +
-   getConfig().get(YarnConfiguration.RM_ADDRESS));
-  LOG.info("MiniMRYARN ResourceManager web address: " +
-   WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
-  LOG.info("MiniMRYARN HistoryServer address: " +
-   getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-  LOG.info("MiniMRYARN HistoryServer web address: "
-  + getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
-  MRWebAppUtil.getJHSHttpPolicy() == 

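The move matters for ordering: MiniMRYarnCluster#serviceStart runs only after
super.serviceStart() has started the child services, so the history server's
resolved address is guaranteed to exist when it is copied into the cluster
config. A hedged sketch of that ordering (the classes below are simplified
stand-ins, not Hadoop's AbstractService):

    import java.util.HashMap;
    import java.util.Map;

    public class CompositeServiceExample {
      static class HistoryService {
        Map<String, String> config = new HashMap<String, String>();
        void start() {
          // The concrete address is only known once the service is up.
          config.put("mapreduce.jobhistory.address", "localhost:10020");
        }
      }

      private final Map<String, String> config = new HashMap<String, String>();
      private final HistoryService history = new HistoryService();

      void serviceStart() {
        history.start(); // children first, as super.serviceStart() would do
        // Only now is it safe to copy the child's resolved address upward.
        config.put("mapreduce.jobhistory.address",
            history.config.get("mapreduce.jobhistory.address"));
      }

      public static void main(String[] args) {
        CompositeServiceExample c = new CompositeServiceExample();
        c.serviceStart();
        System.out.println(c.config);
      }
    }
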
[26/50] [abbrv] hadoop git commit: HDFS-9339. Extend full test of KMS ACLs. Contributed by Daniel Templeton.

2015-11-04 Thread aengineer
HDFS-9339. Extend full test of KMS ACLs. Contributed by Daniel Templeton.

Change-Id: I618fa5e85250eabc1eef3d8c11f32700d6fb


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78d68908
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78d68908
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78d68908

Branch: refs/heads/HDFS-7240
Commit: 78d6890865424db850faecfc5c76f14c64925063
Parents: 9e7dcab
Author: Zhe Zhang 
Authored: Mon Nov 2 13:51:45 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 13:51:45 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 .../apache/hadoop/hdfs/TestAclsEndToEnd.java| 1042 +-
 2 files changed, 1041 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d68908/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a2e4824..fea4106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1613,6 +1613,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9229. Expose size of NameNode directory as a metric.
 (Surendra Singh Lilhore via zhz)
 
+HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78d68908/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
index de0646a..2b515d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
@@ -233,15 +233,15 @@ public class TestAclsEndToEnd {
 keyadminUgi.getUserName());
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.ROLLOVER",
 keyadminUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", " ");
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_KEYS",
 keyadminUgi.getUserName());
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
 hdfsUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", " ");
 conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
 hdfsUgi.getUserName());
-conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "");
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "*");
 
 return conf;
   }
@@ -478,6 +478,1042 @@ public class TestAclsEndToEnd {
   }
 
   /**
+   * Test that key creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateKey() throws Exception {
+Configuration conf = new Configuration();
+
+// Correct config with whitelist ACL
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertTrue("Exception during key creation with correct config"
+  + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+} finally {
+  teardown();
+}
+
+conf = new Configuration();
+
+// Correct config with default ACL
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertTrue("Exception during key creation with correct config"
+  + " using default key ACLs", createKey(realUgi, KEY2, conf));
+} finally {
+  teardown();
+}
+
+conf = new Configuration();
+
+// Denied because of blacklist
+conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+realUgi.getUserName());
+conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+realUgi.getUserName());
+
+try {
+  setup(conf);
+
+  assertFalse("Allowed key creation with blacklist 

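The three scenarios above (whitelist grant, default-key-ACL grant, blacklist
deny) reduce to one precedence rule: a blacklist match vetoes the operation
even when the op ACL and a key ACL both admit the user. A hedged JDK-only
sketch of that rule (names are illustrative, not the KMS implementation):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class KmsAclPrecedence {
      static boolean permitted(String user, Set<String> opAcl,
          Set<String> blacklist, Set<String> keyAcl) {
        if (blacklist.contains(user)) {
          return false; // a blacklist hit always denies
        }
        // Otherwise the op ACL and a key ACL (whitelist, default, or
        // per-key) must both admit the user.
        return opAcl.contains(user) && keyAcl.contains(user);
      }

      public static void main(String[] args) {
        Set<String> user = new HashSet<String>(Arrays.asList("realUser"));
        Set<String> empty = new HashSet<String>();
        System.out.println(permitted("realUser", user, user, user));  // false
        System.out.println(permitted("realUser", user, empty, user)); // true
      }
    }
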
[11/50] [abbrv] hadoop git commit: HADOOP-11919. Empty commit to test github integration.

2015-11-04 Thread aengineer
HADOOP-11919. Empty commit to test github integration.

closes apache/hadoop#40


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d39679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d39679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d39679

Branch: refs/heads/HDFS-7240
Commit: 45d39679dfc47e66fe2d69da09689fa62017637f
Parents: 6344b6a
Author: Owen O'Malley 
Authored: Fri Oct 30 10:04:30 2015 -0700
Committer: Owen O'Malley 
Committed: Fri Oct 30 10:21:03 2015 -0700

--

--




[21/50] [abbrv] hadoop git commit: HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. Contributed by Rakesh R.

2015-11-04 Thread aengineer
HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
Contributed by Rakesh R.

Change-Id: Ia3ae582405e741ca8e90d9255ab9b95d085e5fa8


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90e14055
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90e14055
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90e14055

Branch: refs/heads/HDFS-7240
Commit: 90e14055168afdb93fa8089158c03a6a694e066c
Parents: 2529464
Author: Zhe Zhang 
Authored: Mon Nov 2 07:48:30 2015 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 2 07:48:30 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../TestErasureCodingPolicyWithSnapshot.java| 199 +++
 2 files changed, 202 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90e14055/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0bbc60d..3b2d997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -189,6 +189,9 @@ Trunk (Unreleased)
 
 HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. 
(szetszwo)
 
+HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
+(Rakesh R via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90e14055/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
new file mode 100644
index 000..515763c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestErasureCodingPolicyWithSnapshot {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private Configuration conf;
+
+  private final static short GROUP_SIZE = StripedFileTestUtil.NUM_DATA_BLOCKS
+  + StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final static int SUCCESS = 0;
+  private final ErasureCodingPolicy sysDefaultPolicy = 
ErasureCodingPolicyManager
+  .getSystemDefaultPolicy();
+
+  @Before
+  public void setupCluster() throws IOException {
+conf = new HdfsConfiguration();
+cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+  }
+
+  @After
+  public void shutdownCluster() throws IOException {
+if (cluster != null) {
+  cluster.shutdown();
+}
+  }
+
+  /**
+   * Test correctness of successive snapshot creation and deletion with erasure
+   * coding policies. Create snapshot of ecDir's parent directory.
+   */
+  @Test(timeout = 120000)
+  public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
+final int len = 1024;
+final Path ecDirParent = new Path("/parent");
+final Path 

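For reference, the snapshot round trip the new test drives against its mini
cluster looks like the sketch below, using the standard DistributedFileSystem
snapshot calls; the path and snapshot names are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class SnapshotRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem fs = cluster.getFileSystem();
          Path parent = new Path("/parent");
          fs.mkdirs(parent);
          fs.allowSnapshot(parent);           // make the directory snapshottable
          fs.createSnapshot(parent, "snap1"); // visible at /parent/.snapshot/snap1
          fs.deleteSnapshot(parent, "snap1");
        } finally {
          cluster.shutdown();
        }
      }
    }
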
[10/50] [abbrv] hadoop git commit: MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary(). Contributed by Junping Du

2015-11-04 Thread aengineer
MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary(). Contributed 
by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6344b6a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6344b6a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6344b6a7

Branch: refs/heads/HDFS-7240
Commit: 6344b6a7694c70f296392b6462dba452ff762109
Parents: 6ae9efa
Author: Jason Lowe 
Authored: Fri Oct 30 15:31:38 2015 +
Committer: Jason Lowe 
Committed: Fri Oct 30 15:31:38 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt   |  6 ++
 .../hadoop/mapreduce/v2/hs/HistoryFileManager.java | 13 ++---
 2 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6344b6a7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 22f9e89..32be987 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -671,6 +671,9 @@ Release 2.7.2 - UNRELEASED
 avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+(Junping Du via jlowe)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES
@@ -904,6 +907,9 @@ Release 2.6.3 - UNRELEASED
 avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
 (zhihai xu via devaraj)
 
+MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+(Junping Du via jlowe)
+
 Release 2.6.2 - 2015-10-21
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6344b6a7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index f0786da..b221961 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -966,9 +966,16 @@ public class HistoryFileManager extends AbstractService {
 
   private String getJobSummary(FileContext fc, Path path) throws IOException {
 Path qPath = fc.makeQualified(path);
-FSDataInputStream in = fc.open(qPath);
-String jobSummaryString = in.readUTF();
-in.close();
+FSDataInputStream in = null;
+String jobSummaryString = null;
+try {
+  in = fc.open(qPath);
+  jobSummaryString = in.readUTF();
+} finally {
+  if (in != null) {
+in.close();
+  }
+}
 return jobSummaryString;
   }
 


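The try/finally above keeps the fix minimal; with Java 7's try-with-resources
the same close-on-every-path guarantee reads more compactly. A JDK-only sketch
(plain streams standing in for FileContext and FSDataInputStream):

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class ReadSummaryExample {
      static String readSummary(String path) throws IOException {
        // The stream is closed on every exit path, even if readUTF throws.
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
          return in.readUTF();
        }
      }
    }
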

Git Push Summary

2015-11-04 Thread kkaranasos
Repository: hadoop
Updated Branches:
  refs/heads/yarn-2877 [created] 566712927


hadoop git commit: Add 2.7.3 release to CHANGES.txt

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 73b9c7b82 -> 0383a3973


Add 2.7.3 release to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0383a397
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0383a397
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0383a397

Branch: refs/heads/trunk
Commit: 0383a3973b3b734fb23c331a2256dc92cff05365
Parents: 73b9c7b
Author: Jason Lowe 
Authored: Wed Nov 4 16:26:14 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:26:14 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dbf9700..4114bbd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1404,6 +1404,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12484. Single File Rename Throws Incorrectly In Potential Race
 Condition Scenarios. (Gaurav Kanade via cnauroth)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2def995..530ed2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 23bef37..f30f0ef 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -622,6 +622,18 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6515. Update Application priority in AM side from AM-RM heartbeat
(Sunil G via jlowe)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0383a397/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d6ad672..1784d6e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1042,6 +1042,18 @@ Release 2.8.0 - UNRELEASED
 YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no 
longer 
 binds to default port 8188. (Meng Ding via wangda)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: Add 2.7.3 release to CHANGES.txt (cherry picked from commit 0383a3973b3b734fb23c331a2256dc92cff05365)

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7029fc356 -> 7499f71e7


Add 2.7.3 release to CHANGES.txt
(cherry picked from commit 0383a3973b3b734fb23c331a2256dc92cff05365)

Conflicts:

hadoop-common-project/hadoop-common/CHANGES.txt
hadoop-mapreduce-project/CHANGES.txt
hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7499f71e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7499f71e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7499f71e

Branch: refs/heads/branch-2.7
Commit: 7499f71e77912676893ad5c5151bbfe54849a1bc
Parents: 7029fc3
Author: Jason Lowe 
Authored: Wed Nov 4 16:29:56 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:29:56 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7499f71e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2361bac..07d9ef0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop Change Log
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7499f71e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 314caa8..bd92181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop HDFS Change Log
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7499f71e/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c7abeb5..3deafc6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop MapReduce Change Log
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7499f71e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5d6cd80..17974e1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop YARN Change Log
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. Contributed by Chang Li (cherry picked from commit b9d25c3ee2d20166d6a786c5a16cc001e249f61c)

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8334d8647 -> b9389ac5b


HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. 
Contributed by Chang Li
(cherry picked from commit b9d25c3ee2d20166d6a786c5a16cc001e249f61c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9389ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9389ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9389ac5

Branch: refs/heads/branch-2
Commit: b9389ac5b5c9804c509794994dac80078ae49e2a
Parents: 8334d86
Author: Jason Lowe 
Authored: Wed Nov 4 16:34:01 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:35:12 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../security/JniBasedUnixGroupsNetgroupMapping.c  | 18 +-
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9389ac5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b9260e3..beb3b9e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -803,6 +803,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12296. when setnetgrent returns 0 in linux, exception should be
+thrown (Chang Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9389ac5/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index de73a8a..4ae1051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -57,6 +57,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   int setnetgrentCalledFlag = 0;
 
   // if not NULL then THROW exception
+  char *errorType = NULL;
   char *errorMessage = NULL;
 
   cgroup = (*env)->GetStringUTFChars(env, jgroup, NULL);
@@ -94,7 +95,14 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   }
 }
   }
-
+#if defined(__linux__)
+  else {
+errorType = "java/io/IOException";
+errorMessage =
+"no netgroup of this name is known or some other error occurred";
+goto END;
+  }
+#endif
   //--
   // build return data (java array)
 
@@ -103,7 +111,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 (*env)->FindClass(env, "java/lang/String"),
 NULL);
   if (jusers == NULL) {
-errorMessage = "java/lang/OutOfMemoryError";
+errorType = "java/lang/OutOfMemoryError";
 goto END;
   }
 
@@ -114,7 +122,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   for(current = userListHead; current != NULL; current = current->next) {
 jstring juser = (*env)->NewStringUTF(env, current->string);
 if (juser == NULL) {
-  errorMessage = "java/lang/OutOfMemoryError";
+  errorType = "java/lang/OutOfMemoryError";
   goto END;
 }
 (*env)->SetObjectArrayElement(env, jusers, i++, juser);
@@ -134,8 +142,8 @@ END:
   }
 
   // return results or THROW
-  if(errorMessage) {
-THROW(env, errorMessage, NULL);
+  if(errorType) {
+THROW(env, errorType, errorMessage);
 return NULL;
   } else {
 return jusers;

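On the Java side, THROW(env, errorType, errorMessage) amounts to instantiating
the named exception class with the message and throwing it; separating the two
strings is what lets the IOException carry a real message instead of the class
name doing double duty. A hedged JDK-only sketch of that behavior (the
reflection is illustrative, not the JNI helper itself):

    public class ThrowByName {
      static void throwByName(String type, String message) throws Exception {
        // FindClass + ThrowNew in the native helper correspond to this lookup.
        throw (Exception) Class.forName(type)
            .getConstructor(String.class).newInstance(message);
      }

      public static void main(String[] args) {
        try {
          throwByName("java.io.IOException",
              "no netgroup of this name is known or some other error occurred");
        } catch (Exception e) {
          System.out.println(e);
        }
      }
    }
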


hadoop git commit: HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. Contributed by Chang Li

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0383a3973 -> b9d25c3ee


HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. 
Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9d25c3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9d25c3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9d25c3e

Branch: refs/heads/trunk
Commit: b9d25c3ee2d20166d6a786c5a16cc001e249f61c
Parents: 0383a39
Author: Jason Lowe 
Authored: Wed Nov 4 16:34:01 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:34:01 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../security/JniBasedUnixGroupsNetgroupMapping.c  | 18 +-
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4114bbd..efb73f4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1416,6 +1416,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12296. when setnetgrent returns 0 in linux, exception should be
+thrown (Chang Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9d25c3e/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index de73a8a..4ae1051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -57,6 +57,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   int setnetgrentCalledFlag = 0;
 
   // if not NULL then THROW exception
+  char *errorType = NULL;
   char *errorMessage = NULL;
 
   cgroup = (*env)->GetStringUTFChars(env, jgroup, NULL);
@@ -94,7 +95,14 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   }
 }
   }
-
+#if defined(__linux__)
+  else {
+errorType = "java/io/IOException";
+errorMessage =
+"no netgroup of this name is known or some other error occurred";
+goto END;
+  }
+#endif
   //--
   // build return data (java array)
 
@@ -103,7 +111,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 (*env)->FindClass(env, "java/lang/String"),
 NULL);
   if (jusers == NULL) {
-errorMessage = "java/lang/OutOfMemoryError";
+errorType = "java/lang/OutOfMemoryError";
 goto END;
   }
 
@@ -114,7 +122,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   for(current = userListHead; current != NULL; current = current->next) {
 jstring juser = (*env)->NewStringUTF(env, current->string);
 if (juser == NULL) {
-  errorMessage = "java/lang/OutOfMemoryError";
+  errorType = "java/lang/OutOfMemoryError";
   goto END;
 }
 (*env)->SetObjectArrayElement(env, jusers, i++, juser);
@@ -134,8 +142,8 @@ END:
   }
 
   // return results or THROW
-  if(errorMessage) {
-THROW(env, errorMessage, NULL);
+  if(errorType) {
+THROW(env, errorType, errorMessage);
 return NULL;
   } else {
 return jusers;



hadoop git commit: Add 2.7.3 release to CHANGES.txt (cherry picked from commit 0383a3973b3b734fb23c331a2256dc92cff05365)

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b61ccaec -> 8334d8647


Add 2.7.3 release to CHANGES.txt
(cherry picked from commit 0383a3973b3b734fb23c331a2256dc92cff05365)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8334d864
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8334d864
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8334d864

Branch: refs/heads/branch-2
Commit: 8334d86472a3f9bd6896bf63f5ce9a14a0b8bf85
Parents: 0b61cca
Author: Jason Lowe 
Authored: Wed Nov 4 16:26:14 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:27:28 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8334d864/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9325ed5..b9260e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -791,6 +791,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12484. Single File Rename Throws Incorrectly In Potential Race
 Condition Scenarios. (Gaurav Kanade via cnauroth)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8334d864/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9b153c..e82e332 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1388,6 +1388,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8334d864/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 113fc6b..bab1e02 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -338,6 +338,18 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6515. Update Application priority in AM side from AM-RM heartbeat
(Sunil G via jlowe)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8334d864/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 25b50ba..5a9d1c6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -990,6 +990,18 @@ Release 2.8.0 - UNRELEASED
 YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no 
longer 
 binds to default port 8188. (Meng Ding via wangda)
 
+Release 2.7.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. Contributed by Chang Li (cherry picked from commit b9d25c3ee2d20166d6a786c5a16cc001e249f61c)

2015-11-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7499f71e7 -> abfc710b0


HADOOP-12296. when setnetgrent returns 0 in linux, exception should be thrown. 
Contributed by Chang Li
(cherry picked from commit b9d25c3ee2d20166d6a786c5a16cc001e249f61c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abfc710b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abfc710b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abfc710b

Branch: refs/heads/branch-2.7
Commit: abfc710b0bb83b4c6db83ede9b01e3c0d26d2738
Parents: 7499f71
Author: Jason Lowe 
Authored: Wed Nov 4 16:34:01 2015 +
Committer: Jason Lowe 
Committed: Wed Nov 4 16:35:36 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../security/JniBasedUnixGroupsNetgroupMapping.c  | 18 +-
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abfc710b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 07d9ef0..561d8a2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12296. when setnetgrent returns 0 in linux, exception should be
+thrown (Chang Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abfc710b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index de73a8a..4ae1051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -57,6 +57,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   int setnetgrentCalledFlag = 0;
 
   // if not NULL then THROW exception
+  char *errorType = NULL;
   char *errorMessage = NULL;
 
   cgroup = (*env)->GetStringUTFChars(env, jgroup, NULL);
@@ -94,7 +95,14 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   }
 }
   }
-
+#if defined(__linux__)
+  else {
+errorType = "java/io/IOException";
+errorMessage =
+"no netgroup of this name is known or some other error occurred";
+goto END;
+  }
+#endif
   //--
   // build return data (java array)
 
@@ -103,7 +111,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
 (*env)->FindClass(env, "java/lang/String"),
 NULL);
   if (jusers == NULL) {
-errorMessage = "java/lang/OutOfMemoryError";
+errorType = "java/lang/OutOfMemoryError";
 goto END;
   }
 
@@ -114,7 +122,7 @@ 
Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   for(current = userListHead; current != NULL; current = current->next) {
 jstring juser = (*env)->NewStringUTF(env, current->string);
 if (juser == NULL) {
-  errorMessage = "java/lang/OutOfMemoryError";
+  errorType = "java/lang/OutOfMemoryError";
   goto END;
 }
 (*env)->SetObjectArrayElement(env, jusers, i++, juser);
@@ -134,8 +142,8 @@ END:
   }
 
   // return results or THROW
-  if(errorMessage) {
-THROW(env, errorMessage, NULL);
+  if(errorType) {
+THROW(env, errorType, errorMessage);
 return NULL;
   } else {
 return jusers;



hadoop git commit: HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by Surendra Singh Lilhore.

2015-11-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b9389ac5b -> b61aa716e


HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61aa716
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61aa716
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61aa716

Branch: refs/heads/branch-2
Commit: b61aa716e8c2dc26f889f846b6bb84b90aecd5af
Parents: b9389ac
Author: Haohui Mai 
Authored: Wed Nov 4 09:16:43 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 09:17:01 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 ++--
 .../hadoop-hdfs/src/main/webapps/static/hadoop.css   | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61aa716/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e82e332..527a8ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1388,6 +1388,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+HDFS-9357. NN UI renders icons of decommissioned DN incorrectly.
+(Surendra Singh Lilhore via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61aa716/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index e46ce7f..08199fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -285,8 +285,8 @@
   
 In service
 Down
-Decommisioned
-Decommissioned  dead
+Decommissioned
+Decommissioned  dead
   
 
 In operation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61aa716/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 58c3cb5..2ed5f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -235,7 +235,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e013";
 }
 
-.dfshealth-node-decommisioned:before {
+.dfshealth-node-decommissioned:before {
 color: #eea236;
 content: "\e136";
 }
@@ -245,7 +245,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e101";
 }
 
-.dfshealth-node-down-decommisioned:before {
+.dfshealth-node-down-decommissioned:before {
 color: #2e6da6;
 content: "\e017";
 }



hadoop git commit: HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by Surendra Singh Lilhore.

2015-11-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk b9d25c3ee -> 0eed886a1


HDFS-9357. NN UI renders icons of decommissioned DN incorrectly. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0eed886a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0eed886a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0eed886a

Branch: refs/heads/trunk
Commit: 0eed886a165f5a0850ddbfb1d5f98c7b5e379fb3
Parents: b9d25c3
Author: Haohui Mai 
Authored: Wed Nov 4 09:16:43 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 09:16:43 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 4 ++--
 .../hadoop-hdfs/src/main/webapps/static/hadoop.css   | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 530ed2d..bdcc1fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2231,6 +2231,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
+HDFS-9357. NN UI renders icons of decommissioned DN incorrectly.
+(Surendra Singh Lilhore via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index e46ce7f..08199fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -285,8 +285,8 @@
   
 In service
 Down
-Decommisioned
-Decommissioned  dead
+Decommissioned
+Decommissioned  dead
   
 
 In operation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0eed886a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 58c3cb5..2ed5f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -235,7 +235,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e013";
 }
 
-.dfshealth-node-decommisioned:before {
+.dfshealth-node-decommissioned:before {
 color: #eea236;
 content: "\e136";
 }
@@ -245,7 +245,7 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 content: "\e101";
 }
 
-.dfshealth-node-down-decommisioned:before {
+.dfshealth-node-down-decommissioned:before {
 color: #2e6da6;
 content: "\e017";
 }



hadoop git commit: HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in commitBlock. Contributed by Chang Li.

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 abfc710b0 -> 397b554c3


HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in 
commitBlock. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/397b554c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/397b554c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/397b554c

Branch: refs/heads/branch-2.7
Commit: 397b554c36867724ca4167931270cd7af784e54a
Parents: abfc710
Author: Kihwal Lee 
Authored: Wed Nov 4 12:10:59 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:10:59 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   2 +-
 .../BlockInfoContiguousUnderConstruction.java   |   2 +-
 .../server/blockmanagement/BlockManager.java|   4 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  69 +
 .../TestCommitBlockWithInvalidGenStamp.java | 100 +++
 6 files changed, 178 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bd92181..45ce310 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index def829c..3c8b2d3 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -226,7 +226,7 @@ public class DFSOutputStream extends FSOutputSummer
   //
   class DataStreamer extends Daemon {
 private volatile boolean streamerClosed = false;
-private ExtendedBlock block; // its length is number of bytes acked
+private volatile ExtendedBlock block; // its length is number of bytes acked
 private Token accessToken;
 private DataOutputStream blockStream;
 private DataInputStream blockReplyStream;
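
For context on the one-word change above: a minimal, self-contained sketch of
the visibility hazard that marking a mutable reference volatile removes. All
class and field names here are invented for illustration and are not taken
from Hadoop; one thread swaps the reference while another reads it, and
without volatile the reader may never observe the update.

import java.util.concurrent.TimeUnit;

public class VolatileReferenceSketch {
  // Declared volatile so the reader thread is guaranteed to observe the
  // writer's update; without it, the reader may spin on a stale value
  // indefinitely on some JVMs and architectures.
  private static volatile String block = "block-initial";

  public static void main(String[] args) throws InterruptedException {
    Thread reader = new Thread(() -> {
      while ("block-initial".equals(block)) {
        // busy-wait until the new reference becomes visible
      }
      System.out.println("reader observed: " + block);
    });
    reader.start();
    TimeUnit.MILLISECONDS.sleep(100);
    block = "block-updated"; // main thread plays the role of the streamer
    reader.join();
  }
}

The same reasoning applies to DataStreamer#block: the streamer thread
replaces the ExtendedBlock while other threads read it, so the field needs
volatile for safe publication.
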

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 92153ab..4f315c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -274,7 +274,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
   throw new IOException("Trying to commit inconsistent block: id = "
   + block.getBlockId() + ", expected id = " + getBlockId());
 blockUCState = BlockUCState.COMMITTED;
-this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+this.setNumBytes(block.getNumBytes());
 // Sort out invalid replicas.
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
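
A sketch of the second half of the fix, with invented names and simplified
logic; the real patch delegates to setGenerationStampAndVerifyReplicas
(above) and adds a check in BlockManager (below), but the idea is that commit
no longer copies the client-reported generation stamp blindly.

import java.io.IOException;

public class GenStampCommitSketch {
  private long storedGenStamp = 2;  // bumped, e.g., by pipeline recovery
  private long numBytes;

  void commit(long reportedNumBytes, long reportedGenStamp) throws IOException {
    if (reportedGenStamp != storedGenStamp) {
      // A client holding a pre-recovery view of the block is rejected.
      throw new IOException("Commit block with mismatching GS. Stored="
          + storedGenStamp + ", reported=" + reportedGenStamp);
    }
    this.numBytes = reportedNumBytes; // only the acked length is taken
  }

  public static void main(String[] args) {
    GenStampCommitSketch b = new GenStampCommitSketch();
    try {
      b.commit(100, 1); // stale generation stamp
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
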

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c360d4c..63a7aed 100644
--- 

[2/2] hadoop git commit: HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails intermittently due to assumption that a lease error will be thrown. Contributed by Gaurav Kanade.

2015-11-04 Thread cnauroth
HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails 
intermittently due to assumption that a lease error will be thrown. Contributed 
by Gaurav Kanade.

(cherry picked from commit 0fb1867fd62b5df664ad66386d6067db8fbf2317)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81f7e8af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81f7e8af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81f7e8af

Branch: refs/heads/branch-2
Commit: 81f7e8af2205b0e25d444adf3ecf036ac188f014
Parents: 8ed1fd2
Author: cnauroth 
Authored: Wed Nov 4 10:19:04 2015 -0800
Committer: cnauroth 
Committed: Wed Nov 4 10:29:01 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../TestAzureFileSystemInstrumentation.java | 25 +---
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81f7e8af/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index beb3b9e..1d6cd57 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -707,6 +707,10 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
 
+HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics
+fails intermittently due to assumption that a lease error will be thrown.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81f7e8af/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
index 896ec1b..0c9126c 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.hamcrest.BaseMatcher;
@@ -405,22 +406,30 @@ public class TestAzureFileSystemInstrumentation {
 
   @Test
   public void testClientErrorMetrics() throws Exception {
-String directoryName = "metricsTestDirectory_ClientError";
-Path directoryPath = new Path("/" + directoryName);
-assertTrue(fs.mkdirs(directoryPath));
-String leaseID = testAccount.acquireShortLease(directoryName);
+String fileName = "metricsTestFile_ClientError";
+Path filePath = new Path("/"+fileName);
+final int FILE_SIZE = 100;
+OutputStream outputStream = null;
+String leaseID = null;
 try {
+  // Create a file
+  outputStream = fs.create(filePath);
+  leaseID = testAccount.acquireShortLease(fileName);
   try {
-fs.delete(directoryPath, true);
-assertTrue("Should've thrown.", false);
+outputStream.write(new byte[FILE_SIZE]);
+outputStream.close();
+assertTrue("Should've thrown", false);
   } catch (AzureException ex) {
 assertTrue("Unexpected exception: " + ex,
-ex.getMessage().contains("lease"));
+  ex.getMessage().contains("lease"));
   }
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
 } finally {
-  testAccount.releaseLease(leaseID, directoryName);
+  if(leaseID != null){
+testAccount.releaseLease(leaseID, fileName);
+  }
+  IOUtils.closeStream(outputStream);
 }
   }
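
The rewritten test also illustrates a general cleanup idiom worth noting:
acquire resources into locals initialized to null, then release in finally
only what was actually acquired. A standalone sketch follows; the helper
names are invented, and IOUtils.closeStream is stood in by a local
closeQuietly.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class LeaseCleanupSketch {
  public static void main(String[] args) {
    OutputStream out = null;
    String leaseId = null;
    try {
      out = new ByteArrayOutputStream(); // stands in for fs.create(filePath)
      leaseId = "lease-123";             // stands in for acquireShortLease(...)
      // ... exercise the code under test and assert on the outcome ...
    } finally {
      if (leaseId != null) {
        System.out.println("releasing " + leaseId); // stands in for releaseLease(...)
      }
      closeQuietly(out); // never throws, so it cannot mask a test failure
    }
  }

  static void closeQuietly(OutputStream out) {
    if (out == null) {
      return;
    }
    try {
      out.close();
    } catch (IOException ignored) {
      // swallowed deliberately, mirroring IOUtils.closeStream semantics
    }
  }
}
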
 



[1/2] hadoop git commit: HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails intermittently due to assumption that a lease error will be thrown. Contributed by Gaurav Kanade.

2015-11-04 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8ed1fd217 -> 81f7e8af2
  refs/heads/trunk e2a5441b0 -> 0fb1867fd


HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics fails 
intermittently due to assumption that a lease error will be thrown. Contributed 
by Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fb1867f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fb1867f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fb1867f

Branch: refs/heads/trunk
Commit: 0fb1867fd62b5df664ad66386d6067db8fbf2317
Parents: e2a5441
Author: cnauroth 
Authored: Wed Nov 4 10:19:04 2015 -0800
Committer: cnauroth 
Committed: Wed Nov 4 10:28:44 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 
 .../TestAzureFileSystemInstrumentation.java | 25 +---
 2 files changed, 21 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index efb73f4..dd70947 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1320,6 +1320,10 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
 
+HADOOP-12540. TestAzureFileSystemInstrumentation#testClientErrorMetrics
+fails intermittently due to assumption that a lease error will be thrown.
+(Gaurav Kanade via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fb1867f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
index 896ec1b..0c9126c 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestAzureFileSystemInstrumentation.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.hamcrest.BaseMatcher;
@@ -405,22 +406,30 @@ public class TestAzureFileSystemInstrumentation {
 
   @Test
   public void testClientErrorMetrics() throws Exception {
-String directoryName = "metricsTestDirectory_ClientError";
-Path directoryPath = new Path("/" + directoryName);
-assertTrue(fs.mkdirs(directoryPath));
-String leaseID = testAccount.acquireShortLease(directoryName);
+String fileName = "metricsTestFile_ClientError";
+Path filePath = new Path("/"+fileName);
+final int FILE_SIZE = 100;
+OutputStream outputStream = null;
+String leaseID = null;
 try {
+  // Create a file
+  outputStream = fs.create(filePath);
+  leaseID = testAccount.acquireShortLease(fileName);
   try {
-fs.delete(directoryPath, true);
-assertTrue("Should've thrown.", false);
+outputStream.write(new byte[FILE_SIZE]);
+outputStream.close();
+assertTrue("Should've thrown", false);
   } catch (AzureException ex) {
 assertTrue("Unexpected exception: " + ex,
-ex.getMessage().contains("lease"));
+  ex.getMessage().contains("lease"));
   }
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_CLIENT_ERRORS));
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
 } finally {
-  testAccount.releaseLease(leaseID, directoryName);
+  if(leaseID != null){
+testAccount.releaseLease(leaseID, fileName);
+  }
+  IOUtils.closeStream(outputStream);
 }
   }
 



hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b61aa716e -> c8ffea3db


fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8ffea3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8ffea3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8ffea3d

Branch: refs/heads/branch-2
Commit: c8ffea3db77125b36df93c4970f6349049bb2673
Parents: b61aa71
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:58 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ffea3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 527a8ba..b7f24ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1382,9 +1382,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -1403,6 +1400,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic replica. (Tony Wu via lei)

2015-11-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0fb1867fd -> 566712927


HDFS-9363. Add fetchReplica to FsDatasetTestUtils to return FsDataset-agnostic 
replica. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56671292
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56671292
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56671292

Branch: refs/heads/trunk
Commit: 5667129276c3123ecb0a96b78d5897431c47a9d5
Parents: 0fb1867
Author: Lei Xu 
Authored: Wed Nov 4 10:46:19 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:49:28 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestPipelines.java   | 6 ++
 .../hadoop/hdfs/server/datanode/FsDatasetTestUtils.java   | 7 +++
 .../datanode/fsdataset/impl/FsDatasetImplTestUtils.java   | 5 +
 .../datanode/fsdataset/impl/TestInterDatanodeProtocol.java| 5 +++--
 5 files changed, 20 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f3ff11..ef1152e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1623,6 +1623,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
 filesystem entirely allocated for DFS use. (Tony Wu via lei)
 
+HDFS-9363. Add fetchReplica() to FsDatasetTestUtils to return FsDataset-agnostic
+replica. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index e4fea60..c9831b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -102,10 +101,9 @@ public class TestPipelines {
 List lb = cluster.getNameNodeRpc().getBlockLocations(
   filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-String bpid = cluster.getNamesystem().getBlockPoolId();
 for (DataNode dn : cluster.getDataNodes()) {
-  Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
-  .getBlock().getBlockId());
+  Replica r =
+  cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
   assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
   assertEquals("Should be RBW replica on " + dn

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56671292/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 40c4438..02af467 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -206,4 +206,11 @@ public interface FsDatasetTestUtils {
* @throws IOException on I/O error.
*/
   void injectCorruptReplica(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the replica of a block. Returns null if it does not exist.
+   * @param block the block whose replica will be returned.
+   * @return Replica for the block.
+   */
+  Replica fetchReplica(ExtendedBlock block);
 }
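
A compact sketch of the pattern this interface method extends; every name
other than fetchReplica is invented here. Tests program against an
implementation-agnostic interface and each FsDataset implementation supplies
its own utils, so test code like TestPipelines above no longer reaches into a
concrete dataset class.

public class DatasetTestUtilsSketch {
  interface DatasetTestUtils {
    /** Returns the replica for the given block id, or null if absent. */
    String fetchReplica(long blockId);
  }

  // One possible implementation; a different dataset backend would supply
  // its own DatasetTestUtils without changing any test code.
  static class InMemoryDatasetTestUtils implements DatasetTestUtils {
    @Override
    public String fetchReplica(long blockId) {
      return blockId == 42L ? "replica-42" : null;
    }
  }

  public static void main(String[] args) {
    DatasetTestUtils utils = new InMemoryDatasetTestUtils();
    System.out.println(utils.fetchReplica(42L)); // replica-42
    System.out.println(utils.fetchReplica(7L));  // null
  }
}
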


hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0eed886a1 -> 3fb1ece4e


fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb1ece4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb1ece4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb1ece4

Branch: refs/heads/trunk
Commit: 3fb1ece4e9b290ad4a0b6357a519b20f59561911
Parents: 0eed886
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb1ece4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bdcc1fc..500dc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,9 +2225,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -2246,6 +2243,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. Contributed by Xiaobing Zhou."

2015-11-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fb1ece4e -> 88beb46cf


Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. 
Contributed by Xiaobing Zhou."

This reverts commit 84cbd72afda6344e220526fac5c560f00f84e374.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88beb46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88beb46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88beb46c

Branch: refs/heads/trunk
Commit: 88beb46cf6e6fd3e51f73a411a2750de7595e326
Parents: 3fb1ece
Author: Haohui Mai 
Authored: Wed Nov 4 10:21:13 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 10:21:13 2015 -0800

--
 .../org/apache/hadoop/security/token/Token.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 -
 .../web/webhdfs/DataNodeUGIProvider.java| 106 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java|   2 +-
 .../src/main/resources/hdfs-default.xml |   8 -
 .../web/webhdfs/TestDataNodeUGIProvider.java| 231 ---
 7 files changed, 19 insertions(+), 346 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index f189a96..2420155 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-import com.google.common.primitives.Bytes;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,11 +29,9 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.ServiceLoader;
-import java.util.UUID;
 
 /**
  * The client-side form of the token.
@@ -341,12 +337,7 @@ public class Token implements 
Writable {
 identifierToString(buffer);
 return buffer.toString();
   }
-
-  public String buildCacheKey() {
-return UUID.nameUUIDFromBytes(
-Bytes.concat(kind.getBytes(), identifier, password)).toString();
-  }
-
+  
   private static ServiceLoader renewers =
   ServiceLoader.load(TokenRenewer.class);
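
For reference, the removed buildCacheKey() (visible in the deleted lines
above) derived a deterministic name-based UUID from the token's kind,
identifier, and password. A dependency-free sketch of the same computation,
with Guava's Bytes.concat replaced by System.arraycopy; the kind string below
is made up for the example.

import java.nio.charset.StandardCharsets;
import java.util.UUID;

public class TokenCacheKeySketch {
  static String buildCacheKey(byte[] kind, byte[] identifier, byte[] password) {
    byte[] all = new byte[kind.length + identifier.length + password.length];
    System.arraycopy(kind, 0, all, 0, kind.length);
    System.arraycopy(identifier, 0, all, kind.length, identifier.length);
    System.arraycopy(password, 0, all, kind.length + identifier.length, password.length);
    // nameUUIDFromBytes is deterministic: equal inputs yield equal UUIDs,
    // which is what makes the result usable as a cache key.
    return UUID.nameUUIDFromBytes(all).toString();
  }

  public static void main(String[] args) {
    byte[] kind = "WEBHDFS delegation".getBytes(StandardCharsets.UTF_8); // example kind
    byte[] id = {1, 2, 3};
    byte[] pw = {4, 5, 6};
    System.out.println(buildCacheKey(kind, id, pw)); // stable across runs
  }
}
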
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 500dc92..f2d8296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2152,9 +2152,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9160. [OIV-Doc] : Missing details of 'delimited' for processor options
 (nijel via vinayakumarb)
 
-HDFS-8855. Webhdfs client leaks active NameNode connections.
-(Xiaobing Zhou via jitendra) 
-
 HDFS-9235. hdfs-native-client build getting errors when built with cmake
 2.6. (Eric Payne via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88beb46c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 424f963..c14ce20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -70,10 +70,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_NETTY_HIGH_WATERMARK =
   "dfs.webhdfs.netty.high.watermark";
   public static final int  DFS_WEBHDFS_NETTY_HIGH_WATERMARK_DEFAULT = 65535;
-  public static final String  DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY =
-  "dfs.webhdfs.ugi.expire.after.access";
-  public static final int DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
-  10*60*1000; //10 minutes
 
   // HA related configuration
   public static final String  
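
The removed keys above configured how long the reverted change cached UGIs on
the datanode, ten minutes by default. The DataNodeUGIProvider wiring is not
shown in this digest, so the following is only an assumed shape: a Guava
expire-after-access cache (Guava must be on the classpath), keyed by the
token cache key from the also-reverted Token#buildCacheKey.

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class UgiCacheSketch {
  public static void main(String[] args) {
    // 10*60*1000 ms mirrors the removed
    // DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT constant.
    Cache<String, String> ugiCache = CacheBuilder.newBuilder()
        .expireAfterAccess(10 * 60 * 1000, TimeUnit.MILLISECONDS)
        .build();
    ugiCache.put("token-cache-key", "cached-ugi"); // values are UGIs in the real code
    System.out.println(ugiCache.getIfPresent("token-cache-key"));
  }
}
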

hadoop git commit: Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. Contributed by Xiaobing Zhou."

2015-11-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3f2be7b18 -> 6d9277819


Revert "HDFS-8855. Webhdfs client leaks active NameNode connections. 
Contributed by Xiaobing Zhou."

This reverts commit e7203b69548a7926a345e2b18f0a227f7f8ca52c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d927781
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d927781
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d927781

Branch: refs/heads/branch-2
Commit: 6d92778191c7db598eaf36bdc05398b3714437a9
Parents: 3f2be7b
Author: Haohui Mai 
Authored: Wed Nov 4 10:21:57 2015 -0800
Committer: Haohui Mai 
Committed: Wed Nov 4 10:22:21 2015 -0800

--
 .../org/apache/hadoop/security/token/Token.java |  11 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 -
 .../web/webhdfs/DataNodeUGIProvider.java| 106 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java|   2 +-
 .../src/main/resources/hdfs-default.xml |   8 -
 .../web/webhdfs/TestDataNodeUGIProvider.java| 231 ---
 7 files changed, 19 insertions(+), 346 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d927781/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index 5bea120..24d6c1e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-import com.google.common.primitives.Bytes;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,11 +29,9 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.*;
-import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.ServiceLoader;
-import java.util.UUID;
 
 /**
  * The client-side form of the token.
@@ -341,12 +337,7 @@ public class Token implements 
Writable {
 identifierToString(buffer);
 return buffer.toString();
   }
-
-  public String buildCacheKey() {
-return UUID.nameUUIDFromBytes(
-Bytes.concat(kind.getBytes(), identifier, password)).toString();
-  }
-
+  
   private static ServiceLoader renewers =
   ServiceLoader.load(TokenRenewer.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d927781/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dfea3f3..db9cc29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1311,9 +1311,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9160. [OIV-Doc] : Missing details of 'delimited' for processor options
 (nijel via vinayakumarb)
 
-HDFS-8855. Webhdfs client leaks active NameNode connections.
-(Xiaobing Zhou via jitendra) 
-
 HDFS-9235. hdfs-native-client build getting errors when built with cmake
 2.6. (Eric Payne via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d927781/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9834108..1e6143c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -69,10 +69,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_NETTY_HIGH_WATERMARK =
   "dfs.webhdfs.netty.high.watermark";
   public static final int  DFS_WEBHDFS_NETTY_HIGH_WATERMARK_DEFAULT = 65535;
-  public static final String  DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY =
-  "dfs.webhdfs.ugi.expire.after.access";
-  public static final int DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_DEFAULT =
-  10*60*1000; //10 minutes
 
   // HA related configuration
   public static final 

hadoop git commit: HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)

2015-11-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 88beb46cf -> ec414600e


HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec414600
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec414600
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec414600

Branch: refs/heads/trunk
Commit: ec414600ede8e305c584818565b50e055ea5d2b5
Parents: 88beb46
Author: Lei Xu 
Authored: Tue Nov 3 14:17:11 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:22:17 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  65 ++-
 .../blockmanagement/BlockPlacementPolicy.java   |  53 --
 .../BlockPlacementPolicyDefault.java|  57 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 ++--
 .../BlockPlacementPolicyWithUpgradeDomain.java  |  84 +++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   9 +-
 .../hdfs/server/balancer/TestBalancer.java  | 103 ++-
 .../blockmanagement/TestBlockManager.java   |  13 +-
 .../blockmanagement/TestReplicationPolicy.java  |  93 +++---
 .../TestReplicationPolicyWithNodeGroup.java |   6 +-
 .../TestReplicationPolicyWithUpgradeDomain.java | 171 +++
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  10 +-
 13 files changed, 503 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f2d8296..fd560d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1618,6 +1618,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
 
+HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec414600/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 5b3eb36..9f9cdc0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicies;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -124,6 +125,7 @@ public class Dispatcher {
   private final int ioFileBufferSize;
 
   private final boolean connectToDnViaHostname;
+  private BlockPlacementPolicies placementPolicies;
 
   static class Allocator {
 private final int max;
@@ -949,6 +951,7 @@ public class Dispatcher {
 this.connectToDnViaHostname = conf.getBoolean(
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
+placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
   }
 
   public DistributedFileSystem getDistributedFileSystem() {
@@ -1166,66 +1169,24 @@ public class Dispatcher {
   }
 }
 
-if (cluster.isNodeGroupAware()
-&& isOnSameNodeGroupWithReplicas(source, target, block)) {
-  return false;
-}
-if (reduceNumOfRacks(source, target, block)) {
+if (!isGoodBlockCandidateForPlacementPolicy(source, target, block)) {
   return false;
 }
 return true;
   }
 
-  /**
-   * Determine whether moving the given block replica from source to target
-   * would reduce the number of racks of the block replicas.
-   */
-  private boolean reduceNumOfRacks(StorageGroup source, StorageGroup target,
-  DBlock block) {
-  
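
Although the hunk is cut off here, the shape of the refactoring is visible:
hand-coded rack and node-group checks are replaced by a single question to a
pluggable placement policy. A toy sketch of that delegation follows; all
names are invented, and the upgrade-domain rule is deliberately simplified.

public class PlacementDelegationSketch {
  interface PlacementPolicy {
    /** True if moving a replica from source to target keeps placement valid. */
    boolean isMovable(String source, String target);
  }

  static boolean isGoodBlockCandidate(PlacementPolicy policy, String src, String dst) {
    // The balancer no longer encodes any placement rule itself; whatever
    // policy the cluster is configured with (rack, node group, upgrade
    // domain, ...) is honored automatically.
    return policy.isMovable(src, dst);
  }

  public static void main(String[] args) {
    // Toy rule: never move a replica within its own upgrade domain.
    PlacementPolicy upgradeDomainRule =
        (src, dst) -> !src.split("/")[0].equals(dst.split("/")[0]);
    System.out.println(isGoodBlockCandidate(upgradeDomainRule, "ud1/dn1", "ud1/dn2")); // false
    System.out.println(isGoodBlockCandidate(upgradeDomainRule, "ud1/dn1", "ud2/dn3")); // true
  }
}
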

hadoop git commit: HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)

2015-11-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c8ffea3db -> 3f2be7b18


HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f2be7b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f2be7b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f2be7b1

Branch: refs/heads/branch-2
Commit: 3f2be7b18f92a8d7685f0f5ad26f43e1a261b474
Parents: c8ffea3
Author: Lei Xu 
Authored: Wed Nov 4 10:21:14 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:21:58 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  66 ++-
 .../blockmanagement/BlockPlacementPolicy.java   |  53 --
 .../BlockPlacementPolicyDefault.java|  57 ---
 .../BlockPlacementPolicyWithNodeGroup.java  |  35 ++--
 .../BlockPlacementPolicyWithUpgradeDomain.java  |  84 +++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   9 +-
 .../hdfs/server/balancer/TestBalancer.java  | 103 ++-
 .../blockmanagement/TestBlockManager.java   |  13 +-
 .../blockmanagement/TestReplicationPolicy.java  |  93 +++---
 .../TestReplicationPolicyWithNodeGroup.java |   6 +-
 .../TestReplicationPolicyWithUpgradeDomain.java | 171 +++
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  10 +-
 13 files changed, 504 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f2be7b1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7f24ff..dfea3f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -771,6 +771,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
 
+HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f2be7b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 509b652..151ab09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -124,6 +125,7 @@ public class Dispatcher {
   private final int ioFileBufferSize;
 
   private final boolean connectToDnViaHostname;
+  private BlockPlacementPolicy placementPolicy;
 
   static class Allocator {
 private final int max;
@@ -888,6 +890,8 @@ public class Dispatcher {
 this.connectToDnViaHostname = conf.getBoolean(
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
 HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
+this.placementPolicy =
+BlockPlacementPolicy.getInstance(conf, null, cluster, null);
   }
 
   public DistributedFileSystem getDistributedFileSystem() {
@@ -1106,66 +1110,24 @@ public class Dispatcher {
   }
 }
 
-if (cluster.isNodeGroupAware()
-&& isOnSameNodeGroupWithReplicas(source, target, block)) {
-  return false;
-}
-if (reduceNumOfRacks(source, target, block)) {
+if (!isGoodBlockCandidateForPlacementPolicy(source, target, block)) {
   return false;
 }
 return true;
   }
 
-  /**
-   * Determine whether moving the given block replica from source to target
-   * would reduce the number of racks of the block replicas.
-   */
-  private boolean reduceNumOfRacks(StorageGroup source, StorageGroup target,
-  DBlock block) {
-final 

hadoop git commit: HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for filesystem entirely allocated for DFS use. (Tony Wu via lei)

2015-11-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec414600e -> e2a5441b0


HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for 
filesystem entirely allocated for DFS use. (Tony Wu via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2a5441b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2a5441b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2a5441b

Branch: refs/heads/trunk
Commit: e2a5441b062fd0758138079d24a2740fc5e5e350
Parents: ec41460
Author: Lei Xu 
Authored: Wed Nov 4 10:27:35 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:27:35 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd560d1..5f3ff11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1620,6 +1620,9 @@ Release 2.8.0 - UNRELEASED
 
HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)
 
+HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
+filesystem entirely allocated for DFS use. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a5441b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 5c865e1..2219aa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -136,7 +136,7 @@ public class TestNameNodeMXBean {
   assertTrue(liveNodes.size() == 2);
   for (Map liveNode : liveNodes.values()) {
 assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
-assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) >= 0);
 assertTrue(liveNode.containsKey("capacity"));
 assertTrue(((Long)liveNode.get("capacity")) > 0);
 assertTrue(liveNode.containsKey("numBlocks"));
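
Why the relaxation from > 0 to >= 0 is correct: on a host whose filesystem is
entirely given over to DFS, non-DFS used space is legitimately zero. A sketch
using the usual accounting identity, which is assumed here rather than quoted
from the source.

public class NonDfsUsedSketch {
  // Assumed identity: whatever on the volume is neither DFS data nor
  // free space counts as non-DFS used.
  static long nonDfsUsed(long capacity, long dfsUsed, long remaining) {
    return capacity - dfsUsed - remaining;
  }

  public static void main(String[] args) {
    // Volume fully allocated to DFS: 100 bytes of blocks, 900 still free.
    System.out.println(nonDfsUsed(1000, 100, 900)); // 0, so the old assert failed
    // Mixed-use volume: 200 bytes consumed by non-DFS files.
    System.out.println(nonDfsUsed(1000, 100, 700)); // 200
  }
}
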



hadoop git commit: HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for filesystem entirely allocated for DFS use. (Tony Wu via lei)

2015-11-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d9277819 -> 8ed1fd217


HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for 
filesystem entirely allocated for DFS use. (Tony Wu via lei)

(cherry picked from commit e2a5441b062fd0758138079d24a2740fc5e5e350)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ed1fd21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ed1fd21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ed1fd21

Branch: refs/heads/branch-2
Commit: 8ed1fd2171269c1bf7efac417f84484f61dd2c5b
Parents: 6d92778
Author: Lei Xu 
Authored: Wed Nov 4 10:27:35 2015 -0800
Committer: Lei Xu 
Committed: Wed Nov 4 10:28:00 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed1fd21/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index db9cc29..6aa9432 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -773,6 +773,9 @@ Release 2.8.0 - UNRELEASED
 
HDFS-9007. Fix HDFS Balancer to honor upgrade domain policy. (Ming Ma via lei)
 
+HDFS-9331. Modify TestNameNodeMXBean#testNameNodeMXBeanInfo() to account for
+filesystem entirely allocated for DFS use. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed1fd21/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 463ca67..dfaf929 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -136,7 +136,7 @@ public class TestNameNodeMXBean {
   assertTrue(liveNodes.size() == 2);
   for (Map liveNode : liveNodes.values()) {
 assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
-assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
+assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) >= 0);
 assertTrue(liveNode.containsKey("capacity"));
 assertTrue(((Long)liveNode.get("capacity")) > 0);
 assertTrue(liveNode.containsKey("numBlocks"));



hadoop git commit: HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. Contributed by Allen Wittenauer.

2015-11-04 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3e1745d8e -> 73b9c7b82


HADOOP-10787. Rename/remove non-HADOOP_*, etc from the shell scripts. 
Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73b9c7b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73b9c7b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73b9c7b8

Branch: refs/heads/trunk
Commit: 73b9c7b82b0f607a5328ad7dc4170da3ac0c1af3
Parents: 3e1745d
Author: Varun Vasudev 
Authored: Wed Nov 4 15:56:17 2015 +0530
Committer: Varun Vasudev 
Committed: Wed Nov 4 15:56:17 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop-common/src/main/bin/hadoop   | 15 ++--
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |  6 +-
 .../src/main/bin/hadoop-daemons.sh  |  6 +-
 .../src/main/bin/hadoop-functions.sh| 66 +
 .../src/main/bin/hadoop-layout.sh.example   | 16 ++---
 .../hadoop-common/src/main/bin/rcc  |  4 +-
 .../hadoop-common/src/main/bin/slaves.sh|  6 +-
 .../hadoop-common/src/main/bin/start-all.sh |  6 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |  6 +-
 .../main/conf/hadoop-user-functions.sh.example  | 10 +--
 .../scripts/hadoop_add_common_to_classpath.bats |  4 +-
 .../hadoop_add_to_classpath_toolspath.bats  | 74 
 .../src/test/scripts/hadoop_basic_init.bats |  2 +-
 .../hadoop-kms/src/main/sbin/kms.sh |  6 +-
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  6 +-
 .../src/main/bin/distribute-exclude.sh  |  4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |  9 ++-
 .../src/main/bin/refresh-namenodes.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  6 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |  6 +-
 .../src/main/bin/start-secure-dns.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  6 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh|  6 +-
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh |  6 +-
 hadoop-mapreduce-project/bin/mapred | 15 ++--
 .../bin/mr-jobhistory-daemon.sh |  6 +-
 .../hadoop-sls/src/main/bin/rumen2sls.sh|  9 ++-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  | 12 ++--
 .../hadoop-yarn/bin/start-yarn.sh   |  6 +-
 .../hadoop-yarn/bin/stop-yarn.sh|  6 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  6 +-
 .../hadoop-yarn/bin/yarn-daemon.sh  |  6 +-
 .../hadoop-yarn/bin/yarn-daemons.sh |  6 +-
 34 files changed, 235 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 453efe6..dbf9700 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -32,6 +32,9 @@ Trunk (Unreleased)
HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
 (Li Lu via wheat9)
 
+HADOOP-10787 Rename/remove non-HADOOP_*, etc from the shell scripts.
+(aw via vvasudev)
+
   NEW FEATURES
 
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73b9c7b8/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index ef67cc5..513b0f1 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -47,13 +47,13 @@ function hadoop_usage
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then
-  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
   bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
 if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
@@ -113,8 +113,7 @@ case ${COMMAND} in
   ;;
   archive)
 CLASS=org.apache.hadoop.tools.HadoopArchives
-hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
-hadoop_add_classpath "${TOOL_PATH}"
+