svn commit: r1409636 - in /hadoop/common/branches/branch-2/hadoop-common-project: ./ hadoop-auth/ hadoop-common/ hadoop-common/src/main/docs/ hadoop-common/src/main/java/ hadoop-common/src/test/core/
Author: eli Date: Thu Nov 15 01:52:17 2012 New Revision: 1409636 URL: http://svn.apache.org/viewvc?rev=1409636&view=rev Log: HADOOP-9042. Add a test for umask in FileSystemContractBaseTest. Colin Patrick McCabe Modified: hadoop/common/branches/branch-2/hadoop-common-project/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-auth/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt (contents, props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/docs/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/core/ (props changed) hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Propchange: hadoop/common/branches/branch-2/hadoop-common-project/ -- Merged /hadoop/common/trunk/hadoop-common-project:r1409635 Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-auth/ -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-auth:r1409635 Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/ -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common:r1409635 Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1409636&r1=1409635&r2=1409636&view=diff == --- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt (original) +++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt Thu Nov 15 01:52:17 2012 @@ -81,6 +81,9 @@ Release 2.0.3-alpha - Unreleased HADOO-8998. 
set Cache-Control no-cache header on all dynamic content. (tucu) +HADOOP-9042. Add a test for umask in FileSystemContractBaseTest. +(Colin Patrick McCabe via eli) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1409635 Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/docs/ -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1409635 Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/ -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1409635 Propchange: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/core/ -- Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1409635 Modified: hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=1409636&r1=1409635&r2=1409636&view=diff == --- hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original) +++ hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Thu Nov 15 01:52:17 2012 @@ -23,11 +23,13 @@ import java.io.IOException; import junit.framework.TestCase; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; 
+import org.apache.hadoop.fs.permission.FsPermission; /** * @@ -151,6 +153,25 @@ public abstract class FileSystemContract assertFalse(fs.exists(testDeepSubDir)); } + + public void testMkdirsWithUma
svn commit: r1409635 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
Author: eli Date: Thu Nov 15 01:51:15 2012 New Revision: 1409635 URL: http://svn.apache.org/viewvc?rev=1409635&view=rev Log: HADOOP-9042. Add a test for umask in FileSystemContractBaseTest. Colin Patrick McCabe Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1409635&r1=1409634&r2=1409635&view=diff == --- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt (original) +++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Thu Nov 15 01:51:15 2012 @@ -369,6 +369,9 @@ Release 2.0.3-alpha - Unreleased HADOO-8998. set Cache-Control no-cache header on all dynamic content. (tucu) +HADOOP-9042. Add a test for umask in FileSystemContractBaseTest. +(Colin Patrick McCabe via eli) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). 
(Andrew Wang Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=1409635&r1=1409634&r2=1409635&view=diff == --- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original) +++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Thu Nov 15 01:51:15 2012 @@ -23,11 +23,13 @@ import java.io.IOException; import junit.framework.TestCase; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; /** * @@ -151,6 +153,25 @@ public abstract class FileSystemContract assertFalse(fs.exists(testDeepSubDir)); } + + public void testMkdirsWithUmask() throws Exception { +if (fs.getScheme().equals("s3") || fs.getScheme().equals("s3n")) { + // skip permission tests for S3FileSystem until HDFS-1333 is fixed. + return; +} +Configuration conf = fs.getConf(); +String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY); +try { + conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "062"); + final Path dir = new Path("/test/newDir"); + assertTrue(fs.mkdirs(dir, new FsPermission((short)0777))); + FileStatus status = fs.getFileStatus(dir); + assertTrue(status.isDirectory()); + assertEquals((short)0715, status.getPermission().toShort()); +} finally { + conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask); +} + } public void testGetFileStatusThrowsExceptionForNonExistentFile() throws Exception {
svn commit: r1409481 - in /hadoop/common/branches/branch-1.1/src: hdfs/org/apache/hadoop/hdfs/ hdfs/org/apache/hadoop/hdfs/server/namenode/ test/org/apache/hadoop/hdfs/server/namenode/ webapps/hdfs/
Author: suresh Date: Wed Nov 14 22:22:11 2012 New Revision: 1409481 URL: http://svn.apache.org/viewvc?rev=1409481&view=rev Log: HDFS-4174. Add ability to list the corrupted files in WebUI (backport of HDFS-1031). Contributed by Jing Zhao. Added: hadoop/common/branches/branch-1.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java hadoop/common/branches/branch-1.1/src/webapps/hdfs/corrupt_files.jsp Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java hadoop/common/branches/branch-1.1/src/webapps/hdfs/dfshealth.jsp Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1409481&r1=1409480&r2=1409481&view=diff == --- hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Nov 14 22:22:11 2012 @@ -229,6 +229,9 @@ public class DFSConfigKeys extends Commo public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0; public static final String DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit"; public static final int DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 100; + public static final String DFS_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max"; + public static final int DFS_MAX_CORRUPT_FILES_RETURNED_DEFAULT = 500; + public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false; public static
final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; Modified: hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1409481&r1=1409480&r2=1409481&view=diff == --- hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original) +++ hadoop/common/branches/branch-1.1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Nov 14 22:22:11 2012 @@ -322,6 +322,8 @@ public class FSNamesystem implements FSC private boolean allowBrokenAppend = false; // enable durable sync private boolean durableSync = true; + // How many entries are returned by getCorruptInodes() + int maxCorruptFilesReturned; /** * Last block index used for replication work. @@ -508,6 +510,9 @@ public class FSNamesystem implements FSC DFSUtil.getInvalidateWorkPctPerIteration(conf); this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf); +this.maxCorruptFilesReturned = conf.getInt( +DFSConfigKeys.DFS_MAX_CORRUPT_FILES_RETURNED_KEY, +DFSConfigKeys.DFS_MAX_CORRUPT_FILES_RETURNED_DEFAULT); this.defaultReplication = conf.getInt("dfs.replication", 3); this.maxReplication = conf.getInt("dfs.replication.max", 512); this.minReplication = conf.getInt("dfs.replication.min", 1); @@ -6051,4 +6056,58 @@ public class FSNamesystem implements FSC public String toString() { return getClass().getSimpleName() + ": " + host2DataNodeMap; } + + /** + * Used by {@link FSNamesystem#getCorruptFileBlocks()} and + * {@link FSNamesystem#listCorruptFileBlocks()} to represent information about + * corrupt file and its corresponding block + */ + static class CorruptFileBlockInfo { +String path; +Block block; + +public CorruptFileBlockInfo(String p, Block b) { + path = p; + block = b; +} + +@Override +public String toString() { + 
return block.getBlockName() + "\t" + path; +} + } + + /** + * @return a collection of corrupt files with their blocks information, with a + * maximum of {@link FSNamesystem#maxCorruptFilesReturned} files + * listed in total + */ + private Collection getCorruptFileBlocks() { +ArrayList corruptFiles = +new ArrayList(); +for (Block blk : neededReplications.getCorruptQueue()) { + INode inode = blocksMap.getINode(blk); + if (inode != null && countNodes(blk).liveReplicas() == 0) { +String filePath = FSDirectory.getFullPathName(inode); +Corr
[Hadoop Wiki] Update of "Help" by SteveLoughran
Dear Wiki user, You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change notification. The "Help" page has been changed by SteveLoughran: http://wiki.apache.org/hadoop/Help?action=diff&rev1=7&rev2=8 * Any exceptions/errors found while browsing through the hadoop-daemons' logs. * The hadoop daemons are: namenode & datanode (hdfs) and jobtracker & tasktracker (map-reduce). * The daemons' logs are found in the ''${HADOOP_LOG_DIR}'' directory. +* Don't cross-post to the developer lists -they will ignore you. +* Don't file a bug report if it's just you not being able to get Hadoop working. They'll be closed as invalid. +* Do try and debug it yourself. +* Don't start resending the question every 30 minutes if you don't get an answer at first. Wait 24 hours (for everyone round the world to see it) * More generic tips on asking questions [[http://www.catb.org/~esr/faqs/smart-questions.html|here]]. + + Do realise that everyone on the user list is doing it co-operatively and not paid for it. You can't expect timely replies -many of the people will be in different time zones, and they are busy. Because they are busy you do have to do your homework -especially as they can't debug your server configuration for you. All they can do is give suggestions. <> '''[[#GotResponse|I got a response!]]''' <>
svn commit: r1409292 - in /hadoop/common/branches/branch-1: ./ src/hdfs/org/apache/hadoop/hdfs/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/server/namenode/ src/we
Author: suresh Date: Wed Nov 14 18:43:36 2012 New Revision: 1409292 URL: http://svn.apache.org/viewvc?rev=1409292&view=rev Log: HDFS-4174. Add ability to list the corrupted files in WebUI (backport of HDFS-1031). Contributed by Jing Zhao. Added: hadoop/common/branches/branch-1/src/test/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java hadoop/common/branches/branch-1/src/webapps/hdfs/corrupt_files.jsp Modified: hadoop/common/branches/branch-1/CHANGES.txt hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java hadoop/common/branches/branch-1/src/webapps/hdfs/dfshealth.jsp Modified: hadoop/common/branches/branch-1/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1409292&r1=1409291&r2=1409292&view=diff == --- hadoop/common/branches/branch-1/CHANGES.txt (original) +++ hadoop/common/branches/branch-1/CHANGES.txt Wed Nov 14 18:43:36 2012 @@ -329,6 +329,9 @@ Release 1.1.1 - Unreleased HDFS-1539. A config option for the datanode to fsycn a block file when block is completely written. (dhruba via szetszwo) +HDFS-4174. Add ability to list the corrupted files in WebUI (backport of +HDFS-1031). (Jing Zhao via suresh) + BUG FIXES HADOOP-8878. 
Uppercase namenode hostname causes hadoop dfs calls with Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1409292&r1=1409291&r2=1409292&view=diff == --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Nov 14 18:43:36 2012 @@ -246,6 +246,9 @@ public class DFSConfigKeys extends Commo public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0; public static final String DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit"; public static final int DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 100; + public static final String DFS_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max"; + public static final int DFS_MAX_CORRUPT_FILES_RETURNED_DEFAULT = 500; + public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit"; public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false; public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum"; Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1409292&r1=1409291&r2=1409292&view=diff == --- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original) +++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Nov 14 18:43:36 2012 @@ -45,12 +45,12 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.NavigableMap; import 
java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.Map.Entry; import java.util.concurrent.TimeUnit; import javax.management.NotCompliantMBeanException; @@ -61,6 +61,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; @@ -86,27 +87,26 @@ import org.apache.hadoop.hdfs.security.t import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; -import org.apache.hadoop.hdfs.server.common.Storage; import
svn commit: r1409274 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
Author: jlowe Date: Wed Nov 14 17:49:02 2012 New Revision: 1409274 URL: http://svn.apache.org/viewvc?rev=1409274&view=rev Log: HADOOP-9037. Bug in test-patch.sh and precommit build process. Contributed by Kihwal Lee Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1409274&r1=1409273&r2=1409274&view=diff == --- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt (original) +++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Wed Nov 14 17:49:02 2012 @@ -277,6 +277,9 @@ Trunk (Unreleased) HADOOP-8974. TestDFVariations fails on Windows. (Chris Nauroth via suresh) +HADOOP-9037. Bug in test-patch.sh and precommit build process (Kihwal Lee +via jlowe) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd)
svn commit: r1409274 - /hadoop/common/trunk/dev-support/test-patch.sh
Author: jlowe Date: Wed Nov 14 17:49:02 2012 New Revision: 1409274 URL: http://svn.apache.org/viewvc?rev=1409274&view=rev Log: HADOOP-9037. Bug in test-patch.sh and precommit build process. Contributed by Kihwal Lee Modified: hadoop/common/trunk/dev-support/test-patch.sh Modified: hadoop/common/trunk/dev-support/test-patch.sh URL: http://svn.apache.org/viewvc/hadoop/common/trunk/dev-support/test-patch.sh?rev=1409274&r1=1409273&r2=1409274&view=diff == --- hadoop/common/trunk/dev-support/test-patch.sh (original) +++ hadoop/common/trunk/dev-support/test-patch.sh Wed Nov 14 17:49:02 2012 @@ -710,7 +710,7 @@ runTests () { ordered_modules="$ordered_modules $module" fi done - if [ -n $hdfs_modules ]; then + if [ -n "$hdfs_modules" ]; then ordered_modules="$ordered_modules $hdfs_modules" if [[ $building_common -eq 0 ]]; then echo " Building hadoop-common with -Pnative in order to provide \
svn commit: r1409265 - in /hadoop/common/branches/branch-1-win: CHANGES.branch-1-win.txt bin/hadoop.cmd
Author: suresh Date: Wed Nov 14 17:29:33 2012 New Revision: 1409265 URL: http://svn.apache.org/viewvc?rev=1409265&view=rev Log: HADOOP-9026. Hadoop.cmd fails to initialize if user's %path% variable has parenthesis. Contributed by Ivan Mitic. Modified: hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt hadoop/common/branches/branch-1-win/bin/hadoop.cmd Modified: hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt?rev=1409265&r1=1409264&r2=1409265&view=diff == --- hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt (original) +++ hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt Wed Nov 14 17:29:33 2012 @@ -211,3 +211,6 @@ Branch-hadoop-1-win - unreleased HADOOP-9006. Winutils should keep Administrators privileges intact. (Chuan Liu via suresh) + +HADOOP-9026. Hadoop.cmd fails to initialize if user's %path% variable has +parenthesis. (Ivan Mitic via suresh) Modified: hadoop/common/branches/branch-1-win/bin/hadoop.cmd URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/bin/hadoop.cmd?rev=1409265&r1=1409264&r2=1409265&view=diff == --- hadoop/common/branches/branch-1-win/bin/hadoop.cmd (original) +++ hadoop/common/branches/branch-1-win/bin/hadoop.cmd Wed Nov 14 17:29:33 2012 @@ -143,10 +143,15 @@ call :updatepath %HADOOP_BIN_PATH% :updatepath set path_to_add=%* - set current_path_comparable=%path:(x86)=% + set current_path_comparable=%path% set current_path_comparable=%current_path_comparable: =_% - set path_to_add_comparable=%path_to_add:(x86)=% + set current_path_comparable=%current_path_comparable:(=_% + set current_path_comparable=%current_path_comparable:)=_% + set path_to_add_comparable=%path_to_add% set path_to_add_comparable=%path_to_add_comparable: =_% + set path_to_add_comparable=%path_to_add_comparable:(=_% + set path_to_add_comparable=%path_to_add_comparable:)=_% + for %%i in ( %current_path_comparable% ) do 
( if /i "%%i" == "%path_to_add_comparable%" ( set path_to_add_exist=true