Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed Aug 20 01:34:29 2014 @@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0. <artifactId>netty</artifactId> <scope>compile</scope> </dependency> + <dependency> + <groupId>xerces</groupId> + <artifactId>xercesImpl</artifactId> + <scope>compile</scope> + </dependency> </dependencies> <build> @@ -355,16 +360,97 @@ http://maven.apache.org/xsd/maven-4.0.0. <profiles> <profile> - <id>windows</id> + <id>native-win</id> <activation> <activeByDefault>false</activeByDefault> <os> <family>windows</family> </os> </activation> - <properties> - <windows.build>true</windows.build> - </properties> + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-enforcer-plugin</artifactId> + <executions> + <execution> + <id>enforce-os</id> + <goals> + <goal>enforce</goal> + </goals> + <configuration> + <rules> + <requireOS> + <family>windows</family> + <message>native-win build only supported on Windows</message> + </requireOS> + </rules> + <fail>true</fail> + </configuration> + </execution> + </executions> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-antrun-plugin</artifactId> + <executions> + <execution> + <id>make</id> + <phase>compile</phase> + <goals> + <goal>run</goal> + </goals> + <configuration> + <target> + <mkdir dir="${project.build.directory}/native"/> + <exec executable="cmake" dir="${project.build.directory}/native" + failonerror="true"> + <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/> + </exec> + <exec executable="msbuild" dir="${project.build.directory}/native" + failonerror="true"> + <arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/> + </exec> + <!-- Copy for inclusion in distribution. --> + <copy todir="${project.build.directory}/bin"> + <fileset dir="${project.build.directory}/native/target/bin/Release"/> + </copy> + </target> + </configuration> + </execution> + <execution> + <id>native_tests</id> + <phase>test</phase> + <goals><goal>run</goal></goals> + <configuration> + <skip>${skipTests}</skip> + <target> + <property name="compile_classpath" refid="maven.compile.classpath"/> + <property name="test_classpath" refid="maven.test.classpath"/> + <macrodef name="run-test"> + <attribute name="test"/> + <sequential> + <echo message="Running @{test}"/> + <exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/"> + <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> + <!-- HADOOP_HOME required to find winutils. --> + <env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/> + <!-- Make sure hadoop.dll and jvm.dll are on PATH. 
--> + <env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/> + </exec> + <echo message="Finished @{test}"/> + </sequential> + </macrodef> + <run-test test="test_libhdfs_threaded"/> + <echo message="Skipping test_libhdfs_zerocopy"/> + <run-test test="test_native_mini_dfs"/> + </target> + </configuration> + </execution> + </executions> + </plugin> + </plugins> + </build> </profile> <profile> <id>native</id> @@ -402,21 +488,25 @@ http://maven.apache.org/xsd/maven-4.0.0. <phase>test</phase> <goals><goal>run</goal></goals> <configuration> + <skip>${skipTests}</skip> <target> <property name="compile_classpath" refid="maven.compile.classpath"/> <property name="test_classpath" refid="maven.test.classpath"/> - <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/"> - <arg value="-c"/> - <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/> - <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> - <env key="SKIPTESTS" value="${skipTests}"/> - </exec> - <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/"> - <arg value="-c"/> - <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/> - <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> - <env key="SKIPTESTS" value="${skipTests}"/> - </exec> + <macrodef name="run-test"> + <attribute name="test"/> + <sequential> + <echo message="Running @{test}"/> + <exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/"> + <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> + <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. --> + <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/> + </exec> + <echo message="Finished @{test}"/> + </sequential> + </macrodef> + <run-test test="test_libhdfs_threaded"/> + <run-test test="test_libhdfs_zerocopy"/> + <run-test test="test_native_mini_dfs"/> </target> </configuration> </execution>
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Wed Aug 20 01:34:29 2014 @@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH) MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH") endif (NOT GENERATED_JAVAH) -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2") -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE") -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64") +if (WIN32) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2") + + # Set warning level 4. + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4") + + # Skip "unreferenced formal parameter". + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100") + + # Skip "conditional expression is constant". + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127") + + # Skip deprecated POSIX function warnings. + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE") + + # Skip CRT non-secure function warnings. If we can convert usage of + # strerror, getenv and ctime to their secure CRT equivalents, then we can + # re-enable the CRT non-secure function warnings. + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS") + + # Omit unneeded headers. + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN") + + set(OS_DIR main/native/libhdfs/os/windows) + set(OUT_DIR target/bin) +else (WIN32) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64") + set(OS_DIR main/native/libhdfs/os/posix) + set(OS_LINK_LIBRARIES pthread) + set(OUT_DIR target/usr/local/lib) +endif (WIN32) include_directories( ${GENERATED_JAVAH} @@ -87,6 +117,7 @@ include_directories( ${JNI_INCLUDE_DIRS} main/native main/native/libhdfs + ${OS_DIR} ) set(_FUSE_DFS_VERSION 0.1.0) @@ -96,6 +127,9 @@ add_dual_library(hdfs main/native/libhdfs/exception.c main/native/libhdfs/jni_helper.c main/native/libhdfs/hdfs.c + main/native/libhdfs/common/htable.c + ${OS_DIR}/mutexes.c + ${OS_DIR}/thread_local_storage.c ) if (NEED_LINK_DL) set(LIB_DL dl) @@ -104,17 +138,14 @@ endif(NEED_LINK_DL) target_link_dual_libraries(hdfs ${JAVA_JVM_LIBRARY} ${LIB_DL} - pthread + ${OS_LINK_LIBRARIES} ) -dual_output_directory(hdfs target/usr/local/lib) + +dual_output_directory(hdfs ${OUT_DIR}) set(LIBHDFS_VERSION "0.0.0") set_target_properties(hdfs PROPERTIES SOVERSION ${LIBHDFS_VERSION}) -add_library(posix_util - main/native/util/posix_util.c -) - add_executable(test_libhdfs_ops main/native/libhdfs/test/test_libhdfs_ops.c ) @@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_d add_executable(test_libhdfs_threaded main/native/libhdfs/expect.c main/native/libhdfs/test_libhdfs_threaded.c + ${OS_DIR}/thread.c ) target_link_libraries(test_libhdfs_threaded hdfs native_mini_dfs - pthread + ${OS_LINK_LIBRARIES} ) add_executable(test_libhdfs_zerocopy @@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy target_link_libraries(test_libhdfs_zerocopy hdfs native_mini_dfs - pthread + ${OS_LINK_LIBRARIES} ) -add_executable(test_libhdfs_vecsum - 
main/native/libhdfs/test/vecsum.c -) -target_link_libraries(test_libhdfs_vecsum - hdfs - pthread - rt -) +# Skip vecsum on Windows. This could be made to work in the future by +# introducing an abstraction layer over the sys/mman.h functions. +if (NOT WIN32) + add_executable(test_libhdfs_vecsum + main/native/libhdfs/test/vecsum.c + ) + target_link_libraries(test_libhdfs_vecsum + hdfs + pthread + rt + ) +endif(NOT WIN32) IF(REQUIRE_LIBWEBHDFS) add_subdirectory(contrib/libwebhdfs) Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/distribute-exclude.sh Wed Aug 20 01:34:29 2014 @@ -57,9 +57,9 @@ excludeFilenameRemote=$("$HADOOP_PREFIX/ if [ "$excludeFilenameRemote" = '' ] ; then echo \ - "Error: hdfs getconf -excludeFile returned empty string, " \ - "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \ - "configuration and on all namenodes" + "Error: hdfs getconf -excludeFile returned empty string, " \ + "please setup dfs.hosts.exclude in hdfs-site.xml in local cluster " \ + "configuration and on all namenodes" exit 1 fi Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Wed Aug 20 01:34:29 2014 @@ -15,250 +15,237 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Environment Variables -# -# JSVC_HOME home directory of jsvc binary. Required for starting secure -# datanode. -# -# JSVC_OUTFILE path to jsvc output file. Defaults to -# $HADOOP_LOG_DIR/jsvc.out. -# -# JSVC_ERRFILE path to jsvc error file. Defaults to $HADOOP_LOG_DIR/jsvc.err. - -bin=`which $0` -bin=`dirname ${bin}` -bin=`cd "$bin" > /dev/null; pwd` - -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh - -function print_usage(){ - echo "Usage: hdfs [--config confdir] COMMAND" +function hadoop_usage +{ + echo "Usage: hdfs [--config confdir] [--daemon (start|stop|status)] COMMAND" echo " where COMMAND is one of:" - echo " dfs run a filesystem command on the file systems supported in Hadoop." 
- echo " namenode -format format the DFS filesystem" - echo " secondarynamenode run the DFS secondary namenode" - echo " namenode run the DFS namenode" - echo " journalnode run the DFS journalnode" - echo " zkfc run the ZK Failover Controller daemon" + echo " balancer run a cluster balancing utility" + echo " cacheadmin configure the HDFS cache" + echo " classpath prints the class path needed to get the" + echo " Hadoop jar and the required libraries" echo " datanode run a DFS datanode" + echo " dfs run a filesystem command on the file system" echo " dfsadmin run a DFS admin client" - echo " haadmin run a DFS HA admin client" - echo " fsck run a DFS filesystem checking utility" - echo " balancer run a cluster balancing utility" - echo " jmxget get JMX exported values from NameNode or DataNode." - echo " oiv apply the offline fsimage viewer to an fsimage" - echo " oiv_legacy apply the offline fsimage viewer to an legacy fsimage" - echo " oev apply the offline edits viewer to an edits file" echo " fetchdt fetch a delegation token from the NameNode" + echo " fsck run a DFS filesystem checking utility" echo " getconf get config values from configuration" echo " groups get the groups which users belong to" - echo " snapshotDiff diff two snapshots of a directory or diff the" - echo " current directory contents with a snapshot" + echo " haadmin run a DFS HA admin client" + echo " jmxget get JMX exported values from NameNode or DataNode." + echo " journalnode run the DFS journalnode" echo " lsSnapshottableDir list all snapshottable dirs owned by the current user" - echo " Use -help to see options" - echo " portmap run a portmap service" + echo " Use -help to see options" + echo " namenode run the DFS namenode" + echo " Use -format to initialize the DFS filesystem" echo " nfs3 run an NFS version 3 gateway" - echo " cacheadmin configure the HDFS cache" + echo " oev apply the offline edits viewer to an edits file" + echo " oiv apply the offline fsimage viewer to an fsimage" + echo " oiv_legacy apply the offline fsimage viewer to a legacy fsimage" + echo " portmap run a portmap service" + echo " secondarynamenode run the DFS secondary namenode" + echo " snapshotDiff diff two snapshots of a directory or diff the" + echo " current directory contents with a snapshot" + echo " zkfc run the ZK Failover Controller daemon" echo "" echo "Most commands print help when invoked w/o parameters." } -if [ $# = 0 ]; then - print_usage - exit +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + this="${BASH_SOURCE-$0}" + bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 
2>&1 + exit 1 +fi + +if [[ $# = 0 ]]; then + hadoop_exit_with_usage 1 fi COMMAND=$1 shift -case $COMMAND in - # usage flags - --help|-help|-h) - print_usage +case ${COMMAND} in + balancer) + CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}" + ;; + cacheadmin) + CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin + ;; + classpath) + hadoop_finalize + echo "${CLASSPATH}" exit - ;; -esac - -# Determine if we're starting a secure datanode, and if so, redefine appropriate variables -if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then - if [ -n "$JSVC_HOME" ]; then - if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then - HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR - fi - - if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then - HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR - HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" - fi - - HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER - HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING" - starting_secure_dn="true" - else - echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\ - "isn't set. Falling back to starting insecure DN." - fi -fi - -# Determine if we're starting a privileged NFS daemon, and if so, redefine appropriate variables -if [ "$COMMAND" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then - if [ -n "$JSVC_HOME" ]; then - if [ -n "$HADOOP_PRIVILEGED_NFS_PID_DIR" ]; then - HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR + ;; + datanode) + daemon="true" + # Determine if we're starting a secure datanode, and + # if so, redefine appropriate variables + if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then + secure_service="true" + secure_user="${HADOOP_SECURE_DN_USER}" + + # backward compatiblity + HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}" + HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}" + + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}" + CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter" + else + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}" + CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' fi - - if [ -n "$HADOOP_PRIVILEGED_NFS_LOG_DIR" ]; then - HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR - HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" + ;; + dfs) + CLASS=org.apache.hadoop.fs.FsShell + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" + ;; + dfsadmin) + CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" + ;; + fetchdt) + CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher + ;; + fsck) + CLASS=org.apache.hadoop.hdfs.tools.DFSck + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" + ;; + getconf) + CLASS=org.apache.hadoop.hdfs.tools.GetConf + ;; + groups) + CLASS=org.apache.hadoop.hdfs.tools.GetGroups + ;; + haadmin) + CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin + CLASSPATH="${CLASSPATH}:${TOOL_PATH}" + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" + ;; + journalnode) + daemon="true" + CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode' + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}" + ;; + jmxget) + CLASS=org.apache.hadoop.hdfs.tools.JMXGet + ;; + lsSnapshottableDir) + CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir + ;; + namenode) + daemon="true" + CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}" + ;; + nfs3) + 
daemon="true" + if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then + secure_service="true" + secure_user="${HADOOP_PRIVILEGED_NFS_USER}" + + # backward compatiblity + HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}" + HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}" + + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}" + CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter + else + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}" + CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3 fi - - HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER - HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING" - starting_privileged_nfs="true" - else - echo "It looks like you're trying to start a privileged NFS server, but"\ - "\$JSVC_HOME isn't set. Falling back to starting unprivileged NFS server." - fi -fi + ;; + oev) + CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer + ;; + oiv) + CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB + ;; + oiv_legacy) + CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer + ;; + portmap) + daemon="true" + CLASS=org.apache.hadoop.portmap.Portmap + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}" + ;; + secondarynamenode) + daemon="true" + CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}" + ;; + snapshotDiff) + CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff + ;; + zkfc) + daemon="true" + CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController' + HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}" + ;; + -*) + hadoop_exit_with_usage 1 + ;; + *) + CLASS="${COMMAND}" + ;; +esac -if [ "$COMMAND" = "namenode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS" -elif [ "$COMMAND" = "zkfc" ] ; then - CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS" -elif [ "$COMMAND" = "secondarynamenode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS" -elif [ "$COMMAND" = "datanode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' - if [ "$starting_secure_dn" = "true" ]; then - HADOOP_OPTS="$HADOOP_OPTS -jvm server $HADOOP_DATANODE_OPTS" - else - HADOOP_OPTS="$HADOOP_OPTS -server $HADOOP_DATANODE_OPTS" +if [[ -n "${secure_service}" ]]; then + HADOOP_SECURE_USER="${secure_user}" + if hadoop_verify_secure_prereq; then + hadoop_setup_secure_service + priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out" + priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err" + priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid" + daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" + daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid" fi -elif [ "$COMMAND" = "journalnode" ] ; then - CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOURNALNODE_OPTS" -elif [ "$COMMAND" = "dfs" ] ; then - CLASS=org.apache.hadoop.fs.FsShell - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "dfsadmin" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin - 
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "haadmin" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin - CLASSPATH=${CLASSPATH}:${TOOL_PATH} - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "fsck" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DFSck - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "balancer" ] ; then - CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS" -elif [ "$COMMAND" = "jmxget" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.JMXGet -elif [ "$COMMAND" = "oiv" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB -elif [ "$COMMAND" = "oiv_legacy" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -elif [ "$COMMAND" = "oev" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer -elif [ "$COMMAND" = "fetchdt" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -elif [ "$COMMAND" = "getconf" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.GetConf -elif [ "$COMMAND" = "groups" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.GetGroups -elif [ "$COMMAND" = "snapshotDiff" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff -elif [ "$COMMAND" = "lsSnapshottableDir" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir -elif [ "$COMMAND" = "portmap" ] ; then - CLASS=org.apache.hadoop.portmap.Portmap - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_PORTMAP_OPTS" -elif [ "$COMMAND" = "nfs3" ] ; then - CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3 - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS" -elif [ "$COMMAND" = "cacheadmin" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin else - CLASS="$COMMAND" + daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" + daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid" fi -export CLASSPATH=$CLASSPATH - -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}" - -# Check to see if we should start a secure datanode -if [ "$starting_secure_dn" = "true" ]; then - if [ "$HADOOP_PID_DIR" = "" ]; then - HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid" +if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then + # shellcheck disable=SC2034 + HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}" + if [[ -n "${secure_service}" ]]; then + # shellcheck disable=SC2034 + HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log" else - HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid" - fi - - JSVC=$JSVC_HOME/jsvc - if [ ! -f $JSVC ]; then - echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run secure datanodes. " - echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\ - "and set JSVC_HOME to the directory containing the jsvc binary." - exit + # shellcheck disable=SC2034 + HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log" fi +fi - if [[ ! $JSVC_OUTFILE ]]; then - JSVC_OUTFILE="$HADOOP_LOG_DIR/jsvc.out" - fi +hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}" +hadoop_finalize - if [[ ! 
$JSVC_ERRFILE ]]; then - JSVC_ERRFILE="$HADOOP_LOG_DIR/jsvc.err" - fi +export CLASSPATH - exec "$JSVC" \ - -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \ - -errfile "$JSVC_ERRFILE" \ - -pidfile "$HADOOP_SECURE_DN_PID" \ - -nodetach \ - -user "$HADOOP_SECURE_DN_USER" \ - -cp "$CLASSPATH" \ - $JAVA_HEAP_MAX $HADOOP_OPTS \ - org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@" -elif [ "$starting_privileged_nfs" = "true" ] ; then - if [ "$HADOOP_PID_DIR" = "" ]; then - HADOOP_PRIVILEGED_NFS_PID="/tmp/hadoop_privileged_nfs3.pid" +if [[ -n "${daemon}" ]]; then + if [[ -n "${secure_service}" ]]; then + hadoop_secure_daemon_handler \ + "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\ + "${daemon_pidfile}" "${daemon_outfile}" \ + "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@" else - HADOOP_PRIVILEGED_NFS_PID="$HADOOP_PID_DIR/hadoop_privileged_nfs3.pid" - fi - - JSVC=$JSVC_HOME/jsvc - if [ ! -f $JSVC ]; then - echo "JSVC_HOME is not set correctly so jsvc cannot be found. jsvc is required to run privileged NFS gateways. " - echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\ - "and set JSVC_HOME to the directory containing the jsvc binary." - exit - fi - - if [[ ! $JSVC_OUTFILE ]]; then - JSVC_OUTFILE="$HADOOP_LOG_DIR/nfs3_jsvc.out" + hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\ + "${daemon_pidfile}" "${daemon_outfile}" "$@" fi - - if [[ ! $JSVC_ERRFILE ]]; then - JSVC_ERRFILE="$HADOOP_LOG_DIR/nfs3_jsvc.err" - fi - - exec "$JSVC" \ - -Dproc_$COMMAND -outfile "$JSVC_OUTFILE" \ - -errfile "$JSVC_ERRFILE" \ - -pidfile "$HADOOP_PRIVILEGED_NFS_PID" \ - -nodetach \ - -user "$HADOOP_PRIVILEGED_NFS_USER" \ - -cp "$CLASSPATH" \ - $JAVA_HEAP_MAX $HADOOP_OPTS \ - org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter "$@" + exit $? else - # run it - exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@" + # shellcheck disable=SC2086 + hadoop_java_exec "${COMMAND}" "${CLASS}" "$@" fi - Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh Wed Aug 20 01:34:29 2014 @@ -18,19 +18,67 @@ # included in all the hdfs scripts with source command # should not be executed directly -bin=`which "$0"` -bin=`dirname "${bin}"` -bin=`cd "$bin"; pwd` +function hadoop_subproject_init +{ + if [ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]; then + . "${HADOOP_CONF_DIR}/hdfs-env.sh" + fi + + # at some point in time, someone thought it would be a good idea to + # create separate vars for every subproject. *sigh* + # let's perform some overrides and setup some defaults for bw compat + # this way the common hadoop var's == subproject vars and can be + # used interchangeable from here on out + # ... + # this should get deprecated at some point. 
+ HADOOP_LOG_DIR="${HADOOP_HDFS_LOG_DIR:-$HADOOP_LOG_DIR}" + HADOOP_HDFS_LOG_DIR="${HADOOP_LOG_DIR}" + + HADOOP_LOGFILE="${HADOOP_HDFS_LOGFILE:-$HADOOP_LOGFILE}" + HADOOP_HDFS_LOGFILE="${HADOOP_LOGFILE}" + + HADOOP_NICENESS=${HADOOP_HDFS_NICENESS:-$HADOOP_NICENESS} + HADOOP_HDFS_NICENESS="${HADOOP_NICENESS}" + + HADOOP_STOP_TIMEOUT=${HADOOP_HDFS_STOP_TIMEOUT:-$HADOOP_STOP_TIMEOUT} + HADOOP_HDFS_STOP_TIMEOUT="${HADOOP_STOP_TIMEOUT}" + + HADOOP_PID_DIR="${HADOOP_HDFS_PID_DIR:-$HADOOP_PID_DIR}" + HADOOP_HDFS_PID_DIR="${HADOOP_PID_DIR}" + + HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER} + HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}" + + HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME_DIR}" + + HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}" + HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}" + + # turn on the defaults + + export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"} + export HADOOP_SECONDARYNAMENODE_OPTS=${HADOOP_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"} + export HADOOP_DATANODE_OPTS=${HADOOP_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"} + export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"} + export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"} + export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"} + + +} + +if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then + _hd_this="${BASH_SOURCE-$0}" + HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P) +fi -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then - . ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh + . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then - . "$HADOOP_COMMON_HOME"/libexec/hadoop-config.sh + . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then - . "$HADOOP_HOME"/libexec/hadoop-config.sh + . "${HADOOP_HOME}/libexec/hadoop-config.sh" else - echo "Hadoop common not found." - exit + echo "ERROR: Hadoop common not found." 2>&1 + exit 1 fi + Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/refresh-namenodes.sh Wed Aug 20 01:34:29 2014 @@ -20,24 +20,40 @@ # This script refreshes all namenodes, it's a simple wrapper # for dfsadmin to support multiple namenodes. -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + this="${BASH_SOURCE-$0}" + bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. 
$HADOOP_LIBEXEC_DIR/hdfs-config.sh +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi -namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -nnRpcAddresses) -if [ "$?" != '0' ] ; then errorFlag='1' ; +namenodes=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -nnRpcAddresses) +if [[ "$?" != '0' ]] ; then + errorFlag='1' ; else - for namenode in $namenodes ; do - echo "Refreshing namenode [$namenode]" - "$HADOOP_PREFIX/bin/hdfs" dfsadmin -fs hdfs://$namenode -refreshNodes - if [ "$?" != '0' ] ; then errorFlag='1' ; fi + for namenode in ${namenodes} ; do + echo "Refreshing namenode [${namenode}]" + "${HADOOP_HDFS_HOME}/bin/hdfs" dfsadmin \ + -fs hdfs://${namenode} -refreshNodes + if [[ "$?" != '0' ]]; then + errorFlag='1' + fi done fi -if [ "$errorFlag" = '1' ] ; then +if [[ "${errorFlag}" = '1' ]] ; then echo "Error: refresh of namenodes failed, see error messages above." exit 1 else Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh Wed Aug 20 01:34:29 2014 @@ -15,13 +15,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +function usage +{ + echo "Usage: start-balancer.sh [--config confdir] [-policy <policy>] [-threshold <threshold>]" +} -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi # Start balancer daemon. 
-"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@ +exec "${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" start balancer "$@" Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Wed Aug 20 01:34:29 2014 @@ -20,98 +20,128 @@ # Optinally upgrade or rollback dfs state. # Run this on master node. -usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]" +function hadoop_usage +{ + echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]" +} + +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh # get arguments -if [ $# -ge 1 ]; then - nameStartOpt="$1" - shift - case "$nameStartOpt" in - (-upgrade) - ;; - (-rollback) - dataStartOpt="$nameStartOpt" - ;; - (*) - echo $usage - exit 1 - ;; - esac +if [[ $# -ge 1 ]]; then + nameStartOpt="$1" + shift + case "$nameStartOpt" in + -upgrade) + ;; + -rollback) + dataStartOpt="$nameStartOpt" + ;; + *) + hadoop_exit_with_usage 1 + ;; + esac fi + #Add other possible options nameStartOpt="$nameStartOpt $@" #--------------------------------------------------------- # namenodes -NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes) +NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null) + +if [[ -z "${NAMENODES}" ]]; then + NAMENODES=$(hostname) +fi echo "Starting namenodes on [$NAMENODES]" -"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$NAMENODES" \ - --script "$bin/hdfs" start namenode $nameStartOpt +"${bin}/hadoop-daemons.sh" \ +--config "${HADOOP_CONF_DIR}" \ +--hostnames "${NAMENODES}" \ +start namenode ${nameStartOpt} #--------------------------------------------------------- # datanodes (using default slaves file) -if [ -n "$HADOOP_SECURE_DN_USER" ]; then - echo \ - "Attempting to start secure cluster, skipping datanodes. " \ - "Run start-secure-dns.sh as root to complete startup." +if [[ -n "${HADOOP_SECURE_DN_USER}" ]] && +[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then + echo "ERROR: Attempting to start secure cluster, skipping datanodes. " + echo "Run start-secure-dns.sh as root or configure " + echo "\${HADOOP_SECURE_COMMAND} to complete startup." 
else - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --script "$bin/hdfs" start datanode $dataStartOpt + + echo "Starting datanodes" + + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + start datanode ${dataStartOpt} fi #--------------------------------------------------------- # secondary namenodes (if any) -SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null) +SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null) -if [ -n "$SECONDARY_NAMENODES" ]; then - echo "Starting secondary namenodes [$SECONDARY_NAMENODES]" +if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then + SECONDARY_NAMENODES=$(hostname) +fi - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$SECONDARY_NAMENODES" \ - --script "$bin/hdfs" start secondarynamenode +if [[ -n "${SECONDARY_NAMENODES}" ]]; then + echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]" + + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${SECONDARY_NAMENODES}" \ + start secondarynamenode fi #--------------------------------------------------------- # quorumjournal nodes (if any) -SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-) +SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-) -case "$SHARED_EDITS_DIR" in -qjournal://*) - JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g') - echo "Starting journal nodes [$JOURNAL_NODES]" - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$JOURNAL_NODES" \ - --script "$bin/hdfs" start journalnode ;; +case "${SHARED_EDITS_DIR}" in + qjournal://*) + JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g') + echo "Starting journal nodes [${JOURNAL_NODES}]" + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${JOURNAL_NODES}" \ + start journalnode + ;; esac #--------------------------------------------------------- # ZK Failover controllers, if auto-HA is enabled -AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled) -if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then - echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]" - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$NAMENODES" \ - --script "$bin/hdfs" start zkfc +AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]') +if [[ "${AUTOHA_ENABLED}" = "true" ]]; then + echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]" + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${NAMENODES}" \ + start zkfc fi # eof Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh Wed Aug 20 01:34:29 2014 
@@ -17,17 +17,33 @@ # Run as root to start secure datanodes in a security-enabled cluster. -usage="Usage (run as root in order to start secure datanodes): start-secure-dns.sh" -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +function hadoop_usage { + echo "Usage: start-secure-dns.sh" +} -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) -if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then - "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" else - echo $usage + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi + +if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then + exec "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" start datanode "${dataStartOpt}" +else + echo hadoop_usage_and_exit 1 fi Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh Wed Aug 20 01:34:29 2014 @@ -15,14 +15,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +function hadoop_usage +{ + echo "Usage: stop-balancer.sh [--config confdir]" +} -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi # Stop balancer daemon. 
# Run this on the machine where the balancer is running -"$HADOOP_PREFIX"/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer +"${bin}/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" stop balancer Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh Wed Aug 20 01:34:29 2014 @@ -15,75 +15,100 @@ # See the License for the specific language governing permissions and # limitations under the License. -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +function hadoop_usage +{ + echo "Usage: start-balancer.sh [--config confdir] [-policy <policy>] [-threshold <threshold>]" +} + +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) + +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" +else + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi #--------------------------------------------------------- # namenodes -NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes) +NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes) echo "Stopping namenodes on [$NAMENODES]" -"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$NAMENODES" \ - --script "$bin/hdfs" stop namenode +"${bin}/hadoop-daemons.sh" \ +--config "${HADOOP_CONF_DIR}" \ +--hostnames "${NAMENODES}" \ +stop namenode #--------------------------------------------------------- # datanodes (using default slaves file) -if [ -n "$HADOOP_SECURE_DN_USER" ]; then +if [[ -n "${HADOOP_SECURE_DN_USER}" ]] && +[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then echo \ - "Attempting to stop secure cluster, skipping datanodes. " \ - "Run stop-secure-dns.sh as root to complete shutdown." + "ERROR: Attempting to stop secure cluster, skipping datanodes. " \ + "Run stop-secure-dns.sh as root to complete shutdown." 
else - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --script "$bin/hdfs" stop datanode + + echo "Stopping datanodes" + + "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode fi #--------------------------------------------------------- # secondary namenodes (if any) -SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>/dev/null) +SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null) -if [ -n "$SECONDARY_NAMENODES" ]; then - echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]" +if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then + SECONDARY_NAMENODES=$(hostname) +fi - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$SECONDARY_NAMENODES" \ - --script "$bin/hdfs" stop secondarynamenode +if [[ -n "${SECONDARY_NAMENODES}" ]]; then + echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]" + + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${SECONDARY_NAMENODES}" \ + stop secondarynamenode fi #--------------------------------------------------------- # quorumjournal nodes (if any) -SHARED_EDITS_DIR=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.shared.edits.dir 2>&-) +SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-) -case "$SHARED_EDITS_DIR" in -qjournal://*) - JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g') - echo "Stopping journal nodes [$JOURNAL_NODES]" - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$JOURNAL_NODES" \ - --script "$bin/hdfs" stop journalnode ;; +case "${SHARED_EDITS_DIR}" in + qjournal://*) + JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g') + echo "Stopping journal nodes [${JOURNAL_NODES}]" + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${JOURNAL_NODES}" \ + stop journalnode + ;; esac #--------------------------------------------------------- # ZK Failover controllers, if auto-HA is enabled -AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled) -if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then - echo "Stopping ZK Failover Controllers on NN hosts [$NAMENODES]" - "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$NAMENODES" \ - --script "$bin/hdfs" stop zkfc +AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]') +if [[ "${AUTOHA_ENABLED}" = "true" ]]; then + echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]" + "${bin}/hadoop-daemons.sh" \ + --config "${HADOOP_CONF_DIR}" \ + --hostnames "${NAMENODES}" \ + stop zkfc fi # eof Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh Wed Aug 20 01:34:29 2014 @@ -17,17 +17,33 @@ # Run as root to start secure 
datanodes in a security-enabled cluster. -usage="Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh" -bin=`dirname "${BASH_SOURCE-$0}"` -bin=`cd "$bin"; pwd` +function hadoop_usage { + echo "Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh" +} -DEFAULT_LIBEXEC_DIR="$bin"/../libexec -HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} -. $HADOOP_LIBEXEC_DIR/hdfs-config.sh +this="${BASH_SOURCE-$0}" +bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) -if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then - "$HADOOP_PREFIX"/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode +# let's locate libexec... +if [[ -n "${HADOOP_PREFIX}" ]]; then + DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec" else - echo $usage + DEFAULT_LIBEXEC_DIR="${bin}/../libexec" +fi + +HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}" +# shellcheck disable=SC2034 +HADOOP_NEW_CONFIG=true +if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then + . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" +else + echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 + exit 1 +fi + +if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then + "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode +else + hadoop_exit_with_usage 1 fi Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1613508-1619017 Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Wed Aug 20 01:34:29 2014 @@ -33,6 +33,7 @@ import org.apache.hadoop.classification. import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.hdfs.CorruptFileBlockIterator; @@ -448,6 +449,11 @@ public class Hdfs extends AbstractFileSy dfs.removeXAttr(getUriPath(path), name); } + @Override + public void access(Path path, final FsAction mode) throws IOException { + dfs.checkAccess(getUriPath(path), mode); + } + /** * Renew an existing delegation token. 
* Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Wed Aug 20 01:34:29 2014 @@ -32,19 +32,21 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY; @@ -60,8 +62,6 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; import java.io.BufferedOutputStream; import java.io.DataInputStream; @@ -91,7 +91,6 @@ import java.util.concurrent.atomic.Atomi import javax.net.SocketFactory; -import com.google.common.collect.Lists; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -112,22 +111,23 @@ import org.apache.hadoop.fs.MD5MD5CRC32C import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.XAttr; -import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.VolumeId; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; +import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; @@ -158,8 +158,8 @@ import org.apache.hadoop.hdfs.protocol.U import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; -import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; +import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; @@ -175,6 +175,7 @@ import org.apache.hadoop.hdfs.server.com import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; @@ -200,6 +201,7 @@ import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import com.google.common.net.InetAddresses; /******************************************************** @@ -2192,6 +2194,11 @@ public class DFSClient implements java.i return namenode.getDatanodeReport(type); } + public DatanodeStorageReport[] getDatanodeStorageReport( + DatanodeReportType type) throws IOException { + return namenode.getDatanodeStorageReport(type); + } + /** * Enter, leave or get safe mode. 
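[Editor's note] DFSClient gains getDatanodeStorageReport, a straight pass-through to the new ClientProtocol RPC shown later in this message. A rough sketch of how a privileged tool might consume it, assuming it already holds a DFSClient instance (DFSClient is audience-private, so ordinary applications should not depend on it; accessor names on DatanodeStorageReport are as used by the balancer and may differ across versions):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

    class StorageReportDump {
      // The caller supplies the client; constructing one is out of scope here.
      static void dump(DFSClient client) throws IOException {
        DatanodeStorageReport[] reports =
            client.getDatanodeStorageReport(DatanodeReportType.LIVE);
        for (DatanodeStorageReport r : reports) {
          // One entry per live datanode, each carrying its per-storage reports.
          System.out.println(r.getDatanodeInfo().getHostName()
              + " reports " + r.getStorageReports().length + " storage(s)");
        }
      }
    }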
* @@ -2826,6 +2833,17 @@ public class DFSClient implements java.i } } + public void checkAccess(String src, FsAction mode) throws IOException { + checkOpen(); + try { + namenode.checkAccess(src, mode); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Aug 20 01:34:29 2014 @@ -201,6 +201,9 @@ public class DFSConfigKeys extends Commo public static final String DFS_ADMIN = "dfs.cluster.administrators"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml"; + public static final String DFS_SERVER_HTTPS_KEYPASSWORD_KEY = "ssl.server.keystore.keypassword"; + public static final String DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY = "ssl.server.keystore.password"; + public static final String DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY = "ssl.server.truststore.password"; public static final String DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore"; public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false; public static final String DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY = "dfs.namenode.support.allow.format"; @@ -381,8 +384,6 @@ public class DFSConfigKeys extends Commo public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT; public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads"; public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096; - public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks"; - public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64; public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours"; public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0; public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed"; @@ -666,4 +667,8 @@ public class DFSConfigKeys extends Commo public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY = "dfs.datanode.slow.io.warning.threshold.ms"; public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300; + + public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY = + "dfs.datanode.block.id.layout.upgrade.threads"; + public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12; } Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Wed Aug 20 01:34:29 2014 @@ -2136,12 +2136,12 @@ public class DFSOutputStream extends FSO throw new IOException(msg); } try { - Thread.sleep(localTimeout); if (retries == 0) { throw new IOException("Unable to close file because the last block" + " does not have enough number of replicas."); } retries--; + Thread.sleep(localTimeout); localTimeout *= 2; if (Time.now() - localstart > 5000) { DFSClient.LOG.info("Could not complete " + src + " retrying..."); Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Aug 20 01:34:29 2014 @@ -33,6 +33,9 @@ import static org.apache.hadoop.hdfs.DFS import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY; import java.io.IOException; import java.io.PrintStream; @@ -1531,16 +1534,38 @@ public class DFSUtil { .needsClientAuth( sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT)) - .keyPassword(sslConf.get("ssl.server.keystore.keypassword")) + .keyPassword(getPassword(sslConf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY)) .keyStore(sslConf.get("ssl.server.keystore.location"), - sslConf.get("ssl.server.keystore.password"), + getPassword(sslConf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY), sslConf.get("ssl.server.keystore.type", "jks")) .trustStore(sslConf.get("ssl.server.truststore.location"), - sslConf.get("ssl.server.truststore.password"), + getPassword(sslConf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY), sslConf.get("ssl.server.truststore.type", "jks")); } /** + * Leverages the Configuration.getPassword method to attempt to get + * passwords from the CredentialProvider API before falling back to + * clear text in config - if falling back is allowed. 
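[Editor's note] The DFSUtil.getPassword helper introduced in this hunk wraps Configuration.getPassword so the SSL keystore/truststore passwords can be served by a CredentialProvider instead of clear text in ssl-server.xml. A hedged illustration of the same lookup pattern, standalone; the alias is the key defined earlier in DFSConfigKeys, and the null fallback mirrors the helper's behaviour:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    class KeystorePasswordLookup {
      static String lookup() {
        Configuration sslConf = new Configuration(false);
        sslConf.addResource("ssl-server.xml");
        try {
          // Consults any configured CredentialProvider first (see
          // hadoop.security.credential.provider.path), then falls back to the
          // clear-text value in ssl-server.xml if one is present.
          char[] pass = sslConf.getPassword("ssl.server.keystore.password");
          return pass != null ? new String(pass) : null;
        } catch (IOException ioe) {
          return null; // treat provider failures as "no password configured"
        }
      }
    }

An alias can typically be provisioned with something like "hadoop credential create ssl.server.keystore.password -provider <provider-uri>"; the provider URI is site-specific.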
+ * @param conf Configuration instance + * @param alias name of the credential to retreive + * @return String credential value or null + */ + static String getPassword(Configuration conf, String alias) { + String password = null; + try { + char[] passchars = conf.getPassword(alias); + if (passchars != null) { + password = new String(passchars); + } + } + catch (IOException ioe) { + password = null; + } + return password; + } + + /** * Converts a Date into an ISO-8601 formatted datetime string. */ public static String dateToIso8601String(Date date) { @@ -1643,9 +1668,11 @@ public class DFSUtil { .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey)); // initialize the webserver for uploading/downloading files. - LOG.info("Starting web server as: " - + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey), - httpAddr.getHostName())); + if (UserGroupInformation.isSecurityEnabled()) { + LOG.info("Starting web server as: " + + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey), + httpAddr.getHostName())); + } if (policy.isHttpEnabled()) { if (httpAddr.getPort() == 0) { Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Aug 20 01:34:29 2014 @@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -1898,4 +1899,23 @@ public class DistributedFileSystem exten } }.resolve(this, absF); } + + @Override + public void access(Path path, final FsAction mode) throws IOException { + final Path absF = fixRelativePart(path); + new FileSystemLinkResolver<Void>() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.checkAccess(getPathName(p), mode); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) + throws IOException { + fs.access(p, mode); + return null; + } + }.resolve(this, absF); + } } Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java Wed Aug 20 01:34:29 2014 @@ -18,6 +18,9 @@ package org.apache.hadoop.hdfs; +import 
java.util.Arrays; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -32,4 +35,11 @@ public enum StorageType { SSD; public static final StorageType DEFAULT = DISK; + public static final StorageType[] EMPTY_ARRAY = {}; + + private static final StorageType[] VALUES = values(); + + public static List<StorageType> asList() { + return Arrays.asList(VALUES); + } } \ No newline at end of file Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java Wed Aug 20 01:34:29 2014 @@ -50,6 +50,9 @@ public class Block implements Writable, public static final Pattern metaFilePattern = Pattern .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION + "$"); + public static final Pattern metaOrBlockFilePattern = Pattern + .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION + + ")?$"); public static boolean isBlockFilename(File f) { String name = f.getName(); @@ -65,6 +68,11 @@ public class Block implements Writable, return metaFilePattern.matcher(name).matches(); } + public static File metaToBlockFile(File metaFile) { + return new File(metaFile.getParent(), metaFile.getName().substring( + 0, metaFile.getName().lastIndexOf('_'))); + } + /** * Get generation stamp from the name of the metafile name */ @@ -75,10 +83,10 @@ public class Block implements Writable, } /** - * Get the blockId from the name of the metafile name + * Get the blockId from the name of the meta or block file */ - public static long getBlockId(String metaFile) { - Matcher m = metaFilePattern.matcher(metaFile); + public static long getBlockId(String metaOrBlockFile) { + Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile); return m.matches() ? 
Long.parseLong(m.group(1)) : 0; } Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1619019&r1=1619018&r2=1619019&view=diff ============================================================================== --- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original) +++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Aug 20 01:34:29 2014 @@ -24,6 +24,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -31,14 +32,14 @@ import org.apache.hadoop.fs.FileAlreadyE import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.XAttr; -import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdfs.security.t import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.AtMostOnce; @@ -655,6 +657,13 @@ public interface ClientProtocol { throws IOException; /** + * Get a report on the current datanode storages. + */ + @Idempotent + public DatanodeStorageReport[] getDatanodeStorageReport( + HdfsConstants.DatanodeReportType type) throws IOException; + + /** * Get the block size for the given file. * @param filename The name of the file * @return The number of bytes in each block @@ -1259,17 +1268,11 @@ public interface ClientProtocol { /** * Set xattr of a file or directory. - * A regular user only can set xattr of "user" namespace. - * A super user can set xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". * <p/> - * For xattr of "user" namespace, its access permissions are - * defined by the file or directory permission bits. - * XAttr will be set only when login user has correct permissions. 
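[Editor's note] The rewritten xattr javadoc above drops the per-namespace permission rules in favour of the "namespace.name" convention ("user.attr") and defers to the HDFS extended attributes user documentation. A small sketch of that naming convention through the public FileSystem API -- the attribute name and value are made up for illustration:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class XAttrExample {
      static void tagFile(Path file) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // The name must carry its namespace prefix, e.g. "user." for user xattrs.
        fs.setXAttr(file, "user.origin",
            "ingest-pipeline".getBytes(StandardCharsets.UTF_8));
        byte[] value = fs.getXAttr(file, "user.origin");
        System.out.println(new String(value, StandardCharsets.UTF_8));
      }
    }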
- * <p/> - * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes"> - * http://en.wikipedia.org/wiki/Extended_file_attributes</a> + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttr <code>XAttr</code> to set * @param flag set flag @@ -1280,18 +1283,13 @@ public interface ClientProtocol { throws IOException; /** - * Get xattrs of file or directory. Values in xAttrs parameter are ignored. - * If xattrs is null or empty, equals getting all xattrs of the file or - * directory. - * Only xattrs which login user has correct permissions will be returned. + * Get xattrs of a file or directory. Values in xAttrs parameter are ignored. + * If xAttrs is null or empty, this is the same as getting all xattrs of the + * file or directory. Only those xattrs for which the logged-in user has + * permissions to view are returned. * <p/> - * A regular user only can get xattr of "user" namespace. - * A super user can get xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. - * <p/> - * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes"> - * http://en.wikipedia.org/wiki/Extended_file_attributes</a> + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttrs xAttrs to get * @return List<XAttr> <code>XAttr</code> list @@ -1306,13 +1304,8 @@ public interface ClientProtocol { * Only the xattr names for which the logged in user has the permissions to * access will be returned. * <p/> - * A regular user only can get xattr names from the "user" namespace. - * A super user can get xattr names of the "user" and "trusted" namespace. - * XAttr names of the "security" and "system" namespaces are only used/exposed - * internally by the file system impl. - * <p/> - * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes"> - * http://en.wikipedia.org/wiki/Extended_file_attributes</a> + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttrs xAttrs to get * @return List<XAttr> <code>XAttr</code> list @@ -1324,19 +1317,33 @@ public interface ClientProtocol { /** * Remove xattr of a file or directory.Value in xAttr parameter is ignored. - * Name must be prefixed with user/trusted/security/system. - * <p/> - * A regular user only can remove xattr of "user" namespace. - * A super user can remove xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". * <p/> - * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes"> - * http://en.wikipedia.org/wiki/Extended_file_attributes</a> + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttr <code>XAttr</code> to remove * @throws IOException */ @AtMostOnce public void removeXAttr(String src, XAttr xAttr) throws IOException; + + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. 
+ * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @Idempotent + public void checkAccess(String path, FsAction mode) throws IOException; }
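[Editor's note] As the new checkAccess javadoc warns, the answer is advisory: permissions can change between the check and the use. Where possible, simply attempt the operation and handle the failure. A hedged sketch of both styles through the FileSystem API (class name and usage are illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    class ToctouExample {
      static void readIfAllowed(FileSystem fs, Path p) throws IOException {
        // Pre-check: convenient for UIs, but racy -- permissions may change
        // between access() returning and open() being called.
        try {
          fs.access(p, FsAction.READ);
        } catch (AccessControlException preCheckDenied) {
          System.out.println("Would be denied: " + preCheckDenied.getMessage());
          return;
        }
        // Race-free alternative: attempt the read and handle the denial.
        try (FSDataInputStream in = fs.open(p)) {
          System.out.println("First byte: " + in.read());
        } catch (AccessControlException denied) {
          System.out.println("Denied at use time: " + denied.getMessage());
        }
      }
    }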