AMBARI-20865. Remove redundant whitespace in Hadoop 3.0 configs (alejandro)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aef60264 Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aef60264 Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aef60264 Branch: refs/heads/branch-feature-AMBARI-12556 Commit: aef60264f105a3b060a91dea1d637638384f0289 Parents: f167236 Author: Alejandro Fernandez <afernan...@hortonworks.com> Authored: Wed Apr 26 14:04:44 2017 -0700 Committer: Alejandro Fernandez <afernan...@hortonworks.com> Committed: Mon May 1 15:04:14 2017 -0700 ---------------------------------------------------------------------- .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml | 200 +++++----- .../HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml | 382 +++++++++---------- .../HIVE/2.1.0.3.0/configuration/hcat-env.xml | 48 +-- .../HIVE/2.1.0.3.0/configuration/hive-env.xml | 78 ++-- .../configuration/hive-interactive-env.xml | 63 ++- .../YARN/3.0.0.3.0/configuration/yarn-env.xml | 206 +++++----- .../YARN/3.0.0.3.0/configuration/yarn-log4j.xml | 126 +++--- .../YARN/3.0.0.3.0/configuration/yarn-site.xml | 7 +- .../3.4.5/configuration/zookeeper-log4j.xml | 2 +- 9 files changed, 555 insertions(+), 557 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml index e447c52..e292e6e 100644 --- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml +++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml @@ -269,143 +269,143 @@ <display-name>hadoop-env template</display-name> <description>This is the jinja template for hadoop-env.sh file</description> <value> - # Set Hadoop-specific environment variables here. +# Set Hadoop-specific environment variables here. - # The only required environment variable is JAVA_HOME. All others are - # optional. When running a distributed configuration it is best to - # set JAVA_HOME in this file, so that it is correctly defined on - # remote nodes. +# The only required environment variable is JAVA_HOME. All others are +# optional. When running a distributed configuration it is best to +# set JAVA_HOME in this file, so that it is correctly defined on +# remote nodes. - # The java implementation to use. Required. - export JAVA_HOME={{java_home}} - export HADOOP_HOME_WARN_SUPPRESS=1 +# The java implementation to use. Required. +export JAVA_HOME={{java_home}} +export HADOOP_HOME_WARN_SUPPRESS=1 - # Hadoop home directory - export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop} +# Hadoop home directory +export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop} - # Hadoop Configuration Directory - #TODO: if env var set that can cause problems - export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}} +# Hadoop Configuration Directory +#TODO: if env var set that can cause problems +export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}} - # Path to jsvc required by secure datanode - export JSVC_HOME={{jsvc_path}} +# Path to jsvc required by secure datanode +export JSVC_HOME={{jsvc_path}} - # The maximum amount of heap to use, in MB. 
Default is 1000. - if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then - if [ "$HADOOP_HEAPSIZE" = "" ]; then - export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" - fi - else - export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" - fi +# The maximum amount of heap to use, in MB. Default is 1000. +if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then +if [ "$HADOOP_HEAPSIZE" = "" ]; then +export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" +fi +else +export HADOOP_HEAPSIZE="{{hadoop_heapsize}}" +fi - export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}" +export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}" - # Extra Java runtime options. Empty by default. - export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}" +# Extra Java runtime options. Empty by default. +export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}" - # Command specific options appended to HADOOP_OPTS when specified +# Command specific options appended to HADOOP_OPTS when specified - {% if java_version < 8 %} - export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}" - export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" +{% if java_version < 8 %} +export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}" +export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" - # The following applies to multiple commands (fs, dfs, fsck, distcp etc) - export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS" - {% else %} - export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}" - export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" +# The following applies to multiple commands (fs, dfs, fsck, distcp etc) +export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS" +{% else %} +export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}" +export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" - # The following applies to multiple commands (fs, dfs, fsck, distcp etc) - export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" - {% endif %} - HADOOP_JOBTRACKER_OPTS="-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}" +# The following applies to multiple commands (fs, dfs, fsck, distcp etc) +export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS" +{% endif %} +HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}" - HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}" - HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}" - HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}" +HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}" +HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}" +HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}" - # On secure datanodes, user to run the datanode as after dropping privileges - export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}} +# On secure datanodes, user to run the datanode as after dropping privileges +export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}} - # Extra ssh options. Empty by default. - export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR" +# Extra ssh options. Empty by default. +export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR" - # Where log files are stored. $HADOOP_HOME/logs by default. - export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER +# Where log files are stored. $HADOOP_HOME/logs by default. +export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER - # History server logs - export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER +# History server logs +export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER - # Where log files are stored in the secure data environment. - export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER +# Where log files are stored in the secure data environment. +export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER - # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default. - # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves +# File naming remote slave hosts. 
$HADOOP_HOME/conf/slaves by default. +# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves - # host:path where hadoop code should be rsync'd from. Unset by default. - # export HADOOP_MASTER=master:/home/$USER/src/hadoop +# host:path where hadoop code should be rsync'd from. Unset by default. +# export HADOOP_MASTER=master:/home/$USER/src/hadoop - # Seconds to sleep between slave commands. Unset by default. This - # can be useful in large clusters, where, e.g., slave rsyncs can - # otherwise arrive faster than the master can service them. - # export HADOOP_SLAVE_SLEEP=0.1 +# Seconds to sleep between slave commands. Unset by default. This +# can be useful in large clusters, where, e.g., slave rsyncs can +# otherwise arrive faster than the master can service them. +# export HADOOP_SLAVE_SLEEP=0.1 - # The directory where pid files are stored. /tmp by default. - export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER - export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER +# The directory where pid files are stored. /tmp by default. +export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER +export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER - # History server pid - export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER +# History server pid +export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER - YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT" +YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT" - # A string representing this instance of hadoop. $USER by default. - export HADOOP_IDENT_STRING=$USER +# A string representing this instance of hadoop. $USER by default. +export HADOOP_IDENT_STRING=$USER - # The scheduling priority for daemon processes. See 'man nice'. +# The scheduling priority for daemon processes. See 'man nice'. 
- # export HADOOP_NICENESS=10 +# export HADOOP_NICENESS=10 - # Add database libraries - JAVA_JDBC_LIBS="" - if [ -d "/usr/share/java" ]; then - for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2>/dev/null` - do - JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile - done - fi +# Add database libraries +JAVA_JDBC_LIBS="" +if [ -d "/usr/share/java" ]; then +for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2>/dev/null` +do +JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile +done +fi - # Add libraries required by nodemanager - MAPREDUCE_LIBS={{mapreduce_libs_path}} +# Add libraries required by nodemanager +MAPREDUCE_LIBS={{mapreduce_libs_path}} - # Add libraries to the hadoop classpath - some may not need a colon as they already include it - export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} +# Add libraries to the hadoop classpath - some may not need a colon as they already include it +export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} - if [ -d "/usr/lib/tez" ]; then - export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf - fi +if [ -d "/usr/lib/tez" ]; then +export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf +fi - # Setting path to hdfs command line - export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} +# Setting path to hdfs command line +export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} - #Mostly required for hadoop 2.0 - export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64 +#Mostly required for hadoop 2.0 +export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64 - {% if is_datanode_max_locked_memory_set %} - # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. - # Makes sense to fix only when runing DN as root - if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then - ulimit -l {{datanode_max_locked_memory}} - fi - {% endif %} - # Enable ACLs on zookeper znodes if required - {% if hadoop_zkfc_opts is defined %} - export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS" - {% endif %} +{% if is_datanode_max_locked_memory_set %} +# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. 
+# Makes sense to fix only when runing DN as root +if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then +ulimit -l {{datanode_max_locked_memory}} +fi +{% endif %} +# Enable ACLs on zookeper znodes if required +{% if hadoop_zkfc_opts is defined %} +export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS" +{% endif %} </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml index 5f6ec3f..f529494 100644 --- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml +++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml @@ -26,197 +26,197 @@ <display-name>hdfs-log4j template</display-name> <description>Custom log4j.properties</description> <value> - # - # Licensed to the Apache Software Foundation (ASF) under one - # or more contributor license agreements. See the NOTICE file - # distributed with this work for additional information - # regarding copyright ownership. The ASF licenses this file - # to you under the Apache License, Version 2.0 (the - # "License"); you may not use this file except in compliance - # with the License. You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, - # software distributed under the License is distributed on an - # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - # KIND, either express or implied. See the License for the - # specific language governing permissions and limitations - # under the License. - # - - - # Define some default values that can be overridden by system properties - # To change daemon root logger use hadoop_root_logger in hadoop-env - hadoop.root.logger=INFO,console - hadoop.log.dir=. - hadoop.log.file=hadoop.log - - - # Define the root logger to the system property "hadoop.root.logger". 
- log4j.rootLogger=${hadoop.root.logger}, EventCounter - - # Logging Threshold - log4j.threshhold=ALL - - # - # Daily Rolling File Appender - # - - log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender - log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} - - # Rollver at midnight - log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - - # 30-day backup - #log4j.appender.DRFA.MaxBackupIndex=30 - log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout - - # Pattern format: Date LogLevel LoggerName LogMessage - log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - # Debugging Pattern format - #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - - # - # console - # Add "console" to rootlogger above if you want to use this - # - - log4j.appender.console=org.apache.log4j.ConsoleAppender - log4j.appender.console.target=System.err - log4j.appender.console.layout=org.apache.log4j.PatternLayout - log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n - - # - # TaskLog Appender - # - - #Default values - hadoop.tasklog.taskid=null - hadoop.tasklog.iscleanup=false - hadoop.tasklog.noKeepSplits=4 - hadoop.tasklog.totalLogFileSize=100 - hadoop.tasklog.purgeLogSplits=true - hadoop.tasklog.logsRetainHours=12 - - log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender - log4j.appender.TLA.taskId=${hadoop.tasklog.taskid} - log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup} - log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} - - log4j.appender.TLA.layout=org.apache.log4j.PatternLayout - log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - - # - #Security audit appender - # - hadoop.security.logger=INFO,console - hadoop.security.log.maxfilesize=256MB - hadoop.security.log.maxbackupindex=20 - log4j.category.SecurityLogger=${hadoop.security.logger} - hadoop.security.log.file=SecurityAuth.audit - log4j.additivity.SecurityLogger=false - log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender - log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} - log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout - log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd - - log4j.appender.RFAS=org.apache.log4j.RollingFileAppender - log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} - log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout - log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize} - log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex} - - # - # hdfs audit logging - # - hdfs.audit.logger=INFO,console - log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} - log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false - log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender - log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log - log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout - log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n - log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd - - # - # NameNode metrics logging. - # The default is to retain two namenode-metrics.log files up to 64MB each. 
- # - namenode.metrics.logger=INFO,NullAppender - log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger} - log4j.additivity.NameNodeMetricsLog=false - log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender - log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log - log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout - log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n - log4j.appender.NNMETRICSRFA.MaxBackupIndex=1 - log4j.appender.NNMETRICSRFA.MaxFileSize=64MB - - # - # mapred audit logging - # - mapred.audit.logger=INFO,console - log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} - log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false - log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender - log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log - log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout - log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n - log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd - - # - # Rolling File Appender - # - - log4j.appender.RFA=org.apache.log4j.RollingFileAppender - log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - - # Logfile size and and 30-day backups - log4j.appender.RFA.MaxFileSize=256MB - log4j.appender.RFA.MaxBackupIndex=10 - - log4j.appender.RFA.layout=org.apache.log4j.PatternLayout - log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n - log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - - # Custom Logging levels - - hadoop.metrics.log.level=INFO - #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG - #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG - #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG - log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level} - - # Jets3t library - log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR - - # - # Null Appender - # Trap security logger on the hadoop client side - # - log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender - - # - # Event Counter Appender - # Sends counts of logging messages at different severity levels to Hadoop Metrics. - # - log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - - # Removes "deprecated" messages - log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN - - # - # HDFS block state change log from block manager - # - # Uncomment the following to suppress normal block state change - # messages from BlockManager in NameNode. - #log4j.logger.BlockStateChange=WARN +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + + +# Define some default values that can be overridden by system properties +# To change daemon root logger use hadoop_root_logger in hadoop-env +hadoop.root.logger=INFO,console +hadoop.log.dir=. +hadoop.log.file=hadoop.log + + +# Define the root logger to the system property "hadoop.root.logger". +log4j.rootLogger=${hadoop.root.logger}, EventCounter + +# Logging Threshold +log4j.threshhold=ALL + +# +# Daily Rolling File Appender +# + +log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} + +# Rollver at midnight +log4j.appender.DRFA.DatePattern=.yyyy-MM-dd + +# 30-day backup +#log4j.appender.DRFA.MaxBackupIndex=30 +log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout + +# Pattern format: Date LogLevel LoggerName LogMessage +log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +# Debugging Pattern format +#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n + + +# +# console +# Add "console" to rootlogger above if you want to use this +# + +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n + +# +# TaskLog Appender +# + +#Default values +hadoop.tasklog.taskid=null +hadoop.tasklog.iscleanup=false +hadoop.tasklog.noKeepSplits=4 +hadoop.tasklog.totalLogFileSize=100 +hadoop.tasklog.purgeLogSplits=true +hadoop.tasklog.logsRetainHours=12 + +log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender +log4j.appender.TLA.taskId=${hadoop.tasklog.taskid} +log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup} +log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} + +log4j.appender.TLA.layout=org.apache.log4j.PatternLayout +log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n + +# +#Security audit appender +# +hadoop.security.logger=INFO,console +hadoop.security.log.maxfilesize=256MB +hadoop.security.log.maxbackupindex=20 +log4j.category.SecurityLogger=${hadoop.security.logger} +hadoop.security.log.file=SecurityAuth.audit +log4j.additivity.SecurityLogger=false +log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} +log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout +log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd + +log4j.appender.RFAS=org.apache.log4j.RollingFileAppender +log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} +log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout +log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize} +log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex} + +# +# hdfs audit logging +# +hdfs.audit.logger=INFO,console +log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} +log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false +log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log +log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd + +# +# NameNode metrics logging. 
+# The default is to retain two namenode-metrics.log files up to 64MB each. +# +namenode.metrics.logger=INFO,NullAppender +log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger} +log4j.additivity.NameNodeMetricsLog=false +log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender +log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log +log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout +log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n +log4j.appender.NNMETRICSRFA.MaxBackupIndex=1 +log4j.appender.NNMETRICSRFA.MaxFileSize=64MB + +# +# mapred audit logging +# +mapred.audit.logger=INFO,console +log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} +log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false +log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log +log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd + +# +# Rolling File Appender +# + +log4j.appender.RFA=org.apache.log4j.RollingFileAppender +log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} + +# Logfile size and and 30-day backups +log4j.appender.RFA.MaxFileSize=256MB +log4j.appender.RFA.MaxBackupIndex=10 + +log4j.appender.RFA.layout=org.apache.log4j.PatternLayout +log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n +log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n + + +# Custom Logging levels + +hadoop.metrics.log.level=INFO +#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG +#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG +#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG +log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level} + +# Jets3t library +log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR + +# +# Null Appender +# Trap security logger on the hadoop client side +# +log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender + +# +# Event Counter Appender +# Sends counts of logging messages at different severity levels to Hadoop Metrics. +# +log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter + +# Removes "deprecated" messages +log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN + +# +# HDFS block state change log from block manager +# +# Uncomment the following to suppress normal block state change +# messages from BlockManager in NameNode. 
+#log4j.logger.BlockStateChange=WARN </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml index 964abdb..1244979 100644 --- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml +++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml @@ -26,31 +26,31 @@ <display-name>hcat-env template</display-name> <description>This is the jinja template for hcat-env.sh file</description> <value> - # Licensed to the Apache Software Foundation (ASF) under one - # or more contributor license agreements. See the NOTICE file - # distributed with this work for additional information - # regarding copyright ownership. The ASF licenses this file - # to you under the Apache License, Version 2.0 (the - # "License"); you may not use this file except in compliance - # with the License. You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
- JAVA_HOME={{java64_home}} - HCAT_PID_DIR={{hcat_pid_dir}}/ - HCAT_LOG_DIR={{hcat_log_dir}}/ - HCAT_CONF_DIR={{hcat_conf_dir}} - HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} - #DBROOT is the path where the connector jars are downloaded - DBROOT={{hcat_dbroot}} - USER={{webhcat_user}} - METASTORE_PORT={{hive_metastore_port}} +JAVA_HOME={{java64_home}} +HCAT_PID_DIR={{hcat_pid_dir}}/ +HCAT_LOG_DIR={{hcat_log_dir}}/ +HCAT_CONF_DIR={{hcat_conf_dir}} +HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} +#DBROOT is the path where the connector jars are downloaded +DBROOT={{hcat_dbroot}} +USER={{webhcat_user}} +METASTORE_PORT={{hive_metastore_port}} </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml index 4ed26f7..3cef34b 100644 --- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml +++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml @@ -436,56 +436,56 @@ <display-name>hive-env template</display-name> <description>This is the jinja template for hive-env.sh file</description> <value> - export HADOOP_USER_CLASSPATH_FIRST=true #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB - if [ "$SERVICE" = "cli" ]; then - if [ -z "$DEBUG" ]; then - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit" - else - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" - fi - fi +export HADOOP_USER_CLASSPATH_FIRST=true #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB +if [ "$SERVICE" = "cli" ]; then +if [ -z "$DEBUG" ]; then +export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit" +else +export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" +fi +fi - # The heap size of the jvm stared by hive shell script can be controlled via: +# The heap size of the jvm stared by hive shell script can be controlled via: - if [ "$SERVICE" = "metastore" ]; then - export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore - else - export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client - fi +if [ "$SERVICE" = "metastore" ]; then +export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore +else +export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client +fi - export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m" - export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}" +export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m" +export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}" - # Larger heap size may be required when running queries over large number of files or partitions. - # By default hive shell scripts use a heap size of 256 (MB). 
Larger heap size would also be - # appropriate for hive server (hwi etc). +# Larger heap size may be required when running queries over large number of files or partitions. +# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be +# appropriate for hive server (hwi etc). - # Set HADOOP_HOME to point to a specific hadoop install directory - HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} +# Set HADOOP_HOME to point to a specific hadoop install directory +HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} - export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}} +export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}} - # Hive Configuration Directory can be controlled by: - export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}} +# Hive Configuration Directory can be controlled by: +export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}} - # Folder containing extra libraries required for hive compilation/execution can be controlled by: - if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then - if [ -f "${HIVE_AUX_JARS_PATH}" ]; then - export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH} - elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then - export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar - fi - elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then - export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar - fi +# Folder containing extra libraries required for hive compilation/execution can be controlled by: +if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then + if [ -f "${HIVE_AUX_JARS_PATH}" ]; then + export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH} + elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then + export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar + fi +elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then + export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar +fi - export METASTORE_PORT={{hive_metastore_port}} +export METASTORE_PORT={{hive_metastore_port}} - {% if sqla_db_used or lib_dir_available %} - export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}" - export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}" - {% endif %} +{% if sqla_db_used or lib_dir_available %} +export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}" +export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}" +{% endif %} </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml index e2048a2..940fc79 100644 --- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml +++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml @@ -281,48 +281,47 @@ <display-name>hive-interactive-env template</display-name> <description>This is the jinja template for hive-env.sh file</description> <value> - if [ "$SERVICE" = "cli" ]; then - if [ -z "$DEBUG" ]; then - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 
-XX:+UseParNewGC -XX:-UseGCOverheadLimit" - else - export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" - fi - fi +if [ "$SERVICE" = "cli" ]; then +if [ -z "$DEBUG" ]; then +export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit" +else +export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" +fi +fi - # The heap size of the jvm stared by hive shell script can be controlled via: +# The heap size of the jvm stared by hive shell script can be controlled via: - if [ "$SERVICE" = "metastore" ]; then - export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore - else - export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client - fi +if [ "$SERVICE" = "metastore" ]; then +export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore +else +export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client +fi - export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m" - export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}" +export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m" +export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}" - # Larger heap size may be required when running queries over large number of files or partitions. - # By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be - # appropriate for hive server (hwi etc). +# Larger heap size may be required when running queries over large number of files or partitions. +# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be +# appropriate for hive server (hwi etc). 
- # Set HADOOP_HOME to point to a specific hadoop install directory - HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} +# Set HADOOP_HOME to point to a specific hadoop install directory +HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}} - # Hive Configuration Directory can be controlled by: - export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}} +# Hive Configuration Directory can be controlled by: +export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}} - # Add additional hcatalog jars - if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then - export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH} - else - export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar - fi +# Add additional hcatalog jars +if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then + export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH} +else + export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar +fi - export METASTORE_PORT={{hive_metastore_port}} - - # Spark assembly contains a conflicting copy of HiveConf from hive-1.2 - export HIVE_SKIP_SPARK_ASSEMBLY=true +export METASTORE_PORT={{hive_metastore_port}} +# Spark assembly contains a conflicting copy of HiveConf from hive-1.2 +export HIVE_SKIP_SPARK_ASSEMBLY=true </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml index 5fb4732..6a52865 100644 --- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml +++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml @@ -161,131 +161,131 @@ <display-name>yarn-env template</display-name> <description>This is the jinja template for yarn-env.sh file</description> <value> - export HADOOP_YARN_HOME={{hadoop_yarn_home}} - export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER - export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER - export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} - export JAVA_HOME={{java64_home}} - export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" +export HADOOP_YARN_HOME={{hadoop_yarn_home}} +export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER +export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER +export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} +export JAVA_HOME={{java64_home}} +export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" - # We need to add the EWMA appender for the yarn daemons only; - # however, YARN_ROOT_LOGGER is shared by the yarn client and the - # daemons. This is restrict the EWMA appender to daemons only. - INVOKER="${0##*/}" - if [ "$INVOKER" == "yarn-daemon.sh" ]; then - export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA} - fi +# We need to add the EWMA appender for the yarn daemons only; +# however, YARN_ROOT_LOGGER is shared by the yarn client and the +# daemons. This is restrict the EWMA appender to daemons only. 
+INVOKER="${0##*/}" +if [ "$INVOKER" == "yarn-daemon.sh" ]; then +export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA} +fi - # User for YARN daemons - export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} +# User for YARN daemons +export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} - # resolve links - $0 may be a softlink - export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" +# resolve links - $0 may be a softlink +export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" - # some Java parameters - # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ - if [ "$JAVA_HOME" != "" ]; then - #echo "run java in $JAVA_HOME" - JAVA_HOME=$JAVA_HOME - fi +# some Java parameters +# export JAVA_HOME=/home/y/libexec/jdk1.6.0/ +if [ "$JAVA_HOME" != "" ]; then +#echo "run java in $JAVA_HOME" +JAVA_HOME=$JAVA_HOME +fi - if [ "$JAVA_HOME" = "" ]; then - echo "Error: JAVA_HOME is not set." - exit 1 - fi +if [ "$JAVA_HOME" = "" ]; then +echo "Error: JAVA_HOME is not set." +exit 1 +fi - JAVA=$JAVA_HOME/bin/java - JAVA_HEAP_MAX=-Xmx1000m +JAVA=$JAVA_HOME/bin/java +JAVA_HEAP_MAX=-Xmx1000m - # For setting YARN specific HEAP sizes please use this - # Parameter and set appropriately - YARN_HEAPSIZE={{yarn_heapsize}} +# For setting YARN specific HEAP sizes please use this +# Parameter and set appropriately +YARN_HEAPSIZE={{yarn_heapsize}} - # check envvars which might override default args - if [ "$YARN_HEAPSIZE" != "" ]; then - JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" - fi +# check envvars which might override default args +if [ "$YARN_HEAPSIZE" != "" ]; then +JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" +fi - # Resource Manager specific parameters +# Resource Manager specific parameters - # Specify the max Heapsize for the ResourceManager using a numerical value - # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set - # the value to 1000. - # This value will be overridden by an Xmx setting specified in either YARN_OPTS - # and/or YARN_RESOURCEMANAGER_OPTS. - # If not specified, the default value will be picked from either YARN_HEAPMAX - # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. - export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} +# Specify the max Heapsize for the ResourceManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1000. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_RESOURCEMANAGER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} - # Specify the JVM options to be used when starting the ResourceManager. - # These options will be appended to the options specified as YARN_OPTS - # and therefore may override any similar flags set in YARN_OPTS - #export YARN_RESOURCEMANAGER_OPTS= +# Specify the JVM options to be used when starting the ResourceManager. +# These options will be appended to the options specified as YARN_OPTS +# and therefore may override any similar flags set in YARN_OPTS +#export YARN_RESOURCEMANAGER_OPTS= - # Node Manager specific parameters +# Node Manager specific parameters - # Specify the max Heapsize for the NodeManager using a numerical value - # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set - # the value to 1000. 
- # This value will be overridden by an Xmx setting specified in either YARN_OPTS - # and/or YARN_NODEMANAGER_OPTS. - # If not specified, the default value will be picked from either YARN_HEAPMAX - # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. - export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} +# Specify the max Heapsize for the NodeManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1000. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_NODEMANAGER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} - # Specify the max Heapsize for the timeline server using a numerical value - # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set - # the value to 1024. - # This value will be overridden by an Xmx setting specified in either YARN_OPTS - # and/or YARN_TIMELINESERVER_OPTS. - # If not specified, the default value will be picked from either YARN_HEAPMAX - # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. - export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}} +# Specify the max Heapsize for the timeline server using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1024. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_TIMELINESERVER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}} - # Specify the JVM options to be used when starting the NodeManager. - # These options will be appended to the options specified as YARN_OPTS - # and therefore may override any similar flags set in YARN_OPTS - #export YARN_NODEMANAGER_OPTS= +# Specify the JVM options to be used when starting the NodeManager. 
+# These options will be appended to the options specified as YARN_OPTS +# and therefore may override any similar flags set in YARN_OPTS +#export YARN_NODEMANAGER_OPTS= - # so that filenames w/ spaces are handled correctly in loops below - IFS= +# so that filenames w/ spaces are handled correctly in loops below +IFS= - # default log directory and file - if [ "$YARN_LOG_DIR" = "" ]; then - YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" - fi - if [ "$YARN_LOGFILE" = "" ]; then - YARN_LOGFILE='yarn.log' - fi +# default log directory and file +if [ "$YARN_LOG_DIR" = "" ]; then +YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" +fi +if [ "$YARN_LOGFILE" = "" ]; then +YARN_LOGFILE='yarn.log' +fi - # default policy file for service-level authorization - if [ "$YARN_POLICYFILE" = "" ]; then - YARN_POLICYFILE="hadoop-policy.xml" - fi +# default policy file for service-level authorization +if [ "$YARN_POLICYFILE" = "" ]; then +YARN_POLICYFILE="hadoop-policy.xml" +fi - # restore ordinary behaviour - unset IFS +# restore ordinary behaviour +unset IFS - YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" - YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" - YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" - YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" - YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" - YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" - YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" - YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" - export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" - export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT" - if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then - YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" - fi - YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" - YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}" - {% if rm_security_opts is defined %} - YARN_OPTS="{{rm_security_opts}} $YARN_OPTS" - {% endif %} +YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" +YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" +YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" +YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" +YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" +YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" +YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" +YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" +export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" +export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT" +if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then +YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" +fi +YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" +YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}" +{% if rm_security_opts is defined %} +YARN_OPTS="{{rm_security_opts}} $YARN_OPTS" +{% endif %} </value> <value-attributes> <type>content</type> http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml index a200e74..dab4516 
http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
index a200e74..dab4516 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
@@ -25,74 +25,74 @@
<display-name>yarn-log4j template</display-name>
<description>Custom log4j.properties</description>
<value>
- #Relative to Yarn Log Dir Prefix
- yarn.log.dir=.
- #
- # Job Summary Appender
- #
- # Use following logger to send summary to separate file defined by
- # hadoop.mapreduce.jobsummary.log.file rolled daily:
- # hadoop.mapreduce.jobsummary.logger=INFO,JSA
- #
- hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
- hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
- log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
- # Set the ResourceManager summary log filename
- yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
- # Set the ResourceManager summary log level and appender
- yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
- #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

- # To enable AppSummaryLogging for the RM,
- # set yarn.server.resourcemanager.appsummary.logger to
- # LEVEL,RMSUMMARY in hadoop-env.sh
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh

- # Appender for ResourceManager Application Summary Log
- # Requires the following properties to be set
- # - hadoop.log.dir (Hadoop Log directory)
- # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
- # - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
- log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
- log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
- log4j.appender.RMSUMMARY.MaxFileSize=256MB
- log4j.appender.RMSUMMARY.MaxBackupIndex=20
- log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
- log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
- log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
- log4j.appender.JSA.DatePattern=.yyyy-MM-dd
- log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
- log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
- log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false

- # Appender for viewing information for errors and warnings
- yarn.ewma.cleanupInterval=300
- yarn.ewma.messageAgeLimitSeconds=86400
- yarn.ewma.maxUniqueMessages=250
- log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
- log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
- log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
- log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

- # Audit logging for ResourceManager
- rm.audit.logger=${hadoop.root.logger}
- log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
- log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
- log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
- log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd

- # Audit logging for NodeManager
- nm.audit.logger=${hadoop.root.logger}
- log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
- log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
- log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
- log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
</value>
<value-attributes>
<type>content</type>
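As the template comments note, the commented-out RMSUMMARY hookup is activated from hadoop-env.sh/yarn-env.sh rather than from this file. A minimal sketch of that export, mirroring the -Dnm.audit.logger/-Drm.audit.logger pattern the yarn-env template above already uses (log4j 1.x resolves ${...} placeholders from system properties before the properties file, which is why a -D flag can override the default):

    # Illustrative: route RM app summaries to the RMSUMMARY appender.
    export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"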
http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index cc96cd7..64e0bcb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -411,9 +411,10 @@
</property>

<!-- These configs were inherited from HDP 2.1 -->
+ <!-- TODO, temporarily disable timeline service since failing due to YARN-6534 -->
<property>
<name>yarn.timeline-service.enabled</name>
- <value>true</value>
+ <value>false</value>
<description>Indicate to clients whether timeline service is enabled or not.
If enabled, clients will put entities and events to the timeline server.
</description>
@@ -1033,11 +1034,9 @@ yarn.node-labels.manager-class
</property>

<!--ats v2.0 properties-->
-
- <!-- TODO HDP 3.0, set version to 2.0 once ready. -->
<property>
<name>yarn.timeline-service.version</name>
- <value>1.5</value>
+ <value>2.0</value>
<description>Timeline service version we’re currently using.</description>
<on-ambari-upgrade add="false"/>
</property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
index 76dff64..ff9138e 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
@@ -30,7 +30,7 @@
</value-attributes>
<on-ambari-upgrade add="false"/>
</property>
-<property>
+ <property>
<name>zookeeper_log_number_of_backup_files</name>
<value>10</value>
<description>The number of backup files</description>
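The yarn-site.xml hunks above flip yarn.timeline-service.enabled to false (pending YARN-6534) and move yarn.timeline-service.version to 2.0. A quick way to confirm what actually got rendered on a cluster node is to read the values back; a minimal sketch, assuming the stock xmllint tool and the conventional /etc/hadoop/conf location, neither of which is set by this patch:

    #!/usr/bin/env bash
    # Print the two timeline-service properties this commit touches; after the
    # change they should read "false" and "2.0" respectively.
    CONF=${CONF:-/etc/hadoop/conf/yarn-site.xml}
    for prop in yarn.timeline-service.enabled yarn.timeline-service.version; do
      val=$(xmllint --xpath "string(//property[name='$prop']/value)" "$CONF")
      printf '%s = %s\n' "$prop" "$val"
    done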