Repository: ambari
Updated Branches:
  refs/heads/trunk 73ac59fc3 -> 2f8636e5e


http://git-wip-us.apache.org/repos/asf/ambari/blob/2f8636e5/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
new file mode 100644
index 0000000..abe3559
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hawq_default.json
@@ -0,0 +1,1324 @@
+{
+    "roleCommand": "SERVICE_CHECK", 
+    "forceRefreshConfigTagsBeforeExecution": [], 
+    "configuration_attributes": {
+        "ranger-hdfs-audit": {}, 
+        "webhcat-log4j": {}, 
+        "ranger-yarn-plugin-properties": {}, 
+        "ranger-hdfs-policymgr-ssl": {}, 
+        "pig-env": {}, 
+        "ranger-hbase-audit": {}, 
+        "ssl-client": {}, 
+        "ranger-hive-policymgr-ssl": {}, 
+        "yarn-client": {}, 
+        "ranger-hive-security": {}, 
+        "hbase-policy": {}, 
+        "webhcat-env": {}, 
+        "hcat-env": {}, 
+        "tez-site": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.datanode.failed.volumes.tolerated": "true", 
+                "dfs.datanode.data.dir": "true", 
+                "dfs.namenode.http-address": "true", 
+                "dfs.namenode.name.dir": "true", 
+                "dfs.webhdfs.enabled": "true"
+            }
+        }, 
+        "yarn-log4j": {}, 
+        "hive-env": {}, 
+        "tez-env": {}, 
+        "hbase-site": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "hive-log4j": {}, 
+        "hive-exec-log4j": {}, 
+        "ranger-yarn-audit": {}, 
+        "mapred-env": {}, 
+        "hawq-limits-env": {}, 
+        "ranger-hive-plugin-properties": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "pig-properties": {}, 
+        "ranger-hbase-plugin-properties": {}, 
+        "hawq-env": {}, 
+        "core-site": {}, 
+        "hive-site": {}, 
+        "yarn-env": {}, 
+        "hawq-site": {}, 
+        "ranger-yarn-policymgr-ssl": {}, 
+        "hawq-check-env": {}, 
+        "hadoop-env": {}, 
+        "hdfs-client": {}, 
+        "zookeeper-log4j": {}, 
+        "yarn-site": {}, 
+        "ranger-yarn-security": {}, 
+        "capacity-scheduler": {}, 
+        "hbase-log4j": {}, 
+        "ssl-server": {}, 
+        "webhcat-site": {}, 
+        "hbase-env": {}, 
+        "ranger-hive-audit": {}, 
+        "hawq-sysctl-env": {}, 
+        "pxf-site": {}, 
+        "zoo.cfg": {}, 
+        "ranger-hdfs-security": {}, 
+        "hiveserver2-site": {}, 
+        "ranger-hbase-policymgr-ssl": {}, 
+        "mapred-site": {}, 
+        "cluster-env": {}, 
+        "zookeeper-env": {}, 
+        "pig-log4j": {}, 
+        "ranger-hbase-security": {}
+    }, 
+    "commandParams": {
+        "command_timeout": "300", 
+        "script": "scripts/service_check.py", 
+        "script_type": "PYTHON", 
+        "service_package_folder": "common-services/HAWQ/2.0.0/package", 
+        "hooks_folder": "PHD/2.0.6/hooks"
+    }, 
+    "stageId": 0, 
+    "kerberosCommandParams": [], 
+    "clusterName": "phd", 
+    "hostname": "c6401.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/";, 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.8.0_60", 
+        "ambari_db_rca_url": 
"jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "jce_policy-8.zip", 
+        "java_version": "8", 
+        "oracle_jdbc_url": 
"http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar";, 
+        "stack_name": "PHD", 
+        "stack_version": "3.3", 
+        "host_sys_prepped": "false", 
+        "db_name": "ambari", 
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "current_version": "3.3.2.0-2950", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "mysql_jdbc_url": 
"http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar";
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "HAWQ", 
+    "role": "HAWQ_SERVICE_CHECK", 
+    "requestId": 159, 
+    "taskId": 3353, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "ranger-hdfs-audit": {
+            "xasecure.audit.destination.db": "false", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": 
"/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": 
"/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.credential.provider.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.provider.summary.enabled": "false", 
+            "xasecure.audit.destination.db.batch.filespool.dir": 
"/var/log/hadoop/hdfs/audit/db/spool", 
+            "xasecure.audit.destination.hdfs.dir": 
"hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", 
+            "xasecure.audit.destination.db.password": "crypted", 
+            "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}", 
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "webhcat-log4j": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing,\n# software distributed under the 
License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for 
the\n# specific language governing permissions and limitations\n# under the 
License.\n\n# Define some default values that can be overridden by system 
properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = 
.\nwebhcat.log.file = webhcat.lo
 g\n\nlog4j.rootLogger = ${webhcat.root.logger}\n\n# Logging 
Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard  =  
org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = 
${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at 
midnight\nlog4j.appender.DRFA.DatePattern = 
.yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = 
org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = 
org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern
 = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging 
settings\nlog4j.logger.com.sun.jersey = 
DEBUG\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = 
ERROR\nlog4j.logger.org.apache.hadoop = 
INFO\nlog4j.logger.org.apache.hadoop.conf = 
WARN\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty 
= INFO"
+        }, 
+        "ranger-yarn-plugin-properties": {
+            "hadoop.rpc.protection": "", 
+            "ranger-yarn-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "yarn", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "yarn"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": 
"/usr/phd/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": 
"/usr/phd/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": 
"myKeyFilePassword"
+        }, 
+        "pig-env": {
+            "content": 
"\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif 
[ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS 
-Dmapreduce.framework.name=yarn\"\nfi"
+        }, 
+        "ranger-hbase-audit": {}, 
+        "ssl-client": {
+            "ssl.client.truststore.reload.interval": "10000", 
+            "ssl.client.keystore.password": "bigdata", 
+            "ssl.client.truststore.type": "jks", 
+            "ssl.client.keystore.location": 
"/etc/security/clientKeys/keystore.jks", 
+            "ssl.client.truststore.location": 
"/etc/security/clientKeys/all.jks", 
+            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.keystore.type": "jks"
+        }, 
+        "ranger-hive-policymgr-ssl": {}, 
+        "yarn-client": {
+            "rpc.client.socket.linger.timeout": "-1", 
+            "rpc.client.write.timeout": "3600000", 
+            "rpc.client.timeout": "3600000", 
+            "yarn.client.failover.max.attempts": "15", 
+            "rpc.client.connect.tcpnodelay": "true", 
+            "rpc.client.connect.retry": "10", 
+            "rpc.client.connect.timeout": "600000", 
+            "rpc.client.ping.interval": "10000", 
+            "rpc.client.read.timeout": "3600000", 
+            "rpc.client.max.idle": "10000"
+        }, 
+        "ranger-hive-security": {}, 
+        "hbase-policy": {
+            "security.masterregion.protocol.acl": "*", 
+            "security.admin.protocol.acl": "*", 
+            "security.client.protocol.acl": "*"
+        }, 
+        "webhcat-env": {
+            "content": "\n# The file containing the running 
pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n#
 The console error 
log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console 
log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n#
 Set HADOOP_HOME to point to a specific hadoop install directory\nexport 
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}"
+        }, 
+        "hcat-env": {
+            "content": "\n      # Licensed to the Apache Software Foundation 
(ASF) under one\n      # or more contributor license agreements. See the NOTICE 
file\n      # distributed with this work for additional information\n      # 
regarding copyright ownership. The ASF licenses this file\n      # to you under 
the Apache License, Version 2.0 (the\n      # \"License\"); you may not use 
this file except in compliance\n      # with the License. You may obtain a copy 
of the License at\n      #\n      # 
http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by 
applicable law or agreed to in writing, software\n      # distributed under the 
License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for 
the specific language governing permissions and\n      # limitations under the 
License.\n\n      JAVA_HOME={{java64_home}}\n      
HCAT_PID_DIR={{hcat_pid_dir}}/\n      HCAT_L
 OG_DIR={{hcat_log_dir}}/\n      HCAT_CONF_DIR={{hcat_conf_dir}}\n      
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n      #DBROOT is the path where 
the connector jars are downloaded\n      DBROOT={{hcat_dbroot}}\n      
USER={{hcat_user}}\n      METASTORE_PORT={{hive_metastore_port}}"
+        }, 
+        "tez-site": {
+            "tez.task.get-task.sleep.interval-ms.max": "200", 
+            "tez.task.max-events-per-heartbeat": "500", 
+            "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc 
-XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.runtime.compress": "true", 
+            "tez.runtime.io.sort.mb": "272", 
+            "tez.runtime.convert.user-payload.to.history-text": "false", 
+            "tez.generate.debug.artifacts": "false", 
+            "tez.am.tez-ui.history-url.template": 
"__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", 
+            "tez.am.view-acls": "*", 
+            "tez.am.log.level": "INFO", 
+            "tez.counters.max.groups": "1000", 
+            "tez.counters.max": "5000", 
+            "tez.shuffle-vertex-manager.max-src-fraction": "0.4", 
+            "tez.runtime.unordered.output.buffer.size-mb": "100", 
+            "tez.task.resource.memory.mb": "1536", 
+            "tez.history.logging.service.class": 
"org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService", 
+            "tez.runtime.optimize.local.fetch": "true", 
+            "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc 
-XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.task.am.heartbeat.counter.interval-ms.max": "4000", 
+            "tez.am.max.app.attempts": "2", 
+            "tez.am.launch.env": 
"LD_LIBRARY_PATH=/usr/phd/${hdp.version}/hadoop/lib/native:/usr/phd/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
 
+            "tez.am.container.idle.release-timeout-max.millis": "20000", 
+            "tez.use.cluster.hadoop-libs": "false", 
+            "tez.am.launch.cluster-default.cmd-opts": "-server 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.am.container.idle.release-timeout-min.millis": "10000", 
+            "tez.grouping.min-size": "16777216", 
+            "tez.runtime.sorter.class": "PIPELINED", 
+            "tez.runtime.compress.codec": 
"org.apache.hadoop.io.compress.SnappyCodec", 
+            "tez.task.launch.cluster-default.cmd-opts": "-server 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.task.launch.env": 
"LD_LIBRARY_PATH=/usr/phd/${hdp.version}/hadoop/lib/native:/usr/phd/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
 
+            "tez.am.container.reuse.enabled": "true", 
+            "tez.session.am.dag.submit.timeout.secs": "300", 
+            "tez.grouping.split-waves": "1.7", 
+            "tez.grouping.max-size": "1073741824", 
+            "tez.session.client.timeout.secs": "-1", 
+            "tez.cluster.additional.classpath.prefix": 
"/usr/phd/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
 
+            "tez.lib.uris": "/phd/apps/${hdp.version}/tez/tez.tar.gz", 
+            "tez.staging-dir": "/tmp/${user.name}/staging", 
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250", 
+            "tez.am.maxtaskfailures.per.node": "10", 
+            "tez.task.generate.counters.per.io": "true", 
+            "tez.am.container.reuse.non-local-fallback.enabled": "false", 
+            "tez.am.container.reuse.rack-fallback.enabled": "true", 
+            "tez.runtime.pipelined.sorter.sort.threads": "2", 
+            "tez.am.container.reuse.locality.delay-allocation-millis": "250", 
+            "tez.shuffle-vertex-manager.min-src-fraction": "0.2", 
+            "tez.am.resource.memory.mb": "1536"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.datanode.handler.count": "60", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.content-summary.limit": "5000", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:50010", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.namenode.audit.log.async": "true", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "0.999", 
+            "dfs.namenode.checkpoint.edits.dir": 
"${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.blocksize": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.namenode.fslock.fair": "false", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.allow.truncate": "true", 
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
+            "dfs.namenode.accesstime.precision": "-1", 
+            "dfs.block.local-path-access.user": "gpadmin", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": 
"c6402.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.datanode.http.address": "0.0.0.0:50075", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.client.retry.policy.enabled": "false", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.encryption.key.provider.uri": "", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+        }, 
+        "yarn-log4j": {
+            "content": "\n#Relative to Yarn Log Dir 
Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to 
send summary to separate file defined by\n# 
hadoop.mapreduce.jobsummary.log.file rolled daily:\n# 
hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n#
 Set the ResourceManager summary log 
filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n#
 Set the ResourceManager summary log level and 
appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n#
 To enable AppSummaryLogging for the RM,\n# set 
yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in 
hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# 
Requires the 
 following properties to be set\n#    - hadoop.log.dir (Hadoop Log 
directory)\n#    - yarn.server.resourcemanager.appsummary.log.file (resource 
manager app summary log filename)\n#    - 
yarn.server.resourcemanager.appsummary.logger (resource manager app summary log 
level and 
appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: 
%m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Applic
 
ationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false\n\n#
 Appender for viewing information for errors and 
warnings\nyarn.ewma.cleanupInterval=300\nyarn.ewma.messageAgeLimitSeconds=86400\nyarn.ewma.maxUniqueMessages=250\nlog4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender\nlog4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}\nlog4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}\nlog4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}\n
    "
+        }, 
+        "hive-env": {
+            "hive_user_nproc_limit": "16000", 
+            "hive_pid_dir": "/var/run/hive", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "hive.client.heapsize": "1024", 
+            "hive_ambari_database": "MySQL", 
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z 
\"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 
-XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC 
-XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS 
-XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 
-XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by 
hive shell script can be controlled via:\n\nif [ \"$SERVICE\" = \"metastore\" 
]; then\n  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for 
HiveMetastore\nelse\n  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for 
HiveServer2 and Client\nfi\n\nexport HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  
-Xmx${HADOOP_HEAPSIZE}m\"\n\n# Larger heap size may be required when running 
queries over large number of files or partitions.\n# By default hive shell 
scripts use a heap size of 256 (MB).  Larger heap size would also be\n# 
appropriate for
  hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop 
install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive 
Configuration Directory can be controlled by:\nexport 
HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries 
required for hive compilation/execution can be controlled by:\nif [ 
\"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  if [ -f \"${HIVE_AUX_JARS_PATH}\" 
]; then\n    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n  elif [ -d 
\"/usr/phd/current/hive-webhcat/share/hcatalog\" ]; then\n    export 
HIVE_AUX_JARS_PATH=/usr/phd/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n
  fi\nelif [ -d \"/usr/phd/current/hive-webhcat/share/hcatalog\" ]; then\n  
export 
HIVE_AUX_JARS_PATH=/usr/phd/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\nfi\n\nexport
 METASTORE_PORT={{hive_metastore_port}}\n\n{% if sqla_db_used or 
lib_dir_available %}\nexport 
LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\nexport JA
 VA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n{% endif %}", 
+            "hive_timeline_logging_enabled": "true", 
+            "hive_user_nofile_limit": "32000", 
+            "hive.metastore.heapsize": "512", 
+            "hive_database_name": "hive", 
+            "hive_database_type": "mysql", 
+            "hive_user": "hive", 
+            "hive_database": "New MySQL Database", 
+            "hive.heapsize": "1082", 
+            "hcat_pid_dir": "/var/run/webhcat", 
+            "cost_based_optimizer": "On", 
+            "webhcat_user": "hcat", 
+            "hive_security_authorization": "None", 
+            "hive_exec_orc_storage_strategy": "SPEED", 
+            "hive_log_dir": "/var/log/hive", 
+            "hive_txn_acid": "off", 
+            "hcat_user": "hcat"
+        }, 
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport 
TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop 
install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The 
java implementation to use.\nexport JAVA_HOME={{java64_home}}\n    ", 
+            "tez_user": "tez"
+        }, 
+        "hbase-site": {
+            "hbase.regionserver.wal.codec": 
"org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", 
+            "hbase.master.info.bindAddress": "0.0.0.0", 
+            "hbase.regionserver.port": "16020", 
+            "hbase.client.keyvalue.maxsize": "1048576", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.hregion.majorcompaction.jitter": "0.50", 
+            "hbase.security.authentication": "simple", 
+            "hbase.rootdir": 
"hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.rpc.timeout": "90000", 
+            "hbase.regionserver.handler.count": "30", 
+            "hbase.hregion.majorcompaction": "604800000", 
+            "hbase.rpc.protection": "authentication", 
+            "hbase.hregion.memstore.block.multiplier": "4", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.coprocessor.region.classes": 
"org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.hstore.compaction.max": "10", 
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging", 
+            "zookeeper.session.timeout": "90000", 
+            "hbase.regionserver.global.memstore.size": "0.4", 
+            "hbase.tmp.dir": "/tmp/hbase-${user.name}", 
+            "hfile.block.cache.size": "0.4", 
+            "hbase.hregion.max.filesize": "10737418240", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.client.retries.number": "35", 
+            "hbase.defaults.for.version.skip": "true", 
+            "hbase.master.info.port": "16010", 
+            "hbase.zookeeper.quorum": 
"c6403.ambari.apache.org,c6402.ambari.apache.org,c6401.ambari.apache.org", 
+            "hbase.regionserver.info.port": "16030", 
+            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.coprocessor.master.classes": "", 
+            "hbase.hstore.blockingStoreFiles": "10", 
+            "hbase.master.port": "16000", 
+            "hbase.security.authorization": "false", 
+            "phoenix.query.timeoutMs": "60000", 
+            "hbase.local.dir": "${hbase.tmp.dir}/local", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "hbase.zookeeper.useMulti": "true"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# 
NameNode metrics logging.\n# The default is to retain two namenode-metrics.log 
files up to 64MB 
each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601}
 
%m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n#
 mapred audit logging\n#\nmapred
 
.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 
ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n#
 Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n#
 HDFS block state change log from block manager\n#\n# Uncomment the following 
to suppress normal block state change\n# messages from BlockManager in 
NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN\n    "
+        }, 
+        "hive-log4j": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the 
License.\n\n# Define some default values that can be overridden by system 
properties\nhive.log.threshold=ALL\nhive.root.logger=INFO,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.na
 me}\nhive.log.file=hive.log\n\n# Define the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, 
EventCounter\n\n# Logging 
Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File 
Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to 
use separate log files\n# for different CLI session.\n#\n# 
log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t
 ]: %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger 
above if you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} [%t]: %p %c{2}: 
%m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging 
levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts 
of logging messages at different severity levels to Hadoop 
Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,DRFA\nlog4j.category.Datastore=ERROR,DRFA\nlog4j.category.Datastore.Schema=ERROR,DRFA\nlog4j.category.JPOX.Datastore=ERROR,DRFA\nlog4j.category.JPOX.Plugin=ERROR,DRFA\nlog4j.category.JPOX.MetaData=ERROR,DRFA\nlog4j.category.JPOX.Query=ERROR,DRFA\nlog4j.category.JPOX.General=ERROR,DRFA\nlog4j.category.JPOX.En
 hancer=ERROR,DRFA\n\n\n# Silence useless ZK 
logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA"
+        }, 
+        "hive-exec-log4j": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the 
License.\n\n# Define some default values that can be overridden by system 
properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.na
 me}\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the 
root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, 
EventCounter\n\n# Logging 
Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File 
Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm
 :ss} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# 
Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,FA\nlog4j.category.Datastore=ERROR,FA\nlog4j.category.Datastore.Schema=ERROR,FA\nlog4j.category.JPOX.Datastore=ERROR,FA\nlog4j.category.JPOX.Plugin=ERROR,FA\nlog4j.category.JPOX.MetaData=ERROR,FA\nlog4j.category.JPOX.Query=ERROR,FA\nlog4j.category.JPOX.General=ERROR,FA\nlog4j.category.JPOX.Enhancer=ERROR,FA\n\n\n#
 Silence useless ZK 
logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA"
+        }, 
+        "ranger-yarn-audit": {
+            "xasecure.audit.destination.db": "false", 
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": 
"/var/log/hadoop/yarn/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": 
"/var/log/hadoop/yarn/audit/hdfs/spool", 
+            "xasecure.audit.credential.provider.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.provider.summary.enabled": "false", 
+            "xasecure.audit.destination.db.batch.filespool.dir": 
"/var/log/hadoop/yarn/audit/db/spool", 
+            "xasecure.audit.destination.hdfs.dir": 
"hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", 
+            "xasecure.audit.destination.db.password": "crypted", 
+            "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}", 
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "mapred-env": {
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "content": "\n# export 
JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport 
HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport 
HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export 
HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log 
files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export 
HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export 
HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export 
HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. 
$USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for 
daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION 
$HADOOP_OPTS\"\nexport HADOOP_OPTS=\"-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} 
$HADOOP_OPTS\"\nexport 
JAVA_LIBRARY_PATH=\"${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}\"\n    ", 
+            "mapred_user_nofile_limit": "32768", 
+            "mapred_user_nproc_limit": "65536", 
+            "mapred_user": "mapred"
+        }, 
+        "hawq-limits-env": {
+            "hard_nofile": "2900000", 
+            "hard_nproc": "131072", 
+            "soft_nproc": "131072", 
+            "soft_nofile": "2900000"
+        }, 
+        "ranger-hive-plugin-properties": {}, 
+        "ranger-hdfs-plugin-properties": {
+            "hadoop.rpc.protection": "", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "pig-properties": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n\n# Pig 
configuration file. All values can be overwritten by command line\n# arguments; 
for a description of the properties, run\n#\n#     pig -h properties\n#\n\n####
 ########################################################################\n#\n# 
== Logging properties\n#\n\n# Location of pig log file. If blank, a file with a 
timestamped slug\n# ('pig_1399336559369.log') will be generated in the current 
working directory.\n#\n# pig.logfile=\n# pig.logfile=/tmp/pig-err.log\n\n# 
Log4j configuration file. Set at runtime with the -4 parameter. The source\n# 
distribution has a ./conf/log4j.properties.template file you can rename and\n# 
customize.\n#\n# log4jconf=./conf/log4j.properties\n\n# Verbose Output.\n# * 
false (default): print only INFO and above to screen\n# * true: Print all log 
messages to screen\n#\n# verbose=false\n\n# Omit timestamps on log messages. 
(default: false)\n#\n# brief=false\n\n# Logging level. 
debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)\n#\n# debug=INFO\n\n# Roll up 
warnings across tasks, so that when millions of mappers suddenly cry\n# out in 
error they are partially silenced. (default, recommended: true)\n#\n# 
aggregate.war
 ning=true\n\n# Should DESCRIBE pretty-print its schema?\n# * false (default): 
print on a single-line, suitable for pasting back in to your script\n# * true 
(recommended): prints on multiple lines with indentation, much more 
readable\n#\n# pig.pretty.print.schema=false\n\n# === Profiling UDFs  ===\n\n# 
Turn on UDF timers? This will cause two counters to be\n# tracked for every UDF 
and LoadFunc in your script: approx_microsecs measures\n# approximate time 
spent inside a UDF approx_invocations reports the approximate\n# number of 
times the UDF was invoked.\n#\n# * false (default): do not record timing 
information of UDFs.\n# * true: report UDF performance. Uses more counters, but 
gives more insight\n#   into script operation\n#\n# pig.udf.profile=false\n\n# 
Specify frequency of profiling (default: every 100th).\n# 
pig.udf.profile.frequency=100\n\n############################################################################\n#\n#
 == Site-specific Properties\n#\n\n# Execution Mode. Local 
 mode is much faster, but only suitable for small amounts\n# of data. Local 
mode interprets paths on the local file system; Mapreduce mode\n# on the HDFS. 
Read more under 'Execution Modes' within the Getting Started\n# 
documentation.\n#\n# * mapreduce (default): use the Hadoop cluster defined in 
your Hadoop config files\n# * local: use local mode\n# * tez: use Tez on Hadoop 
cluster\n# * tez_local: use Tez local mode\n#\n# exectype=mapreduce\n\n# 
Bootstrap file with default statements to execute in every Pig job, similar 
to\n# .bashrc.  If blank, uses the file '.pigbootup' from your home directory; 
If a\n# value is supplied, that file is NOT loaded.  This does not do tilde 
expansion\n# -- you must supply the full path to the file.\n#\n# 
pig.load.default.statements=\n# 
pig.load.default.statements=/home/bob/.pigrc\n\n# Kill all waiting/running MR 
jobs upon a MR job failure? (default: false) If\n# false, jobs that can proceed 
independently will do so unless a parent stage\n# fails. If tr
 ue, the failure of any stage in the script kills all jobs.\n#\n# 
stop.on.failure=false\n\n# File containing the pig script to run. Rarely set in 
the properties file.\n# Commandline: -f\n#\n# file=\n\n# Jarfile to load, colon 
separated. Rarely used.\n#\n# jar=\n\n# Register additional .jar files to use 
with your Pig script.\n# Most typically used as a command line option (see 
http://pig.apache.org/docs/r0.12.0/basic.html#register):\n#\n#     pig 
-Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar\n#\n# 
pig.additional.jars=<colon separated list of jars with optional wildcards>\n# 
pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar\n\n#
 Specify potential packages to which a UDF or a group of UDFs belong,\n# 
eliminating the need to qualify the UDF on every call. See\n# 
http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names\n#\n# Commandline 
use:\n#\n#     pig \\\n#      
  
-Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar
 \\\n#       
-Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \\\n#     
  happy_job.pig\n#\n# udf.import.list=<colon separated list of imports>\n# 
udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util\n\n#\n#
 Reuse jars across jobs run by the same user? (default: false) If enabled, 
jars\n# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since 
most\n# jars change infrequently, this gives a minor speedup.\n#\n# 
pig.user.cache.enabled=false\n\n# Base path for storing jars cached by the 
pig.user.cache.enabled feature. (default: /tmp)\n#\n# 
pig.user.cache.location=/tmp\n\n# Replication factor for cached jars. If not 
specified mapred.submit.replication\n# is used, whose default is 10.\n#\n# 
pig.user.cache.replication=10\n\n# Default UTC offset. (default: the h
 ost's current UTC offset) Supply a UTC\n# offset in Java's timezone format: 
e.g., +08:00.\n#\n# 
pig.datetime.default.tz=\n\n############################################################################\n#\n#
 Memory impacting properties\n#\n\n# Amount of memory (as fraction of heap) 
allocated to bags before a spill is\n# forced. Default is 0.2, meaning 20% of 
available memory. Note that this memory\n# is shared across all large bags used 
by the application. See\n# 
http://pig.apache.org/docs/r0.12.0/perf.html#memory-management\n#\n# 
pig.cachedbag.memusage=0.2\n\n# Don't spill bags smaller than this size 
(bytes). Default: 5000000, or about\n# 5MB. Usually, the more spilling the 
longer runtime, so you might want to tune\n# it according to heap size of each 
task and so forth.\n#\n# pig.spill.size.threshold=5000000\n\n# EXPERIMENTAL: If 
a file bigger than this size (bytes) is spilled -- thus\n# freeing a bunch of 
ram -- tell the JVM to perform garbage collection.  This\n# should help reduc
 e the number of files being spilled, but causes more-frequent\n# garbage 
collection. Default: 40000000 (about 40 MB)\n#\n# 
pig.spill.gc.activation.size=40000000\n\n# Maximum amount of data to replicate 
using the distributed cache when doing\n# fragment-replicated join. (default: 
1000000000, about 1GB) Consider increasing\n# this in a production environment, 
but carefully.\n#\n# pig.join.replicated.max.bytes=1000000000\n\n# Fraction of 
heap available for the reducer to perform a skewed join. A low\n# fraction 
forces Pig to use more reducers, but increases the copying cost. See\n# 
http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins\n#\n# 
pig.skewedjoin.reduce.memusage=0.3\n\n#\n# === SchemaTuple ===\n#\n# The 
SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to\n# 
generate a custom Java class to hold records. Otherwise, tuples are loaded 
as\n# a plain list that is unaware of its contents' schema -- and so each 
element\n# has to be wrapped as a Java object on its 
 own. This can provide more efficient\n# CPU utilization, serialization, and 
most of all memory usage.\n#\n# This feature is considered experimental and is 
off by default. You can\n# selectively enable it for specific operations using 
pig.schematuple.udf,\n# pig.schematuple.load, pig.schematuple.fr_join and 
pig.schematuple.merge_join\n#\n\n# Enable the SchemaTuple optimization in all 
available cases? (default: false; recommended: true)\n#\n# 
pig.schematuple=false\n\n# EXPERIMENTAL: Use SchemaTuples with UDFs (default: 
value of pig.schematuple).\n# pig.schematuple.udf=false\n\n# EXPERIMENTAL, 
CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc's with\n# known schemas 
should output SchemaTuples. (default: value of pig.schematuple)\n# 
pig.schematuple.load=false\n\n# EXPERIMENTAL: Use SchemaTuples in replicated 
joins. The potential memory\n# saving here is significant. (default: value of 
pig.schematuple)\n# pig.schematuple.fr_join=false\n\n# EXPERIMENTAL: Use 
SchemaTuples in merge joi
 ns. (default: value of pig.schematuple).\n# 
pig.schematuple.merge_join=false\n\n############################################################################\n#\n#
 Serialization options\n#\n\n# Omit empty part files from the output? (default: 
false)\n#\n# * false (default): reducers generates an output file, even if 
output is empty\n# * true (recommended): do not generate zero-byte part 
files\n#\n# The default behavior of MapReduce is to generate an empty file for 
no data, so\n# Pig follows that. But many small files can cause annoying extra 
map tasks and\n# put load on the HDFS, so consider setting this to 'true'\n#\n# 
pig.output.lazy=false\n\n#\n# === Tempfile Handling\n#\n\n# EXPERIMENTAL: 
Storage format for temporary files generated by intermediate\n# stages of Pig 
jobs. This can provide significant speed increases for certain\n# codecs, as 
reducing the amount of data transferred to and from disk can more\n# than make 
up for the cost of compression/compression. Recommend that you
  set\n# up LZO compression in Hadoop and specify tfile storage.\n#\n# Compress 
temporary files?\n# * false (default): do not compress\n# * true (recommended): 
compress temporary files.\n#\n# pig.tmpfilecompression=false\n# 
pig.tmpfilecompression=true\n\n# Tempfile storage container type.\n#\n# * tfile 
(default, recommended): more efficient, but only supports supports gz(gzip) and 
lzo compression.\n#   
https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf\n#
 * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression\n#\n# 
pig.tmpfilecompression.storage=tfile\n\n# Codec types for intermediate job 
files. tfile supports gz(gzip) and lzo;\n# seqfile support gz(gzip), lzo, 
snappy, bzip2\n#\n# * lzo (recommended with caveats): moderate compression, low 
cpu burden;\n#   typically leads to a noticeable speedup. Best default choice, 
but you must\n#   set up LZO independently due to license incompatibility\n# * 
snappy: moderate compression, l
 ow cpu burden; typically leads to a noticeable speedup..\n# * gz (default): 
higher compression, high CPU burden. Typically leads to a noticeable 
slowdown.\n# * bzip2: most compression, major CPU burden. Typically leads to a 
noticeable slowdown.\n#\n# pig.tmpfilecompression.codec=gzip\n\n#\n# === Split 
Combining\n#\n\n#\n# Should pig try to combine small files for fewer map tasks? 
This improves the\n# efficiency of jobs with many small input files, reduces 
the overhead on the\n# jobtracker, and reduces the number of output files a 
map-only job\n# produces. However, it only works with certain loaders and 
increases non-local\n# map tasks. See 
http://pig.apache.org/docs/r0.12.0/perf.html#combine-files\n#\n# * false 
(default, recommended): _do_ combine files\n# * true: do not combine 
files\n#\n# pig.noSplitCombination=false\n\n#\n# Size, in bytes, of data to be 
processed by a single map. Smaller files are\n# combined until this size is 
reached. If unset, defaults to the file system's\n#
  default block size.\n#\n# pig.maxCombinedSplitSize=\n\n# 
###########################################################################\n#\n#
 Execution options\n#\n\n# Should pig omit combiners? (default, recommended: 
false -- meaning pig _will_\n# use combiners)\n#\n# When combiners work well, 
they eliminate a significant amount of\n# data. However, if they do not 
eliminate much data -- say, a DISTINCT operation\n# that only eliminates 5% of 
the records -- they add a noticeable overhead to\n# the job. So the recommended 
default is false (use combiners), selectively\n# disabling them per-job:\n#\n#  
   pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig\n#\n# 
pig.exec.nocombiner=false\n\n# EXPERIMENTAL: Aggregate records in map task 
before sending to the combiner?\n# (default: false, 10; recommended: true, 10). 
In cases where there is a massive\n# reduction of data in the aggregation step, 
pig can do a first pass of\n# aggregation before the data even leaves the 
mapper, saving much serialization\n# overhead. It's off by default but can give a major 
improvement to\n# group-and-aggregate operations. Pig skips partial aggregation 
unless reduction\n# is better than a factor of minReduction (default: 10). 
See\n# 
http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation\n#\n# 
pig.exec.mapPartAgg=false\n# pig.exec.mapPartAgg.minReduction=10\n\n#\n# === 
Control how many reducers are used.\n#\n\n# Estimate number of reducers naively 
using a fixed amount of data per\n# reducer. Optimally, you have both fewer 
reducers than available reduce slots,\n# and reducers that are neither getting 
too little data (less than a half-GB or\n# so) nor too much data (more than 2-3 
times the reducer child process max heap\n# size). The default of 1000000000 
(about 1GB) is probably low for a production\n# cluster -- however it's much 
worse to set this too high (reducers spill many\n# times over in group-sort) 
than too low (delay waiting for reduce slots).\n#\n# pig.exec.reducers.bytes.per.reducer=1000000000\n#\n# Don't ever use more than this 
many reducers. (default: 999)\n#\n# pig.exec.reducers.max=999\n\n#\n# === Local 
mode for small jobs\n#\n\n# EXPERIMENTAL: Use local mode for small jobs? If 
true, jobs with input data\n# size smaller than pig.auto.local.input.maxbytes 
bytes and one or no reducers\n# are run in local mode, which is much faster. 
Note that file paths are still\n# interpreted as pig.exectype implies.\n#\n# * 
true (recommended): allow local mode for small jobs, which is much faster.\n# * 
false (default): always use pig.exectype.\n#\n# 
pig.auto.local.enabled=false\n\n#\n# Definition of a small job for the 
pig.auto.local.enabled feature. Only jobs\n# with less than this many bytes are candidates to run locally (default:\n# 100000000 bytes, about 100MB)\n#\n# 
pig.auto.local.input.maxbytes=100000000\n\n############################################################################\n#\n#
 Security Features\n#\n\n# Comma-delimited list of commands/operators that are disallowed. This security\n# feature can be used by 
administrators to block use of certain commands by\n# users.\n#\n# * <blank> 
(default): all commands and operators are allowed.\n# * fs,set (for example): 
block all filesystem commands and config changes from pig scripts.\n#\n# 
pig.blacklist=\n# pig.blacklist=fs,set\n\n# Comma-delimited list of the only 
commands/operators that are allowed. This\n# security feature can be used by 
administrators to block use of certain\n# commands by users.\n#\n# * <blank> 
(default): all commands and operators not on the pig.blacklist are allowed.\n# 
* load,store,filter,group: only LOAD, STORE, FILTER, GROUP\n#   from pig 
scripts. All other commands and operators will fail.\n#\n# pig.whitelist=\n# 
pig.whitelist=load,store,filter,group\n\n#####################################################################\n#\n#
 Advanced Site-specific Customizations\n#\n\n# Remove intermediate output 
files?\n#\n# * true (default, recommended): 
 remove the files\n# * false: do NOT remove the files. You must clean them up 
yourself.\n#\n# Keeping them is useful for advanced debugging, but can be 
dangerous -- you\n# must clean them up yourself.  Inspect the intermediate 
outputs with\n#\n#     LOAD '/path/to/tmp/file' USING 
org.apache.pig.impl.io.TFileStorage();\n#\n# (Or ...SequenceFileInterStorage if 
pig.tmpfilecompression.storage is seqfile)\n#\n# 
pig.delete.temp.files=true\n\n# EXPERIMENTAL: A Pig Progress Notification 
Listener (PPNL) lets you wire pig's\n# progress into your visibility stack. To 
use a PPNL, supply the fully qualified\n# class name of a PPNL implementation. 
Note that only one PPNL can be set up, so\n# if you need several, write a PPNL 
that will chain them.\n#\n# See https://github.com/twitter/ambrose for a pretty 
awesome one of these\n#\n# pig.notification.listener=<fully qualified class 
name of a PPNL implementation>\n\n# String argument to pass to your PPNL 
constructor (optional). Only a single\n# string 
 value is allowed. (default none)\n#\n# 
pig.notification.listener.arg=<somevalue>\n\n# EXPERIMENTAL: Class invoked to 
estimate the number of reducers to use.\n# (default: 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)\n#\n#
 If you don't know how or why to write a PigReducerEstimator, you're 
unlikely\n# to use this. By default, the naive 
mapReduceLayer.InputSizeReducerEstimator is\n# used, but you can specify 
anything implementing the interface\n# 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator\n#\n#
 pig.exec.reducer.estimator=<fully qualified class name of a 
PigReducerEstimator implementation>\n\n# Optional String argument to pass to 
your PigReducerEstimator. (default: none;\n# a single String argument is 
allowed).\n#\n# pig.exec.reducer.estimator.arg=<somevalue>\n\n# Class invoked 
to report the size of reducers output. By default, the reducers'\n# output is 
computed as the total size of output files. But not every storage is\n# file-based, and so this logic can be replaced by 
implementing the interface\n# 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader\n#
 If you need to register more than one reader, you can register them as a 
comma\n# separated list. Every reader implements a boolean supports(POStore 
sto) method.\n# When there are more than one reader, they are consulted in 
order, and the\n# first one whose supports() method returns true will be 
used.\n#\n# pig.stats.output.size.reader=<fully qualified class name of a 
PigStatsOutputSizeReader implementation>\n# 
pig.stats.output.size.reader.unsupported=<comma separated list of StoreFuncs 
that are not supported by this reader>\n\n# By default, Pig retrieves 
TaskReports for every launched task to compute\n# various job statistics. But 
this can cause OOM if the number of tasks is\n# large. In such case, you can 
disable it by setting this property to true.\n# 
pig.stats.notaskreport=false\n\n#\n# Override
 hadoop configs programmatically\n#\n# By default, Pig expects hadoop configs 
(hadoop-site.xml and core-site.xml)\n# to be present on the classpath. There 
are cases when these configs\n# need to be passed programmatically, such 
as while using the PigServer API.\n# In such cases, you can override hadoop 
configs by setting the property\n# \"pig.use.overriden.hadoop.configs\".\n#\n# 
When this property is set to true, Pig ignores looking for hadoop configs\n# in 
the classpath and instead picks it up from Properties/Configuration\n# object 
passed to it.\n#\n# pig.use.overriden.hadoop.configs=false\n\n# Implied 
LoadFunc for the LOAD operation when no USING clause is\n# present. Supply the 
fully qualified class name of a LoadFunc\n# implementation. Note: setting this 
means you will have to modify most code\n# brought in from elsewhere on the 
web, as people generally omit the USING\n# clause for TSV files.\n#\n# * 
org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc\n# * my.custom.udfcollection.MyCustomLoadFunc (for example): 
use MyCustomLoadFunc instead\n#\n# pig.default.load.func=<fully qualified class 
name of a LoadFunc implementation>\n\n# The implied StoreFunc for STORE 
operations with no USING clause. Supply the\n# fully qualified class name of a 
StoreFunc implementation.\n#\n# * org.apache.pig.builtin.PigStorage (default): 
the traditional tab-separated-values StoreFunc.\n# * 
my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc 
instead\n#\n# pig.default.store.func=<fully qualified class name of a StoreFunc 
implementation>\n\n# Recover jobs when the application master is restarted? 
(default: false). This\n# is a Hadoop 2 specific property; enable it to take 
advantage of AM recovery.\n#\n# pig.output.committer.recovery.support=true\n\n# 
Should scripts check to prevent multiple stores writing to the same 
location?\n# (default: false) When set to true, stops the execution of script 
right away.\n#\npig.location.check.strict=false\n\n# In addition to the fs-style commands (rm, 
ls, etc) Pig can now execute\n# SQL-style DDL commands, eg \"sql create table 
pig_test(name string, age int)\".\n# The only implemented backend is hcat, and 
luckily that's also the default.\n#\n# pig.sql.type=hcat\n\n# Path to the hcat 
executable, for use with pig.sql.type=hcat (default: 
null)\n#\nhcat.bin=/usr/local/hcat/bin/hcat\n\n###########################################################################\n#\n#
 Overrides for extreme environments\n#\n# (Most people won't have to adjust 
these parameters)\n#\n\n\n# Limit the pig script length placed in the jobconf 
xml. (default:10240)\n# Extremely long queries can waste space in the JobConf; 
since its contents are\n# only advisory, the default is fine unless you are 
retaining it for forensics.\n#\n# pig.script.max.size=10240\n\n# Disable use of 
counters by Pig. Note that the word 'counter' is singular here.\n#\n# * false 
(default, recommended): do NOT disable counters.\n# * true: disable counters. Set this to true only when your Pig 
job will\n#   otherwise die because of using more counters than hadoop 
configured limit\n#\n# pig.disable.counter=true\n\n# Sample size (per-mapper, 
in number of rows) the ORDER..BY operation's\n# RandomSampleLoader uses to 
estimate how your data should be\n# partitioned. (default, recommended: 100 
rows per task) Increase this if you\n# have exceptionally large input splits 
and are unhappy with the reducer skew.\n#\n# 
pig.random.sampler.sample.size=100\n\n# Process an entire script at once, 
reducing the amount of work and number of\n# tasks? (default, recommended: 
true) See 
http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution\n#\n# 
MultiQuery optimization is very useful, and so the recommended default is\n# 
true. You may find that a script fails to compile under MultiQuery. If so,\n# 
disable it at runtime:\n#\n#     pig -no_multiquery 
script_that_makes_pig_sad.pig\n#\n# opt.multiquery=true
 \n\n# For small queries, fetch data directly from the HDFS. (default, 
recommended:\n# true). If you want to force Pig to launch a MR job, for example 
when you're\n# testing a live cluster, disable with the -N option. See 
PIG-3642.\n#\n# opt.fetch=true\n\n# Enable auto/grace parallelism in tez. These 
should be used by default unless\n# you encounter some bug in automatic 
parallelism. If pig.tez.auto.parallelism is set\n# to false, use 1 as default 
parallelism\npig.tez.auto.parallelism=true\npig.tez.grace.parallelism=true\n\n###########################################################################\n#\n#
 Streaming properties\n#\n\n# Define what properties will be set in the 
streaming environment. Just set this\n# property to a comma-delimited list of 
properties to set, and those properties\n# will be set in the 
environment.\n#\n# pig.streaming.environment=<comma-delimited list of 
properties>\n\n# Specify a comma-delimited list of local files to ship to 
distributed cache for\n# streaming job.
 \n#\n# pig.streaming.ship.files=<comma-delimited list of local files>\n\n# 
Specify a comma-delimited list of remote files to cache on distributed cache\n# 
for streaming job.\n#\n# pig.streaming.cache.files=<comma-delimited list of 
remote files>\n\n# Specify the python command to be used for python streaming 
udf. By default,\n# python is used, but you can overwrite it with a non-default 
version such as\n# python2.7.\n#\n# pig.streaming.udf.python.command=python"
+        }, 
+        "ranger-hbase-plugin-properties": {}, 
+        "hawq-env": {
+            "hawq_password": "gpadmin"
+        }, 
+        "core-site": {
+            "ipc.server.listen.queue.size": "3300", 
+            "net.topology.script.file.name": 
"/etc/hadoop/conf/topology_script.py", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
+            "hadoop.proxyuser.hcat.groups": "*", 
+            "fs.trash.interval": "360", 
+            "hadoop.proxyuser.hive.groups": "*", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "io.compression.codecs": 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec",
 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "io.compression.codec.lzo.class": 
"com.hadoop.compression.lzo.LzoCodec", 
+            "io.serializations": 
"org.apache.hadoop.io.serializer.WritableSerialization", 
+            "ipc.client.connect.timeout": "300000", 
+            "hadoop.security.authentication": "simple", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": 
"120", 
+            "hadoop.security.key.provider.path": "", 
+            "hadoop.security.authorization": "false", 
+            "ipc.server.tcpnodelay": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "DEFAULT", 
+            "hadoop.proxyuser.oozie.hosts": "*", 
+            "ipc.client.connection.maxidletime": "3600000"
+        }, 
+        "hive-site": {
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
+            "hive.fetch.task.aggr": "false", 
+            "hive.execution.engine": "tez", 
+            "hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true 
-XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails 
-verbose:gc -XX:+PrintGCTimeStamps", 
+            "hive.server2.thrift.http.port": "10001", 
+            "hive.tez.min.partition.factor": "0.25", 
+            "hive.tez.cpu.vcores": "-1", 
+            "hive.compute.query.using.stats": "true", 
+            "hive.stats.dbclass": "fs", 
+            "hive.merge.size.per.task": "256000000", 
+            "hive.fetch.task.conversion": "more", 
+            "hive.auto.convert.sortmerge.join.to.mapjoin": "false", 
+            "hive.server2.thrift.http.path": "cliservice", 
+            "hive.exec.scratchdir": "/tmp/hive", 
+            "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", 
+            "hive.zookeeper.namespace": "hive_zookeeper_namespace", 
+            "hive.cbo.enable": "true", 
+            "hive.optimize.reducededuplication": "true", 
+            "hive.optimize.bucketmapjoin": "true", 
+            "hive.mapjoin.bucket.cache.size": "10000", 
+            "hive.limit.optimize.enable": "true", 
+            "hive.fetch.task.conversion.threshold": "1073741824", 
+            "hive.server2.enable.doAs": "true", 
+            "hive.exec.max.dynamic.partitions": "5000", 
+            "hive.metastore.sasl.enabled": "false", 
+            "hive.txn.manager": 
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", 
+            "hive.optimize.constant.propagation": "true", 
+            "hive.exec.submitviachild": "false", 
+            "hive.metastore.kerberos.principal": "hive/_h...@example.com", 
+            "hive.txn.max.open.batch": "1000", 
+            "hive.exec.compress.output": "false", 
+            "hive.tez.auto.reducer.parallelism": "true", 
+            "hive.security.authenticator.manager": 
"org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", 
+            "hive.merge.mapfiles": "true", 
+            "hive.exec.parallel.thread.number": "8", 
+            "hive.mapjoin.optimized.hashtable": "true", 
+            "hive.default.fileformat": "TextFile", 
+            "hive.optimize.metadataonly": "true", 
+            "hive.tez.dynamic.partition.pruning.max.event.size": "1048576", 
+            "hive.server2.thrift.max.worker.threads": "500", 
+            "hive.optimize.sort.dynamic.partition": "false", 
+            "hive.server2.table.type.mapping": "CLASSIC", 
+            "hive.metastore.pre.event.listeners": 
"org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener",
 
+            "hive.metastore.failure.retries": "24", 
+            "hive.merge.smallfiles.avgsize": "16000000", 
+            "hive.tez.max.partition.factor": "2.0", 
+            "hive.server2.transport.mode": "binary", 
+            "atlas.hook.hive.minThreads": "1", 
+            "hive.tez.container.size": "170", 
+            "hive.optimize.bucketmapjoin.sortedmerge": "false", 
+            "hive.compactor.worker.threads": "0", 
+            "hive.security.metastore.authorization.manager": 
"org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
 
+            "hive.map.aggr.hash.percentmemory": "0.5", 
+            "hive.user.install.directory": "/user/", 
+            "datanucleus.autoCreateSchema": "false", 
+            "hive.conf.restricted.list": 
"hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
 
+            "hive.merge.rcfile.block.level": "true", 
+            "hive.map.aggr": "true", 
+            "hive.metastore.client.connect.retry.delay": "5s", 
+            "hive.security.authorization.enabled": "false", 
+            "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", 
+            "hive.server2.tez.default.queues": "default", 
+            "hive.prewarm.enabled": "false", 
+            "hive.exec.reducers.max": "1009", 
+            "hive.metastore.kerberos.keytab.file": 
"/etc/security/keytabs/hive.service.keytab", 
+            "hive.stats.fetch.partition.stats": "true", 
+            "hive.cli.print.header": "false", 
+            "hive.server2.thrift.sasl.qop": "auth", 
+            "hive.server2.support.dynamic.service.discovery": "true", 
+            "hive.server2.thrift.port": "10000", 
+            "hive.exec.reducers.bytes.per.reducer": "67108864", 
+            "hive.compactor.abortedtxn.threshold": "1000", 
+            "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", 
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.metastore.client.socket.timeout": "1800s", 
+            "hive.server2.zookeeper.namespace": "hiveserver2", 
+            "hive.prewarm.numcontainers": "3", 
+            "hive.cluster.delegation.token.store.class": 
"org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", 
+            "hive.security.metastore.authenticator.manager": 
"org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", 
+            "atlas.hook.hive.maxThreads": "1", 
+            "hive.auto.convert.join": "true", 
+            "hive.enforce.bucketing": "false", 
+            "hive.server2.authentication.spnego.keytab": 
"HTTP/_h...@example.com", 
+            "hive.mapred.reduce.tasks.speculative.execution": "false", 
+            "javax.jdo.option.ConnectionURL": 
"jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
+            "hive.exec.dynamic.partition.mode": "strict", 
+            "hive.auto.convert.sortmerge.join": "true", 
+            "hive.zookeeper.quorum": 
"c6403.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6401.ambari.apache.org:2181",
 
+            "hive.security.authorization.manager": 
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
 
+            "hive.exec.parallel": "false", 
+            "hive.exec.compress.intermediate": "false", 
+            "hive.enforce.sorting": "true", 
+            "hive.txn.timeout": "300", 
+            "hive.metastore.authorization.storage.checks": "false", 
+            "hive.exec.orc.default.stripe.size": "67108864", 
+            "hive.metastore.cache.pinobjtypes": 
"Table,Database,Type,FieldSchema,Order", 
+            "hive.server2.logging.operation.enabled": "true", 
+            "hive.merge.tezfiles": "false", 
+            "hive.compactor.initiator.on": "false", 
+            "hive.auto.convert.join.noconditionaltask": "true", 
+            "hive.compactor.worker.timeout": "86400L", 
+            "hive.optimize.null.scan": "true", 
+            "hive.server2.tez.initialize.default.sessions": "false", 
+            "datanucleus.cache.level2.type": "none", 
+            "hive.stats.autogather": "true", 
+            "hive.server2.use.SSL": "false", 
+            "hive.exec.submit.local.task.via.child": "true", 
+            "hive.merge.mapredfiles": "false", 
+            "hive.vectorized.execution.enabled": "true", 
+            "hive.cluster.delegation.token.store.zookeeper.connectString": 
"c6403.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6401.ambari.apache.org:2181",
 
+            "hive.map.aggr.hash.min.reduction": "0.5", 
+            "hive.tez.log.level": "INFO", 
+            "hive.server2.tez.sessions.per.default.queue": "1", 
+            "hive.exec.max.dynamic.partitions.pernode": "2000", 
+            "hive.tez.dynamic.partition.pruning": "true", 
+            "datanucleus.fixedDatastore": "true", 
+            "hive.limit.pushdown.memory.usage": "0.04", 
+            "hive.security.metastore.authorization.auth.reads": "true", 
+            "ambari.hive.db.schema.name": "hive", 
+            "hive.vectorized.groupby.checkinterval": "4096", 
+            "hive.smbjoin.cache.rows": "10000", 
+            "hive.metastore.execute.setugi": "true", 
+            "hive.zookeeper.client.port": "2181", 
+            "hive.vectorized.groupby.maxentries": "100000", 
+            "hive.server2.authentication.spnego.principal": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "hive.cluster.delegation.token.store.zookeeper.znode": 
"/hive/cluster/delegation", 
+            "javax.jdo.option.ConnectionPassword": "password", 
+            "hive.exec.max.created.files": "100000", 
+            "hive.default.fileformat.managed": "TextFile", 
+            "hive.vectorized.execution.reduce.enabled": "false", 
+            "hive.orc.splits.include.file.footer": "false", 
+            "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", 
+            "hive.merge.orcfile.stripe.level": "true", 
+            "hive.exec.failure.hooks": 
"org.apache.hadoop.hive.ql.hooks.ATSHook", 
+            "hive.server2.allow.user.substitution": "true", 
+            "hive.optimize.index.filter": "true", 
+            "hive.exec.orc.encoding.strategy": "SPEED", 
+            "hive.metastore.connect.retries": "24", 
+            "hive.metastore.server.max.threads": "100000", 
+            "hive.exec.orc.compression.strategy": "SPEED", 
+            "hive.vectorized.groupby.flush.percent": "0.1", 
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
+            "hive.enforce.sortmergebucketmapjoin": "true", 
+            "hive.auto.convert.join.noconditionaltask.size": "59419306", 
+            "javax.jdo.option.ConnectionUserName": "hive", 
+            "hive.compactor.delta.num.threshold": "10", 
+            "hive.exec.dynamic.partition": "true", 
+            "hive.server2.authentication": "NONE", 
+            "hive.stats.fetch.column.stats": "true", 
+            "hive.orc.compute.splits.num.threads": "10", 
+            "hive.tez.smb.number.waves": "0.5", 
+            "hive.convert.join.bucket.mapjoin.tez": "false", 
+            "hive.optimize.reducededuplication.min.reducer": "4", 
+            "hive.metastore.schema.verification": "true", 
+            "hive.server2.logging.operation.log.location": 
"/tmp/hive/operation_logs", 
+            "hive.tez.input.format": 
"org.apache.hadoop.hive.ql.io.HiveInputFormat", 
+            "hive.exec.orc.default.compress": "ZLIB", 
+            "hive.support.concurrency": "false", 
+            "hive.compactor.check.interval": "300L", 
+            "hive.compactor.delta.pct.threshold": "0.1f"
+        }, 
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "yarn_user_nproc_limit": "65536", 
+            "apptimelineserver_heapsize": "1024", 
+            "yarn_user_nofile_limit": "32768", 
+            "is_supported_yarn_ranger": "true", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\n      export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n 
     export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\n      export 
YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\n      export 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n      export 
JAVA_HOME={{java64_home}}\n      export 
JAVA_LIBRARY_PATH=\"${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}\"\n\n      # 
We need to add the EWMA appender for the yarn daemons only;\n      # however, 
YARN_ROOT_LOGGER is shared by the yarn client and the\n      # daemons. This is 
to restrict the EWMA appender to daemons only.\n      INVOKER=\"${0##*/}\"\n      
if [ \"$INVOKER\" == \"yarn-daemon.sh\" ]; then\n        export 
YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}\n      fi\n\n      # User 
for YARN daemons\n      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n   
   # resolve links - $0 may be a softlink\n      export 
YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n      # some Java 
parameters\n      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n      if [ \"$JAVA_HOME\" != \"\" 
]; then\n      #echo \"run java in $JAVA_HOME\"\n      JAVA_HOME=$JAVA_HOME\n   
   fi\n\n      if [ \"$JAVA_HOME\" = \"\" ]; then\n      echo \"Error: 
JAVA_HOME is not set.\"\n      exit 1\n      fi\n\n      
JAVA=$JAVA_HOME/bin/java\n      JAVA_HEAP_MAX=-Xmx1000m\n\n      # For setting 
YARN specific HEAP sizes please use this\n      # Parameter and set 
appropriately\n      YARN_HEAPSIZE={{yarn_heapsize}}\n\n      # check envvars 
which might override default args\n      if [ \"$YARN_HEAPSIZE\" != \"\" ]; 
then\n      JAVA_HEAP_MAX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\n      fi\n\n      # 
Resource Manager specific parameters\n\n      # Specify the max Heapsize for 
the ResourceManager using a numerical value\n      # in the scale of MB. For 
example, to specify a jvm option of -Xmx1000m, set\n      # the value to 
1000.\n      # This value will be overridden by an Xmx setting specified in 
either YARN_OPTS\n      # and/or YARN_RESOURCEMANAGER_OPTS.\n      # If not specified, the default value 
will be picked from either YARN_HEAPMAX\n      # or JAVA_HEAP_MAX with 
YARN_HEAPMAX as the preferred option of the two.\n      export 
YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n      # Specify 
the JVM options to be used when starting the ResourceManager.\n      # These 
options will be appended to the options specified as YARN_OPTS\n      # and 
therefore may override any similar flags set in YARN_OPTS\n      #export 
YARN_RESOURCEMANAGER_OPTS=\n\n      # Node Manager specific parameters\n\n      
# Specify the max Heapsize for the NodeManager using a numerical value\n      # 
in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set\n   
   # the value to 1000.\n      # This value will be overridden by an Xmx 
setting specified in either YARN_OPTS\n      # and/or YARN_NODEMANAGER_OPTS.\n  
    # If not specified, the default value will be picked from either 
YARN_HEAPMAX\n      
 # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n     
 export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n      # Specify 
the max Heapsize for the HistoryManager using a numerical value\n      # in the 
scale of MB. For example, to specify a jvm option of -Xmx1000m, set\n      # 
the value to 1024.\n      # This value will be overridden by an Xmx setting 
specified in either YARN_OPTS\n      # and/or YARN_HISTORYSERVER_OPTS.\n      # 
If not specified, the default value will be picked from either YARN_HEAPMAX\n   
   # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n   
   export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n      # 
Specify the JVM options to be used when starting the NodeManager.\n      # 
These options will be appended to the options specified as YARN_OPTS\n      # 
and therefore may override any similar flags set in YARN_OPTS\n      #export 
YARN_NODEMANAGER_OPTS=\n\n      # so that filenames w/ spaces are handled correctly in loops below\n      IFS=\n\n\n      # default 
log directory and file\n      if [ \"$YARN_LOG_DIR\" = \"\" ]; then\n      
YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\n      fi\n      if [ \"$YARN_LOGFILE\" 
= \"\" ]; then\n      YARN_LOGFILE='yarn.log'\n      fi\n\n      # default 
policy file for service-level authorization\n      if [ \"$YARN_POLICYFILE\" = 
\"\" ]; then\n      YARN_POLICYFILE=\"hadoop-policy.xml\"\n      fi\n\n      # 
restore ordinary behaviour\n      unset IFS\n\n\n      YARN_OPTS=\"$YARN_OPTS 
-Dhadoop.log.dir=$YARN_LOG_DIR\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dyarn.log.dir=$YARN_LOG_DIR\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dhadoop.log.file=$YARN_LOGFILE\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dyarn.log.file=$YARN_LOGFILE\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dyarn.home.dir=$YARN_COMMON_HOME\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dyarn.id.str=$YARN_IDENT_STRING\"\n      YARN_OPTS=\"$YARN_OPTS 
-Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n      YARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n      if 
[ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n      YARN_OPTS=\"$YARN_OPTS 
-Djava.library.path=$JAVA_LIBRARY_PATH\"\n      fi\n      
YARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"\n      
YARN_OPTS=\"$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}\"\n    ", 
+            "yarn_heapsize": "1024", 
+            "min_user_id": "1000", 
+            "yarn_cgroups_enabled": "false", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
+        }, 
+        "hawq-site": {
+            "hawq_re_cgroup_hierarchy_name": "hawq", 
+            "hawq_master_directory": "/data/hawq/master", 
+            "hawq_segment_address_port": "40000", 
+            "hawq_master_temp_directory": "/tmp", 
+            "hawq_standby_address_host": "c6402.ambari.apache.org", 
+            "hawq_master_address_port": "5432", 
+            "hawq_segment_temp_directory": "/tmp", 
+            "hawq_master_address_host": "c6403.ambari.apache.org", 
+            "hawq_rm_yarn_queue_name": "default", 
+            "hawq_rm_yarn_address": "c6402.ambari.apache.org:8032", 
+            "hawq_re_cgroup_mount_point": "/sys/fs/cgroup", 
+            "hawq_dfs_url": "c6401.ambari.apache.org:8020/hawq_default", 
+            "hawq_global_rm_type": "none", 
+            "hawq_segment_directory": "/data/hawq/segment", 
+            "hawq_rm_memory_limit_perseg": "64GB", 
+            "hawq_rm_yarn_scheduler_address": "c6402.ambari.apache.org:8030", 
+            "hawq_rm_yarn_app_name": "hawq", 
+            "hawq_re_cpu_enable": "false", 
+            "hawq_rm_nvcore_limit_perseg": "16"
+        }, 
+        "ranger-yarn-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": 
"/usr/phd/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": 
"/usr/phd/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": 
"jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": 
"myKeyFilePassword"
+        }, 
+        "hawq-check-env": {
+            "content"

<TRUNCATED>
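A quick way to sanity-check a command-JSON fixture like the one above, outside the stack-test harness, is to load it with the standard json module. This is only an illustrative sketch: the relative file path and the assumption that the hawq-site properties shown in the diff sit under a top-level "configurations" block are mine, not taken from the (truncated) patch.

    #!/usr/bin/env python
    # Illustrative only: spot-check a couple of hawq-site values from the fixture.
    # The path and the top-level "configurations" key are assumptions, not from the patch.
    import json

    with open("hawq_default.json") as fh:
        command_json = json.load(fh)

    hawq_site = command_json.get("configurations", {}).get("hawq-site", {})
    print(hawq_site.get("hawq_master_address_port"))  # "5432" in the diff above
    print(hawq_site.get("hawq_dfs_url"))              # "c6401.ambari.apache.org:8020/hawq_default"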
