http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/configurations/service_versions.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/configurations/service_versions.json 
b/ambari-web/app/assets/data/configurations/service_versions.json
index ddd59f0..4628d4d 100644
--- a/ambari-web/app/assets/data/configurations/service_versions.json
+++ b/ambari-web/app/assets/data/configurations/service_versions.json
@@ -1,224 +1,582 @@
 {
-  "items": [
+  "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?fields=*";,
+  "items" : [
     {
-      "serviceconfigversion": "1",
-      "createtime": "43800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "HDFS",
-      "configurations": [
+      "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?service_name=GANGLIA&service_config_version=1";,
+      "cluster_name" : "tdk",
+      "configurations" : [
         {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
+          "Config" : {
+            "cluster_name" : "tdk"
           },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "1",
-      "createtime": "43300000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
+          "type" : "ganglia-env",
+          "tag" : "version1414410959005",
+          "version" : 1,
+          "properties" : {
+            "additional_clusters" : " ",
+            "ganglia_runtime_dir" : "/var/run/ganglia/hdp",
+            "gmetad_user" : "nobody",
+            "gmond_user" : "nobody",
+            "rrdcached_base_dir" : "/var/lib/ganglia/rrds",
+            "rrdcached_delay" : "1800",
+            "rrdcached_flush_timeout" : "7200",
+            "rrdcached_timeout" : "3600",
+            "rrdcached_write_threads" : "4"
+          },
+          "properties_attributes" : { }
         }
-      ]
+      ],
+      "createtime" : 1414410973349,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : true,
+      "service_config_version" : 1,
+      "service_config_version_note" : "Initial configurations for Ganglia",
+      "service_name" : "GANGLIA",
+      "user" : "admin"
     },
     {
-      "serviceconfigversion": "2",
-      "createtime": "43500000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "HDFS",
-      "configurations": [
+      "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?service_name=HDFS&service_config_version=2";,
+      "cluster_name" : "tdk",
+      "configurations" : [
         {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
+          "Config" : {
+            "cluster_name" : "tdk"
           },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "2",
-      "createtime": "13800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "3",
-      "createtime": "23800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "HDFS",
-      "configurations": [
+          "type" : "core-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://dev01.hortonworks.com:8020",
+            "fs.trash.interval" : "360",
+            "hadoop.security.auth_to_local" : "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : 
"org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
+        },
         {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
+          "Config" : {
+            "cluster_name" : "tdk"
           },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "3",
-      "createtime": "47800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "4",
-      "createtime": "43900000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "HDFS",
-      "configurations": [
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
         {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
+          "Config" : {
+            "cluster_name" : "tdk"
           },
-          "properties": {}
-        }
-      ]
-    },
-    {
-      "serviceconfigversion": "4",
-      "createtime": "33800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 
r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.log
 
ger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit
 
=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# 
mapred audit 
logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.f
 ile}\n\n# Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging 
levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\n#\
 nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# 
Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "0",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "1024",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/grid/0/hdfs/journal",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : 
"${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "40",
+            "dfs.namenode.http-address" : "dev01.hortonworks.com:50070",
+            "dfs.namenode.https-address" : "dev01.hortonworks.com:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.safemode.threshold-pct" : "1.0f",
+            "dfs.namenode.secondary.http-address" : 
"dev01.hortonworks.com:50090",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1414314983497",
+          "version" : 2,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when 
specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} 
-Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The 
following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  
$HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code 
should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export 
HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor 
jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+            "dfs.datanode.data.dir.mount.file" : 
"/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1025m",
+            "namenode_opt_maxnewsize" : "200m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "200m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
         }
-      ]
+      ],
+      "createtime" : 1414314997499,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 2,
+      "service_config_version_note" : "",
+      "service_name" : "HDFS",
+      "user" : "admin"
     },
     {
-      "serviceconfigversion": "5",
-      "createtime": "41800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "HDFS",
-      "configurations": [
+      "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?service_name=HDFS&service_config_version=1";,
+      "cluster_name" : "tdk",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when 
specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} 
-Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The 
following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  
$HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code 
should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export 
HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor 
jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+            "dfs.datanode.data.dir.mount.file" : 
"/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1024m",
+            "namenode_opt_maxnewsize" : "200m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "200m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 
r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.log
 
ger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit
 
=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# 
mapred audit 
logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.f
 ile}\n\n# Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging 
levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\n#\
 nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# 
Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "0",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "1024",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/grid/0/hdfs/journal",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : 
"${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "40",
+            "dfs.namenode.http-address" : "dev01.hortonworks.com:50070",
+            "dfs.namenode.https-address" : "dev01.hortonworks.com:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.safemode.threshold-pct" : "1.0f",
+            "dfs.namenode.secondary.http-address" : 
"dev01.hortonworks.com:50090",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
         {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "core-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://dev01.hortonworks.com:8020",
+            "fs.trash.interval" : "360",
+            "hadoop.security.auth_to_local" : "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : 
"org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
           },
-          "properties": {}
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
         }
-      ]
+      ],
+      "createtime" : 1414268003791,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : false,
+      "service_config_version" : 1,
+      "service_config_version_note" : "Initial configurations for HDFS",
+      "service_name" : "HDFS",
+      "user" : "admin"
     },
     {
-      "serviceconfigversion": "5",
-      "createtime": "46800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
+      "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?service_name=HDFS&service_config_version=3";,
+      "cluster_name" : "tdk",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hadoop-env",
+          "tag" : "version1414314983497",
+          "version" : 2,
+          "properties" : {
+            "content" : "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop 
Configuration Directory\n#TODO: if env var set that can cause problems\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different 
for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport 
JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. 
Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by defau
 lt.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when 
specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix
 }}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}
 /$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} 
-Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The 
following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure 
datanodes, user to run the datanode as after dropping privileges\nexport 
HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by 
default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o 
SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs 
by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History 
server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# 
Where log files are stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HA
 DOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  
$HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code 
should be rsync'd from.  Unset by default.\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n
 # The scheduling priority for daemon processes.  See 'man nice'.\n\n# export 
HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor 
jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ 
-d \"/usr/lib/tez\" ]; then\n  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n#
 Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 
2.0\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+            "dfs.datanode.data.dir.mount.file" : 
"/etc/hadoop/conf/dfs_data_dir_mount.hist",
+            "dtnode_heapsize" : "1024m",
+            "hadoop_heapsize" : "1024",
+            "hadoop_pid_dir_prefix" : "/var/run/hadoop",
+            "hadoop_root_logger" : "INFO,RFA",
+            "hdfs_log_dir_prefix" : "/var/log/hadoop",
+            "hdfs_user" : "hdfs",
+            "namenode_heapsize" : "1025m",
+            "namenode_opt_maxnewsize" : "200m",
+            "namenode_opt_maxpermsize" : "256m",
+            "namenode_opt_newsize" : "200m",
+            "namenode_opt_permsize" : "128m",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hadoop-policy",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "security.admin.operations.protocol.acl" : "hadoop",
+            "security.client.datanode.protocol.acl" : "*",
+            "security.client.protocol.acl" : "*",
+            "security.datanode.protocol.acl" : "*",
+            "security.inter.datanode.protocol.acl" : "*",
+            "security.inter.tracker.protocol.acl" : "*",
+            "security.job.client.protocol.acl" : "*",
+            "security.job.task.protocol.acl" : "*",
+            "security.namenode.protocol.acl" : "*",
+            "security.refresh.policy.protocol.acl" : "hadoop",
+            "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hdfs-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.l
 ogger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appende
 
r.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# 
mapred audit 
logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging 
levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# 
Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "hdfs-site",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "dfs.block.access.token.enable" : "true",
+            "dfs.blockreport.initialDelay" : "120",
+            "dfs.blocksize" : "134217728",
+            "dfs.client.read.shortcircuit" : "true",
+            "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
+            "dfs.cluster.administrators" : " hdfs",
+            "dfs.datanode.address" : "0.0.0.0:50010",
+            "dfs.datanode.balance.bandwidthPerSec" : "6250000",
+            "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
+            "dfs.datanode.data.dir.perm" : "750",
+            "dfs.datanode.du.reserved" : "1073741824",
+            "dfs.datanode.failed.volumes.tolerated" : "0",
+            "dfs.datanode.http.address" : "0.0.0.0:50075",
+            "dfs.datanode.https.address" : "0.0.0.0:50475",
+            "dfs.datanode.ipc.address" : "0.0.0.0:8010",
+            "dfs.datanode.max.transfer.threads" : "1024",
+            "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.heartbeat.interval" : "3",
+            "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
+            "dfs.http.policy" : "HTTP_ONLY",
+            "dfs.https.port" : "50470",
+            "dfs.journalnode.edits.dir" : "/grid/0/hdfs/journal",
+            "dfs.journalnode.http-address" : "0.0.0.0:8480",
+            "dfs.namenode.accesstime.precision" : "0",
+            "dfs.namenode.avoid.read.stale.datanode" : "true",
+            "dfs.namenode.avoid.write.stale.datanode" : "true",
+            "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
+            "dfs.namenode.checkpoint.edits.dir" : 
"${dfs.namenode.checkpoint.dir}",
+            "dfs.namenode.checkpoint.period" : "21600",
+            "dfs.namenode.checkpoint.txns" : "1000000",
+            "dfs.namenode.handler.count" : "40",
+            "dfs.namenode.http-address" : "dev01.hortonworks.com:50070",
+            "dfs.namenode.https-address" : "dev01.hortonworks.com:50470",
+            "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
+            "dfs.namenode.name.dir.restore" : "true",
+            "dfs.namenode.safemode.threshold-pct" : "1.0f",
+            "dfs.namenode.secondary.http-address" : 
"dev01.hortonworks.com:50090",
+            "dfs.namenode.stale.datanode.interval" : "30000",
+            "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
+            "dfs.permissions.enabled" : "true",
+            "dfs.permissions.superusergroup" : "hdfs",
+            "dfs.replication" : "3",
+            "dfs.replication.max" : "50",
+            "dfs.support.append" : "true",
+            "dfs.webhdfs.enabled" : "true",
+            "fs.permissions.umask-mode" : "022"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "dfs.support.append" : "true",
+              "dfs.namenode.http-address" : "true"
+            }
+          }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "core-site",
+          "tag" : "version1414410959005",
+          "version" : 2,
+          "properties" : {
+            "fs.defaultFS" : "hdfs://dev01.hortonworks.com:8020",
+            "fs.trash.interval" : "360",
+            "hadoop.security.auth_to_local" : "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "hadoop.security.authentication" : "simple",
+            "hadoop.security.authorization" : "false",
+            "io.compression.codecs" : 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
+            "io.file.buffer.size" : "131072",
+            "io.serializations" : 
"org.apache.hadoop.io.serializer.WritableSerialization",
+            "ipc.client.connect.max.retries" : "50",
+            "ipc.client.connection.maxidletime" : "30000",
+            "ipc.client.idlethreshold" : "8000",
+            "ipc.server.tcpnodelay" : "true",
+            "mapreduce.jobtracker.webinterface.trusted" : "false",
+            "proxyuser_group" : "users"
+          },
+          "properties_attributes" : {
+            "final" : {
+              "fs.defaultFS" : "true"
+            }
+          }
         }
-      ]
+      ],
+      "createtime" : 1414410973754,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : true,
+      "service_config_version" : 3,
+      "service_config_version_note" : null,
+      "service_name" : "HDFS",
+      "user" : "admin"
     },
     {
-      "serviceconfigversion": "6",
-      "createtime": "44800000000",
-      "author": "admin",
-      "group_id" : null,
-      "group_name" : null,
-      "service_config_version_note" : "Notes should be here",
-      "service_name" : "YARN",
-      "configurations": [
-        {
-          "type": "core-site",
-          "tag": "1",
-          "version": "1",
-          "Config": {
-            "cluster_name": "c1"
-          },
-          "properties": {}
+      "href" : 
"http://192.168.56.101:8080/api/v1/clusters/tdk/configurations/service_config_versions?service_name=ZOOKEEPER&service_config_version=1";,
+      "cluster_name" : "tdk",
+      "configurations" : [
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "zookeeper-env",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "clientPort" : "2181",
+            "content" : "\nexport JAVA_HOME={{java64_home}}\nexport 
ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport 
ZOOPIDFILE={{zk_pid_file}}\nexport 
SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport 
CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled 
%}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS 
-Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport 
CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS 
-Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+            "initLimit" : "10",
+            "syncLimit" : "5",
+            "tickTime" : "2000",
+            "zk_data_dir" : "/hadoop/zookeeper",
+            "zk_log_dir" : "/var/log/zookeeper",
+            "zk_pid_dir" : "/var/run/zookeeper",
+            "zk_user" : "zookeeper"
+          },
+          "properties_attributes" : { }
+        },
+        {
+          "Config" : {
+            "cluster_name" : "tdk"
+          },
+          "type" : "zookeeper-log4j",
+          "tag" : "version1",
+          "version" : 1,
+          "properties" : {
+            "content" : "\n#\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
 http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the 
License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: 
console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling 
log file\n#log
 4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file 
and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# 
Log INFO level and above messages to the 
console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601}
 - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log 
file output\n#    Log DEBUG level and above messages to a log 
file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n#
 Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# 
uncomment the next line to limit number of backup 
files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - 
%m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log 
DEBUG level and above messages to a log 
file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n###
 Notice we are including log4j's NDC here 
(%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p 
[%t:%C{1}@%L][%x] - %m%n"
+          },
+          "properties_attributes" : { }
         }
-      ]
+      ],
+      "createtime" : 1414268003886,
+      "group_id" : -1,
+      "group_name" : "default",
+      "hosts" : [ ],
+      "is_current" : true,
+      "service_config_version" : 1,
+      "service_config_version_note" : "Initial configurations for ZooKeeper",
+      "service_name" : "ZOOKEEPER",
+      "user" : "admin"
     }
   ]
-}
+}
\ No newline at end of file
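
The fixture above stands in for Ambari's service_config_versions REST resource; each item's "href" shows the endpoint shape (/api/v1/clusters/{cluster}/configurations/service_config_versions). As a minimal, hedged sketch of how that endpoint could be queried from a client, the snippet below assumes a reachable Ambari server and basic-auth credentials; the base URL, cluster name, and credentials are illustrative placeholders, not values taken from this commit.

// TypeScript sketch: fetch the service config versions for one service.
// baseUrl, cluster, serviceName and auth are illustrative placeholders.
interface ServiceConfigVersion {
  service_name: string;
  service_config_version: number;
  is_current: boolean;
  createtime: number;
  user: string;
  configurations: Array<{ type: string; tag: string; version: number }>;
}

async function fetchServiceConfigVersions(
  baseUrl: string,      // e.g. "http://ambari-host:8080" (placeholder)
  cluster: string,      // e.g. "tdk"
  serviceName: string,  // e.g. "HDFS"
  auth: string          // base64-encoded "user:password" (placeholder)
): Promise<ServiceConfigVersion[]> {
  const url =
    `${baseUrl}/api/v1/clusters/${cluster}/configurations/service_config_versions` +
    `?service_name=${encodeURIComponent(serviceName)}&fields=*`;
  const res = await fetch(url, { headers: { Authorization: `Basic ${auth}` } });
  if (!res.ok) throw new Error(`Ambari request failed: ${res.status}`);
  const body = await res.json();
  return body.items as ServiceConfigVersion[];
}

In tests the same response shape is served from this local fixture instead of a live cluster, which is why fields such as is_current, group_name and createtime appear in the JSON above.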

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/configurations/service_versions_total.json
----------------------------------------------------------------------
diff --git 
a/ambari-web/app/assets/data/configurations/service_versions_total.json 
b/ambari-web/app/assets/data/configurations/service_versions_total.json
new file mode 100644
index 0000000..0e7668d
--- /dev/null
+++ b/ambari-web/app/assets/data/configurations/service_versions_total.json
@@ -0,0 +1,8 @@
+{
+  "itemTotal" : "5",
+  "items" : [
+    {
+      "service_name" : "HDFS"
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/hosts/HDP2/decommission_state.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/hosts/HDP2/decommission_state.json 
b/ambari-web/app/assets/data/hosts/HDP2/decommission_state.json
new file mode 100644
index 0000000..a576927
--- /dev/null
+++ b/ambari-web/app/assets/data/hosts/HDP2/decommission_state.json
@@ -0,0 +1,10 @@
+{
+  "HostRoles" : {
+    "cluster_name" : "tdk",
+    "component_name" : "DATANODE",
+    "desired_admin_state" : "INSERVICE",
+    "host_name" : "dev01.hortonworks.com"
+  },
+  "host" : {
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/hosts/quick_links.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/hosts/quick_links.json 
b/ambari-web/app/assets/data/hosts/quick_links.json
index 6b5ca69..a07b6e3 100644
--- a/ambari-web/app/assets/data/hosts/quick_links.json
+++ b/ambari-web/app/assets/data/hosts/quick_links.json
@@ -48,6 +48,16 @@
         },
         {
           "HostRoles" : {
+            "component_name" : "HBASE_MASTER"
+          }
+        },
+        {
+          "HostRoles" : {
+            "component_name" : "STORM_UI_SERVER"
+          }
+        },
+        {
+          "HostRoles" : {
             "component_name" : "SQOOP"
           }
         },
@@ -59,4 +69,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/users/user_admin.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/users/user_admin.json 
b/ambari-web/app/assets/data/users/user_admin.json
index 0165ce7..72cd945 100644
--- a/ambari-web/app/assets/data/users/user_admin.json
+++ b/ambari-web/app/assets/data/users/user_admin.json
@@ -1,11 +1,18 @@
 {
-  "href" : "http://dev.hortonworks.com:8080/api/v1/users/admin";,
   "Users" : {
-    "user_name" : "admin",
+    "active" : true,
+    "admin" : true,
+    "groups" : [ ],
     "ldap_user" : false,
-    "roles" : [
-      "admin",
-      "user"
-    ]
-  }
-}
+    "user_name" : "admin"
+  },
+  "privileges" : [
+    {
+      "PrivilegeInfo" : {
+        "permission_name" : "AMBARI.ADMIN",
+        "privilege_id" : 1,
+        "user_name" : "admin"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/4b2f1ab0/ambari-web/app/assets/data/users/user_user.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/users/user_user.json 
b/ambari-web/app/assets/data/users/user_user.json
index 99515f2..4b1f829 100644
--- a/ambari-web/app/assets/data/users/user_user.json
+++ b/ambari-web/app/assets/data/users/user_user.json
@@ -1,10 +1,10 @@
 {
-  "href" : "http://dev.hortonworks.com:8080/api/v1/users/user";,
   "Users" : {
-    "user_name" : "user",
+    "active" : true,
+    "admin" : false,
+    "groups" : [ ],
     "ldap_user" : false,
-    "roles" : [
-      "user"
-    ]
-  }
-}
+    "user_name" : "user"
+  },
+  "privileges" : [ ]
+}
\ No newline at end of file
