CHUKWA-757. Updated Hadoop configuration to stream data to Chukwa. (Eric Yang)
Project: http://git-wip-us.apache.org/repos/asf/chukwa/repo
Commit: http://git-wip-us.apache.org/repos/asf/chukwa/commit/88ec3d01
Tree: http://git-wip-us.apache.org/repos/asf/chukwa/tree/88ec3d01
Diff: http://git-wip-us.apache.org/repos/asf/chukwa/diff/88ec3d01

Branch: refs/heads/master
Commit: 88ec3d01180519193f0f85912e404d8816acf6d2
Parents: d26630b
Author: Eric Yang <[email protected]>
Authored: Thu Jun 18 18:45:08 2015 -0700
Committer: Eric Yang <[email protected]>
Committed: Thu Jun 18 18:45:08 2015 -0700

----------------------------------------------------------------------
 CHANGES.txt                     |   2 +
 conf/chukwa-agent-conf.xml      |   2 +-
 conf/chukwa-demux-conf.xml      |   2 +-
 conf/hadoop-log4j.properties    | 223 ++++++++++++++++++++---
 conf/hadoop-metrics2.properties |  96 ++++++++++++---
 conf/initial_adaptors           |  11 +-
 6 files changed, 216 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 665ae09..c306557 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -16,6 +16,8 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
+    CHUKWA-757. Updated Hadoop configuration to stream data to Chukwa. (Eric Yang)
+
     CHUKWA-749. Added Chukwa tags to Solr schema for indexing logs. (Eric Yang)
 
     CHUKWA-754. Improved graph explorer selection box performance. (Eric Yang)


http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/conf/chukwa-agent-conf.xml
----------------------------------------------------------------------
diff --git a/conf/chukwa-agent-conf.xml b/conf/chukwa-agent-conf.xml
index 1410e5b..ba47154 100644
--- a/conf/chukwa-agent-conf.xml
+++ b/conf/chukwa-agent-conf.xml
@@ -78,7 +78,7 @@
 
   <property>
     <name>chukwa.pipeline</name>
-    <value>org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter</value>
+    <value>org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter,org.apache.hadoop.chukwa.datacollection.writer.solr.SolrWriter</value>
   </property>
 
   <property>


http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/conf/chukwa-demux-conf.xml
----------------------------------------------------------------------
diff --git a/conf/chukwa-demux-conf.xml b/conf/chukwa-demux-conf.xml
index 151bfa5..fca9f1e 100644
--- a/conf/chukwa-demux-conf.xml
+++ b/conf/chukwa-demux-conf.xml
@@ -121,7 +121,7 @@
 
   <property>
     <name>HadoopMetrics</name>
-    <value>org.apache.hadoop.chukwa.extraction.demux.processor.mapper.HadoopMetricsProcessor</value>
+    <value>org.apache.hadoop.chukwa.extraction.hbase.HadoopMetricsProcessor</value>
     <description>Parser class for Hadoop Metrics </description>
   </property>
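
Note: the chukwa.pipeline value above is an ordered, comma-separated chain of writer
classes, so with this commit the agent writes each chunk to HBase and then to Solr. A
minimal sketch of the same property for a deployment that has no Solr cluster (this
simply keeps the previous single-writer pipeline from before this change):

    <property>
      <name>chukwa.pipeline</name>
      <!-- Writers are invoked in the order listed; SolrWriter is omitted here. -->
      <value>org.apache.hadoop.chukwa.datacollection.writer.hbase.HBaseWriter</value>
    </property>
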
http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/conf/hadoop-log4j.properties
----------------------------------------------------------------------
diff --git a/conf/hadoop-log4j.properties b/conf/hadoop-log4j.properties
index 3d6e988..aad7cc7 100644
--- a/conf/hadoop-log4j.properties
+++ b/conf/hadoop-log4j.properties
@@ -1,9 +1,10 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
@@ -18,40 +19,36 @@ hadoop.root.logger=INFO,console
 hadoop.log.dir=.
 hadoop.log.file=hadoop.log
 
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-#hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
 # Define the root logger to the system property "hadoop.root.logger".
 log4j.rootLogger=${hadoop.root.logger}, EventCounter
 
 # Logging Threshold
-log4j.threshhold=ALL
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
 
 #
-# Daily Rolling File Appender
+# Rolling File Appender
 #
+log4j.appender.RFA=org.apache.log4j.net.SocketAppender
+log4j.appender.RFA.Port=${hadoop.log.port}
+log4j.appender.RFA.RemoteHost=localhost
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
 
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+#
+# Daily Rolling File Appender
+#
 
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA=org.apache.log4j.net.SocketAppender
+log4j.appender.DRFA.Port=${hadoop.log.port}
+log4j.appender.DRFA.RemoteHost=localhost
 log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
 log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
 
 
 #
@@ -85,55 +82,76 @@ log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
 #
-#Security audit appender
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
 #
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+hadoop.security.logger=INFO,RFAS
+log4j.category.SecurityLogger=${hadoop.security.logger}
+log4j.appender.RFAS=org.apache.log4j.net.SocketAppender
+log4j.appender.RFAS.Port=${hadoop.log.port}
+log4j.appender.RFAS.RemoteHost=localhost
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.net.SocketAppender
+log4j.appender.DRFAS.Port=${hadoop.log.port}
+log4j.appender.DRFAS.RemoteHost=localhost
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-#new logger
-log4j.logger.SecurityLogger=OFF,console
-log4j.logger.SecurityLogger.additivity=false
 
 #
-# Rolling File Appender
+# hadoop configuration logging
 #
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
 
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,RFAAUDIT
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.net.SocketAppender
+log4j.appender.RFAAUDIT.Port=${hadoop.log.port}
+log4j.appender.RFAAUDIT.RemoteHost=localhost
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
 #
-# FSNamesystem Audit logging
-# All audit events are logged at INFO level
+# mapred audit logging
 #
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+mapred.audit.logger=INFO,MRAUDIT
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.net.SocketAppender
+log4j.appender.MRAUDIT.Port=${hadoop.log.port}
+log4j.appender.MRAUDIT.RemoteHost=localhost
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
 # Custom Logging levels
 
-hadoop.metrics.log.level=INFO
 #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
 #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
 
 # Jets3t library
 log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
 
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
 
 #
 # Event Counter Appender
@@ -142,52 +160,65 @@ log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
 #
-# Job Summary Appender
+# Job Summary Appender
 #
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=INFO,JSA
 log4j.appender.JSA=org.apache.log4j.net.SocketAppender
+log4j.appender.JSA.Port=9102
 log4j.appender.JSA.RemoteHost=localhost
-log4j.appender.JSA.Port=9098
 log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
 log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 #
-# AUDIT LOGGING - All audit events are logged at INFO level
-#
-# CHUKWA AUDIT LOG
-
-log4j.appender.DRFAAUDIT=org.apache.hadoop.chukwa.inputtools.log4j.ChukwaDailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/audit.log
-log4j.appender.DRFAAUDIT.recordType=HadoopLog
-log4j.appender.DRFAAUDIT.chukwaClientHostname=localhost
-log4j.appender.DRFAAUDIT.chukwaClientPortNum=9093
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=INFO,DRFAAUDIT
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-
-# ClientTrace (Shuffle bytes)
-log4j.appender.MR_CLIENTTRACE=org.apache.hadoop.chukwa.inputtools.log4j.ChukwaDailyRollingFileAppender
-log4j.appender.MR_CLIENTTRACE.File=${hadoop.log.dir}/mr_clienttrace.log
-log4j.appender.MR_CLIENTTRACE.recordType=ClientTrace
-log4j.appender.MR_CLIENTTRACE.chukwaClientHostname=localhost
-log4j.appender.MR_CLIENTTRACE.chukwaClientPortNum=9093
-log4j.appender.MR_CLIENTTRACE.DatePattern=.yyyy-MM-dd
-log4j.appender.MR_CLIENTTRACE.layout=org.apache.log4j.PatternLayout
-log4j.appender.MR_CLIENTTRACE.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.logger.org.apache.hadoop.mapred.TaskTracker.clienttrace=INFO,MR_CLIENTTRACE
-log4j.additivity.org.apache.hadoop.mapred.TaskTracker.clienttrace=false
-
-# ClientTrace (HDFS bytes)
-log4j.appender.HDFS_CLIENTTRACE=org.apache.hadoop.chukwa.inputtools.log4j.ChukwaDailyRollingFileAppender
-log4j.appender.HDFS_CLIENTTRACE.File=${hadoop.log.dir}/hdfs_clienttrace.log
-log4j.appender.HDFS_CLIENTTRACE.recordType=ClientTrace
-log4j.appender.HDFS_CLIENTTRACE.chukwaClientHostname=localhost
-log4j.appender.HDFS_CLIENTTRACE.chukwaClientPortNum=9093
-log4j.appender.HDFS_CLIENTTRACE.DatePattern=.yyyy-MM-dd
-log4j.appender.HDFS_CLIENTTRACE.layout=org.apache.log4j.PatternLayout
-log4j.appender.HDFS_CLIENTTRACE.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.logger.org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace=INFO,HDFS_CLIENTTRACE
-log4j.additivity.org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace=false
+#
+# Yarn ResourceManager Application Summary Log
+#
+yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.net.SocketAppender
+log4j.appender.RMSUMMARY.Port=9102
+log4j.appender.RMSUMMARY.RemoteHost=localhost
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+mapreduce.hs.audit.logger=INFO,HSAUDIT
+log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+log4j.appender.HSAUDIT=org.apache.log4j.net.SocketAppender
+log4j.appender.HSAUDIT.Port=9101
+log4j.appender.HSAUDIT.RemoteHost=localhost
+log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
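
Note: the file appenders above are all replaced with org.apache.log4j.net.SocketAppender,
so daemons now stream serialized logging events to a local Chukwa SocketAdaptor instead of
writing log files. Most appenders read their port from ${hadoop.log.port}, which this file
leaves undefined: each daemon must set that system property to the adaptor port reserved
for it in conf/initial_adaptors (9096 NameNode, 9098 DataNode, and so on; see the adaptor
list at the end of this diff). A sketch of one way to wire that up, assuming daemon options
are passed through hadoop-env.sh (the variable names are the standard hadoop-env.sh hooks;
the port assignments follow initial_adaptors):

    # hadoop-env.sh: point each daemon's SocketAppenders at its Chukwa adaptor port
    export HADOOP_NAMENODE_OPTS="-Dhadoop.log.port=9096 ${HADOOP_NAMENODE_OPTS}"
    export HADOOP_DATANODE_OPTS="-Dhadoop.log.port=9098 ${HADOOP_DATANODE_OPTS}"

The HSAUDIT, JSA, and RMSUMMARY appenders hard-code their ports (9101, 9102, 9102), which
line up with the HadoopHSLog and HadoopRMSUMMARY adaptors.
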
http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/conf/hadoop-metrics2.properties b/conf/hadoop-metrics2.properties
index af6af05..b574bad 100644
--- a/conf/hadoop-metrics2.properties
+++ b/conf/hadoop-metrics2.properties
@@ -1,25 +1,83 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
 
-# Stream metrics to Chukwa SocketAdaptor
 *.sink.socket.class=org.apache.hadoop.chukwa.inputtools.log4j.Log4jMetricsSink
+*.period=10
 namenode.sink.socket.host=localhost
 namenode.sink.socket.port=9095
 datanode.sink.socket.host=localhost
 datanode.sink.socket.port=9095
-jobtracker.sink.socket.host=localhost
-jobtracker.sink.socket.port=9095
-tasktracker.sink.socket.host=localhost
-tasktracker.sink.socket.port=9095
+resourcemanager.sink.socket.host=localhost
+resourcemanager.sink.socket.port=9095
+nodemanager.sink.socket.host=localhost
+nodemanager.sink.socket.port=9095
+mrappmaster.sink.socket.host=localhost
+mrappmaster.sink.socket.port=9095
+jobhistory.sink.socket.host=localhost
+jobhistory.sink.socket.port=9095
+
+#*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+#*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifiying multiple tags separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
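
Note: hadoop-metrics2.properties follows the [prefix].[source|sink].[instance].[options]
syntax quoted at the top of the new file. The *.sink.socket.class line registers the
Chukwa Log4jMetricsSink under the instance name "socket" for every daemon prefix,
*.period=10 samples metrics every 10 seconds, and each prefix's host/port pair points at
the single HadoopMetrics SocketAdaptor on port 9095. Covering another daemon is one more
host/port pair; a sketch (the secondarynamenode prefix here is illustrative — use the
prefix that the daemon's metrics system actually registers under):

    secondarynamenode.sink.socket.host=localhost
    secondarynamenode.sink.socket.port=9095
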
http://git-wip-us.apache.org/repos/asf/chukwa/blob/88ec3d01/conf/initial_adaptors
----------------------------------------------------------------------
diff --git a/conf/initial_adaptors b/conf/initial_adaptors
index 7afbc9e..5bc26be 100644
--- a/conf/initial_adaptors
+++ b/conf/initial_adaptors
@@ -1,5 +1,10 @@
 add sigar.SystemMetrics SystemMetrics 60 0
 add SocketAdaptor HadoopMetrics 9095 0
-add SocketAdaptor Hadoop 9096 0
-add SocketAdaptor ChukwaMetrics 9097 0
-add SocketAdaptor JobSummary 9098 0
+add SocketAdaptor HadoopNNLog 9096 0
+add SocketAdaptor HadoopSNLog 9097 0
+add SocketAdaptor HadoopDNLog 9098 0
+add SocketAdaptor HadoopRMLog 9099 0
+add SocketAdaptor HadoopNMLog 9100 0
+add SocketAdaptor HadoopHSLog 9101 0
+add SocketAdaptor HadoopRMSUMMARY 9102 0
+add SocketAdaptor HadoopTLA 9103 0
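
Note: each line follows the agent's adaptor syntax, add <AdaptorClass> <dataType> <param>
<initialOffset>; for SocketAdaptor the parameter is the listen port, so these dataType/port
pairs must stay in step with the appender ports in hadoop-log4j.properties above. The same
command can also be issued against a running agent on its control port — assumed here to be
the default 9093, the port the removed ChukwaDailyRollingFileAppender settings pointed at:

    telnet localhost 9093
    add SocketAdaptor HadoopRMLog 9099 0
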
