Modified: release/karaf/documentation/decanter/1_x.html
==============================================================================
--- release/karaf/documentation/decanter/1_x.html (original)
+++ release/karaf/documentation/decanter/1_x.html Sun Jul 23 19:41:03 2017
@@ -509,71 +509,74 @@ table.CodeRay td.code>pre{padding:0}
</div>
<div id="toc" class="toc2">
<div id="toctitle">Apache Karaf Decanter 1.x - Documentation</div>
-<ul class="sectlevel0">
-<li><a href="#_user_guide">User Guide</a>
<ul class="sectlevel1">
-<li><a href="#_introduction">1. Introduction</a></li>
-<li><a href="#_collectors">2. Collectors</a>
+<li><a href="#_user_guide">1. User Guide</a>
<ul class="sectlevel2">
-<li><a href="#_log">2.1. Log</a></li>
-<li><a href="#_file">2.2. File</a></li>
-<li><a href="#_jmx">2.3. JMX</a></li>
-<li><a href="#_activemq_jmx">2.4. ActiveMQ (JMX)</a></li>
-<li><a href="#_camel_jmx">2.5. Camel (JMX)</a></li>
-<li><a href="#_camel_tracer">2.6. Camel Tracer</a></li>
-<li><a href="#_system">2.7. System</a></li>
-</ul>
-</li>
-<li><a href="#_appenders">3. Appenders</a>
-<ul class="sectlevel2">
-<li><a href="#_log_2">3.1. Log</a></li>
-<li><a href="#_elasticsearch">3.2. Elasticsearch</a>
+<li><a href="#_introduction">1.1. Introduction</a></li>
+<li><a href="#_collectors">1.2. Collectors</a>
<ul class="sectlevel3">
-<li><a href="#_embedding_decanter_elasticsearch">3.2.1. Embedding Decanter
Elasticsearch</a></li>
-<li><a href="#_embedding_decanter_kibana">3.2.2. Embedding Decanter
Kibana</a></li>
-<li><a href="#_elasticsearch_head_console">3.2.3. Elasticsearch Head
console</a></li>
-</ul>
-</li>
-<li><a href="#_jdbc">3.3. JDBC</a></li>
-<li><a href="#_jms">3.4. JMS</a></li>
-<li><a href="#_camel">3.5. Camel</a></li>
+<li><a href="#_log">1.2.1. Log</a></li>
+<li><a href="#_cxf_logging_feature_integration">1.2.2. CXF Logging feature
integration</a></li>
+<li><a href="#_log_socket">1.2.3. Log Socket</a></li>
+<li><a href="#_file">1.2.4. File</a></li>
+<li><a href="#_eventadmin">1.2.5. EventAdmin</a></li>
+<li><a href="#_jmx">1.2.6. JMX</a></li>
+<li><a href="#_activemq_jmx">1.2.7. ActiveMQ (JMX)</a></li>
+<li><a href="#_camel_jmx">1.2.8. Camel (JMX)</a></li>
+<li><a href="#_camel_tracer_notifier">1.2.9. Camel Tracer &amp;
Notifier</a></li>
+<li><a href="#_system">1.2.10. System</a></li>
+<li><a href="#_network_socket">1.2.11. Network socket</a></li>
+<li><a href="#_jms">1.2.12. JMS</a></li>
+<li><a href="#_mqtt">1.2.13. MQTT</a></li>
+<li><a href="#_kafka">1.2.14. Kafka</a></li>
+<li><a href="#_rest_servlet">1.2.15. Rest Servlet</a></li>
</ul>
</li>
-<li><a href="#_sla_service_level_agreement">4. SLA (Service Level
Agreement)</a>
-<ul class="sectlevel2">
-<li><a href="#_checker">4.1. Checker</a></li>
-<li><a href="#_alerters">4.2. Alerters</a>
+<li><a href="#_appenders">1.3. Appenders</a>
<ul class="sectlevel3">
-<li><a href="#_log_3">4.2.1. Log</a></li>
-<li><a href="#_e_mail">4.2.2. E-mail</a></li>
-<li><a href="#_camel_2">4.2.3. Camel</a></li>
+<li><a href="#_log_2">1.3.1. Log</a></li>
+<li><a href="#_elasticsearch_kibana">1.3.2. Elasticsearch &amp; Kibana</a></li>
+<li><a href="#_jdbc">1.3.3. JDBC</a></li>
+<li><a href="#_jms_2">1.3.4. JMS</a></li>
+<li><a href="#_camel">1.3.5. Camel</a></li>
+<li><a href="#_kafka_2">1.3.6. Kafka</a></li>
+<li><a href="#_redis">1.3.7. Redis</a></li>
+<li><a href="#_mqtt_2">1.3.8. MQTT</a></li>
+<li><a href="#_cassandra">1.3.9. Cassandra</a></li>
+<li><a href="#_mongodb">1.3.10. MongoDB</a></li>
+<li><a href="#_network_socket_2">1.3.11. Network socket</a></li>
</ul>
</li>
+<li><a href="#_sla_service_level_agreement">1.4. SLA (Service Level
Agreement)</a>
+<ul class="sectlevel3">
+<li><a href="#_checker">1.4.1. Checker</a></li>
+<li><a href="#_alerters">1.4.2. Alerters</a></li>
</ul>
</li>
</ul>
</li>
-<li><a href="#_developer_guide">Developer Guide</a>
-<ul class="sectlevel1">
-<li><a href="#_architecture">1. Architecture</a></li>
-<li><a href="#_custom_collector">2. Custom Collector</a>
+<li><a href="#_developer_guide">2. Developer Guide</a>
<ul class="sectlevel2">
-<li><a href="#_event_driven_collector">2.1. Event Driven Collector</a></li>
-<li><a href="#_polled_collector">2.2. Polled Collector</a></li>
+<li><a href="#_architecture">2.1. Architecture</a></li>
+<li><a href="#_custom_collector">2.2. Custom Collector</a>
+<ul class="sectlevel3">
+<li><a href="#_event_driven_collector">2.2.1. Event Driven Collector</a></li>
+<li><a href="#_polled_collector">2.2.2. Polled Collector</a></li>
</ul>
</li>
-<li><a href="#_custom_appender">3. Custom Appender</a></li>
-<li><a href="#_custom_sla_alerter">4. Custom SLA Alerter</a></li>
+<li><a href="#_custom_appender">2.3. Custom Appender</a></li>
+<li><a href="#_custom_sla_alerter">2.4. Custom SLA Alerter</a></li>
</ul>
</li>
</ul>
</div>
</div>
<div id="content">
-<h1 id="_user_guide" class="sect0">User Guide</h1>
<div class="sect1">
-<h2 id="_introduction">1. Introduction</h2>
+<h2 id="_user_guide">1. User Guide</h2>
<div class="sectionbody">
+<div class="sect2">
+<h3 id="_introduction">1.1. Introduction</h3>
<div class="paragraph">
<p>Apache Karaf Decanter is monitoring solution running in Apache Karaf.</p>
</div>
@@ -615,17 +618,42 @@ is created and sent to alerters. Decante
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:repo-add
mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features</pre>
+<pre>karaf@root()> feature:repo-add
mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.1.0/xml/features</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>Or</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:repo-add decanter 1.1.0</pre>
</div>
</div>
<div class="paragraph">
<p>Now, you have to install the collectors, appenders, and eventually SLA
alerters feature to match your need.</p>
</div>
+<div class="paragraph">
+<p>For convenience the <code>decanter</code> feature installs a quick "ready
to go" set of components:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>elasticsearch</p>
+</li>
+<li>
+<p>kibana</p>
+</li>
+<li>
+<p>the elasticsearch appender</p>
+</li>
+<li>
+<p>the JMX and log collectors</p>
+</li>
+</ul>
</div>
</div>
-<div class="sect1">
-<h2 id="_collectors">2. Collectors</h2>
-<div class="sectionbody">
+<div class="sect2">
+<h3 id="_collectors">1.2. Collectors</h3>
<div class="paragraph">
<p>Decanter collectors harvest the monitoring data, and send this data to the
Decanter appenders.</p>
</div>
@@ -643,8 +671,8 @@ data and send to the appenders.</p>
</li>
</ul>
</div>
-<div class="sect2">
-<h3 id="_log">2.1. Log</h3>
+<div class="sect3">
+<h4 id="_log">1.2.1. Log</h4>
<div class="paragraph">
<p>The Decanter Log Collector is an event driven collector. It automatically
reacts when a log occurs, and
send the log details (level, logger name, message, etc) to the appenders.</p>
@@ -660,9 +688,97 @@ send the log details (level, logger name
<div class="paragraph">
<p>The log collector doesn’t need any configuration, the installation of
the decanter-collector-log feature is enough.</p>
</div>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<div class="title">Note</div>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>The Decanter log collector is using
<code>osgi:DecanterLogCollectorAppender</code> appender.
+In order to work, your Apache Karaf Pax Logging configuration should contain
this appender.</p>
</div>
-<div class="sect2">
-<h3 id="_file">2.2. File</h3>
+<div class="paragraph">
+<p>The default Apache Karaf <code>etc/org.ops4j.pax.logging.cfg</code>
configuration file is already fine:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>log4j.rootLogger = DEBUG, out, osgi:*</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>If you want, you can explicitly specify the
<code>DecanterLogCollectorAppender</code> appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>log4j.rootLogger = DEBUG, out, osgi:DecanterLogCollectorAppender,
osgi:VmLogAppender</pre>
+</div>
+</div>
+</td>
+</tr>
+</table>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_cxf_logging_feature_integration">1.2.2. CXF Logging feature
integration</h4>
+<div class="paragraph">
+<p>The <a href="http://cxf.apache.org/docs/message-logging.html">CXF message
logging</a> nicely integrates with Decanter. Simply add the <a
href="https://github.com/apache/cxf/blob/master/rt/features/logging/src/main/java/org/apache/cxf/ext/logging/LoggingFeature.java">org.apache.cxf.ext.logging.LoggingFeature</a>
to your service.</p>
+</div>
+<div class="paragraph">
+<p>This will automatically log the messages from all clients and endpoints to
slf4j. All meta data can be found in the MDC attributes. The message logging
can be switched on/off per service using the org.ops4j.pax.logging.cfg.</p>
+</div>
+<div class="paragraph">
+<p>When using with Decanter make sure you enable the log collector to actually
process the message logs.</p>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_log_socket">1.2.3. Log Socket</h4>
+<div class="paragraph">
+<p>The Decanter Log Socket Collector is an event driven collector. It creates
a socket, waiting for incoming events. The expected
+events are log4j LoggingEvent. The log4j LoggingEvent is transformed as a Map
containing the log details (level, logger name, message, …​).
+This Map is sent to the appenders.</p>
+</div>
+<div class="paragraph">
+<p>The collector allows you to remotely use Decanter. For instance, you can
have an application running on a different platform (spring-boot,
+application servers, …​). This application can use a log4j socket
appender that sends the logging events to the Decanter
+log socket collector.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-collector-log-socket</code> feature installs the log
socket collector:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-collector-log-socket</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature installs the collector and a default
<code>etc/org.apache.karaf.decanter.collector.log.socket.cfg</code>
configuration file
+containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#
+# Decanter Log/Log4j Socket collector configuration
+#
+
+#port=4560
+#workers=10</pre>
+</div>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>port</code> property defines the port number where the collector
is bound and listen for incoming logging event. Default is 4560.</p>
+</li>
+<li>
+<p>the <code>workers</code> property defines the number of threads (workers)
which can deal with multiple clients in the same time.</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_file">1.2.4. File</h4>
<div class="paragraph">
<p>The Decanter File Collector is an event driven collector. It automatically
reacts when new lines are appended into
a file (especially a log file). It acts like the tail Unix command. Basically,
it’s an alternative to the log collector.
@@ -725,8 +841,74 @@ my=stuff</pre>
<p>The file collector will tail on karaf.log file, and send any new line in
this log file as collected data.</p>
</div>
</div>
-<div class="sect2">
-<h3 id="_jmx">2.3. JMX</h3>
+<div class="sect3">
+<h4 id="_eventadmin">1.2.5. EventAdmin</h4>
+<div class="paragraph">
+<p>The Decanter EventAdmin Collector is an event-driven collector, listening
for all internal events happening in
+the Apache Karaf Container.</p>
+</div>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<div class="title">Note</div>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>It’s the perfect way to audit all actions performed on resources
(features, bundles, configurations, …​) by users
+(via local shell console, SSH, or JMX).</p>
+</div>
+<div class="paragraph">
+<p>We recommend to use this collector to implement users and actions
auditing.</p>
+</div>
+</td>
+</tr>
+</table>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-collector-eventadmin</code> feature installs the
eventadmin collector:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-collector-eventadmin</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>By default, the eventadmin collector is listening for all OSGi framework
and Karaf internal events.</p>
+</div>
+<div class="paragraph">
+<p>You can specify additional events to trap by providing a
‘etc/org.apache.karaf.decanter.collector.eventadmin-my.cfg’
configuration
+file, containing the EventAdmin topics you want to listen:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>event.topics=my/*</pre>
+</div>
+</div>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<div class="title">Note</div>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>By default, the events contain timestamp and subject.
+You can disable this by modifying
<code>etc/org.apache.felix.eventadmin.impl.EventAdmin</code> configuration
file:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>org.apache.felix.eventadmin.AddTimestamp=true
+org.apache.felix.eventadmin.AddSubject=true</pre>
+</div>
+</div>
+</td>
+</tr>
+</table>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_jmx">1.2.6. JMX</h4>
<div class="paragraph">
<p>The Decanter JMX Collector is a polled collector, executed periodically by
the Decanter Scheduler.</p>
</div>
@@ -767,7 +949,13 @@ url=local
# Object name filter to use. Instead of harvesting all MBeans, you can select
only
# some MBeans matching the object name filter
-#object.name=org.apache.camel:context=*,type=routes,name=*</pre>
+#object.name=org.apache.camel:context=*,type=routes,name=*
+
+# Several object names can also be specified.
+# What matters is that the property names begin with "object.name".
+#object.name.system=java.lang:*
+#object.name.karaf=org.apache.karaf:type=http,name=*
+#object.name.3=org.apache.activemq:*</pre>
</div>
</div>
<div class="paragraph">
@@ -795,7 +983,7 @@ is secured.</p>
<li>
<p>the <code>object.name</code> property is optional. If this property is not
specified, the collector will retrieve the attributes
of all MBeans. You can filter to consider only some MBeans. This property
contains the ObjectName filter to retrieve
-the attributes only to some MBeans.</p>
+the attributes only to some MBeans. Several object names can be listed,
provided the property prefix is <code>object.name.</code>.</p>
</li>
<li>
<p>any other values will be part of the collected data. It means that you can
add your own property if you want to add
@@ -808,8 +996,8 @@ additional data, and create queries base
<code>etc/org.apache.karaf.decanter.collector.jmx-[ANYNAME].cfg</code>.</p>
</div>
</div>
-<div class="sect2">
-<h3 id="_activemq_jmx">2.4. ActiveMQ (JMX)</h3>
+<div class="sect3">
+<h4 id="_activemq_jmx">1.2.7. ActiveMQ (JMX)</h4>
<div class="paragraph">
<p>The ActiveMQ JMX collector is just a special configuration of the JMX
collector.</p>
</div>
@@ -818,7 +1006,7 @@ additional data, and create queries base
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-collector-activemq</pre>
+<pre>karaf@root()> feature:install decanter-collector-jmx-activemq</pre>
</div>
</div>
<div class="paragraph">
@@ -857,17 +1045,17 @@ object.name=org.apache.activemq:*</pre>
<p>This configuration actually contains a filter to retrieve only the ActiveMQ
JMX MBeans.</p>
</div>
</div>
-<div class="sect2">
-<h3 id="_camel_jmx">2.5. Camel (JMX)</h3>
+<div class="sect3">
+<h4 id="_camel_jmx">1.2.8. Camel (JMX)</h4>
<div class="paragraph">
<p>The Camel JMX collector is just a special configuration of the JMX
collector.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-collector-camel</code> feature installs the default JMX
collector, with the specific Camel JMX configuration:</p>
+<p>The <code>decanter-collector-jmx-camel</code> feature installs the default
JMX collector, with the specific Camel JMX configuration:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-collector-camel</pre>
+<pre>karaf@root()> feature:install decanter-collector-jmx-camel</pre>
</div>
</div>
<div class="paragraph">
@@ -906,21 +1094,23 @@ object.name=org.apache.camel:context=*,t
<p>This configuration actually contains a filter to retrieve only the Camel
Routes JMX MBeans.</p>
</div>
</div>
-<div class="sect2">
-<h3 id="_camel_tracer">2.6. Camel Tracer</h3>
+<div class="sect3">
+<h4 id="_camel_tracer_notifier">1.2.9. Camel Tracer &amp; Notifier</h4>
<div class="paragraph">
-<p>The Camel Tracer provides a Camel Tracer Handler that you can set on a
Camel Tracer.</p>
+<p>Decanter provides a Camel Tracer Handler that you can set on a Camel
Tracer. It also provides a Camel Event Notifier.</p>
</div>
+<div class="sect4">
+<h5 id="_camel_tracer">Camel Tracer</h5>
<div class="paragraph">
<p>If you enable the tracer on a Camel route, all tracer events (exchanges on
each step of the route) are send to the
appenders.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-collector-camel-tracer</code> feature provides the Camel
Tracer Handler:</p>
+<p>The <code>decanter-collector-camel</code> feature provides the Camel Tracer
Handler:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-collector-camel-tracer</pre>
+<pre>karaf@root()> feature:install decanter-collector-camel</pre>
</div>
</div>
<div class="paragraph">
@@ -959,9 +1149,35 @@ in the Camel Tracer:</p>
</blueprint></pre>
</div>
</div>
+<div class="paragraph">
+<p>You can extend the Decanter event with any property using a custom
<code>DecanterCamelEventExtender</code>:</p>
</div>
-<div class="sect2">
-<h3 id="_system">2.7. System</h3>
+<div class="listingblock">
+<div class="content">
+<pre>public interface DecanterCamelEventExtender {
+
+ void extend(Map&lt;String, Object&gt; decanterData, Exchange
camelExchange);
+
+}</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>You can inject your extender using <code>setExtender(myExtender)</code> on
the <code>DecanterTraceEventHandler</code>. Decanter will automatically
+call your extender to populate extra properties.</p>
+</div>
+</div>
+<div class="sect4">
+<h5 id="_camel_event_notifier">Camel Event Notifier</h5>
+<div class="paragraph">
+<p>Decanter also provides <code>DecanterEventNotifier</code> implementing a
Camel event notifier: <a
href="http://camel.apache.org/eventnotifier-to-log-details-about-all-sent-exchanges.html"
class="bare">http://camel.apache.org/eventnotifier-to-log-details-about-all-sent-exchanges.html</a></p>
+</div>
+<div class="paragraph">
+<p>It’s very similar to the Decanter Camel Tracer. You can control the
camel contexts and routes to which you want to trap event.</p>
+</div>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_system">1.2.10. System</h4>
<div class="paragraph">
<p>The system collector is a polled collector (periodically executed by the
Decanter Scheduler).</p>
</div>
@@ -988,17 +1204,21 @@ in the Camel Tracer:</p>
# This collector executes system commands, retrieve the exec output/err
#Â sent to the appenders
#
-# The format is key=command
+# The format is command.key=command_to_execute
+# where command is a reserved keyword used to identify a command property
# for instance:
-# df=df -h
-# free=free
+#
+# command.df=df -h
+# command.free=free
+#
# You can also create a script containing command like:
#
# df -k / | awk -F " |%" '/dev/{print $8}'
#
# This script will get the available space on the / filesystem for instance.
# and call the script:
-# df=/bin/script
+#
+# command.df=/bin/script
#
# Another example of script to get the temperature:
#
@@ -1011,7 +1231,7 @@ in the Camel Tracer:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>name=command</pre>
+<pre>command.name=system_command</pre>
</div>
</div>
<div class="paragraph">
@@ -1022,126 +1242,515 @@ in the Camel Tracer:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>df=df -k / | awk -F " |%" '/dev/{print $8}'</pre>
+<pre>command.df=df -k / | awk -F " |%" '/dev/{print $8}'</pre>
+</div>
</div>
</div>
+<div class="sect3">
+<h4 id="_network_socket">1.2.11. Network socket</h4>
+<div class="paragraph">
+<p>The Decanter network socket collector listens for incoming messages coming
from a remote network socket collector.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-collector-socket</code> feature installs the network
socket collector:</p>
</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-collector-socket</pre>
</div>
</div>
-<div class="sect1">
-<h2 id="_appenders">3. Appenders</h2>
-<div class="sectionbody">
<div class="paragraph">
-<p>Decanter appenders receive the data from the collectors, and store the data
into a storage backend.</p>
+<p>This feature installs a default
<code>etc/org.apache.karaf.decanter.collector.socket.cfg</code> configuration
file containing:</p>
</div>
-<div class="sect2">
-<h3 id="_log_2">3.1. Log</h3>
+<div class="listingblock">
+<div class="content">
+<pre># Decanter Socket Collector
+
+# Port number on which to listen
+#port=34343
+
+# Number of worker threads to deal with
+#workers=10</pre>
+</div>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>port</code> property contains the port number where the network
socket collector is listening</p>
+</li>
+<li>
+<p>the <code>workers</code> property contains the number of worker thread the
socket collector is using for connection</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_jms">1.2.12. JMS</h4>
<div class="paragraph">
-<p>The Decanter Log Appender creates a log message for each event received
from the collectors.</p>
+<p>The Decanter JMS collector consumes the data from a JMS queue or topic.
It’s a way to aggregate collected data coming
+from remote and several machines.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-appender-log</code> feature installs the log
appender:</p>
+<p>The <code>decanter-collector-jms</code> feature installs the JMS
collector:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-appender-log</pre>
+<pre>karaf@root()> feature:install decanter-collector-jms</pre>
</div>
</div>
<div class="paragraph">
-<p>The log appender doesn’t require any configuration.</p>
+<p>This feature also installs a default
<code>etc/org.apache.karaf.decanter.collector.jms.cfg</code> configuration file
containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>######################################
+# Decanter JMS Collector Configuration
+######################################
+
+# Name of the JMS connection factory
+connection.factory.name=jms/decanter
+
+# Name of the destination
+destination.name=decanter
+
+# Type of the destination (queue or topic)
+destination.type=queue
+
+# Connection username
+# username=
+
+# Connection password
+# password=</pre>
</div>
</div>
-<div class="sect2">
-<h3 id="_elasticsearch">3.2. Elasticsearch</h3>
-<div class="paragraph">
-<p>The Decanter Elasticsearch Appender stores the data (coming from the
collectors) into an Elasticsearch instance.</p>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>connection.factory.name</code> is the name of the
ConnectionFactory OSGi service to use</p>
+</li>
+<li>
+<p>the <code>destination.name</code> is the name of the queue or topic where
to consume messages from the JMS broker</p>
+</li>
+<li>
+<p>the <code>destination.type</code> is the type of the destination (queue or
topic)</p>
+</li>
+<li>
+<p>the <code>username</code> and <code>password</code> properties are the
credentials to use with a secured connection factory</p>
+</li>
+</ul>
</div>
-<div class="paragraph">
-<p>This appender transforms the data as a json object. This json object is
stored in the Elasticsearch instance.</p>
</div>
+<div class="sect3">
+<h4 id="_mqtt">1.2.13. MQTT</h4>
<div class="paragraph">
-<p>It’s probably one of the most interesting way to use Decanter.</p>
+<p>The Decanter MQTT collector receives collected messages from a MQTT broker.
It’s a way to aggregate collected data coming
+from remote and several machines.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-appender-elasticsearch</code> feature installs the
elasticsearch appender:</p>
+<p>The <code>decanter-collector-mqtt</code> feature installs the MQTT
collector:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-appender-elasticsearch</pre>
+<pre>karaf@root()> feature:install decanter-collector-mqtt</pre>
</div>
</div>
<div class="paragraph">
-<p>This feature installs the elasticsearch appender, especially the
<code>etc/org.apache.karaf.decanter.appender.elasticsearch.cfg</code>
-configuration file containing:</p>
+<p>This feature also installs a default
<code>etc/org.apache.karaf.decanter.collector.mqtt.cfg</code> configuration
file containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>################################################
-# Decanter Elasticsearch Appender Configuration
-################################################
+<pre>#######################################
+# Decanter MQTT Collector Configuration
+#######################################
-# Hostname of the elasticsearch instance
-host=localhost
-# Port number of the elasticsearch instance
-port=9300
-# Name of the elasticsearch cluster
-clusterName=elasticsearch</pre>
-</div>
+# URI of the MQTT broker
+server.uri=tcp://localhost:61616
+
+# MQTT Client ID
+client.id=decanter
+
+# MQTT topic name
+topic=decanter</pre>
</div>
-<div class="paragraph">
-<p>This file contains the elasticsearch instance connection properties:</p>
</div>
<div class="ulist">
<ul>
<li>
-<p>the <code>host</code> property contains the hostname (or IP address) of the
Elasticsearch instance</p>
+<p>the <code>server.uri</code> is the location of the MQTT broker</p>
</li>
<li>
-<p>the <code>port</code> property contains the port number of the
Elasticsearch instance</p>
+<p>the <code>client.id</code> is the Decanter MQTT client ID</p>
</li>
<li>
-<p>the <code>clusterName</code> property contains the name of the
Elasticsearch cluster where to send the data</p>
+<p>the <code>topic</code> is the MQTT topic pattern where to receive the
messages</p>
</li>
</ul>
</div>
+</div>
<div class="sect3">
-<h4 id="_embedding_decanter_elasticsearch">3.2.1. Embedding Decanter
Elasticsearch</h4>
+<h4 id="_kafka">1.2.14. Kafka</h4>
+<div class="paragraph">
+<p>The Decanter Kafka collector receives collected messages from a Kafka
broker. It’s a way to aggregate collected data coming
+from remote and several machines.</p>
+</div>
<div class="paragraph">
-<p>For convenience, Decanter provides an <code>elasticsearch</code> feature
starting an embedded Elasticsearch instance:</p>
+<p>The <code>decanter-collector-kafka</code> feature installs the Kafka
collector:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install elasticsearch</pre>
+<pre>karaf@root()> feature:install decanter-collector-kafka</pre>
</div>
</div>
<div class="paragraph">
-<p>Thanks to this elasticsearch instance, by default, the
decanter-appender-elasticsearch will send the data to this instance.</p>
-</div>
-<div class="paragraph">
-<p>The feature also installs the <code>etc/elasticsearch.yml</code>
configuration file:</p>
+<p>This feature also installs a default
<code>etc/org.apache.karaf.decanter.collector.kafka.cfg</code> configuration
file containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>###############################################################################
-##################### Elasticsearch Decanter Configuration ####################
-###############################################################################
+<pre>###############################
+# Decanter Kafka Configuration
+###############################
-# WARNING: change in this configuration file requires a refresh or restart of
-# the elasticsearch bundle
+# A list of host/port pairs to use for establishing the initial connection to
the Kafka cluster
+#bootstrap.servers=localhost:9092
-################################### Cluster ###################################
+# An id string to identify the group where the consumer belongs to
+#group.id=decanter
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names.
-#
-cluster.name: elasticsearch
-cluster.routing.schedule: 50ms
+# Enable auto commit of consumed messages
+#enable.auto.commit=true
+# Auto commit interval (in ms) triggering the commit
+#auto.commit.interval.ms=1000
-#################################### Node #####################################
+# Timeout on the consumer session
+#session.timeout.ms=30000
-# Node names are generated dynamically on startup, so you're relieved
-# from configuring them manually. You can tie this node to a specific name:
+# Serializer class for key that implements the Serializer interface
+#key.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+# Serializer class for value that implements the Serializer interface.
+#value.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+# Name of the topic
+#topic=decanter
+
+# Security (SSL)
+#security.protocol=SSL
+
+# SSL truststore location (Kafka broker) and password
+#ssl.truststore.location=${karaf.etc}/keystores/keystore.jks
+#ssl.truststore.password=karaf
+
+# SSL keystore (if client authentication is required)
+#ssl.keystore.location=${karaf.etc}/keystores/clientstore.jks
+#ssl.keystore.password=karaf
+#ssl.key.password=karaf
+
+# (Optional) SSL provider (default uses the JVM one)
+#ssl.provider=
+
+# (Optional) SSL Cipher suites
+#ssl.cipher.suites=
+
+# (Optional) SSL Protocols enabled (default is TLSv1.2,TLSv1.1,TLSv1)
+#ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
+
+# (Optional) SSL Truststore type (default is JKS)
+#ssl.truststore.type=JKS
+
+# (Optional) SSL Keystore type (default is JKS)
+#ssl.keystore.type=JKS
+
+# Security (SASL)
+# For SASL, you have to configure Java System property as explained in
http://kafka.apache.org/documentation.html#security_ssl</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>The configuration is similar to the Decanter Kafka appender. Please, see
Kafka collector for details.</p>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_rest_servlet">1.2.15. Rest Servlet</h4>
+<div class="paragraph">
+<p>The Decanter Rest Servlet collector registers a servlet on the OSGi HTTP
service (by default on <code>/decanter/collect</code>).</p>
+</div>
+<div class="paragraph">
+<p>It listens for incoming collected messages on this servlet.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-collector-rest-servlet</code> feature installs the
collector:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-collector-rest-servlet</pre>
+</div>
+</div>
+</div>
+</div>
+<div class="sect2">
+<h3 id="_appenders">1.3. Appenders</h3>
+<div class="paragraph">
+<p>Decanter appenders receive the data from the collectors, and store the data
into a storage backend.</p>
+</div>
+<div class="sect3">
+<h4 id="_log_2">1.3.1. Log</h4>
+<div class="paragraph">
+<p>The Decanter Log Appender creates a log message for each event received
from the collectors.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-log</code> feature installs the log
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-appender-log</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>The log appender doesn’t require any configuration.</p>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_elasticsearch_kibana">1.3.2. Elasticsearch & Kibana</h4>
+<div class="paragraph">
+<p>Decanter provides three appenders for Elasticsearch:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>decanter-appender-elasticsearch-rest (recommended) is an appender which
directly uses the Elasticsearch HTTP REST API. It’s compliant with any
Elasticsearch version (1.x and 2.x).</p>
+</li>
+<li>
+<p>decanter-appender-elasticsearch-native-1.x is an appender which uses the
Elasticsearch 1.x Java Client API. It’s compliant only with Elasticsearch
1.x versions.</p>
+</li>
+<li>
+<p>decanter-appender-elasticsearch-native-2.x is an appender which uses the
Elasticsearch 2.x Java Client API. It’s compliant only with Elasticsearch
2.x versions.</p>
+</li>
+</ul>
+</div>
+<div class="paragraph">
+<p>These appenders store the data (coming from the collectors) into an
Elasticsearch node.
+They transform the data into a JSON document, stored into Elasticsearch.</p>
+</div>
+<div class="sect4">
+<h5 id="_elasticsearch_http_rest_appender">Elasticsearch HTTP REST
appender</h5>
+<div class="paragraph">
+<p>The Decanter Elasticsearch HTTP REST API appender uses the Elasticsearch
REST API. It works with any Elasticsearch version (1.x and 2.x).</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-elasticsearch-rest</code> feature installs this
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install
decanter-appender-elasticsearch-rest</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature installs the appender and the
<code>etc/org.apache.karaf.decanter.appender.elasticsearch.rest.cfg</code>
configuration file
+containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#########################################################
+# Decanter Elasticsearch HTTP REST Appender Configuration
+#########################################################
+
+# HTTP address of the elasticsearch node
+# NB: the appender uses discovery via elasticsearch nodes API
+address=http://localhost:9200
+
+# Basic username and password authentication
+# username=user
+# password=password</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>The file contains the Elasticsearch node location:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>address</code> is the HTTP URL of the Elasticsearch node. Default
is <code>http://localhost:9200</code>.</p>
+</li>
+<li>
+<p>the <code>username</code> is the username used for authentication
(optional)</p>
+</li>
+<li>
+<p>the <code>password</code> is the password used for authentication
(optional)</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect4">
+<h5 id="_elasticsearch_1_x_native_appender">Elasticsearch 1.x Native
appender</h5>
+<div class="paragraph">
+<p>The Elasticsearch 1.x Native appender uses the Elasticsearch 1.x Java
Client API. It’s very specific to
+Elasticsearch 1.x versions, and can’t run with Elasticsearch 2.x.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-elasticsearch-native-1.x</code> feature
installs the elasticsearch appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install
decanter-appender-elasticsearch-native-1.x</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature installs the elasticsearch appender, especially the
<code>etc/org.apache.karaf.decanter.appender.elasticsearch.cfg</code>
+configuration file containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>################################################
+# Decanter Elasticsearch Appender Configuration
+################################################
+
+# Hostname of the elasticsearch instance
+host=localhost
+# Port number of the elasticsearch instance
+port=9300
+# Name of the elasticsearch cluster
+clusterName=elasticsearch</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This file contains the elasticsearch instance connection properties:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>host</code> property contains the hostname (or IP address) of the
Elasticsearch instance</p>
+</li>
+<li>
+<p>the <code>port</code> property contains the port number of the
Elasticsearch instance</p>
+</li>
+<li>
+<p>the <code>clusterName</code> property contains the name of the
Elasticsearch cluster where to send the data</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect4">
+<h5 id="_elasticsearch_2_x_native_appender">Elasticsearch 2.x Native
appender</h5>
+<div class="paragraph">
+<p>The Elasticsearch 2.x Native appender uses the Elasticsearch 2.x Java
Client API. It’s very specific to
+Elasticsearch 2.x versions, and can’t run with Elasticsearch 1.x.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-elasticsearch-native-2.x</code> feature
installs the elasticsearch appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install
decanter-appender-elasticsearch-native-2.x</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature installs the elasticsearch appender, especially the
<code>etc/org.apache.karaf.decanter.appender.elasticsearch.cfg</code>
+configuration file containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>################################################
+# Decanter Elasticsearch Appender Configuration
+################################################
+
+# Hostname of the elasticsearch instance
+host=localhost
+# Port number of the elasticsearch instance
+port=9300
+# Name of the elasticsearch cluster
+clusterName=elasticsearch</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This file contains the elasticsearch instance connection properties:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>host</code> property contains the hostname (or IP address) of the
Elasticsearch instance</p>
+</li>
+<li>
+<p>the <code>port</code> property contains the port number of the
Elasticsearch instance</p>
+</li>
+<li>
+<p>the <code>clusterName</code> property contains the name of the
Elasticsearch cluster where to send the data</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect4">
+<h5 id="_embedding_decanter_elasticsearch_1_x_and_2_x">Embedding Decanter
Elasticsearch (1.x and 2.x)</h5>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<div class="title">Note</div>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>For a larger and shared production platform, we recommend to dedicate a
Elasticsearch instance on its own JVM.
+It allows you some specific tuning for elasticsearch.
+Another acceptable configuration is to set up the Decanter embedded
Elasticsearch instance as part (client) of a larger
+cluster.</p>
+</div>
+<div class="paragraph">
+<p>The following Decanter Elasticsearch embedded instance setup works
perfectly fine for Karaf Decanter monitoring purpose,
+especially for the current Karaf instance.</p>
+</div>
+</td>
+</tr>
+</table>
+</div>
+<div class="paragraph">
+<p>For convenience, Decanter provides <code>elasticsearch</code> feature
starting an embedded Elasticsearch instance:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install elasticsearch</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>Decanter provides versions of this feature, depending on the Elasticsearch
version you want to use (1.x or 2.x).</p>
+</div>
+<div class="paragraph">
+<p>You can see the feature version available:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:version-list elasticsearch</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>Thanks to this elasticsearch instance, by default, the
decanter-appender-elasticsearch* appenders will send the data to this
instance.</p>
+</div>
+<div class="paragraph">
+<p>The feature also installs the <code>etc/elasticsearch.yml</code>
configuration file, different depending on the Elasticsearch version.</p>
+</div>
+<div class="paragraph">
+<p>For Elasticsearch 1.x:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>###############################################################################
+##################### Elasticsearch Decanter Configuration ####################
+###############################################################################
+
+# WARNING: change in this configuration file requires a refresh or restart of
+# the elasticsearch bundle
+
+################################### Cluster ###################################
+
+# Cluster name identifies your cluster for auto-discovery. If you're running
+# multiple clusters on the same network, make sure you're using unique names.
+#
+cluster.name: elasticsearch
+cluster.routing.schedule: 50ms
+
+
+#################################### Node #####################################
+
+# Node names are generated dynamically on startup, so you're relieved
+# from configuring them manually. You can tie this node to a specific name:
#
node.name: decanter
@@ -1496,376 +2105,951 @@ http.cors.allow-origin: /.*/
</div>
</div>
<div class="paragraph">
-<p>It’s a "standard" elasticsearch configuration file, allowing you to
configure the embedded elasticsearch instance.</p>
-</div>
-<div class="paragraph">
-<p>Warning: if you change the <code>etc/elasticsearch.yml</code> file, you
have to restart (with the <code>bundle:restart</code> command) the
-Decanter elasticsearch bundle in order to load the changes.</p>
-</div>
-<div class="paragraph">
-<p>The Decanter elasticsearch node also supports loading and override of the
settings using a
-<code>etc/org.apache.karaf.decanter.elasticsearch.cfg</code> configuration
file.
-This file is not provided by default, as it’s used for override of the
default settings.</p>
-</div>
-<div class="paragraph">
-<p>You can override the following elasticsearch properties in this
configuration file:</p>
+<p>For Elasticsearch 2.x:</p>
</div>
+<div class="listingblock">
+<div class="content">
+<pre># ======================== Elasticsearch Configuration
=========================
+#
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+# Before you set out to tweak and tune the configuration, make sure you
+# understand what are you trying to accomplish and the consequences.
+#
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production
cluster.
+#
+# Please see the documentation for further information on configuration
options:
+#
<http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
+#
+# ---------------------------------- Cluster
-----------------------------------
+#
+# Use a descriptive name for your cluster:
+#
+cluster.name: elasticsearch
+#
+# ------------------------------------ Node
------------------------------------
+#
+# Use a descriptive name for the node:
+#
+node.name: decanter
+#
+# Add custom attributes to the node:
+#
+# node.rack: r1
+#
+# ----------------------------------- Paths
------------------------------------
+#
+# Path to directory where to store the data (separate multiple locations by
comma):
+#
+# path.data: /path/to/data
+path.data: data
+path.home: data
+#
+# Path to log files:
+#
+# path.logs: /path/to/logs
+#
+# ----------------------------------- Memory
-----------------------------------
+#
+# Lock the memory on startup:
+#
+# bootstrap.mlockall: true
+#
+# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half
the memory
+# available on the system and that the owner of the process is allowed to use
this limit.
+#
+# Elasticsearch performs poorly when the system is swapping the memory.
+#
+# ---------------------------------- Network
-----------------------------------
+#
+# Set the bind address to a specific IP (IPv4 or IPv6):
+#
+# network.host: 192.168.0.1
+#
+# Set a custom port for HTTP:
+#
+# http.port: 9200
+#
+# For more information, see the documentation at:
+#
<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
+#
+# --------------------------------- Discovery
----------------------------------
+#
+# Pass an initial list of hosts to perform discovery when new node is started:
+# The default list of hosts is ["127.0.0.1", "[::1]"]
+#
+# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
+#
+# Prevent the "split brain" by configuring the majority of nodes (total number
of nodes / 2 + 1):
+#
+# discovery.zen.minimum_master_nodes: 3
+#
+# For more information, see the documentation at:
+#
<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
+#
+# ---------------------------------- Gateway
-----------------------------------
+#
+# Block initial recovery after a full cluster restart until N nodes are
started:
+#
+# gateway.recover_after_nodes: 3
+#
+# For more information, see the documentation at:
+#
<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
+#
+# ---------------------------------- Various
-----------------------------------
+#
+# Disable starting multiple nodes on a single system:
+#
+# node.max_local_storage_nodes: 1
+#
+# Require explicit names when deleting indices:
+#
+# action.destructive_requires_name: true</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>It’s a "standard" elasticsearch configuration file, allowing you to
configure the embedded elasticsearch instance.</p>
+</div>
+<div class="paragraph">
+<p>Warning: if you change the <code>etc/elasticsearch.yml</code> file, you
have to restart (with the <code>bundle:restart</code> command) the
+Decanter elasticsearch bundle in order to load the changes.</p>
+</div>
+<div class="paragraph">
+<p>The Decanter elasticsearch node also supports loading and override of the
settings using a
+<code>etc/org.apache.karaf.decanter.elasticsearch.cfg</code> configuration
file.
+This file is not provided by default, as it’s used for override of the
default settings.</p>
+</div>
+<div class="paragraph">
+<p>You can override the following elasticsearch properties in this
configuration file:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p><code>cluster.name</code></p>
+</li>
+<li>
+<p><code>http.enabled</code></p>
+</li>
+<li>
+<p><code>node.data</code></p>
+</li>
+<li>
+<p><code>node.name</code></p>
+</li>
+<li>
+<p><code>node.master</code></p>
+</li>
+<li>
+<p><code>path.data</code></p>
+</li>
+<li>
+<p><code>network.host</code></p>
+</li>
+<li>
+<p><code>cluster.routing.schedule</code></p>
+</li>
+<li>
+<p><code>path.plugins</code></p>
+</li>
+<li>
+<p><code>http.cors.enabled</code></p>
+</li>
+<li>
+<p><code>http.cors.allow-origin</code></p>
+</li>
+</ul>
+</div>
+<div class="paragraph">
+<p>The advantage of using this file is that the elasticsearch node is
automatically restarted in order to reload the
+settings as soon as you change the cfg file.</p>
+</div>
+</div>
+<div class="sect4">
+<h5
id="_embedding_decanter_kibana_3_x_only_working_with_elasticsearch_1_x">Embedding
Decanter Kibana 3.x (only working with Elasticsearch 1.x)</h5>
+<div class="paragraph">
+<p>In addition to the embedded elasticsearch 1.x instance, Decanter also
provides an embedded Kibana 3.x instance, containing
+ready to use Decanter dashboards.</p>
+</div>
+<div class="paragraph">
+<p>The <code>kibana</code> feature installs the embedded kibana instance:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install kibana/3.1.1</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>By default, the kibana instance is available on
<code>http://host:8181/kibana</code>.</p>
+</div>
+<div class="paragraph">
+<p>The Decanter Kibana instance provides ready to use dashboards:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>Karaf dashboard uses the data harvested by the default JMX collector, and
the log collector. Especially, it provides
+details about the threads, memory, garbage collection, etc.</p>
+</li>
+<li>
+<p>Camel dashboard uses the data harvested by the default JMX collector, or
the Camel (JMX) collector. It can also
+leverage the Camel Tracer collector. It provides details about routes
processing time, the failed exchanges, etc. This
+dashboard requires some tuning (updating the queries to match the route
IDs).</p>
+</li>
+<li>
+<p>ActiveMQ dashboard uses the data harvested by the default JMX collector, or
the ActiveMQ (JMX) collector. It provides
+details about the pending queue, the system usage, etc.</p>
+</li>
+<li>
+<p>OperatingSystem dashboard uses the data harvested by the system collector.
The default dashboard expects data containing
+the filesystem usage, and temperature data. It’s just a sample, you have
to tune the system collector and adapt this
+dashboard accordingly.</p>
+</li>
+</ul>
+</div>
+<div class="paragraph">
+<p>You can change these dashboards to add new panels, change the existing
panels, etc.</p>
+</div>
+<div class="paragraph">
+<p>Of course, you can create your own dashboards, starting from blank or
simple dashboards.</p>
+</div>
+<div class="paragraph">
+<p>By default, Decanter Kibana uses embedded elasticsearch instance. However,
it’s possible to use a remote elasticsearch
+instance by providing the elasticsearch parameter on the URL like this for
instance:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>http://localhost:8181/kibana?elasticsearch=http://localhost:9400</pre>
+</div>
+</div>
+</div>
+<div class="sect4">
+<h5
id="_embedding_decanter_kibana_4_x_only_working_with_elasticsearch_2_x">Embedding
Decanter Kibana 4.x (only working with Elasticsearch 2.x)</h5>
+<div class="paragraph">
+<p>In addition to the embedded elasticsearch 2.x instance, Decanter also
provides an embedded Kibana 4.x instance.</p>
+</div>
+<div class="paragraph">
+<p>The <code>kibana</code> feature installs the embedded kibana instance:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install kibana/4.1.2</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>By default, the kibana instance is available on
<code>http://host:8181/kibana</code>.</p>
+</div>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<div class="title">Note</div>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>Decanter Kibana 4 automatically detects collector features. Then, it
automatically creates corresponding dashboards.</p>
+</div>
+<div class="paragraph">
+<p>However, you still have a complete control of the visualizations and
dashboards. You can update the index to
+automatically include new fields and create your own visualizations and
dashboards.</p>
+</div>
+<div class="paragraph">
+<p>The default dashboard displayed is the "System" dashboard, requiring the
jmx collector.</p>
+</div>
+</td>
+</tr>
+</table>
+</div>
+</div>
+<div class="sect4">
+<h5 id="_elasticsearch_head_console">Elasticsearch Head console</h5>
+<div class="paragraph">
+<p>In addition to the embedded elasticsearch instance, Decanter also provides
a web console allowing you to monitor and
+manage your elasticsearch cluster. It’s a ready to use elasticsearch-head
console, directly embedded in Karaf.</p>
+</div>
+<div class="paragraph">
+<p>The <code>elasticsearch-head</code> feature installs the embedded
elasticsearch-head web console, corresponding to the
+elasticsearch version you are using.</p>
+</div>
+<div class="paragraph">
+<p>We can install <code>elasticsearch-head</code> 1.x feature, working with
elasticsearch 1.x:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install elasticsearch-head/1.7.3</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>or 2.x feature, working with elasticsearch 2.x:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install elasticsearch-head/2.2.0</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>By default, the elasticsearch-head web console is available on
<code>http://host:8181/elasticsearch-head</code>.</p>
+</div>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_jdbc">1.3.3. JDBC</h4>
+<div class="paragraph">
+<p>The Decanter JDBC appender allows you to store the data (coming from the
collectors) into a database.</p>
+</div>
+<div class="paragraph">
+<p>The Decanter JDBC appender transforms the data as a json string. The
appender stores the json string and the timestamp
+into the database.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-jdbc</code> feature installs the jdbc
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-appender-jdbc</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.jdbc.cfg</code> configuration
file:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#######################################
+# Decanter JDBC Appender Configuration
+#######################################
+
+# Name of the JDBC datasource
+datasource.name=jdbc/decanter
+
+# Name of the table storing the collected data
+table.name=decanter
+
+# Dialect (type of the database)
+# The dialect is used to create the table
+# Supported dialects are: generic, derby, mysql
+# Instead of letting Decanter create the table, you can create the table by
your own
+dialect=generic</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This configuration file allows you to specify the connection to the
database:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>datasource.name</code> property contains the name of the JDBC
datasource to use to connect to the database. You can
+create this datasource using the Karaf <code>jdbc:create</code> command
(provided by the <code>jdbc</code> feature).</p>
+</li>
+<li>
+<p>the <code>table.name</code> property contains the table name in the
database. The Decanter JDBC appender automatically creates
+the table for you, but you can create the table by yourself. The table is
simple and contains just two columns:</p>
+<div class="ulist">
+<ul>
+<li>
+<p>timestamp as INTEGER</p>
+</li>
+<li>
+<p>content as VARCHAR or CLOB</p>
+</li>
+</ul>
+</div>
+</li>
+<li>
+<p>the <code>dialect</code> property allows you to specify the database type
(generic, mysql, derby). This property is only used for
+the table creation.</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_jms_2">1.3.4. JMS</h4>
+<div class="paragraph">
+<p>The Decanter JMS appender "forwards" the data (collected by the collectors)
to a JMS broker.</p>
+</div>
+<div class="paragraph">
+<p>The appender sends a JMS Map message to the broker. The Map message
contains the harvested data.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-jms</code> feature installs the JMS
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-appender-jms</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.jms.cfg</code> configuration file
containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#####################################
+# Decanter JMS Appender Configuration
+#####################################
+
+# Name of the JMS connection factory
+connection.factory.name=jms/decanter
+
+# Name of the destination
+destination.name=decanter
+
+# Type of the destination (queue or topic)
+destination.type=queue
+
+# Connection username
+# username=
+
+# Connection password
+# password=</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This configuration file allows you to specify the connection properties to
the JMS broker:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>connection.factory.name</code> property specifies the JMS
connection factory to use. You can create this JMS connection
+factory using the <code>jms:create</code> command (provided by the
<code>jms</code> feature).</p>
+</li>
+<li>
+<p>the <code>destination.name</code> property specifies the JMS destination
name where to send the data.</p>
+</li>
+<li>
+<p>the <code>destination.type</code> property specifies the JMS destination
type (queue or topic).</p>
+</li>
+<li>
+<p>the <code>username</code> property is optional and specifies the username
to connect to the destination.</p>
+</li>
+<li>
+<p>the <code>password</code> property is optional and specifies the password
to connect to the destination.</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_camel">1.3.5. Camel</h4>
+<div class="paragraph">
+<p>The Decanter Camel appender sends the data (collected by the collectors) to
a Camel endpoint.</p>
+</div>
+<div class="paragraph">
+<p>It’s a very flexible appender, allowing you to use any Camel route to
transform and forward the harvested data.</p>
+</div>
+<div class="paragraph">
+<p>The Camel appender creates a Camel exchange and set the "in" message body
with a Map of the harvested data.
+The exchange is sent to a Camel endpoint.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-camel</code> feature installs the Camel
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-appender-camel</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.camel.cfg</code> configuration
file containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#
+# Decanter Camel appender configuration
+#
+
+# The destination.uri contains the URI of the Camel endpoint
+# where Decanter sends the collected data
+destination.uri=direct-vm:decanter</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This file allows you to specify the Camel endpoint where to send the
data:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>destination.uri</code> property specifies the URI of the Camel
endpoint where to send the data.</p>
+</li>
+</ul>
+</div>
+<div class="paragraph">
+<p>The Camel appender sends an exchange. The "in" message body contains a Map
of the harvested data.</p>
+</div>
+<div class="paragraph">
+<p>For instance, in this configuration file, you can specify:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>destination.uri=direct-vm:decanter</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>And you can deploy the following Camel route definition:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre><?xml version="1.0" encoding="UTF-8"?>
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0">
+
+ <camelContext xmlns="http://camel.apache.org/schema/blueprint">
+ <route id="decanter">
+ <from uri="direct-vm:decanter"/>
+ ...
+ ANYTHING
+ ...
+ </route>
+ </camelContext>
+
+</blueprint></pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This route will receive the Map of harvested data. Using the body of the
"in" message, you can do what you want:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>transform and convert to another data format</p>
+</li>
+<li>
+<p>use any Camel EIPs (Enterprise Integration Patterns)</p>
+</li>
+<li>
+<p>send to any Camel endpoint</p>
+</li>
+</ul>
+</div>
+</div>
+<div class="sect3">
+<h4 id="_kafka_2">1.3.6. Kafka</h4>
+<div class="paragraph">
+<p>The Decanter Kafka appender sends the data (collected by the collectors) to
a Kafka topic.</p>
+</div>
+<div class="paragraph">
+<p>The <code>decanter-appender-kafka</code> feature installs the Kafka
appender:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>karaf@root()> feature:install decanter-appender-kafka</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This feature installs a default
<code>etc/org.apache.karaf.decanter.appender.kafka.cfg</code> configuration
file containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>##################################
+# Decanter JMS Kafka Configuration
+##################################
+
+# A list of host/port pairs to use for establishing the initial connection to
the Kafka cluster
+#bootstrap.servers=localhost:9092
+
+# An id string to pass to the server when making requests
+# client.id
+
+# The compression type for all data generated by the producer
+# compression.type=none
+
+# The number of acknowledgments the producer requires the leader to have
received before considering a request complete
+# - 0: the producer doesn't wait for ack
+# - 1: the producer just waits for the leader
+# - all: the producer waits for leader and all followers (replica), most secure
+# acks=all
+
+# Setting a value greater than zero will cause the client to resend any record
whose send fails with a potentially transient error
+# retries=0
+
+# The producer will attempt to batch records together into fewer requests
whenever multiple records are being sent to the same partition
+# batch.size=16384
+
+# The total bytes of memory the producer can use to buffer records waiting to
be sent to the server.
+# If records are sent faster than they can be delivered to the server the
producer will either block or throw an exception
+# buffer.memory=33554432
+
+# Serializer class for key that implements the Serializer interface
+# key.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+# Serializer class for value that implements the Serializer interface.
+# value.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+# Producer request timeout
+# request.timeout.ms=5000
+
+# Max size of the request
+# max.request.size=2097152
+
+# Name of the topic
+# topic=decanter
+
+# Security (SSL)
+# security.protocol=SSL
+
+# SSL truststore location (Kafka broker) and password
+# ssl.truststore.location=${karaf.etc}/keystores/keystore.jks
+# ssl.truststore.password=karaf
+
+# SSL keystore (if client authentication is required)
+# ssl.keystore.location=${karaf.etc}/keystores/clientstore.jks
+# ssl.keystore.password=karaf
+# ssl.key.password=karaf
+
+# (Optional) SSL provider (default uses the JVM one)
+# ssl.provider=
+
+# (Optional) SSL Cipher suites
+# ssl.cipher.suites=
+
+# (Optional) SSL Protocols enabled (default is TLSv1.2,TLSv1.1,TLSv1)
+# ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
+
+# (Optional) SSL Truststore type (default is JKS)
+# ssl.truststore.type=JKS
+
+# (Optional) SSL Keystore type (default is JKS)
+# ssl.keystore.type=JKS
+
+# Security (SASL)
+# For SASL, you have to configure Java System property as explained in
http://kafka.apache.org/documentation.html#security_ssl</pre>
+</div>
+</div>
+<div class="paragraph">
+<p>This file allows you to define how the messages are sent to the Kafka
broker:</p>
+</div>
+<div class="ulist">
+<ul>
+<li>
+<p>the <code>bootstrap.servers</code> contains a list of host:port of the Kafka
brokers. Default value is <code>localhost:9092</code>.</p>
+</li>
+<li>
+<p>the <code>client.id</code> is optional. It identifies the client on the
Kafka broker.</p>
+</li>
+<li>
+<p>the <code>compression.type</code> defines if the messages have to be
compressed on the Kafka broker. Default value is <code>none</code> meaning no
compression.</p>
+</li>
+<li>
+<p>the <code>acks</code> defines the acknowledgement policy. Default value is
<code>all</code>. Possible values are:</p>
<div class="ulist">
<ul>
<li>
-<p><code>cluster.name</code></p>
+<p><code>0</code> means the appender doesn’t wait for an acknowledgement from the
Kafka broker. Basically, it means there’s no guarantee that messages have
been received completely by the broker.</p>
</li>
<li>
-<p><code>http.enabled</code></p>
+<p><code>1</code> means the appender waits for the acknowledge only from the
leader. If the leader falls down, it’s possible messages are lost if the
replicas are not yet created on the followers.</p>
</li>
<li>
-<p><code>node.data</code></p>
+<p><code>all</code> means the appender waits for the acknowledge from the leader
and all followers. This mode is the most reliable as the appender will receive
the acknowledge only when all replicas have been created. NB: this mode
doesn’t make sense if you have a single node Kafka broker or a
replication factor set to 1.</p>
+</li>
+</ul>
+</div>
</li>
<li>
-<p><code>node.name</code></p>
+<p>the <code>retries</code> defines the number of retries performed by the
appender in case of error. The default value is <code>0</code> meaning no retry
at all.</p>
</li>
<li>
-<p><code>node.master</code></p>
+<p>the <code>batch.size</code> defines the size of the batch records. The
appender will attempt to batch records together into fewer requests whenever
multiple records are being sent to the same Kafka partition. The default value
is 16384.</p>
</li>
<li>
-<p><code>path.data</code></p>
+<p>the <code>buffer.memory</code> defines the size of the buffer the appender
uses to send to the Kafka broker. The default value is 33554432.</p>
</li>
<li>
-<p><code>network.host</code></p>
+<p>the <code>key.serializer</code> defines the full qualified class name of
the Serializer used to serialize the keys. The default is a String serializer
(<code>org.apache.kafka.common.serialization.StringSerializer</code>).</p>
</li>
<li>
-<p><code>cluster.routing.schedule</code></p>
+<p>the <code>value.serializer</code> defines the full qualified class name of
the Serializer used to serialize the values. The default is a String
serializer
(<code>org.apache.kafka.common.serialization.StringSerializer</code>).</p>
</li>
<li>
-<p><code>path.plugins</code></p>
+<p>the <code>request.timeout.ms</code> is the time the producer waits before
considering that the message production on the broker has failed (default is 5s).</p>
</li>
<li>
-<p><code>http.cors.enabled</code></p>
+<p>the <code>max.request.size</code> is the max size of the request sent to
the broker (default is 2097152 bytes).</p>
</li>
<li>
-<p><code>http.cors.allow-origin</code></p>
+<p>the <code>topic</code> defines the name of the topic where to send data on
the Kafka broker.</p>
</li>
</ul>
</div>
<div class="paragraph">
-<p>The advantage of using this file is that the elasticsearch node is
automatically restarted in order to reload the
-settings as soon as you change the cfg file.</p>
+<p>It’s also possible to enable SSL security (with Kafka 0.9.x) using
the SSL properties.</p>
</div>
</div>
<div class="sect3">
-<h4 id="_embedding_decanter_kibana">3.2.2. Embedding Decanter Kibana</h4>
+<h4 id="_redis">1.3.7. Redis</h4>
<div class="paragraph">
-<p>In addition of the embedded elasticsearch instance, Decanter also provides
an embedded Kibana instance, containing
-ready to use Decanter dashboards.</p>
+<p>The Decanter Redis appender sends the data (collected by the collectors) to
a Redis broker.</p>
</div>
<div class="paragraph">
-<p>The <code>kibana</code> feature installs the embedded kibana instance:</p>
+<p>The <code>decanter-appender-redis</code> feature installs the Redis
appender:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install kibana</pre>
+<pre>karaf@root()> feature:install decanter-appender-redis</pre>
</div>
</div>
<div class="paragraph">
-<p>By default, the kibana instance is available on
<code>http://host:8181/kibana</code>.</p>
+<p>This feature also installs a default
<code>etc/org.apache.karaf.decanter.appender.redis.cfg</code> configuration
file containing:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>#######################################
+# Decanter Redis Appender Configuration
+#######################################
+
+#
+# Location of the Redis broker
+# It's possible to use a list of brokers, for instance:
+# host=localhost:6389,localhost:6332,localhost:6419
+#
+# Default is localhost:6379
+#
+address=localhost:6379
+
+#
+# Define the connection mode.
+# Possible modes: Single (default), Master_Slave, Sentinel, Cluster
+#
+mode=Single
+
+#
+# Name of the Redis map
+# Default is Decanter
+#
+map=Decanter
+
+#
+# For Master_Slave mode, we define the location of the master
+# Default is localhost:6379
+#
+#masterAddress=localhost:6379
+
+#
+# For Sentinel model, define the name of the master
+# Default is myMaster
+#
+#masterName=myMaster
+
+#
+# For Cluster mode, define the scan interval of the nodes in the cluster
+# Default value is 2000 (2 seconds).
+#
+#scanInterval=2000</pre>
+</div>
</div>
<div class="paragraph">
-<p>The Decanter Kibana instance provides ready to use dashboards:</p>
+<p>This file allows you to configure the Redis broker to use:</p>
</div>
<div class="ulist">
<ul>
<li>
-<p>Karaf dashboard uses the data harvested by the default JMX collector, and
the log collector. Especially, it provides
-details about the threads, memory, garbage collection, etc.</p>
+<p>the <code>address</code> property contains the location of the Redis
broker</p>
</li>
<li>
-<p>Camel dashboard uses the data harvested by the default JMX collector, or
the Camel (JMX) collector. It can also
-leverage the Camel Tracer collector. It provides details about routes
processing time, the failed exchanges, etc. This
-dashboard requires some tuning (updating the queries to match the route
IDs).</p>
+<p>the <code>mode</code> property defines the Redis topology to use (Single,
Master_Slave, Sentinel, Cluster)</p>
</li>
<li>
-<p>ActiveMQ dashboard uses the data harvested by the default JMX collector, or
the ActiveMQ (JMX) collector. It provides
-details about the pending queue, the system usage, etc.</p>
+<p>the <code>map</code> property contains the name of the Redis map to use</p>
</li>
<li>
-<p>OperatingSystem dashboard uses the data harvested by the system collector.
The default dashboard expects data containing
-the filesystem usage, and temperature data. It’s just a sample, you have
to tune the system collector and adapt this
-dashboard accordingly.</p>
+<p>the <code>masterAddress</code> is the location of the master when using the
Master_Slave topology</p>
+</li>
+<li>
+<p>the <code>masterName</code> is the name of the master when using the
Sentinel topology</p>
+</li>
+<li>
+<p>the <code>scanInterval</code> is the scan interval of the nodes when using
the Cluster topology</p>
</li>
</ul>
</div>
-<div class="paragraph">
-<p>You can change these dashboards to add new panels, change the existing
panels, etc.</p>
-</div>
-<div class="paragraph">
-<p>Of course, you can create your own dashboards, starting from blank or
simple dashboards.</p>
-</div>
-<div class="paragraph">
-<p>By default, Decanter Kibana uses embedded elasticsearch instance. However,
it’s possible to use a remote elasticsearch
-instance by providing the elasticsearch parameter on the URL like this for
instance:</p>
-</div>
-<div class="listingblock">
-<div class="content">
-<pre>http://localhost:8181/kibana?elasticsearch=http://localhost:9400</pre>
-</div>
-</div>
</div>
<div class="sect3">
-<h4 id="_elasticsearch_head_console">3.2.3. Elasticsearch Head console</h4>
-<div class="paragraph">
-<p>In addition of the embedded elasticsearch instance, Decanter also provides
a web console allowing you to monitor and
-manage your elasticsearch cluster. It’s a ready to use elastisearch-head
console, directly embedded in Karaf.</p>
-</div>
-<div class="paragraph">
-<p>The <code>elasticsearch-head</code> feature installs the embedded
elasticsearch-head web console:</p>
-</div>
-<div class="listingblock">
-<div class="content">
-<pre>karaf@root()> feature:install elasticsearch-head</pre>
-</div>
-</div>
-<div class="paragraph">
-<p>By default, the elasticsearch-head web console is available on
<code>http://host:8181/elasticsearch-head</code>.</p>
-</div>
-</div>
-</div>
-<div class="sect2">
-<h3 id="_jdbc">3.3. JDBC</h3>
-<div class="paragraph">
-<p>The Decanter JDBC appender allows your to store the data (coming from the
collectors) into a database.</p>
-</div>
+<h4 id="_mqtt_2">1.3.8. MQTT</h4>
<div class="paragraph">
-<p>The Decanter JDBC appender transforms the data as a json string. The
appender stores the json string and the timestamp
-into the database.</p>
+<p>The Decanter MQTT appender sends the data (collected by the collectors) to
a MQTT broker.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-appender-jdbc</code> feature installs the jdbc
appender:</p>
+<p>The <code>decanter-appender-mqtt</code> feature installs the MQTT
appender:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-appender-jdbc</pre>
+<pre>karaf@root()> feature:install decanter-appender-mqtt</pre>
</div>
</div>
<div class="paragraph">
-<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.jdbc.cfg</code> configuration
file:</p>
+<p>This feature installs a default
<code>etc/org.apache.karaf.decanter.appender.mqtt.cfg</code> configuration file
containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>#######################################
-# Decanter JDBC Appender Configuration
-#######################################
-
-# Name of the JDBC datasource
-datasource.name=jdbc/decanter
-
-# Name of the table storing the collected data
-table.name=decanter
-
-# Dialect (type of the database)
-# The dialect is used to create the table
-# Supported dialects are: generic, derby, mysql
-# Instead of letting Decanter created the table, you can create the table by
your own
-dialect=generic</pre>
+<pre>#server=tcp://localhost:9300
+#clientId=decanter
+#topic=decanter</pre>
</div>
</div>
<div class="paragraph">
-<p>This configuration file allows you to specify the connection to the
database:</p>
+<p>This file allows you to configure the location of the MQTT broker and where
to send the messages:</p>
</div>
<div class="ulist">
<ul>
<li>
-<p>the <code>datasource.name</code> property contains the name of the JDBC
datasource to use to connect to the database. You can
-create this datasource using the Karaf <code>jdbc:create</code> command
(provided by the <code>jdbc</code> feature).</p>
-</li>
-<li>
-<p>the <code>table.name</code> property contains the table name in the
database. The Decanter JDBC appender automatically creates
-the table for you, but you can create the table by yourself. The table is
simple and contains just two column:</p>
-<div class="ulist">
-<ul>
-<li>
-<p>timestamp as INTEGER</p>
+<p>the <code>server</code> contains the location of the MQTT broker</p>
</li>
<li>
-<p>content as VARCHAR or CLOB</p>
-</li>
-</ul>
-</div>
+<p>the <code>clientId</code> identifies the appender on the MQTT broker</p>
</li>
<li>
-<p>the <code>dialect</code> property allows you to specify the database type
(generic, mysql, derby). This property is only used for
-the table creation.</p>
+<p>the <code>topic</code> is the name of the topic where to send the
messages</p>
</li>
</ul>
</div>
</div>
-<div class="sect2">
-<h3 id="_jms">3.4. JMS</h3>
-<div class="paragraph">
-<p>The Decanter JMS appender "forwards" the data (collected by the collectors)
to a JMS broker.</p>
-</div>
+<div class="sect3">
+<h4 id="_cassandra">1.3.9. Cassandra</h4>
<div class="paragraph">
-<p>The appender sends a JMS Map message to the broker. The Map message
contains the harvested data.</p>
+<p>The Decanter Cassandra appender allows you to store the data (coming from
the collectors) into an Apache Cassandra database.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-appender-jms</code> feature installs the JMS
appender:</p>
+<p>The <code>decanter-appender-cassandra</code> feature installs this
appender:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-appender-jms</pre>
+<pre>karaf@root()> feature:install decanter-appender-cassandra</pre>
</div>
</div>
<div class="paragraph">
-<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.jms.cfg</code> configuration file
containing:</p>
+<p>This feature installs the appender and a default
<code>etc/org.apache.karaf.decanter.appender.cassandra.cfg</code>
configuration file
+containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>#####################################
-# Decanter JMS Appender Configuration
-#####################################
-
-# Name of the JMS connection factory
-connection.factory.name=jms/decanter
+<pre>###########################################
+# Decanter Cassandra Appender Configuration
+###########################################
-# Name of the destination
-destination.name=decanter
+# Name of Keyspace
+keyspace.name=decanter
-# Type of the destination (queue or topic)
-destination.type=queue
+# Name of table to write to
+table.name=decanter
-# Connection username
-# username=
+# Cassandra host name
+cassandra.host=
-# Connection password
-# password=</pre>
+# Cassandra port
+cassandra.port=9042</pre>
</div>
</div>
-<div class="paragraph">
-<p>This configuration file allows you to specify the connection properties to
the JMS broker:</p>
-</div>
<div class="ulist">
<ul>
<li>
-<p>the <code>connection.factory.name</code> property specifies the JMS
connection factory to use. You can create this JMS connection
-factory using the <code>jms:create</code> command (provided by the
<code>jms</code> feature).</p>
-</li>
-<li>
-<p>the <code>destination.name</code> property specifies the JMS destination
name where to send the data.</p>
+<p>the <code>keyspace.name</code> property identifies the keyspace used for
Decanter data</p>
</li>
<li>
-<p>the <code>destination.type</code> property specifies the JMS destination
type (queue or topic).</p>
+<p>the <code>table.name</code> property defines the name of the table where to
store the data</p>
</li>
<li>
-<p>the <code>username</code> property is optional and specifies the username
to connect to the destination.</p>
+<p>the <code>cassandra.host</code> property contains the hostname or IP
address where the Cassandra instance is running (default is localhost)</p>
</li>
<li>
-<p>the <code>password</code> property is optional and specifies the username
to connect to the destination.</p>
+<p>the <code>cassandra.port</code> property contains the port number of the
Cassandra instance (default is 9042)</p>
</li>
</ul>
</div>
</div>
-<div class="sect2">
-<h3 id="_camel">3.5. Camel</h3>
-<div class="paragraph">
-<p>The Decanter Camel appender sends the data (collected by the collectors) to
a Camel endpoint.</p>
-</div>
-<div class="paragraph">
-<p>It’s a very flexible appender, allowing you to use any Camel route to
transform and forward the harvested data.</p>
-</div>
+<div class="sect3">
+<h4 id="_mongodb">1.3.10. MongoDB</h4>
<div class="paragraph">
-<p>The Camel appender creates a Camel exchange and set the "in" message body
with a Map of the harvested data.
-The exchange is send to a Camel endpoint.</p>
+<p>The Decanter MongoDB appender allows you to store the data (coming from the
collectors) into a MongoDB database.</p>
</div>
<div class="paragraph">
-<p>The <code>decanter-appender-camel</code> feature installs the Camel
appender:</p>
+<p>The <code>decanter-appender-mongodb</code> feature installs this
appender:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>karaf@root()> feature:install decanter-appender-camel</pre>
+<pre>karaf@root()> feature:install decanter-appender-mongodb</pre>
</div>
</div>
<div class="paragraph">
-<p>This feature also installs the
<code>etc/org.apache.karaf.decanter.appender.camel.cfg</code> configuration
file containing:</p>
+<p>This feature installs the appender and a default
<code>etc/org.apache.karaf.decanter.appender.mongodb.cfg</code> configuration
file
+containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>#
-# Decanter Camel appender configuration
-#
+<pre>################################
+# Decanter MongoDB Configuration
+################################
-# The destination.uri contains the URI of the Camel endpoint
-# where Decanter sends the collected data
-destination.uri=direct-vm:decanter</pre>
-</div>
+# MongoDB connection URI
+#uri=mongodb://localhost
+
+# MongoDB database name
+#database=decanter
+
+# MongoDB collection name
+#collection=decanter</pre>
</div>
-<div class="paragraph">
-<p>This file allows you to specify the Camel endpoint where to send the
data:</p>
</div>
<div class="ulist">
<ul>
<li>
-<p>the <code>destination.uri</code> property specifies the URI of the Camel
endpoint where to send the data.</p>
+<p>the <code>uri</code> property contains the location of the MongoDB
instance</p>
+</li>
+<li>
+<p>the <code>database</code> property contains the name of the MongoDB
database</p>
+</li>
+<li>
+<p>the <code>collection</code> property contains the name of the MongoDB
collection</p>
</li>
</ul>
</div>
+</div>
+<div class="sect3">
+<h4 id="_network_socket_2">1.3.11. Network socket</h4>
+<div class="paragraph">
+<p>The Decanter network socket appender sends the collected data to a remote
Decanter network socket collector.</p>
+</div>
<div class="paragraph">
-<p>The Camel appender send an exchange. The "in" message body contains a Map
of the harvested data.</p>
+<p>The use case could be to dedicate a Karaf instance as a central monitoring
platform, receiving collected data from
+the other nodes.</p>
</div>
<div class="paragraph">
-<p>For instance, in this configuration file, you can specify:</p>
+<p>The <code>decanter-appender-socket</code> feature installs this
appender:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre>destination.uri=direct-vm:decanter</pre>
+<pre>karaf@root()> feature:install decanter-appender-socket</pre>
</div>
</div>
<div class="paragraph">
-<p>And you can deploy the following Camel route definition:</p>
+<p>This feature installs the appender and a default
<code>etc/org.apache.karaf.decanter.appender.socket.cfg</code> configuration
file
+containing:</p>
</div>
<div class="listingblock">
<div class="content">
-<pre><?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0">
+<pre># Decanter Socket Appender
- <camelContext xmlns="http://camel.apache.org/schema/blueprint">
- <route id="decanter">
- <from uri="direct-vm:decanter"/>
- ...
- ANYTHING
- ...
- </route>
- </camelContext>
+# Hostname (or IP address) where to send the collected data
+#host=localhost
-</blueprint></pre>
-</div>
+# Port number where to send the collected data
+#port=34343</pre>
</div>
-<div class="paragraph">
-<p>This route will receive the Map of harvested data. Using the body of the
"in" message, you can do what you want:</p>
</div>
<div class="ulist">
<ul>
<li>
-<p>transform and convert to another data format</p>
-</li>
-<li>
-<p>use any Camel EIPs (Enterprise Integration Patterns)</p>
+<p>the <code>host</code> property contains the hostname or IP address of the
remote network socket collector</p>
</li>
<li>
-<p>send to any Camel endpoint</p>
+<p>the <code>port</code> property contains the port number of the remote
network socket collector</p>
</li>
</ul>
</div>
</div>
</div>
-</div>
-<div class="sect1">
-<h2 id="_sla_service_level_agreement">4. SLA (Service Level Agreement)</h2>
-<div class="sectionbody">
+<div class="sect2">
+<h3 id="_sla_service_level_agreement">1.4. SLA (Service Level Agreement)</h3>
<div class="paragraph">
<p>Decanter provides a SLA (Service Level Agreement) layer. It allows you to
check values of harvested data (coming from
the collectors) and send alerts when the data is not in the expected state.</p>
</div>
-<div class="sect2">
-<h3 id="_checker">4.1. Checker</h3>
+<div class="sect3">
+<h4 id="_checker">1.4.1. Checker</h4>
<div class="paragraph">
<p>The SLA checker is automatically installed as soon as you install a SLA
alerter feature.</p>
</div>
@@ -1987,16 +3171,16 @@ or source of the collected data.</p>
</div>
</div>
</div>
-<div class="sect2">
-<h3 id="_alerters">4.2. Alerters</h3>
+<div class="sect3">
+<h4 id="_alerters">1.4.2. Alerters</h4>
<div class="paragraph">
<p>When the value doesn’t verify the check in the checker configuration,
an alert is created and sent to the alerters.</p>
</div>
<div class="paragraph">
<p>Apache Karaf Decanter provides ready to use alerters.</p>
</div>
-<div class="sect3">
-<h4 id="_log_3">4.2.1. Log</h4>
+<div class="sect4">
+<h5 id="_log_3">Log</h5>
<div class="paragraph">
<p>The Decanter SLA Log alerter logs a message for each alert.</p>
</div>
@@ -2012,8 +3196,8 @@ or source of the collected data.</p>
<p>This alerter doesn’t need any configuration.</p>
</div>
</div>
-<div class="sect3">
-<h4 id="_e_mail">4.2.2. E-mail</h4>
+<div class="sect4">
+<h5 id="_e_mail">E-mail</h5>
<div class="paragraph">
<p>The Decanter SLA e-mail alerter sends an e-mail for each alert.</p>
</div>
@@ -2093,8 +3277,8 @@ ssl=false
</ul>
</div>
</div>
-<div class="sect3">
-<h4 id="_camel_2">4.2.3. Camel</h4>
+<div class="sect4">
+<h5 id="_camel_2">Camel</h5>
<div class="paragraph">
<p>The Decanter SLA Camel alerter sends each alert to a Camel endpoint.</p>
</div>
@@ -2164,10 +3348,12 @@ alert.destination.uri=direct-vm:decanter
</div>
</div>
</div>
-<h1 id="_developer_guide" class="sect0">Developer Guide</h1>
+</div>
<div class="sect1">
-<h2 id="_architecture">1. Architecture</h2>
+<h2 id="_developer_guide">2. Developer Guide</h2>
<div class="sectionbody">
+<div class="sect2">
+<h3 id="_architecture">2.1. Architecture</h3>
<div class="paragraph">
<p>Apache Karaf Decanter uses OSGi EventAdmin to dispatch the harvested data
between the collectors and the appenders,
and also to throw the alerts to the alerters:</p>
@@ -2194,10 +3380,8 @@ these topics.</p>
<p>It means that you can easily extend Decanter adding your own collectors,
appenders, or alerters.</p>
</div>
</div>
-</div>
-<div class="sect1">
-<h2 id="_custom_collector">2. Custom Collector</h2>
-<div class="sectionbody">
+<div class="sect2">
+<h3 id="_custom_collector">2.2. Custom Collector</h3>
<div class="paragraph">
<p>A Decanter collector sends an OSGi EventAdmin event to a
<code>decanter/collect/*</code> topic.</p>
</div>
@@ -2214,8 +3398,8 @@ these topics.</p>
</li>
</ul>
</div>
-<div class="sect2">
-<h3 id="_event_driven_collector">2.1. Event Driven Collector</h3>
+<div class="sect3">
+<h4 id="_event_driven_collector">2.2.1. Event Driven Collector</h4>
<div class="paragraph">
<p>For instance, the log collector is event driven: it automatically reacts to
internal log events.</p>
</div>
@@ -2227,7 +3411,7 @@ changes (installed, started, stopped, un
<p>The purpose is to send a monitoring event in a collect topic. This
monitoring event can be consumed by the appenders.</p>
</div>
<div class="paragraph">
-<p>We create the following <code>BundleCollector</code> class implemetings
<code>SynchronousBundleListener</code> interface:</p>
+<p>We create the following <code>BundleCollector</code> class implementing
<code>SynchronousBundleListener</code> interface:</p>
</div>
<div class="listingblock">
<div class="content">
@@ -2376,8 +3560,8 @@ public class Activator implements Bundle
<code>bundle:install</code> command.</p>
</div>
</div>
-<div class="sect2">
-<h3 id="_polled_collector">2.2. Polled Collector</h3>
+<div class="sect3">
+<h4 id="_polled_collector">2.2.2. Polled Collector</h4>
<div class="paragraph">
<p>You can also create a polled collector.</p>
</div>
@@ -2537,10 +3721,8 @@ public class Activator implements Bundle
</div>
</div>
</div>
-</div>
-<div class="sect1">
-<h2 id="_custom_appender">3. Custom Appender</h2>
-<div class="sectionbody">
+<div class="sect2">
+<h3 id="_custom_appender">2.3. Custom Appender</h3>
<div class="paragraph">
<p>A Decanter Appender is an OSGi EventAdmin EventHandler: it’s
listening of <code>decanter/collect/*</code> EventAdmin topics, and
receives the monitoring data coming from the collectors.</p>
@@ -2625,7 +3807,7 @@ public class Activator implements Bundle
<groupId>org.apache.karaf.decanter.sample.appender</groupId>
<artifactId>org.apache.karaf.decanter.sample.appender.systemout</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<name>Apache Karaf :: Decanter :: Sample :: Appender ::
SystemOut</name>
@@ -2675,10 +3857,8 @@ public class Activator implements Bundle
<p>Once built, you can enable this appender by deploying the bundle in Karaf
(using the deploy folder or the <code>bundle:install</code> command).</p>
</div>
</div>
-</div>
-<div class="sect1">
-<h2 id="_custom_sla_alerter">4. Custom SLA Alerter</h2>
-<div class="sectionbody">
+<div class="sect2">
+<h3 id="_custom_sla_alerter">2.4. Custom SLA Alerter</h3>
<div class="paragraph">
<p>A Decanter SLA Alerter is basically a special kind of appender.</p>
</div>
@@ -2815,9 +3995,10 @@ public class Activator implements Bundle
</div>
</div>
</div>
+</div>
<div id="footer">
<div id="footer-text">
-Last updated 2015-08-02 08:44:39 CEST
+Last updated 2016-07-03 08:10:09 +02:00
</div>
[... 3 lines stripped ...]