http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishKafkaTest.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishKafkaTest.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishKafkaTest.java
new file mode 100644
index 0000000..5480ea7
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishKafkaTest.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.pubsub;
+
+import java.nio.charset.StandardCharsets;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.apache.nifi.util.MockFlowFile;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.Test;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.times;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.verify;
+
+public class PublishKafkaTest {
+
+    @Test
+    public void validateCustomSerializerDeserializerSettings() throws Exception {
+        PublishKafka_0_10 publishKafka = new PublishKafka_0_10();
+        TestRunner runner = TestRunners.newTestRunner(publishKafka);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "okeydokey:1234");
+        runner.setProperty(PublishKafka_0_10.TOPIC, "foo");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "3 sec");
+        runner.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
+        runner.assertValid();
+        runner.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "Foo");
+        runner.assertNotValid();
+    }
+
+    @Test
+    public void validatePropertiesValidation() throws Exception {
+        PublishKafka_0_10 publishKafka = new PublishKafka_0_10();
+        TestRunner runner = TestRunners.newTestRunner(publishKafka);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "okeydokey:1234");
+        runner.setProperty(PublishKafka_0_10.TOPIC, "foo");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "foo");
+
+        try {
+            runner.assertValid();
+            fail();
+        } catch (AssertionError e) {
+            assertTrue(e.getMessage().contains("'max.block.ms' validated against 'foo' is invalid"));
+        }
+    }
+
+    @Test
+    public void validateCustomValidation() {
+        String topicName = "validateCustomValidation";
+        PublishKafka_0_10 publishKafka = new PublishKafka_0_10();
+
+        /*
+         * Validates that a Kerberos principal is required if one of the SASL
+         * security protocols is set
+         */
+        TestRunner runner = TestRunners.newTestRunner(publishKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(KafkaProcessorUtils.SECURITY_PROTOCOL, KafkaProcessorUtils.SEC_SASL_PLAINTEXT);
+        try {
+            runner.run();
+            fail();
+        } catch (Throwable e) {
+            assertTrue(e.getMessage().contains("'Kerberos Service Name' is invalid because"));
+        }
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateSingleCharacterDemarcatedMessages() {
+        String topicName = "validateSingleCharacterDemarcatedMessages";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+
+        runner.enqueue("Hello World\nGoodbye\n1\n2\n3\n4\n5".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(7)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateMultiCharacterDemarcatedMessagesAndCustomPartitionerA() {
+        String topicName = "validateMultiCharacterDemarcatedMessagesAndCustomPartitioner";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.PARTITION_CLASS, Partitioners.RoundRobinPartitioner.class.getName());
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "foo");
+
+        runner.enqueue("Hello WorldfooGoodbyefoo1foo2foo3foo4foo5".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(7)).send(Mockito.any(ProducerRecord.class));
+
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateMultiCharacterDemarcatedMessagesAndCustomPartitionerB() {
+        String topicName = "validateMultiCharacterDemarcatedMessagesAndCustomPartitioner";
+        StubPublishKafka putKafka = new StubPublishKafka(1);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.PARTITION_CLASS, Partitioners.RoundRobinPartitioner.class.getName());
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "foo");
+
+        runner.enqueue("Hello WorldfooGoodbyefoo1foo2foo3foo4foo5".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(7)).send(Mockito.any(ProducerRecord.class));
+
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateOnSendFailureAndThenResendSuccessA() throws Exception {
+        String topicName = "validateSendFailureAndThenResendSuccess";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "3000 millis");
+
+        final String text = "Hello World\nGoodbye\nfail\n2";
+        runner.enqueue(text.getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        assertEquals(1, runner.getQueueSize().getObjectCount()); // due to failure
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(4)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+        putKafka.destroy();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateOnSendFailureAndThenResendSuccessB() throws Exception {
+        String topicName = "validateSendFailureAndThenResendSuccess";
+        StubPublishKafka putKafka = new StubPublishKafka(1);
+
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "500 millis");
+
+        final String text = "Hello World\nGoodbye\nfail\n2";
+        runner.enqueue(text.getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        assertEquals(1, runner.getQueueSize().getObjectCount()); // due to failure
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(4)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateOnFutureGetFailureAndThenResendSuccessFirstMessageFail() throws Exception {
+        String topicName = "validateSendFailureAndThenResendSuccess";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "500 millis");
+
+        final String text = "futurefail\nHello World\nGoodbye\n2";
+        runner.enqueue(text.getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        MockFlowFile ff = runner.getFlowFilesForRelationship(PublishKafka_0_10.REL_FAILURE).get(0);
+        assertNotNull(ff);
+        runner.enqueue(ff);
+
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        // 5 sends in total across the two runs
+        verify(producer, times(5)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateOnFutureGetFailureAndThenResendSuccess() throws Exception {
+        String topicName = "validateSendFailureAndThenResendSuccess";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+        runner.setProperty(PublishKafka_0_10.META_WAIT_TIME, "500 millis");
+
+        final String text = "Hello World\nGoodbye\nfuturefail\n2";
+        runner.enqueue(text.getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+        MockFlowFile ff = runner.getFlowFilesForRelationship(PublishKafka_0_10.REL_FAILURE).get(0);
+        assertNotNull(ff);
+        runner.enqueue(ff);
+
+        runner.run(1, false);
+        assertEquals(0, runner.getQueueSize().getObjectCount());
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        // 6 sends due to duplication
+        verify(producer, times(6)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateDemarcationIntoEmptyMessages() {
+        String topicName = "validateDemarcationIntoEmptyMessages";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        final TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(PublishKafka_0_10.KEY, "key1");
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "\n");
+
+        final byte[] bytes = "\n\n\n1\n2\n\n\n\n3\n4\n\n\n".getBytes(StandardCharsets.UTF_8);
+        runner.enqueue(bytes);
+        runner.run(1);
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(4)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateComplexRightPartialDemarcatedMessages() {
+        String topicName = "validateComplexRightPartialDemarcatedMessages";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "僠<僠WILDSTUFF僠>僠");
+
+        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠WILDSTUFF僠>僠I Mean IT!僠<僠WILDSTUFF僠>".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(3)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateComplexLeftPartialDemarcatedMessages() {
+        String topicName = "validateComplexLeftPartialDemarcatedMessages";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "僠<僠WILDSTUFF僠>僠");
+
+        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠WILDSTUFF僠>僠I Mean IT!僠<僠WILDSTUFF僠>僠<僠WILDSTUFF僠>僠".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+
+        runner.assertAllFlowFilesTransferred(PublishKafka_0_10.REL_SUCCESS, 1);
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(4)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void validateComplexPartialMatchDemarcatedMessages() {
+        String topicName = "validateComplexPartialMatchDemarcatedMessages";
+        StubPublishKafka putKafka = new StubPublishKafka(100);
+        TestRunner runner = TestRunners.newTestRunner(putKafka);
+        runner.setProperty(PublishKafka_0_10.TOPIC, topicName);
+        runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "localhost:1234");
+        runner.setProperty(PublishKafka_0_10.MESSAGE_DEMARCATOR, "僠<僠WILDSTUFF僠>僠");
+
+        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠WILDBOOMSTUFF僠>僠".getBytes(StandardCharsets.UTF_8));
+        runner.run(1, false);
+
+        runner.assertAllFlowFilesTransferred(PublishKafka_0_10.REL_SUCCESS, 1);
+        Producer<byte[], byte[]> producer = putKafka.getProducer();
+        verify(producer, times(2)).send(Mockito.any(ProducerRecord.class));
+        runner.shutdown();
+    }
+}

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishingContextTest.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishingContextTest.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishingContextTest.java
new file mode 100644
index 0000000..4a9a1c0
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/PublishingContextTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.pubsub;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+
+import org.junit.Test;
+
+public class PublishingContextTest {
+
+    @Test
+    public void failInvalidConstructorArgs() {
+        try {
+            new PublishingContext(null, null);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+        try {
+            new PublishingContext(mock(InputStream.class), null);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+
+        try {
+            new PublishingContext(mock(InputStream.class), "");
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+
+        try {
+            new PublishingContext(mock(InputStream.class), "mytopic", -3);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+    }
+
+    @Test
+    public void validateFullSetting() {
+        PublishingContext publishingContext = new PublishingContext(mock(InputStream.class), "topic", 3);
+        publishingContext.setDelimiterBytes("delimiter".getBytes(StandardCharsets.UTF_8));
+        publishingContext.setKeyBytes("key".getBytes(StandardCharsets.UTF_8));
+
+        assertEquals("delimiter", new String(publishingContext.getDelimiterBytes(), StandardCharsets.UTF_8));
+        assertEquals("key", new String(publishingContext.getKeyBytes(), StandardCharsets.UTF_8));
+        assertEquals("topic", publishingContext.getTopic());
+        assertEquals("topic: 'topic'; delimiter: 'delimiter'", publishingContext.toString());
+    }
+
+    @Test
+    public void validateOnlyOnceSetPerInstance() {
+        PublishingContext publishingContext = new PublishingContext(mock(InputStream.class), "topic");
+        publishingContext.setKeyBytes(new byte[]{0});
+        try {
+            publishingContext.setKeyBytes(new byte[]{0});
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+
+        publishingContext.setDelimiterBytes(new byte[]{0});
+        try {
+            publishingContext.setDelimiterBytes(new byte[]{0});
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+
+        publishingContext.setMaxRequestSize(1024);
+        try {
+            publishingContext.setMaxRequestSize(1024);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+
+        try {
+            publishingContext.setMaxRequestSize(-10);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // success
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/StubPublishKafka.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/StubPublishKafka.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/StubPublishKafka.java
new file mode 100644
index 0000000..27d86f5
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/StubPublishKafka.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.pubsub;
+
+import java.lang.reflect.Field;
+import static org.mockito.Mockito.when;
+
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.TopicAuthorizationException;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.apache.nifi.logging.ComponentLog;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.exception.ProcessException;
+import static org.apache.nifi.processors.kafka.pubsub.KafkaProcessorUtils.BOOTSTRAP_SERVERS;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.mock;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class StubPublishKafka extends PublishKafka_0_10 {
+
+    private volatile Producer<byte[], byte[]> producer;
+
+    private volatile boolean failed;
+
+    private final int ackCheckSize;
+
+    private final ExecutorService executor = Executors.newCachedThreadPool();
+
+    StubPublishKafka(int ackCheckSize) {
+        this.ackCheckSize = ackCheckSize;
+    }
+
+    public Producer<byte[], byte[]> getProducer() {
+        return producer;
+    }
+
+    public void destroy() {
+        this.executor.shutdownNow();
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    protected KafkaPublisher buildKafkaResource(ProcessContext context, ProcessSession session)
+            throws ProcessException {
+        final Map<String, String> kafkaProperties = new HashMap<>();
+        KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
+        kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
+        kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
+        KafkaPublisher publisher;
+        try {
+            Field f = PublishKafka_0_10.class.getDeclaredField("brokers");
+            f.setAccessible(true);
+            f.set(this, context.getProperty(BOOTSTRAP_SERVERS).evaluateAttributeExpressions().getValue());
+            publisher = (KafkaPublisher) TestUtils.getUnsafe().allocateInstance(KafkaPublisher.class);
+            publisher.setAckWaitTime(15000);
+            producer = mock(Producer.class);
+            this.instrumentProducer(producer, false);
+            Field kf = KafkaPublisher.class.getDeclaredField("kafkaProducer");
+            kf.setAccessible(true);
+            kf.set(publisher, producer);
+
+            Field componentLogF = KafkaPublisher.class.getDeclaredField("componentLog");
+            componentLogF.setAccessible(true);
+            componentLogF.set(publisher, mock(ComponentLog.class));
+
+            Field ackCheckSizeField = KafkaPublisher.class.getDeclaredField("ackCheckSize");
+            ackCheckSizeField.setAccessible(true);
+            ackCheckSizeField.set(publisher, this.ackCheckSize);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw new IllegalStateException(e);
+        }
+        return publisher;
+    }
+
+    @SuppressWarnings("unchecked")
+    private void instrumentProducer(Producer<byte[], byte[]> producer, boolean failRandomly) {
+
+        when(producer.send(Mockito.any(ProducerRecord.class))).then(new Answer<Future<RecordMetadata>>() {
+            @Override
+            public Future<RecordMetadata> answer(InvocationOnMock invocation) throws Throwable {
+                ProducerRecord<byte[], byte[]> record = (ProducerRecord<byte[], byte[]>) invocation.getArguments()[0];
+                String value = new String(record.value(), StandardCharsets.UTF_8);
+                if ("fail".equals(value) && !StubPublishKafka.this.failed) {
+                    StubPublishKafka.this.failed = true;
+                    throw new RuntimeException("intentional");
+                }
+                Future<RecordMetadata> future = executor.submit(new Callable<RecordMetadata>() {
+                    @Override
+                    public RecordMetadata call() throws Exception {
+                        if ("futurefail".equals(value) && !StubPublishKafka.this.failed) {
+                            StubPublishKafka.this.failed = true;
+                            throw new TopicAuthorizationException("Unauthorized");
+                        } else {
+                            TopicPartition partition = new TopicPartition("foo", 0);
+                            RecordMetadata meta = new RecordMetadata(partition, 0, 0);
+                            return meta;
+                        }
+                    }
+                });
+                return future;
+            }
+        });
+    }
+}

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/TestUtils.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/TestUtils.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/TestUtils.java
new file mode 100644
index 0000000..819e3b7
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/pubsub/TestUtils.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.pubsub;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+
+import sun.misc.Unsafe;
+
+class TestUtils {
+
+    public static void setFinalField(Field field, Object instance, Object newValue) throws Exception {
+        field.setAccessible(true);
+        Field modifiersField = Field.class.getDeclaredField("modifiers");
+        modifiersField.setAccessible(true);
+        modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
+
+        field.set(instance, newValue);
+    }
+
+    static Unsafe getUnsafe() {
+        try {
+            Field f = Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            return (Unsafe) f.get(null);
+        } catch (Exception e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
new file mode 100644
index 0000000..802f889
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.ServerSocket;
+import java.util.Properties;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServerStartable;
+
+/**
+ * Embedded Kafka server, primarily to be used for testing.
+ */
+public class EmbeddedKafka {
+
+    private final KafkaServerStartable kafkaServer;
+
+    private final Properties zookeeperConfig;
+
+    private final Properties kafkaConfig;
+
+    private final ZooKeeperServer zkServer;
+
+    private final Logger logger = LoggerFactory.getLogger(EmbeddedKafka.class);
+
+    private final int kafkaPort;
+
+    private final int zookeeperPort;
+
+    private boolean started;
+
+    /**
+     * Will create an instance of the embedded Kafka server. Kafka and Zookeeper
+     * configuration properties will be loaded from 'server.properties' and
+     * 'zookeeper.properties' located at the root of the classpath.
+     */
+    public EmbeddedKafka() {
+        this(loadPropertiesFromClasspath("/server.properties"), loadPropertiesFromClasspath("/zookeeper.properties"));
+    }
+
+    /**
+     * Will create an instance of the embedded Kafka server.
+     *
+     * @param kafkaConfig
+     *            Kafka configuration properties
+     * @param zookeeperConfig
+     *            Zookeeper configuration properties
+     */
+    public EmbeddedKafka(Properties kafkaConfig, Properties zookeeperConfig) {
+        this.cleanupKafkaWorkDir();
+        this.zookeeperConfig = zookeeperConfig;
+        this.kafkaConfig = kafkaConfig;
+        this.kafkaPort = this.availablePort();
+        this.zookeeperPort = this.availablePort();
+
+        this.kafkaConfig.setProperty("port", String.valueOf(this.kafkaPort));
+        this.kafkaConfig.setProperty("zookeeper.connect", "localhost:" + this.zookeeperPort);
+        this.zookeeperConfig.setProperty("clientPort", String.valueOf(this.zookeeperPort));
+        this.zkServer = new ZooKeeperServer();
+        this.kafkaServer = new KafkaServerStartable(new KafkaConfig(kafkaConfig));
+    }
+
+    /**
+     *
+     * @return port for Kafka server
+     */
+    public int getKafkaPort() {
+        if (!this.started) {
+            throw new IllegalStateException("Kafka server is not started. Kafka port can't be determined.");
+        }
+        return this.kafkaPort;
+    }
+
+    /**
+     *
+     * @return port for Zookeeper server
+     */
+    public int getZookeeperPort() {
+        if (!this.started) {
+            throw new IllegalStateException("Kafka server is not started. Zookeeper port can't be determined.");
+        }
+        return this.zookeeperPort;
+    }
+
+    /**
+     * Will start the embedded Kafka server. Its data directories will be created
+     * at 'kafka-tmp' directory relative to the working directory of the current
+     * runtime. The data directories will be deleted upon JVM exit.
+     *
+     */
+    public void start() {
+        if (!this.started) {
+            logger.info("Starting Zookeeper server");
+            this.startZookeeper();
+
+            logger.info("Starting Kafka server");
+            this.kafkaServer.startup();
+
+            logger.info("Embedded Kafka is started at localhost:" + this.kafkaServer.serverConfig().port()
+                    + ". Zookeeper connection string: " + this.kafkaConfig.getProperty("zookeeper.connect"));
+            this.started = true;
+        }
+    }
+
+    /**
+     * Will stop the embedded Kafka server, cleaning up all working directories.
+     */
+    public void stop() {
+        if (this.started) {
+            logger.info("Shutting down Kafka server");
+            this.kafkaServer.shutdown();
+            this.kafkaServer.awaitShutdown();
+            logger.info("Shutting down Zookeeper server");
+            this.shutdownZookeeper();
+            logger.info("Embedded Kafka is shut down.");
+            this.cleanupKafkaWorkDir();
+            this.started = false;
+        }
+    }
+
+    /**
+     * Deletes the 'target/kafka-tmp' working directory, logging a warning on failure.
+     */
+    private void cleanupKafkaWorkDir() {
+        File kafkaTmp = new File("target/kafka-tmp");
+        try {
+            FileUtils.deleteDirectory(kafkaTmp);
+        } catch (Exception e) {
+            logger.warn("Failed to delete " + kafkaTmp.getAbsolutePath());
+        }
+    }
+
+    /**
+     * Will start Zookeeper server via {@link ServerCnxnFactory}
+     */
+    private void startZookeeper() {
+        QuorumPeerConfig quorumConfiguration = new QuorumPeerConfig();
+        try {
+            quorumConfiguration.parseProperties(this.zookeeperConfig);
+
+            ServerConfig configuration = new ServerConfig();
+            configuration.readFrom(quorumConfiguration);
+
+            FileTxnSnapLog txnLog = new FileTxnSnapLog(new File(configuration.getDataLogDir()), new File(configuration.getDataDir()));
+
+            zkServer.setTxnLogFactory(txnLog);
+            zkServer.setTickTime(configuration.getTickTime());
+            zkServer.setMinSessionTimeout(configuration.getMinSessionTimeout());
+            zkServer.setMaxSessionTimeout(configuration.getMaxSessionTimeout());
+            ServerCnxnFactory zookeeperConnectionFactory = ServerCnxnFactory.createFactory();
+            zookeeperConnectionFactory.configure(configuration.getClientPortAddress(),
+                    configuration.getMaxClientCnxns());
+            zookeeperConnectionFactory.startup(zkServer);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to start Zookeeper server", e);
+        }
+    }
+
+    /**
+     * Will shut down Zookeeper server.
+     */
+    private void shutdownZookeeper() {
+        zkServer.shutdown();
+    }
+
+    /**
+     * Will load {@link Properties} from properties file discovered at the
+     * provided path relative to the root of the classpath.
+     */
+    private static Properties loadPropertiesFromClasspath(String path) {
+        try {
+            Properties kafkaProperties = new Properties();
+            kafkaProperties.load(Class.class.getResourceAsStream(path));
+            return kafkaProperties;
+        } catch (Exception e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    /**
+     * Will determine an available port to be used by the Kafka/Zookeeper servers.
+     */
+    private int availablePort() {
+        ServerSocket s = null;
+        try {
+            s = new ServerSocket(0);
+            s.setReuseAddress(true);
+            return s.getLocalPort();
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to discover available port.", e);
+        } finally {
+            try {
+                s.close();
+            } catch (IOException e) {
+                // ignore
+            }
+        }
+    }
+}
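
The javadoc above describes the intended flow: load broker and ZooKeeper settings from the test classpath, call start(), then read the randomly assigned ports. A minimal JUnit sketch of that flow follows; it assumes the EmbeddedKafka class and the server.properties/zookeeper.properties resources added by this patch are on the test classpath, and the class and test names are illustrative only:

    import org.apache.nifi.processors.kafka.test.EmbeddedKafka;
    import org.junit.AfterClass;
    import org.junit.Assert;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class EmbeddedKafkaUsageSketch {

        private static EmbeddedKafka kafka;

        @BeforeClass
        public static void startBroker() {
            // the no-arg constructor loads /server.properties and /zookeeper.properties from the classpath
            kafka = new EmbeddedKafka();
            kafka.start();
        }

        @AfterClass
        public static void stopBroker() {
            // stop() shuts down Kafka and Zookeeper and removes target/kafka-tmp
            kafka.stop();
        }

        @Test
        public void portsAreAssignedAfterStart() {
            // both getters throw IllegalStateException if called before start()
            Assert.assertTrue(kafka.getKafkaPort() > 0);
            Assert.assertTrue(kafka.getZookeeperPort() > 0);
        }
    }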

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
new file mode 100644
index 0000000..0ed00fb
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka.test;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Properties;
+
+import kafka.producer.KeyedMessage;
+import kafka.producer.OldProducer;
+
+/**
+ * Helper class for producing events targeting the {@link EmbeddedKafka}
+ * server.
+ */
+public class EmbeddedKafkaProducerHelper implements Closeable {
+
+    private final EmbeddedKafka kafkaServer;
+
+    private final OldProducer producer;
+
+    /**
+     * Will create an instance of EmbeddedKafkaProducerHelper based on default
+     * configurations.<br>
+     * Default configuration includes:<br>
+     * <i>
+     * metadata.broker.list=[determined from the instance of EmbeddedKafka]<br>
+     * serializer.class=kafka.serializer.DefaultEncoder<br>
+     * key.serializer.class=kafka.serializer.DefaultEncoder<br>
+     * auto.create.topics.enable=true
+     * </i><br>
+     * <br>
+     * If you wish to supply additional configuration properties or override
+     * existing ones, use the
+     * {@link EmbeddedKafkaProducerHelper#EmbeddedKafkaProducerHelper(EmbeddedKafka, Properties)}
+     * constructor.
+     *
+     * @param kafkaServer
+     *            instance of {@link EmbeddedKafka}
+     */
+    public EmbeddedKafkaProducerHelper(EmbeddedKafka kafkaServer) {
+        this(kafkaServer, null);
+    }
+
+    /**
+     * Will create an instance of EmbeddedKafkaProducerHelper based on default
+     * configurations and additional configuration properties.<br>
+     * Default configuration includes:<br>
+     * metadata.broker.list=[determined from the instance of EmbeddedKafka]<br>
+     * serializer.class=kafka.serializer.DefaultEncoder<br>
+     * key.serializer.class=kafka.serializer.DefaultEncoder<br>
+     * auto.create.topics.enable=true<br>
+     * <br>
+     *
+     * @param kafkaServer
+     *            instance of {@link EmbeddedKafka}
+     * @param additionalProperties
+     *            instance of {@link Properties} specifying additional producer
+     *            configuration properties.
+     */
+    public EmbeddedKafkaProducerHelper(EmbeddedKafka kafkaServer, Properties additionalProperties) {
+        this.kafkaServer = kafkaServer;
+        Properties producerProperties = new Properties();
+        producerProperties.put("metadata.broker.list", "localhost:" + this.kafkaServer.getKafkaPort());
+        producerProperties.put("serializer.class", "kafka.serializer.DefaultEncoder");
+        producerProperties.put("key.serializer.class", "kafka.serializer.DefaultEncoder");
+        producerProperties.put("auto.create.topics.enable", "true");
+        if (additionalProperties != null) {
+            producerProperties.putAll(additionalProperties);
+        }
+        this.producer = new OldProducer(producerProperties);
+    }
+
+    /**
+     * Will send an event to a Kafka topic. If the topic doesn't exist it will be
+     * auto-created.
+     *
+     * @param topicName
+     *            Kafka topic name.
+     * @param event
+     *            string representing an event (message) to be sent to Kafka.
+     */
+    public void sendEvent(String topicName, String event) {
+        KeyedMessage<byte[], byte[]> data = new KeyedMessage<byte[], byte[]>(topicName, event.getBytes());
+        this.producer.send(data.topic(), data.key(), data.message());
+    }
+
+    /**
+     * Will close the underlying Kafka producer.
+     */
+    @Override
+    public void close() throws IOException {
+        this.producer.close();
+    }
+
+}
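
Similarly, a short sketch of how the producer helper might be combined with the embedded broker; the topic name and payload are illustrative and not taken from this commit:

    import org.apache.nifi.processors.kafka.test.EmbeddedKafka;
    import org.apache.nifi.processors.kafka.test.EmbeddedKafkaProducerHelper;

    public class ProducerHelperUsageSketch {

        public static void main(String[] args) throws Exception {
            EmbeddedKafka kafka = new EmbeddedKafka();
            kafka.start();
            // try-with-resources closes the underlying producer since the helper implements Closeable
            try (EmbeddedKafkaProducerHelper helper = new EmbeddedKafkaProducerHelper(kafka)) {
                // auto.create.topics.enable=true by default, so the topic is created on first send
                helper.sendEvent("example-topic", "hello from the embedded broker");
            } finally {
                kafka.stop();
            }
        }
    }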

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/log4j.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/log4j.properties
new file mode 100644
index 0000000..57cd63f
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/log4j.properties
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+log4j.rootCategory=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %t %c{2}:%L - %m%n
+
+#log4j.category.org.apache.nifi.processors.kafka=DEBUG

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/server.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/server.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/server.properties
new file mode 100644
index 0000000..2ecb1b2
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/server.properties
@@ -0,0 +1,121 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The port the socket server listens on
+#port=9092
+
+# Hostname the broker will bind to. If not set, the server will bind to all interfaces
+#host.name=localhost
+
+# Hostname the broker will advertise to producers and consumers. If not set, it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=3
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=target/kafka-tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=6000

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/zookeeper.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/zookeeper.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/zookeeper.properties
new file mode 100644
index 0000000..f5c257e
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-10-processors/src/test/resources/zookeeper.properties
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# the directory where the snapshot is stored.
+dataDir=target/kafka-tmp/zookeeper
+# the port at which the clients will connect
+#clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/pom.xml
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/pom.xml b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/pom.xml
new file mode 100644
index 0000000..92a6b29
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/pom.xml
@@ -0,0 +1,35 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-kafka-bundle</artifactId>
+        <version>1.0.0-SNAPSHOT</version>
+    </parent>
+    <artifactId>nifi-kafka-0-8-nar</artifactId>
+    <packaging>nar</packaging>
+    <description>NiFi NAR for interacting with Apache Kafka</description>
+    <properties>
+        <maven.javadoc.skip>true</maven.javadoc.skip>
+        <source.skip>true</source.skip>
+    </properties>
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-kafka-0-8-processors</artifactId>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/LICENSE
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/LICENSE b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 0000000..304dd69
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,299 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+  The binary distribution of this product bundles 'Scala Library' under a BSD
+  style license.
+
+    Copyright (c) 2002-2015 EPFL
+    Copyright (c) 2011-2015 Typesafe, Inc.
+    
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without modification,
+       are permitted provided that the following conditions are met:
+
+        Redistributions of source code must retain the above copyright notice, this list of
+               conditions and the following disclaimer.
+
+           Redistributions in binary form must reproduce the above copyright notice, this list of
+               conditions and the following disclaimer in the documentation and/or other materials
+               provided with the distribution.
+
+           Neither the name of the EPFL nor the names of its contributors may be used to endorse
+               or promote products derived from this software without specific prior written permission.
+
+       THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS
+       OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+       AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+       CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+       DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+       DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+       IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+       OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  The binary distribution of this product bundles 'JLine' under a BSD
+  style license.
+
+    Copyright (c) 2002-2006, Marc Prud'hommeaux <m...@cornell.edu>
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or
+    without modification, are permitted provided that the following
+    conditions are met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with
+    the distribution.
+
+    Neither the name of JLine nor the names of its contributors
+    may be used to endorse or promote products derived from this
+    software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+    BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+    AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+    EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+    OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+    AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+    IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+    OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  The binary distribution of this product bundles 'JOpt Simple' under an MIT
+  style license.
+
+    Copyright (c) 2009 Paul R. Holser, Jr.
+
+    Permission is hereby granted, free of charge, to any person obtaining
+    a copy of this software and associated documentation files (the
+    "Software"), to deal in the Software without restriction, including
+    without limitation the rights to use, copy, modify, merge, publish,
+    distribute, sublicense, and/or sell copies of the Software, and to
+    permit persons to whom the Software is furnished to do so, subject to
+    the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/NOTICE
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/NOTICE b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/NOTICE
new file mode 100644
index 0000000..832e68d
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-nar/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,72 @@
+nifi-kafka-nar
+Copyright 2014-2016 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+******************
+Apache Software License v2
+******************
+
+The following binary components are provided under the Apache Software License v2
+
+  (ASLv2) Apache Commons Lang
+    The following NOTICE information applies:
+      Apache Commons Lang
+      Copyright 2001-2014 The Apache Software Foundation
+
+      This product includes software from the Spring Framework,
+      under the Apache License 2.0 (see: StringUtils.containsWhitespace())
+
+  (ASLv2) Apache Kafka
+    The following NOTICE information applies:
+      Apache Kafka
+      Copyright 2012 The Apache Software Foundation.
+
+  (ASLv2) Yammer Metrics
+    The following NOTICE information applies:
+      Metrics
+      Copyright 2010-2012 Coda Hale and Yammer, Inc.
+
+      This product includes software developed by Coda Hale and Yammer, Inc.
+
+      This product includes code derived from the JSR-166 project (ThreadLocalRandom), which was released
+      with the following comments:
+
+          Written by Doug Lea with assistance from members of JCP JSR-166
+          Expert Group and released to the public domain, as explained at
+          http://creativecommons.org/publicdomain/zero/1.0/
+
+  (ASLv2) Snappy Java
+    The following NOTICE information applies:
+      This product includes software developed by Google
+       Snappy: http://code.google.com/p/snappy/ (New BSD License)
+      
+      This product includes software developed by Apache
+       PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
+       (Apache 2.0 license)
+
+      This library contains statically linked libstdc++. This inclusion is allowed by
+      "GCC Runtime Library Exception"
+      http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
+
+  (ASLv2) Apache ZooKeeper
+    The following NOTICE information applies:
+      Apache ZooKeeper
+      Copyright 2009-2012 The Apache Software Foundation
+
+************************
+Common Development and Distribution License 1.1
+************************
+
+The following binary components are provided under the Common Development and Distribution License 1.1. See project link for details.
+
+    (CDDL 1.1) (GPL2 w/ CPE) JavaMail API (compat) (javax.mail:mail:jar:1.4.7 - http://kenai.com/projects/javamail/mail)
+
+************************
+Common Development and Distribution License 1.0
+************************
+
+The following binary components are provided under the Common Development and Distribution License 1.0.  See project link for details.
+
+    (CDDL 1.0) JavaBeans Activation Framework (JAF) (javax.activation:activation:jar:1.1 - http://java.sun.com/products/javabeans/jaf/index.jsp)

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/pom.xml
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/pom.xml b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/pom.xml
new file mode 100644
index 0000000..ea498e6
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/pom.xml
@@ -0,0 +1,79 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-kafka-bundle</artifactId>
+        <version>1.0.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>nifi-kafka-0-8-processors</artifactId>
+    <packaging>jar</packaging>  
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-processor-utils</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-utils</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>0.8.2.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.10</artifactId>
+            <version>0.8.2.2</version>
+            <exclusions>
+                <!-- Transitive dependencies excluded because they are located 
+                in a legacy Maven repository, which Maven 3 doesn't support. -->
+                <exclusion>
+                    <groupId>javax.jms</groupId>
+                    <artifactId>jms</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.sun.jdmk</groupId>
+                    <artifactId>jmxtools</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.sun.jmx</groupId>
+                    <artifactId>jmxri</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-mock</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-simple</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/src/main/java/org/apache/nifi/processors/kafka/AbstractKafkaProcessor.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/src/main/java/org/apache/nifi/processors/kafka/AbstractKafkaProcessor.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/src/main/java/org/apache/nifi/processors/kafka/AbstractKafkaProcessor.java
new file mode 100644
index 0000000..5a470b3
--- /dev/null
+++ b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-0-8-processors/src/main/java/org/apache/nifi/processors/kafka/AbstractKafkaProcessor.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.kafka;
+
+import java.io.Closeable;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.nifi.annotation.lifecycle.OnStopped;
+import org.apache.nifi.processor.AbstractSessionFactoryProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.ProcessSessionFactory;
+import org.apache.nifi.processor.Processor;
+import org.apache.nifi.processor.exception.ProcessException;
+
+/**
+ * Base class for {@link Processor}s to publish and consume messages from Kafka
+ *
+ * @see PutKafka
+ */
+abstract class AbstractKafkaProcessor<T extends Closeable> extends AbstractSessionFactoryProcessor {
+
+
+    private volatile boolean acceptTask = true;
+
+    private final AtomicInteger taskCounter = new AtomicInteger();
+
+
+    /**
+     * @see KafkaPublisher
+     */
+    volatile T kafkaResource;
+
+    /**
+     *
+     */
+    @Override
+    public final void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
+        if (this.acceptTask) { // acts as a circuit breaker to allow existing tasks to wind down so 'kafkaResource' can be reset before new tasks are accepted.
+            this.taskCounter.incrementAndGet();
+            final ProcessSession session = sessionFactory.createSession();
+            try {
+                /*
+                 * We can't be doing double null check here since as a pattern
+                 * it only works for lazy init but not reset, which is what we
+                 * are doing here. In fact the first null check is dangerous
+                 * since 'kafkaResource' can become null right after its null
+                 * check passed causing subsequent NPE.
+                 */
+                synchronized (this) {
+                    if (this.kafkaResource == null) {
+                        this.kafkaResource = this.buildKafkaResource(context, session);
+                    }
+                }
+
+                /*
+                 * The 'processed' boolean flag does not imply any failure or success. It simply states that:
+                 * - ConsumeKafka - some messages were received from Kafka and one or more FlowFiles were generated
+                 * - PublishKafka - some messages were sent to Kafka based on existence of the input FlowFile
+                 */
+                boolean processed = this.rendezvousWithKafka(context, session);
+                session.commit();
+                if (processed) {
+                    this.postCommit(context);
+                } else {
+                    context.yield();
+                }
+            } catch (Throwable e) {
+                this.acceptTask = false;
+                session.rollback(true);
+                this.getLogger().error("{} failed to process due to {}; rolling back session", new Object[] { this, e });
+            } finally {
+                synchronized (this) {
+                    if (this.taskCounter.decrementAndGet() == 0 && !this.acceptTask) {
+                        this.close();
+                        this.acceptTask = true;
+                    }
+                }
+            }
+        } else {
+            context.yield();
+        }
+    }
+
+    /**
+     * Will call {@link Closeable#close()} on the target resource after which
+     * the target resource will be set to null. Should only be called when there
+     * are no more threads being executed on this processor or when it has been
+     * verified that only a single thread remains.
+     *
+     * @see KafkaPublisher
+     */
+    @OnStopped
+    public void close() {
+        if (this.taskCounter.get() == 0) {
+            try {
+                if (this.kafkaResource != null) {
+                    try {
+                        this.kafkaResource.close();
+                    } catch (Exception e) {
+                        this.getLogger().warn("Failed while closing " + this.kafkaResource, e);
+                    }
+                }
+            } finally {
+                this.kafkaResource = null;
+            }
+        }
+    }
+
+    /**
+     * This operation will be executed after {@link ProcessSession#commit()} has
+     * been called.
+     */
+    protected void postCommit(ProcessContext context) {
+
+    }
+
+    /**
+     * This operation is called from
+     * {@link #onTrigger(ProcessContext, ProcessSessionFactory)} method and
+     * contains main processing logic for this Processor.
+     */
+    protected abstract boolean rendezvousWithKafka(ProcessContext context, ProcessSession session);
+
+    /**
+     * Builds target resource for interacting with Kafka. The target resource
+     * could be one of {@link KafkaPublisher} or {@link KafkaConsumer}
+     */
+    protected abstract T buildKafkaResource(ProcessContext context, ProcessSession session) throws ProcessException;
+}
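
A minimal, hypothetical sketch of a concrete subclass may help illustrate the contract above: buildKafkaResource creates the reusable Closeable resource under the base class's synchronized guard, and rendezvousWithKafka reports whether any work was done so onTrigger can choose between postCommit and yielding. The sketch is not part of the commit; it substitutes the plain kafka-clients Producer for the bundle's KafkaPublisher, and the topic name "demo", the success relationship, and the raw "bootstrap.servers" property name are invented for illustration:

    package org.apache.nifi.processors.kafka;

    import java.io.ByteArrayOutputStream;
    import java.util.Collections;
    import java.util.Properties;
    import java.util.Set;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.ByteArraySerializer;
    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessContext;
    import org.apache.nifi.processor.ProcessSession;
    import org.apache.nifi.processor.Relationship;
    import org.apache.nifi.processor.exception.ProcessException;

    // Hypothetical example, not part of the commit.
    class DemoPublishProcessor extends AbstractKafkaProcessor<Producer<byte[], byte[]>> {

        static final Relationship REL_SUCCESS = new Relationship.Builder().name("success").build();

        @Override
        public Set<Relationship> getRelationships() {
            return Collections.singleton(REL_SUCCESS);
        }

        @Override
        protected Producer<byte[], byte[]> buildKafkaResource(ProcessContext context, ProcessSession session) throws ProcessException {
            // Built once under the base class's synchronized block and reused until close().
            Properties props = new Properties();
            props.put("bootstrap.servers", context.getProperty("bootstrap.servers").getValue());
            props.put("key.serializer", ByteArraySerializer.class.getName());
            props.put("value.serializer", ByteArraySerializer.class.getName());
            return new KafkaProducer<>(props);
        }

        @Override
        protected boolean rendezvousWithKafka(ProcessContext context, ProcessSession session) {
            FlowFile flowFile = session.get();
            if (flowFile == null) {
                return false; // nothing processed; the base class will yield the context
            }
            ByteArrayOutputStream content = new ByteArrayOutputStream();
            session.exportTo(flowFile, content);
            this.kafkaResource.send(new ProducerRecord<byte[], byte[]>("demo", content.toByteArray()));
            session.transfer(flowFile, REL_SUCCESS);
            return true;
        }
    }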
