http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/GetKafkaIntegrationTests.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/GetKafkaIntegrationTests.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/GetKafkaIntegrationTests.java
deleted file mode 100644
index effc8e7..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/GetKafkaIntegrationTests.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-
-import org.apache.nifi.processors.kafka.test.EmbeddedKafka;
-import org.apache.nifi.processors.kafka.test.EmbeddedKafkaProducerHelper;
-import org.apache.nifi.util.MockFlowFile;
-import org.apache.nifi.util.TestRunner;
-import org.apache.nifi.util.TestRunners;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-@Ignore
-// The test is valid and should be run when working on this module. @Ignore is
-// to speed up the overall build
-public class GetKafkaIntegrationTests {
-
-    private static EmbeddedKafka kafkaLocal;
-
-    private static EmbeddedKafkaProducerHelper producerHelper;
-
-    @BeforeClass
-    public static void beforeClass() {
-        kafkaLocal = new EmbeddedKafka();
-        kafkaLocal.start();
-        producerHelper = new EmbeddedKafkaProducerHelper(kafkaLocal);
-    }
-
-    @AfterClass
-    public static void afterClass() throws Exception {
-        producerHelper.close();
-        kafkaLocal.stop();
-    }
-
-    /**
-     * Will set auto-offset to 'smallest' to ensure that all events (the ones
-     * that were sent before and after consumer startup) are received.
-     */
-    @Test
-    public void testGetAllMessages() throws Exception {
-        String topicName = "testGetAllMessages";
-
-        GetKafka getKafka = new GetKafka();
-        final TestRunner runner = TestRunners.newTestRunner(getKafka);
-        runner.setProperty(GetKafka.ZOOKEEPER_CONNECTION_STRING, "localhost:" + kafkaLocal.getZookeeperPort());
-        runner.setProperty(GetKafka.TOPIC, topicName);
-        runner.setProperty(GetKafka.BATCH_SIZE, "5");
-        runner.setProperty(GetKafka.AUTO_OFFSET_RESET, GetKafka.SMALLEST);
-        runner.setProperty("consumer.timeout.ms", "300");
-
-        producerHelper.sendEvent(topicName, "Hello-1");
-        producerHelper.sendEvent(topicName, "Hello-2");
-        producerHelper.sendEvent(topicName, "Hello-3");
-
-        final CountDownLatch latch = new CountDownLatch(1);
-
-        new Thread(new Runnable() {
-            @Override
-            public void run() {
-                try {
-                    runner.run(20, false);
-                } finally {
-                    latch.countDown();
-                }
-            }
-        }).start();
-
-        // Thread.sleep(1000);
-
-        producerHelper.sendEvent(topicName, "Hello-4");
-        producerHelper.sendEvent(topicName, "Hello-5");
-        producerHelper.sendEvent(topicName, "Hello-6");
-
-        latch.await();
-
-        final List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS);
-        // must be two since we sent 6 messages with batch of 5
-        assertEquals(2, flowFiles.size());
-        MockFlowFile flowFile = flowFiles.get(0);
-        String[] events = new String(flowFile.toByteArray()).split("\\s+");
-        assertEquals(5, events.length);
-        // spot check
-        assertEquals("Hello-1", events[0]);
-        assertEquals("Hello-4", events[3]);
-
-        flowFile = flowFiles.get(1);
-        events = new String(flowFile.toByteArray()).split("\\s+");
-        assertEquals(1, events.length);
-
-        getKafka.shutdownConsumer();
-    }
-
-    /**
-     * With auto-offset set to 'largest' (the default), events sent before
-     * consumer startup should not be consumed.
-     */
-    @Test
-    public void testGetOnlyMessagesAfterConsumerStartup() throws Exception {
-        String topicName = "testGetOnlyMessagesAfterConsumerStartup";
-
-        GetKafka getKafka = new GetKafka();
-        final TestRunner runner = TestRunners.newTestRunner(getKafka);
-        runner.setProperty(GetKafka.ZOOKEEPER_CONNECTION_STRING, "localhost:" + kafkaLocal.getZookeeperPort());
-        runner.setProperty(GetKafka.TOPIC, topicName);
-        runner.setProperty(GetKafka.BATCH_SIZE, "5");
-        runner.setProperty("consumer.timeout.ms", "300");
-
-        producerHelper.sendEvent(topicName, "Hello-1");
-        producerHelper.sendEvent(topicName, "Hello-2");
-        producerHelper.sendEvent(topicName, "Hello-3");
-
-        final CountDownLatch latch = new CountDownLatch(1);
-
-        new Thread(new Runnable() {
-            @Override
-            public void run() {
-                try {
-                    runner.run(20, false);
-                } finally {
-                    latch.countDown();
-                }
-            }
-        }).start();
-
-        latch.await();
-        List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS);
-        assertEquals(0, flowFiles.size());
-
-        producerHelper.sendEvent(topicName, "Hello-4");
-        producerHelper.sendEvent(topicName, "Hello-5");
-        producerHelper.sendEvent(topicName, "Hello-6");
-
-        latch.await();
-
-        runner.run(5, false);
-
-        flowFiles = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS);
-
-        // must be single since we should only be receiving 4,5 and 6 in batch
-        // of 5
-        assertEquals(1, flowFiles.size());
-        MockFlowFile flowFile = flowFiles.get(0);
-        String[] events = new String(flowFile.toByteArray()).split("\\s+");
-        assertEquals(3, events.length);
-
-        assertEquals("Hello-4", events[0]);
-        assertEquals("Hello-5", events[1]);
-        assertEquals("Hello-6", events[2]);
-
-        getKafka.shutdownConsumer();
-    }
-}
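
For context on the two tests above: the 'smallest'/'largest' behavior they assert comes from the old high-level consumer's auto.offset.reset setting. A minimal sketch of a verification consumer, configured the way the buildConsumer() helpers later in this commit do it (the ZooKeeper port, group id, and timeout values here are illustrative):

    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181"); // embedded ZooKeeper address
    props.put("group.id", "test");                    // any consumer group id works for a test
    props.put("consumer.timeout.ms", "5000");         // iter.next() throws ConsumerTimeoutException instead of blocking forever
    props.put("auto.offset.reset", "smallest");       // "smallest" replays existing messages; "largest" yields only new ones
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);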

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/KafkaPublisherTest.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/KafkaPublisherTest.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/KafkaPublisherTest.java
deleted file mode 100644
index 5bb7c3c..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/KafkaPublisherTest.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import java.io.ByteArrayInputStream;
-import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.kafka.common.serialization.ByteArraySerializer;
-import org.apache.nifi.logging.ComponentLog;
-import org.apache.nifi.processors.kafka.KafkaPublisher.KafkaPublisherResult;
-import org.apache.nifi.processors.kafka.test.EmbeddedKafka;
-import org.apache.nifi.processors.kafka.test.EmbeddedKafkaProducerHelper;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import kafka.consumer.Consumer;
-import kafka.consumer.ConsumerConfig;
-import kafka.consumer.ConsumerIterator;
-import kafka.consumer.ConsumerTimeoutException;
-import kafka.consumer.KafkaStream;
-import kafka.javaapi.consumer.ConsumerConnector;
-
-// The test is valid and should be run when working on this module. @Ignore is
-// to speed up the overall build
-public class KafkaPublisherTest {
-
-    private static EmbeddedKafka kafkaLocal;
-
-    private static EmbeddedKafkaProducerHelper producerHelper;
-
-    @BeforeClass
-    public static void beforeClass() {
-        kafkaLocal = new EmbeddedKafka();
-        kafkaLocal.start();
-        producerHelper = new EmbeddedKafkaProducerHelper(kafkaLocal);
-    }
-
-    @AfterClass
-    public static void afterClass() throws Exception {
-        producerHelper.close();
-        kafkaLocal.stop();
-    }
-
-    @Test
-    public void validateSuccessfulSendAsWhole() throws Exception {
-        InputStream contentStream = new ByteArrayInputStream("Hello Kafka".getBytes(StandardCharsets.UTF_8));
-        String topicName = "validateSuccessfulSendAsWhole";
-
-        Properties kafkaProperties = this.buildProducerProperties();
-        KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));
-
-        PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
-        KafkaPublisherResult result = publisher.publish(publishingContext);
-
-        assertEquals(0, result.getLastMessageAcked());
-        assertEquals(1, result.getMessagesSent());
-        contentStream.close();
-        publisher.close();
-
-        ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
-        assertNotNull(iter.next());
-        try {
-            iter.next();
-        } catch (ConsumerTimeoutException e) {
-            // that's OK since this is the Kafka mechanism to unblock
-        }
-    }
-
-    @Test
-    public void validateSuccessfulSendAsDelimited() throws Exception {
-        InputStream contentStream = new ByteArrayInputStream(
-                "Hello Kafka\nHello Kafka\nHello Kafka\nHello Kafka\n".getBytes(StandardCharsets.UTF_8));
-        String topicName = "validateSuccessfulSendAsDelimited";
-
-        Properties kafkaProperties = this.buildProducerProperties();
-        KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));
-
-        PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
-        publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
-        KafkaPublisherResult result = publisher.publish(publishingContext);
-
-        assertEquals(3, result.getLastMessageAcked());
-        assertEquals(4, result.getMessagesSent());
-        contentStream.close();
-        publisher.close();
-
-        ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
-        assertNotNull(iter.next());
-        assertNotNull(iter.next());
-        assertNotNull(iter.next());
-        assertNotNull(iter.next());
-        try {
-            iter.next();
-            fail();
-        } catch (ConsumerTimeoutException e) {
-            // that's OK since this is the Kafka mechanism to unblock
-        }
-    }
-
-    /*
-     * This test simulates the condition where not all messages were ACKed by
-     * Kafka
-     */
-    @Test
-    public void validateRetries() throws Exception {
-        byte[] testValue = "Hello Kafka1\nHello Kafka2\nHello Kafka3\nHello Kafka4\n".getBytes(StandardCharsets.UTF_8);
-        InputStream contentStream = new ByteArrayInputStream(testValue);
-        String topicName = "validateSuccessfulReSendOfFailedSegments";
-
-        Properties kafkaProperties = this.buildProducerProperties();
-
-        KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));
-
-        // simulates the first re-try
-        int lastAckedMessageIndex = 1;
-        PublishingContext publishingContext = new PublishingContext(contentStream, topicName, lastAckedMessageIndex);
-        publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
-
-        publisher.publish(publishingContext);
-
-        ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
-        String m1 = new String(iter.next().message());
-        String m2 = new String(iter.next().message());
-        assertEquals("Hello Kafka3", m1);
-        assertEquals("Hello Kafka4", m2);
-        try {
-            iter.next();
-            fail();
-        } catch (ConsumerTimeoutException e) {
-            // that's OK since this is the Kafka mechanism to unblock
-        }
-
-        // simulates the second re-try
-        lastAckedMessageIndex = 2;
-        contentStream = new ByteArrayInputStream(testValue);
-        publishingContext = new PublishingContext(contentStream, topicName, lastAckedMessageIndex);
-        publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
-        publisher.publish(publishingContext);
-
-        m1 = new String(iter.next().message());
-        assertEquals("Hello Kafka4", m1);
-
-        publisher.close();
-    }
-
-    /*
-     * Similar to the above test, but it sets the first retry index to the last
-     * possible message index and second index to an out of bound index. The
-     * expectation is that no messages will be sent to Kafka
-     */
-    @Test
-    public void validateRetriesWithWrongIndex() throws Exception {
-        byte[] testValue = "Hello Kafka1\nHello Kafka2\nHello Kafka3\nHello Kafka4\n".getBytes(StandardCharsets.UTF_8);
-        InputStream contentStream = new ByteArrayInputStream(testValue);
-        String topicName = "validateRetriesWithWrongIndex";
-
-        Properties kafkaProperties = this.buildProducerProperties();
-
-        KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));
-
-        // simulates the first re-try
-        int lastAckedMessageIndex = 3;
-        PublishingContext publishingContext = new PublishingContext(contentStream, topicName, lastAckedMessageIndex);
-        publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
-
-        publisher.publish(publishingContext);
-
-        ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
-
-        try {
-            iter.next();
-            fail();
-        } catch (ConsumerTimeoutException e) {
-            // that's OK since this is the Kafka mechanism to unblock
-        }
-
-        // simulates the second re-try
-        lastAckedMessageIndex = 6;
-        contentStream = new ByteArrayInputStream(testValue);
-        publishingContext = new PublishingContext(contentStream, topicName, lastAckedMessageIndex);
-        publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
-        publisher.publish(publishingContext);
-        try {
-            iter.next();
-            fail();
-        } catch (ConsumerTimeoutException e) {
-            // that's OK since this is the Kafka mechanism to unblock
-        }
-
-        publisher.close();
-    }
-
-    @Test
-    public void validateWithMultiByteCharactersNoDelimiter() throws Exception {
-        String data = "僠THIS IS MY NEW TEXT.僠IT HAS A NEWLINE.";
-        InputStream contentStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
-        String topicName = "validateWithMultiByteCharacters";
-
-        Properties kafkaProperties = this.buildProducerProperties();
-
-        KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));
-        PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
-
-        publisher.publish(publishingContext);
-        publisher.close();
-
-        ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
-        String r = new String(iter.next().message(), StandardCharsets.UTF_8);
-        assertEquals(data, r);
-    }
-
-    private Properties buildProducerProperties() {
-        Properties kafkaProperties = new Properties();
-        kafkaProperties.put("key.serializer", 
ByteArraySerializer.class.getName());
-        kafkaProperties.put("value.serializer", 
ByteArraySerializer.class.getName());
-        kafkaProperties.setProperty("bootstrap.servers", "localhost:" + 
kafkaLocal.getKafkaPort());
-        kafkaProperties.put("auto.create.topics.enable", "true");
-        return kafkaProperties;
-    }
-
-    private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
-        Properties props = new Properties();
-        props.put("zookeeper.connect", "localhost:" + 
kafkaLocal.getZookeeperPort());
-        props.put("group.id", "test");
-        props.put("consumer.timeout.ms", "5000");
-        props.put("auto.offset.reset", "smallest");
-        ConsumerConfig consumerConfig = new ConsumerConfig(props);
-        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
-        Map<String, Integer> topicCountMap = new HashMap<>(1);
-        topicCountMap.put(topic, 1);
-        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
-        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
-        ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
-        return iter;
-    }
-}
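
The retry tests above pin down the PublishingContext contract: on a re-send, every delimited message whose index is at or below lastAckedMessageIndex is skipped, and an index at or beyond the last message sends nothing. A hedged, in-memory sketch of that skip logic, inferred from the assertions in validateRetries() and validateRetriesWithWrongIndex() (messagesToResend is a hypothetical helper, not the actual KafkaPublisher code):

    // Hypothetical helper illustrating the skip semantics the tests assert.
    static List<byte[]> messagesToResend(byte[] content, byte[] delimiter, int lastAckedMessageIndex) {
        List<byte[]> all = new ArrayList<>();
        int start = 0;
        for (int i = 0; i + delimiter.length <= content.length; i++) {
            if (Arrays.equals(Arrays.copyOfRange(content, i, i + delimiter.length), delimiter)) {
                all.add(Arrays.copyOfRange(content, start, i)); // a message ends where the delimiter begins
                start = i + delimiter.length;
                i = start - 1; // resume scanning right after the delimiter
            }
        }
        if (start < content.length) {
            all.add(Arrays.copyOfRange(content, start, content.length)); // trailing message with no delimiter
        }
        // messages 0..lastAckedMessageIndex were already acknowledged; only the rest are re-sent
        int from = Math.min(lastAckedMessageIndex + 1, all.size());
        return all.subList(from, all.size());
    }

With the "Hello Kafka1\n...Hello Kafka4\n" payload, lastAckedMessageIndex = 1 yields [Hello Kafka3, Hello Kafka4], 2 yields [Hello Kafka4], and 3 or more yields nothing, matching the consumer-side assertions.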

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/PutKafkaTest.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/PutKafkaTest.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/PutKafkaTest.java
deleted file mode 100644
index 8437b00..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/PutKafkaTest.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.nifi.processors.kafka.test.EmbeddedKafka;
-import org.apache.nifi.processors.kafka.test.EmbeddedKafkaProducerHelper;
-import org.apache.nifi.util.TestRunner;
-import org.apache.nifi.util.TestRunners;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import kafka.consumer.Consumer;
-import kafka.consumer.ConsumerConfig;
-import kafka.consumer.ConsumerIterator;
-import kafka.consumer.KafkaStream;
-import kafka.javaapi.consumer.ConsumerConnector;
-
-// The test is valid and should be run when working on this module. @Ignore is
-// to speed up the overall build
-public class PutKafkaTest {
-
-    private static EmbeddedKafka kafkaLocal;
-
-    private static EmbeddedKafkaProducerHelper producerHelper;
-
-    @BeforeClass
-    public static void beforeClass() {
-        kafkaLocal = new EmbeddedKafka();
-        kafkaLocal.start();
-        producerHelper = new EmbeddedKafkaProducerHelper(kafkaLocal);
-    }
-
-    @AfterClass
-    public static void afterClass() throws Exception {
-        producerHelper.close();
-        kafkaLocal.stop();
-    }
-
-    @Test
-    public void validateSingleCharacterDemarcatedMessages() {
-        String topicName = "validateSingleCharacterDemarcatedMessages";
-        PutKafka putKafka = new PutKafka();
-        TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.KEY, "key1");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "\n");
-
-        runner.enqueue("Hello 
World\nGoodbye\n1\n2\n3\n4\n5".getBytes(StandardCharsets.UTF_8));
-        runner.run(1, false);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-        assertEquals("Hello World", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("Goodbye", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("1", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("2", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("3", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("4", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("5", new String(consumer.next().message(), StandardCharsets.UTF_8));
-
-        runner.shutdown();
-    }
-
-    @Test
-    public void validateMultiCharacterDelimitedMessages() {
-        String topicName = "validateMultiCharacterDemarcatedMessagesAndCustomPartitioner";
-        PutKafka putKafka = new PutKafka();
-        TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.KEY, "key1");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "foo");
-
-        runner.enqueue("Hello 
WorldfooGoodbyefoo1foo2foo3foo4foo5".getBytes(StandardCharsets.UTF_8));
-        runner.run(1, false);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-        assertEquals("Hello World", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("Goodbye", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("1", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("2", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("3", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("4", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("5", new String(consumer.next().message(), StandardCharsets.UTF_8));
-
-        runner.shutdown();
-    }
-
-    @Test
-    public void validateDemarcationIntoEmptyMessages() {
-        String topicName = "validateDemarcationIntoEmptyMessages";
-        PutKafka putKafka = new PutKafka();
-        final TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.KEY, "key1");
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "\n");
-
-        final byte[] bytes = "\n\n\n1\n2\n\n\n3\n4\n\n\n".getBytes(StandardCharsets.UTF_8);
-        runner.enqueue(bytes);
-        runner.run(1);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-
-        assertNotNull(consumer.next());
-        assertNotNull(consumer.next());
-        assertNotNull(consumer.next());
-        assertNotNull(consumer.next());
-        try {
-            consumer.next();
-            fail();
-        } catch (Exception e) {
-            // ignore
-        }
-    }
-
-    @Test
-    public void validateComplexRightPartialDemarcatedMessages() {
-        String topicName = "validateComplexRightPartialDemarcatedMessages";
-        PutKafka putKafka = new PutKafka();
-        TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "僠<僠WILDSTUFF僠>僠");
-
-        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠
WILDSTUFF僠>僠I Mean IT!僠<僠WILDSTUFF僠
>".getBytes(StandardCharsets.UTF_8));
-        runner.run(1, false);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-        assertEquals("Hello World", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("Goodbye", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("I Mean IT!僠<僠WILDSTUFF僠>", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        runner.shutdown();
-    }
-
-    @Test
-    public void validateComplexLeftPartialDemarcatedMessages() {
-        String topicName = "validateComplexLeftPartialDemarcatedMessages";
-        PutKafka putKafka = new PutKafka();
-        TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "僠<僠WILDSTUFF僠>僠");
-
-        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠
WILDSTUFF僠>僠I Mean IT!僠<僠WILDSTUFF僠>僠<僠WILDSTUFF僠>僠
".getBytes(StandardCharsets.UTF_8));
-        runner.run(1, false);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-        byte[] message = consumer.next().message();
-        assertEquals("Hello World", new String(message, StandardCharsets.UTF_8));
-        assertEquals("Goodbye", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("I Mean IT!", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("<僠WILDSTUFF僠>僠", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        runner.shutdown();
-    }
-
-    @Test
-    public void validateComplexPartialMatchDemarcatedMessages() {
-        String topicName = "validateComplexPartialMatchDemarcatedMessages";
-        PutKafka putKafka = new PutKafka();
-        TestRunner runner = TestRunners.newTestRunner(putKafka);
-        runner.setProperty(PutKafka.TOPIC, topicName);
-        runner.setProperty(PutKafka.CLIENT_NAME, "foo");
-        runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
-        runner.setProperty(PutKafka.MESSAGE_DELIMITER, "僠<僠WILDSTUFF僠>僠");
-
-        runner.enqueue("Hello World僠<僠WILDSTUFF僠>僠Goodbye僠<僠
WILDBOOMSTUFF僠>僠".getBytes(StandardCharsets.UTF_8));
-        runner.run(1, false);
-
-        runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);
-        ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);
-        assertEquals("Hello World", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        assertEquals("Goodbye僠<僠WILDBOOMSTUFF僠>僠", new String(consumer.next().message(), StandardCharsets.UTF_8));
-        runner.shutdown();
-    }
-
-    private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
-        Properties props = new Properties();
-        props.put("zookeeper.connect", "0.0.0.0:" + 
kafkaLocal.getZookeeperPort());
-        props.put("group.id", "test");
-        props.put("consumer.timeout.ms", "5000");
-        props.put("auto.offset.reset", "smallest");
-        ConsumerConfig consumerConfig = new ConsumerConfig(props);
-        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
-        Map<String, Integer> topicCountMap = new HashMap<>(1);
-        topicCountMap.put(topic, 1);
-        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
-        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
-        ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
-        return iter;
-    }
-}
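
The four demarcation tests above describe one rule: PutKafka splits the FlowFile content on exact occurrences of the delimiter, drops empty segments, and leaves any partial or non-matching delimiter text attached to the surrounding message. A hedged in-memory equivalent of that rule (the real processor demarcates the stream incrementally rather than splitting a String; demarcate() is a hypothetical helper):

    // Naive form of the demarcation the assertions above describe.
    static List<String> demarcate(String content, String delimiter) {
        List<String> messages = new ArrayList<>();
        for (String token : content.split(Pattern.quote(delimiter), -1)) { // -1 keeps trailing tokens
            if (!token.isEmpty()) { // validateDemarcationIntoEmptyMessages: empty segments are not sent
                messages.add(token);
            }
        }
        return messages;
    }

Run against the "right partial" input, this yields [Hello World, Goodbye, I Mean IT!僠<僠WILDSTUFF僠>], i.e. the trailing partial delimiter stays attached to the last message, as the test asserts.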

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/TestGetKafka.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/TestGetKafka.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/TestGetKafka.java
deleted file mode 100644
index dfcf0d9..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/TestGetKafka.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka;
-
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.log4j.BasicConfigurator;
-import org.apache.nifi.processor.ProcessContext;
-import org.apache.nifi.util.MockFlowFile;
-import org.apache.nifi.util.TestRunner;
-import org.apache.nifi.util.TestRunners;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import kafka.consumer.ConsumerIterator;
-import kafka.message.MessageAndMetadata;
-
-public class TestGetKafka {
-
-    @BeforeClass
-    public static void configureLogging() {
-        System.setProperty("org.slf4j.simpleLogger.log.kafka", "INFO");
-        System.setProperty("org.slf4j.simpleLogger.log.org.apache.nifi.processors.kafka", "INFO");
-        BasicConfigurator.configure();
-    }
-
-    @Test
-    @Ignore("Intended only for local tests to verify functionality.")
-    public void testIntegrationLocally() {
-        final TestRunner runner = TestRunners.newTestRunner(GetKafka.class);
-        runner.setProperty(GetKafka.ZOOKEEPER_CONNECTION_STRING, "192.168.0.101:2181");
-        runner.setProperty(GetKafka.TOPIC, "testX");
-        runner.setProperty(GetKafka.KAFKA_TIMEOUT, "3 secs");
-        runner.setProperty(GetKafka.ZOOKEEPER_TIMEOUT, "3 secs");
-
-        runner.run(20, false);
-
-        final List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS);
-        for (final MockFlowFile flowFile : flowFiles) {
-            System.out.println(flowFile.getAttributes());
-            System.out.println(new String(flowFile.toByteArray()));
-            System.out.println();
-        }
-    }
-
-    @Test
-    public void testWithDelimiter() {
-        final List<String> messages = new ArrayList<>();
-        messages.add("Hello");
-        messages.add("Good-bye");
-
-        final TestableProcessor proc = new TestableProcessor(null, messages);
-        final TestRunner runner = TestRunners.newTestRunner(proc);
-        runner.setProperty(GetKafka.ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
-        runner.setProperty(GetKafka.TOPIC, "testX");
-        runner.setProperty(GetKafka.KAFKA_TIMEOUT, "3 secs");
-        runner.setProperty(GetKafka.ZOOKEEPER_TIMEOUT, "3 secs");
-        runner.setProperty(GetKafka.MESSAGE_DEMARCATOR, "\\n");
-        runner.setProperty(GetKafka.BATCH_SIZE, "2");
-
-        runner.run();
-
-        runner.assertAllFlowFilesTransferred(GetKafka.REL_SUCCESS, 1);
-        final MockFlowFile mff = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS).get(0);
-        mff.assertContentEquals("Hello\nGood-bye");
-    }
-
-    @Test
-    public void testWithDelimiterAndNotEnoughMessages() {
-        final List<String> messages = new ArrayList<>();
-        messages.add("Hello");
-        messages.add("Good-bye");
-
-        final TestableProcessor proc = new TestableProcessor(null, messages);
-        final TestRunner runner = TestRunners.newTestRunner(proc);
-        runner.setProperty(GetKafka.ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
-        runner.setProperty(GetKafka.TOPIC, "testX");
-        runner.setProperty(GetKafka.KAFKA_TIMEOUT, "3 secs");
-        runner.setProperty(GetKafka.ZOOKEEPER_TIMEOUT, "3 secs");
-        runner.setProperty(GetKafka.MESSAGE_DEMARCATOR, "\\n");
-        runner.setProperty(GetKafka.BATCH_SIZE, "3");
-
-        runner.run();
-
-        runner.assertAllFlowFilesTransferred(GetKafka.REL_SUCCESS, 1);
-        final MockFlowFile mff = runner.getFlowFilesForRelationship(GetKafka.REL_SUCCESS).get(0);
-        mff.assertContentEquals("Hello\nGood-bye");
-    }
-
-    private static class TestableProcessor extends GetKafka {
-
-        private final byte[] key;
-        private final Iterator<String> messageItr;
-
-        public TestableProcessor(final byte[] key, final List<String> messages) {
-            this.key = key;
-            messageItr = messages.iterator();
-        }
-
-        @Override
-        public void createConsumers(ProcessContext context) {
-            try {
-                Field f = GetKafka.class.getDeclaredField("consumerStreamsReady");
-                f.setAccessible(true);
-                ((AtomicBoolean) f.get(this)).set(true);
-            } catch (Exception e) {
-                throw new IllegalStateException(e);
-            }
-        }
-
-        @Override
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        protected ConsumerIterator<byte[], byte[]> getStreamIterator() {
-            final ConsumerIterator<byte[], byte[]> itr = Mockito.mock(ConsumerIterator.class);
-
-            Mockito.doAnswer(new Answer<Boolean>() {
-                @Override
-                public Boolean answer(final InvocationOnMock invocation) throws Throwable {
-                    return messageItr.hasNext();
-                }
-            }).when(itr).hasNext();
-
-            Mockito.doAnswer(new Answer<MessageAndMetadata>() {
-                @Override
-                public MessageAndMetadata answer(InvocationOnMock invocation) throws Throwable {
-                    final MessageAndMetadata mam = Mockito.mock(MessageAndMetadata.class);
-                    Mockito.when(mam.key()).thenReturn(key);
-                    Mockito.when(mam.offset()).thenReturn(0L);
-                    Mockito.when(mam.partition()).thenReturn(0);
-
-                    Mockito.doAnswer(new Answer<byte[]>() {
-                        @Override
-                        public byte[] answer(InvocationOnMock invocation) throws Throwable {
-                            return messageItr.next().getBytes();
-                        }
-
-                    }).when(mam).message();
-
-                    return mam;
-                }
-            }).when(itr).next();
-
-            return itr;
-        }
-    }
-
-}
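
Since Mockito's Answer is a functional interface, the anonymous classes in getStreamIterator() can be written more compactly with Java 8 lambdas; a hedged, behavior-equivalent sketch (same Mockito calls as the original):

    @SuppressWarnings({"unchecked", "rawtypes"})
    ConsumerIterator<byte[], byte[]> itr = Mockito.mock(ConsumerIterator.class);
    Mockito.doAnswer(invocation -> messageItr.hasNext()).when(itr).hasNext();
    Mockito.doAnswer(invocation -> {
        MessageAndMetadata mam = Mockito.mock(MessageAndMetadata.class);
        Mockito.when(mam.key()).thenReturn(key);
        Mockito.when(mam.offset()).thenReturn(0L);
        Mockito.when(mam.partition()).thenReturn(0);
        Mockito.doAnswer(inv -> messageItr.next().getBytes()).when(mam).message();
        return mam;
    }).when(itr).next();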

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
deleted file mode 100644
index 802f889..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafka.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka.test;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.ServerSocket;
-import java.util.Properties;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.zookeeper.server.ServerCnxnFactory;
-import org.apache.zookeeper.server.ServerConfig;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import kafka.server.KafkaConfig;
-import kafka.server.KafkaServerStartable;
-
-/**
- * Embedded Kafka server, primarily to be used for testing.
- */
-public class EmbeddedKafka {
-
-    private final KafkaServerStartable kafkaServer;
-
-    private final Properties zookeeperConfig;
-
-    private final Properties kafkaConfig;
-
-    private final ZooKeeperServer zkServer;
-
-    private final Logger logger = LoggerFactory.getLogger(EmbeddedKafka.class);
-
-    private final int kafkaPort;
-
-    private final int zookeeperPort;
-
-    private boolean started;
-
-    /**
-     * Will create instance of the embedded Kafka server. Kafka and Zookeeper
-     * configuration properties will be loaded from 'server.properties' and
-     * 'zookeeper.properties' located at the root of the classpath.
-     */
-    public EmbeddedKafka() {
-        this(loadPropertiesFromClasspath("/server.properties"), loadPropertiesFromClasspath("/zookeeper.properties"));
-    }
-
-    /**
-     * Will create instance of the embedded Kafka server.
-     *
-     * @param kafkaConfig
-     *            Kafka configuration properties
-     * @param zookeeperConfig
-     *            Zookeeper configuration properties
-     */
-    public EmbeddedKafka(Properties kafkaConfig, Properties zookeeperConfig) {
-        this.cleanupKafkaWorkDir();
-        this.zookeeperConfig = zookeeperConfig;
-        this.kafkaConfig = kafkaConfig;
-        this.kafkaPort = this.availablePort();
-        this.zookeeperPort = this.availablePort();
-
-        this.kafkaConfig.setProperty("port", String.valueOf(this.kafkaPort));
-        this.kafkaConfig.setProperty("zookeeper.connect", "localhost:" + 
this.zookeeperPort);
-        this.zookeeperConfig.setProperty("clientPort", 
String.valueOf(this.zookeeperPort));
-        this.zkServer = new ZooKeeperServer();
-        this.kafkaServer = new KafkaServerStartable(new KafkaConfig(kafkaConfig));
-    }
-
-    /**
-     *
-     * @return port for Kafka server
-     */
-    public int getKafkaPort() {
-        if (!this.started) {
-            throw new IllegalStateException("Kafka server is not started. 
Kafka port can't be determined.");
-        }
-        return this.kafkaPort;
-    }
-
-    /**
-     *
-     * @return port for Zookeeper server
-     */
-    public int getZookeeperPort() {
-        if (!this.started) {
-            throw new IllegalStateException("Kafka server is not started. 
Zookeeper port can't be determined.");
-        }
-        return this.zookeeperPort;
-    }
-
-    /**
-     * Will start embedded Kafka server. Its data directories will be created
-     * at 'kafka-tmp' directory relative to the working directory of the current
-     * runtime. The data directories will be deleted upon JVM exit.
-     *
-     */
-    public void start() {
-        if (!this.started) {
-            logger.info("Starting Zookeeper server");
-            this.startZookeeper();
-
-            logger.info("Starting Kafka server");
-            this.kafkaServer.startup();
-
-            logger.info("Embeded Kafka is started at localhost:" + 
this.kafkaServer.serverConfig().port()
-                    + ". Zookeeper connection string: " + 
this.kafkaConfig.getProperty("zookeeper.connect"));
-            this.started = true;
-        }
-    }
-
-    /**
-     * Will stop embedded Kafka server, cleaning up all working directories.
-     */
-    public void stop() {
-        if (this.started) {
-            logger.info("Shutting down Kafka server");
-            this.kafkaServer.shutdown();
-            this.kafkaServer.awaitShutdown();
-            logger.info("Shutting down Zookeeper server");
-            this.shutdownZookeeper();
-            logger.info("Embeded Kafka is shut down.");
-            this.cleanupKafkaWorkDir();
-            this.started = false;
-        }
-    }
-
-    /**
-     *
-     */
-    private void cleanupKafkaWorkDir() {
-        File kafkaTmp = new File("target/kafka-tmp");
-        try {
-            FileUtils.deleteDirectory(kafkaTmp);
-        } catch (Exception e) {
-            logger.warn("Failed to delete " + kafkaTmp.getAbsolutePath());
-        }
-    }
-
-    /**
-     * Will start Zookeeper server via {@link ServerCnxnFactory}
-     */
-    private void startZookeeper() {
-        QuorumPeerConfig quorumConfiguration = new QuorumPeerConfig();
-        try {
-            quorumConfiguration.parseProperties(this.zookeeperConfig);
-
-            ServerConfig configuration = new ServerConfig();
-            configuration.readFrom(quorumConfiguration);
-
-            FileTxnSnapLog txnLog = new FileTxnSnapLog(new File(configuration.getDataLogDir()), new File(configuration.getDataDir()));
-
-            zkServer.setTxnLogFactory(txnLog);
-            zkServer.setTickTime(configuration.getTickTime());
-            zkServer.setMinSessionTimeout(configuration.getMinSessionTimeout());
-            zkServer.setMaxSessionTimeout(configuration.getMaxSessionTimeout());
-            ServerCnxnFactory zookeeperConnectionFactory = ServerCnxnFactory.createFactory();
-            zookeeperConnectionFactory.configure(configuration.getClientPortAddress(),
-                    configuration.getMaxClientCnxns());
-            zookeeperConnectionFactory.startup(zkServer);
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        } catch (Exception e) {
-            throw new IllegalStateException("Failed to start Zookeeper 
server", e);
-        }
-    }
-
-    /**
-     * Will shut down Zookeeper server.
-     */
-    private void shutdownZookeeper() {
-        zkServer.shutdown();
-    }
-
-    /**
-     * Will load {@link Properties} from properties file discovered at the
-     * provided path relative to the root of the classpath.
-     */
-    private static Properties loadPropertiesFromClasspath(String path) {
-        try {
-            Properties kafkaProperties = new Properties();
-            kafkaProperties.load(Class.class.getResourceAsStream(path));
-            return kafkaProperties;
-        } catch (Exception e) {
-            throw new IllegalStateException(e);
-        }
-    }
-
-    /**
-     * Will determine the available port used by Kafka/Zookeeper servers.
-     */
-    private int availablePort() {
-        ServerSocket s = null;
-        try {
-            s = new ServerSocket(0);
-            s.setReuseAddress(true);
-            return s.getLocalPort();
-        } catch (Exception e) {
-            throw new IllegalStateException("Failed to discover available 
port.", e);
-        } finally {
-            try {
-                s.close();
-            } catch (IOException e) {
-                // ignore
-            }
-        }
-    }
-}
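
A side note on availablePort(): because ServerSocket is Closeable, the same port discovery can be written with try-with-resources, which also avoids a NullPointerException in the finally block when new ServerSocket(0) itself throws (leaving s null). A sketch of that variant:

    private int availablePort() {
        // port 0 asks the OS for any free port; try-with-resources closes the socket on every path
        try (ServerSocket s = new ServerSocket(0)) {
            s.setReuseAddress(true);
            return s.getLocalPort();
        } catch (IOException e) {
            throw new IllegalStateException("Failed to discover available port.", e);
        }
    }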

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
deleted file mode 100644
index 0ed00fb..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/java/org/apache/nifi/processors/kafka/test/EmbeddedKafkaProducerHelper.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nifi.processors.kafka.test;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Properties;
-
-import kafka.producer.KeyedMessage;
-import kafka.producer.OldProducer;
-
-/**
- * Helper class which helps to produce events targeting {@link EmbeddedKafka}
- * server.
- */
-public class EmbeddedKafkaProducerHelper implements Closeable {
-
-    private final EmbeddedKafka kafkaServer;
-
-    private final OldProducer producer;
-
-    /**
-     * Will create an instance of EmbeddedKafkaProducerHelper based on default
-     * configurations.<br>
-     * Default configuration includes:<br>
-     * <i>
-     * metadata.broker.list=[determined from the instance of EmbeddedKafka]<br>
-     * serializer.class=kafka.serializer.DefaultEncoder<br>
-     * key.serializer.class=kafka.serializer.DefaultEncoder<br>
-     * auto.create.topics.enable=true
-     * </i><br>
-     * <br>
-     * If you wish to supply additional configuration properties or override
-     * existing use
-     * {@link EmbeddedKafkaProducerHelper#EmbeddedKafkaProducerHelper(EmbeddedKafka, Properties)}
-     * constructor.
-     *
-     * @param kafkaServer
-     *            instance of {@link EmbeddedKafka}
-     */
-    public EmbeddedKafkaProducerHelper(EmbeddedKafka kafkaServer) {
-        this(kafkaServer, null);
-    }
-
-    /**
-     * Will create an instance of EmbeddedKafkaProducerHelper based on default
-     * configurations and additional configuration properties.<br>
-     * Default configuration includes:<br>
-     * metadata.broker.list=[determined from the instance of EmbeddedKafka]<br>
-     * serializer.class=kafka.serializer.DefaultEncoder<br>
-     * key.serializer.class=kafka.serializer.DefaultEncoder<br>
-     * auto.create.topics.enable=true<br>
-     * <br>
-     *
-     * @param kafkaServer
-     *            instance of {@link EmbeddedKafka}
-     * @param additionalProperties
-     *            instance of {@link Properties} specifying additional producer
-     *            configuration properties.
-     */
-    public EmbeddedKafkaProducerHelper(EmbeddedKafka kafkaServer, Properties additionalProperties) {
-        this.kafkaServer = kafkaServer;
-        Properties producerProperties = new Properties();
-        producerProperties.put("metadata.broker.list", "localhost:" + 
this.kafkaServer.getKafkaPort());
-        producerProperties.put("serializer.class", 
"kafka.serializer.DefaultEncoder");
-        producerProperties.put("key.serializer.class", 
"kafka.serializer.DefaultEncoder");
-        producerProperties.put("auto.create.topics.enable", "true");
-        if (additionalProperties != null) {
-            producerProperties.putAll(additionalProperties);
-        }
-        this.producer = new OldProducer(producerProperties);
-    }
-
-    /**
-     * Will send an event to a Kafka topic. If the topic doesn't exist, it will be
-     * auto-created.
-     *
-     * @param topicName
-     *            Kafka topic name.
-     * @param event
-     *            string representing an event (message) to be sent to Kafka.
-     */
-    public void sendEvent(String topicName, String event) {
-        KeyedMessage<byte[], byte[]> data = new KeyedMessage<byte[], byte[]>(topicName, event.getBytes());
-        this.producer.send(data.topic(), data.key(), data.message());
-    }
-
-    /**
-     * Will close the underlying Kafka producer.
-     */
-    @Override
-    public void close() throws IOException {
-        this.producer.close();
-    }
-
-}
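
Taken together, the two utilities above are used in the fixed pattern the deleted tests follow; a minimal JUnit 4 sketch (ExampleKafkaTest is a hypothetical class name):

    public class ExampleKafkaTest {

        private static EmbeddedKafka kafkaLocal;
        private static EmbeddedKafkaProducerHelper producerHelper;

        @BeforeClass
        public static void beforeClass() {
            kafkaLocal = new EmbeddedKafka(); // loads server.properties / zookeeper.properties from the classpath
            kafkaLocal.start();               // ports are only queryable after start()
            producerHelper = new EmbeddedKafkaProducerHelper(kafkaLocal);
        }

        @AfterClass
        public static void afterClass() throws Exception {
            producerHelper.close();
            kafkaLocal.stop();
        }

        @Test
        public void sendsAnEvent() {
            producerHelper.sendEvent("someTopic", "Hello"); // topic is auto-created on first send
        }
    }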

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/log4j.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/log4j.properties
deleted file mode 100644
index 8e37bb9..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-log4j.rootCategory=INFO, stdout
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %t %c{2}:%L - %m%n
-
-log4j.category.org.apache.nifi.processors.kafka=DEBUG

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/server.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/server.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/server.properties
deleted file mode 100644
index 9c44acc..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/server.properties
+++ /dev/null
@@ -1,121 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-#port=9092
-
-# Hostname the broker will bind to. If not set, the server will bind to all interfaces
-#host.name=localhost
-
-# Hostname the broker will advertise to producers and consumers. If not set, it uses the
-# value for "host.name" if configured.  Otherwise, it will use the value returned from
-# java.net.InetAddress.getCanonicalHostName().
-#advertised.host.name=<hostname routable by clients>
-
-# The port to publish to ZooKeeper for clients to use. If this is not set,
-# it will publish the same port that the broker binds to.
-#advertised.port=<port accessible by clients>
-
-# The number of threads handling network requests
-num.network.threads=3
- 
-# The number of threads doing disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=102400
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=102400
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# A comma separated list of directories under which to store log files
-log.dirs=target/kafka-tmp/kafka-logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=1
-
-# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
-# This value is recommended to be increased for installations with data dirs located in a RAID array.
-num.recovery.threads.per.data.dir=1
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data may be lost if you are not using replication.
-#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=1073741824
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=300000
-
-# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
-# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
-log.cleaner.enable=false
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma-separated list of host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=6000
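
As a minimal sketch (not taken from this commit), a test helper could load the deleted broker configuration from the classpath with plain JDK calls; the resource name comes from the file above, while the port override and printed keys are purely illustrative:

    import java.io.InputStream;
    import java.util.Properties;

    public class LoadServerProperties {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // server.properties is assumed to sit on the test classpath,
            // as it did under src/test/resources before this deletion.
            try (InputStream in = LoadServerProperties.class
                    .getClassLoader().getResourceAsStream("server.properties")) {
                props.load(in);
            }
            // 'port' is commented out in the file, so a harness would
            // typically set it per run to avoid clashes between tests.
            props.setProperty("port", "9092");
            System.out.println("broker.id = " + props.getProperty("broker.id"));
            System.out.println("log.dirs  = " + props.getProperty("log.dirs"));
        }
    }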

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/zookeeper.properties
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/zookeeper.properties b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/zookeeper.properties
deleted file mode 100644
index df271df..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/test/resources/zookeeper.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=target/kafka-tmp/zookeeper
-# the port at which the clients will connect
-#clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
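
Because clientPort is left commented out above, a harness has to supply one at startup; a common approach (an assumption here, not code from this commit) is to ask the OS for a free port and pass that value in:

    import java.net.ServerSocket;

    public class FreePort {
        // Bind to port 0 so the OS assigns an unused port, then release it
        // and hand the number to the embedded ZooKeeper/Kafka instance.
        public static int availablePort() throws Exception {
            try (ServerSocket socket = new ServerSocket(0)) {
                socket.setReuseAddress(true);
                return socket.getLocalPort();
            }
        }
    }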

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/pom.xml
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/pom.xml b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/pom.xml
deleted file mode 100644
index cb3be38..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/pom.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.nifi</groupId>
-        <artifactId>nifi-kafka-bundle</artifactId>
-        <version>1.0.0-SNAPSHOT</version>
-    </parent>
-    <artifactId>nifi-kafka-pubsub-nar</artifactId>
-    <packaging>nar</packaging>
-    <description>NiFi NAR for interacting with Apache Kafka</description>
-    <properties>
-        <maven.javadoc.skip>true</maven.javadoc.skip>
-        <source.skip>true</source.skip>
-    </properties>
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-kafka-pubsub-processors</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-standard-services-api-nar</artifactId>
-            <type>nar</type>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/LICENSE
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/LICENSE b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/LICENSE
deleted file mode 100644
index 84b3bb9..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/LICENSE
+++ /dev/null
@@ -1,299 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-APACHE NIFI SUBCOMPONENTS:
-
-The Apache NiFi project contains subcomponents with separate copyright
-notices and license terms. Your use of the source code for the these
-subcomponents is subject to the terms and conditions of the following
-licenses.
-
-  The binary distribution of this product bundles 'Scala Library' under a BSD
-  style license.
-
-    Copyright (c) 2002-2015 EPFL
-    Copyright (c) 2011-2015 Typesafe, Inc.
-
-    All rights reserved.
-
-    Redistribution and use in source and binary forms, with or without modification,
-       are permitted provided that the following conditions are met:
-
-        Redistributions of source code must retain the above copyright notice, this list of
-               conditions and the following disclaimer.
-
-           Redistributions in binary form must reproduce the above copyright notice, this list of
-               conditions and the following disclaimer in the documentation and/or other materials
-               provided with the distribution.
-
-           Neither the name of the EPFL nor the names of its contributors may be used to endorse
-               or promote products derived from this software without specific prior written permission.
-
-       THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS
-       OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-       AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-       CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-       DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-       DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
-       IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-       OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  The binary distribution of this product bundles 'JLine' under a BSD
-  style license.
-
-    Copyright (c) 2002-2006, Marc Prud'hommeaux <m...@cornell.edu>
-    All rights reserved.
-
-    Redistribution and use in source and binary forms, with or
-    without modification, are permitted provided that the following
-    conditions are met:
-
-    Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-    Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer
-    in the documentation and/or other materials provided with
-    the distribution.
-
-    Neither the name of JLine nor the names of its contributors
-    may be used to endorse or promote products derived from this
-    software without specific prior written permission.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
-    BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-    AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-    EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
-    OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
-    AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-    IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-    OF THE POSSIBILITY OF SUCH DAMAGE.
-
-  The binary distribution of this product bundles 'JOpt Simple' under an MIT
-  style license.
-
-    Copyright (c) 2009 Paul R. Holser, Jr.
-
-    Permission is hereby granted, free of charge, to any person obtaining
-    a copy of this software and associated documentation files (the
-    "Software"), to deal in the Software without restriction, including
-    without limitation the rights to use, copy, modify, merge, publish,
-    distribute, sublicense, and/or sell copies of the Software, and to
-    permit persons to whom the Software is furnished to do so, subject to
-    the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/NOTICE
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/NOTICE b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/NOTICE
deleted file mode 100644
index 3aa101a..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-nar/src/main/resources/META-INF/NOTICE
+++ /dev/null
@@ -1,72 +0,0 @@
-nifi-kafka-nar
-Copyright 2014-2016 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-******************
-Apache Software License v2
-******************
-
-The following binary components are provided under the Apache Software License v2
-
-  (ASLv2) Apache Commons Lang
-    The following NOTICE information applies:
-      Apache Commons Lang
-      Copyright 2001-2014 The Apache Software Foundation
-
-      This product includes software from the Spring Framework,
-      under the Apache License 2.0 (see: StringUtils.containsWhitespace())
-
-  (ASLv2) Apache Kafka
-    The following NOTICE information applies:
-      Apache Kafka
-      Copyright 2012 The Apache Software Foundation.
-
-  (ASLv2) Yammer Metrics
-    The following NOTICE information applies:
-      Metrics
-      Copyright 2010-2012 Coda Hale and Yammer, Inc.
-
-      This product includes software developed by Coda Hale and Yammer, Inc.
-
-      This product includes code derived from the JSR-166 project (ThreadLocalRandom), which was released
-      with the following comments:
-
-          Written by Doug Lea with assistance from members of JCP JSR-166
-          Expert Group and released to the public domain, as explained at
-          http://creativecommons.org/publicdomain/zero/1.0/
-
-  (ASLv2) Snappy Java
-    The following NOTICE information applies:
-      This product includes software developed by Google
-       Snappy: http://code.google.com/p/snappy/ (New BSD License)
-
-      This product includes software developed by Apache
-       PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
-       (Apache 2.0 license)
-
-      This library contains statically linked libstdc++. This inclusion is allowed by
-      "GCC Runtime Library Exception"
-      http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
-
-  (ASLv2) Apache ZooKeeper
-    The following NOTICE information applies:
-      Apache ZooKeeper
-      Copyright 2009-2012 The Apache Software Foundation
-
-************************
-Common Development and Distribution License 1.1
-************************
-
-The following binary components are provided under the Common Development and Distribution License 1.1. See project link for details.
-
-    (CDDL 1.1) (GPL2 w/ CPE) JavaMail API (compat) (javax.mail:mail:jar:1.4.7 - http://kenai.com/projects/javamail/mail)
-
-************************
-Common Development and Distribution License 1.0
-************************
-
-The following binary components are provided under the Common Development and Distribution License 1.0.  See project link for details.
-
-    (CDDL 1.0) JavaBeans Activation Framework (JAF) (javax.activation:activation:jar:1.1 - http://java.sun.com/products/javabeans/jaf/index.jsp)

http://git-wip-us.apache.org/repos/asf/nifi/blob/1745c127/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-processors/pom.xml
----------------------------------------------------------------------
diff --git a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-processors/pom.xml b/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-processors/pom.xml
deleted file mode 100644
index 3ad8e37..0000000
--- a/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-pubsub-processors/pom.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <parent>
-        <groupId>org.apache.nifi</groupId>
-        <artifactId>nifi-kafka-bundle</artifactId>
-        <version>1.0.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-    <artifactId>nifi-kafka-pubsub-processors</artifactId>
-    <packaging>jar</packaging>
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-processor-utils</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-utils</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-ssl-context-service-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka-clients</artifactId>
-            <version>0.9.0.1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka_2.10</artifactId>
-            <version>0.9.0.1</version>
-            <exclusions>
-                <!-- Transitive dependencies excluded because they are located
-                in a legacy Maven repository, which Maven 3 doesn't support. -->
-                <exclusion>
-                    <groupId>javax.jms</groupId>
-                    <artifactId>jms</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jdmk</groupId>
-                    <artifactId>jmxtools</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jmx</groupId>
-                    <artifactId>jmxri</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.nifi</groupId>
-            <artifactId>nifi-mock</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-simple</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>
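
This pom pulls in both the kafka_2.10 server artifact and the then-new kafka-clients 0.9.0.1 API that the pubsub processors were built against. As a hedged sketch of that client API only (the broker address, topic name, and payload are placeholders; none of this is the deleted processors' code):

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.ByteArraySerializer;

    public class ProducerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("key.serializer", ByteArraySerializer.class.getName());
            props.put("value.serializer", ByteArraySerializer.class.getName());
            // Producer extends Closeable, so try-with-resources flushes
            // and closes the client when the block exits.
            try (Producer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("demo-topic", "hello".getBytes()));
            }
        }
    }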
