Github user tdas commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1434#discussion_r15504519
  
    --- Diff: extras/spark-kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCount.java ---
    @@ -0,0 +1,310 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.spark.examples.streaming;
    +
    +import java.util.List;
    +import java.util.regex.Pattern;
    +
    +import org.apache.log4j.Level;
    +import org.apache.log4j.Logger;
    +import org.apache.spark.SparkConf;
    +import org.apache.spark.api.java.JavaPairRDD;
    +import org.apache.spark.api.java.function.FlatMapFunction;
    +import org.apache.spark.api.java.function.Function;
    +import org.apache.spark.api.java.function.Function2;
    +import org.apache.spark.api.java.function.PairFunction;
    +import org.apache.spark.storage.StorageLevel;
    +import org.apache.spark.streaming.Duration;
    +import org.apache.spark.streaming.Milliseconds;
    +import org.apache.spark.streaming.api.java.JavaDStream;
    +import org.apache.spark.streaming.api.java.JavaPairDStream;
    +import org.apache.spark.streaming.api.java.JavaStreamingContext;
    +import org.apache.spark.streaming.dstream.DStream;
    +import org.apache.spark.streaming.kinesis.KinesisRecordSerializer;
    +import org.apache.spark.streaming.kinesis.KinesisStringRecordSerializer;
    +import org.apache.spark.streaming.kinesis.KinesisUtils;
    +
    +import scala.Tuple2;
    +
    +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
    +import com.amazonaws.services.kinesis.AmazonKinesisClient;
    +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
    +import com.google.common.base.Optional;
    +import com.google.common.collect.Lists;
    +
    +/**
    + * Java-friendly Kinesis Spark Streaming WordCount example
    + *
    + * See http://spark.apache.org/docs/latest/streaming-programming-guide.html for more details on the Kinesis Spark Streaming integration.
    + *
    + * This example spins up 1 Kinesis Worker (Spark Streaming Receiver) per shard of the given stream.
    + * It then starts pulling from the tip of the given <stream-name> and <endpoint-url> at the given <batch-interval>.
    + * Because we're pulling from the tip (InitialPositionInStream.LATEST), only new stream data will be picked up after the KinesisReceiver starts.
    + * This could lead to missed records if data is added to the stream while no KinesisReceivers are running.
    + * In production, you'll want to switch to InitialPositionInStream.TRIM_HORIZON, which will read up to 24 hours (the Kinesis limit) of previous stream data, depending on the checkpoint frequency.
    + * InitialPositionInStream.TRIM_HORIZON may lead to duplicate processing of records depending on the checkpoint frequency.
    + * Record processing should be idempotent when possible.
    + *
    + * This code uses the DefaultAWSCredentialsProviderChain and searches for credentials in the following order of precedence:
    + *     Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
    + *     Java System Properties - aws.accessKeyId and aws.secretKey
    + *     Credential profiles file - default location (~/.aws/credentials) shared by all AWS SDKs
    + *     Instance profile credentials - delivered through the Amazon EC2 metadata service
    + *
    + * Usage: JavaKinesisWordCount <stream-name> <endpoint-url> <batch-interval>
    + *     <stream-name> is the name of the Kinesis stream (e.g. mySparkStream)
    + *     <endpoint-url> is the endpoint of the Kinesis service (e.g. https://kinesis.us-east-1.amazonaws.com)
    + *     <batch-interval> is the batch interval in milliseconds (e.g. 1000)
    + *
    + * Example:
    + *     $ export AWS_ACCESS_KEY_ID=<your-access-key>
    + *     $ export AWS_SECRET_KEY=<your-secret-key>
    + *     $ bin/run-kinesis-example \
    + *         org.apache.spark.examples.streaming.JavaKinesisWordCount mySparkStream https://kinesis.us-east-1.amazonaws.com 1000
    + *
    + * There is a companion helper class called KinesisWordCountProducer which puts dummy data onto the Kinesis stream.
    + * Usage instructions for KinesisWordCountProducer are provided in the class definition.
    + */
    +public final class JavaKinesisWordCount {
    +    private static final Pattern WORD_SEPARATOR = Pattern.compile(" ");
    +    private static final Logger logger = Logger.getLogger(JavaKinesisWordCount.class);
    +
    +    /**
    +     * Private constructor to prevent instantiation (this is a utility class).
    +     */
    +    private JavaKinesisWordCount() {
    +    }
    +
    +    public static void main(String[] args) {
    +        /**
    +         * Check that all required args were passed in.
    +         */
    +        if (args.length < 3) {
    +            System.err.println("Usage: JavaKinesisWordCount <stream-name> <endpoint-url> <batch-interval>");
    +            System.exit(1);
    +        }
    +
    +        /**
    +         * (This was lifted from StreamingExamples.scala in order to avoid the dependency on the spark-examples artifact.)
    +         * Set reasonable logging levels for streaming if the user has not configured log4j.
    +         */
    +        boolean log4jInitialized = Logger.getRootLogger().getAllAppenders().hasMoreElements();
    +        if (!log4jInitialized) {
    +            /** We first log something to initialize Spark's default logging, then we override the logging level. */
    +            Logger.getRootLogger().info("Setting log level to [ERROR] for streaming example."
    +                    + " To override, add a custom log4j.properties to the classpath.");
    +            Logger.getRootLogger().setLevel(Level.ERROR);
    +            Logger.getLogger("org.apache.spark.examples.streaming").setLevel(Level.DEBUG);
    +        }
    +
    +        /** Populate the appropriate variables from the given args */
    +        String stream = args[0];
    +        String endpoint = args[1];
    +        Integer batchIntervalMillis = Integer.valueOf(args[2]);
    +
    +        /** Create a Kinesis client in order to determine the number of shards for the given stream */
    +        AmazonKinesisClient kinesisClient = new AmazonKinesisClient(
    +                new DefaultAWSCredentialsProviderChain());
    +
    +        /** Determine the number of shards from the stream */
    +        int numShards = kinesisClient.describeStream(stream)
    +                .getStreamDescription().getShards().size();
    +
    +        /** In this example, we're going to create 1 Kinesis Worker/Receiver/DStream for each stream shard */
    +        int numStreams = numShards;
    +
    +        /** Must add 1 more thread than the number of receivers or the output won't show properly from the driver */
    +        int numSparkThreads = numStreams + 1;
    +
    +        /** Set the app name */
    +        String app = "KinesisWordCount";
    +
    +        /** Setup the Spark config. */
    +        SparkConf sparkConfig = new SparkConf().setAppName(app).setMaster(
    +                "local[" + numSparkThreads + "]");
    +
    +        /**
    +         * Set the batch interval.
    +         * Records pulled from the Kinesis stream are stored in a new RDD of the DStream every batch interval.
    +         */
    +        Duration batchInterval = Milliseconds.apply(batchIntervalMillis);
    +
    +        /**
    +         * It's recommended that the Spark checkpoint interval be 5 to 10 times the batch interval.
    +         * While this is the Spark checkpoint interval, we're going to use it for the Kinesis checkpoint interval, as well.
    +         */
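    +        /** Note: $times is the Java-visible name of Scala's Duration "*" operator, so this is batchInterval * 5 */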
    +        Duration checkpointInterval = batchInterval.$times(5);
    +
    +        /** Setup the StreamingContext */
    +        JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
    +
    +        /** Setup the checkpoint directory used by Spark Streaming */
    +        jssc.checkpoint("/tmp/checkpoint");
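    +        /** Note: /tmp works for a local example; in production, point this at a fault-tolerant location such as HDFS or S3 */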
    +
    +        /** Create the same number of Kinesis Receivers/DStreams as stream shards, then union them all */
    +        JavaDStream<byte[]> allStreams = KinesisUtils.createJavaStream(
    +                jssc, app, stream, endpoint, checkpointInterval.milliseconds(),
    +                InitialPositionInStream.LATEST, StorageLevel.MEMORY_AND_DISK_2());
    +        /** Set the checkpoint interval */
    +        allStreams.checkpoint(checkpointInterval);
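    +        /** The first Receiver/DStream was created above, so start at i = 1 and create the remaining numStreams - 1 */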
    +        for (int i = 1; i < numStreams; i++) {
    +            /** Create a new Receiver/DStream for each stream shard */
    +            JavaDStream<byte[]> dStream = KinesisUtils.createJavaStream(
    +                    jssc, app, stream, endpoint, checkpointInterval.milliseconds(),
    +                    InitialPositionInStream.LATEST, StorageLevel.MEMORY_AND_DISK_2());
    +            /** Set the Spark checkpoint interval */
    +            dStream.checkpoint(checkpointInterval);
    +
    +            /** Union with the existing streams */
    +            allStreams = allStreams.union(dStream);
    +        }
    +
    +        /** This example uses the String-based KinesisRecordSerializer implementation */
    +        final KinesisRecordSerializer<String> recordSerializer = new KinesisStringRecordSerializer();
    +
    +        /**
    +         * Split each line of the unioned DStreams into multiple words using flatMap to produce the collection.
    +         * Convert lines of byte[] to multiple Strings by first converting to String, then splitting on WORD_SEPARATOR.
    +         * We're caching the result here so that we can use it later without having to re-materialize the underlying RDDs.
    +         */
    +        JavaDStream<String> words = allStreams
    +                .flatMap(new FlatMapFunction<byte[], String>() {
    +                    /**
    --- End diff --
    
    A better line break would be:
    
    ... allStreams.flatMap(   // new line here
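    
    For illustration, the whole statement with that break might look like the sketch below. Since the function body is truncated in the quoted diff, the byte[]-to-String conversion shown here (a plain new String(...)) is only an assumption standing in for whatever recordSerializer actually does:
    
        JavaDStream<String> words = allStreams.flatMap(
                new FlatMapFunction<byte[], String>() {
                    @Override
                    public Iterable<String> call(byte[] line) {
                        // Hypothetical conversion: decode the record bytes, then split on WORD_SEPARATOR
                        return Lists.newArrayList(WORD_SEPARATOR.split(new String(line)));
                    }
                });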

