Github user jerryshao commented on a diff in the pull request: https://github.com/apache/spark/pull/14340#discussion_r72177164 --- Diff: external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/KafkaUtils.scala --- @@ -177,3 +182,172 @@ object KafkaUtils extends Logging { } } } + +private[kafka010] class KafkaUtilsPythonHelper extends Logging { + import KafkaUtilsPythonHelper._ + + def createDirectStream( + jssc: JavaStreamingContext, + locationStrategy: LocationStrategy, + consumerStrategy: ConsumerStrategy[Array[Byte], Array[Byte]] + ): JavaDStream[Array[Byte]] = { + fixKafkaParams(consumerStrategy.executorKafkaParams) + val stream = KafkaUtils.createDirectStream(jssc.ssc, locationStrategy, consumerStrategy) + .map { r => + PythonConsumerRecord(r.topic(), r.partition(), r.offset(), r.timestamp(), + r.timestampType().toString, r.checksum(), r.serializedKeySize(), r.serializedValueSize(), + r.key(), r.value()) + }.mapPartitions(picklerIterator) + new JavaDStream(stream) + } + + def createRDD( + jsc: JavaSparkContext, + kafkaParams: ju.Map[String, Object], + offsetRanges: ju.List[OffsetRange], + locationStrategy: LocationStrategy + ): JavaRDD[Array[Byte]] = { + fixKafkaParams(kafkaParams) + val rdd = KafkaUtils.createRDD[Array[Byte], Array[Byte]]( + jsc.sc, + kafkaParams, + offsetRanges.toArray(new Array[OffsetRange](offsetRanges.size())), + locationStrategy) + .map { r => + PythonConsumerRecord(r.topic(), r.partition(), r.offset(), r.timestamp(), + r.timestampType().toString, r.checksum(), r.serializedKeySize(), + r.serializedValueSize(), r.key(), r.value()) + }.mapPartitions(picklerIterator) + new JavaRDD(rdd) + } + + private def fixKafkaParams(kafkaParams: ju.Map[String, Object]): Unit = { + val decoder = classOf[ByteArrayDeserializer].getCanonicalName + + val keyDecoder = kafkaParams.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG) + if (keyDecoder != null && keyDecoder != decoder) { + logWarning(s"${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG} 
$keyDecoder is not supported " + + s"for python Kafka API, overriding with $decoder") + } + kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, decoder) + + val valueDecoder = kafkaParams.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG) + if (valueDecoder != null && valueDecoder != decoder) { + logWarning(s"${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG} $valueDecoder is not " + + s"supported for python Kafka API, overriding with $decoder") + } + kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, decoder) + } + + // Helper functions to convert Python object to Java object + def createOffsetRange(topic: String, partition: JInt, fromOffset: JLong, untilOffset: JLong + ): OffsetRange = OffsetRange.create(topic, partition, fromOffset, untilOffset) + + def createPreferBrokers(): LocationStrategy = LocationStrategies.PreferBrokers + + def createPreferConsistent(): LocationStrategy = LocationStrategies.PreferConsistent + + def createPreferFixed(hostMap: ju.Map[TopicPartition, String]): LocationStrategy = { + LocationStrategies.PreferFixed(hostMap) + } + + def createTopicPartition(topic: String, partition: JInt): TopicPartition = + new TopicPartition(topic, partition) + + def createSubscribe( + topics: ju.Set[String], + kafkaParams: ju.Map[String, Object], + offsets: ju.Map[TopicPartition, JLong]): ConsumerStrategy[Array[Byte], Array[Byte]] = + ConsumerStrategies.Subscribe(topics, kafkaParams, offsets) + + def createSubscribePattern( + pattern: String, + kafkaParams: ju.Map[String, Object], + offsets: ju.Map[TopicPartition, JLong]): ConsumerStrategy[Array[Byte], Array[Byte]] = { + ConsumerStrategies.SubscribePattern(ju.regex.Pattern.compile(pattern), kafkaParams, offsets) + } + + def createAssign( + topicPartitions: ju.Set[TopicPartition], + kafkaParams: ju.Map[String, Object], + offsets: ju.Map[TopicPartition, JLong]): ConsumerStrategy[Array[Byte], Array[Byte]] = { + ConsumerStrategies.Assign(topicPartitions, kafkaParams, offsets) + } + + def 
offsetRangesOfKafkaRDD(rdd: RDD[_]): ju.List[OffsetRange] = { --- End diff -- Sure, I will also add this.
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org