chamikaramj commented on code in PR #17828:
URL: https://github.com/apache/beam/pull/17828#discussion_r951479298
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java: ##########
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+import static org.apache.beam.sdk.util.Preconditions.checkStateNotNull;
+
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.io.range.OffsetRange;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.DoFn.UnboundedPerElement;
+import org.apache.beam.sdk.transforms.SerializableFunction;
+import org.apache.beam.sdk.transforms.splittabledofn.ManualWatermarkEstimator;
+import org.apache.beam.sdk.transforms.splittabledofn.OffsetRangeTracker;
+import org.apache.beam.sdk.transforms.splittabledofn.RestrictionTracker;
+import org.apache.beam.sdk.transforms.splittabledofn.WatermarkEstimator;
+import org.apache.beam.sdk.transforms.splittabledofn.WatermarkEstimators;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.spark.SparkConf;
+import org.apache.spark.streaming.receiver.Receiver;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import org.joda.time.Instant;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A SplittableDoFn which reads from {@link Receiver} that implements {@link HasOffset}. By default,
+ * a {@link WatermarkEstimators.Manual} watermark estimator is used to track watermark.
+ *
+ * <p>Initial range The initial range is {@code [0, Long.MAX_VALUE)}
+ *
+ * <p>Resume Processing Every time the sparkConsumer.hasRecords() returns false, {@link
+ * ReadFromSparkReceiverWithOffsetDoFn} will move to process the next element.
+ */
+@UnboundedPerElement
+class ReadFromSparkReceiverWithOffsetDoFn<V> extends DoFn<byte[], V> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReadFromSparkReceiverWithOffsetDoFn.class);
+
+  /** Constant waiting time after the {@link Receiver} starts. Required to prepare for polling */
+  private static final int START_POLL_TIMEOUT_MS = 1000;
+
+  private final SerializableFunction<Instant, WatermarkEstimator<Instant>>
+      createWatermarkEstimatorFn;
+  private final SerializableFunction<V, Long> getOffsetFn;
+  private final SerializableFunction<V, Instant> getWatermarkFn;
+  private final ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder;
+
+  ReadFromSparkReceiverWithOffsetDoFn(SparkReceiverIO.Read<V> transform) {
+    createWatermarkEstimatorFn = WatermarkEstimators.Manual::new;
+
+    ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder =
+        transform.getSparkReceiverBuilder();
+    checkStateNotNull(sparkReceiverBuilder, "Spark Receiver Builder can't be null!");
+    this.sparkReceiverBuilder = sparkReceiverBuilder;
+
+    SerializableFunction<V, Long> getOffsetFn = transform.getGetOffsetFn();
+    checkStateNotNull(getOffsetFn, "Get offset fn can't be null!");
+    this.getOffsetFn = getOffsetFn;
+
+    SerializableFunction<V, Instant> getWatermarkFn = transform.getWatermarkFn();
+    if (getWatermarkFn == null) {
+      getWatermarkFn = input -> Instant.now();
+    }
+    this.getWatermarkFn = getWatermarkFn;
+  }
+
+  @GetInitialRestriction
+  public OffsetRange initialRestriction(@Element byte[] element) {
+    return new OffsetRange(0, Long.MAX_VALUE);
+  }
+
+  @GetInitialWatermarkEstimatorState
+  public Instant getInitialWatermarkEstimatorState(@Timestamp Instant currentElementTimestamp) {
+    return currentElementTimestamp;
+  }
+
+  @NewWatermarkEstimator
+  public WatermarkEstimator<Instant> newWatermarkEstimator(
+      @WatermarkEstimatorState Instant watermarkEstimatorState) {
+    return createWatermarkEstimatorFn.apply(ensureTimestampWithinBounds(watermarkEstimatorState));
+  }
+
+  @GetSize
+  public double getSize(@Element byte[] element, @Restriction OffsetRange offsetRange) {
+    return restrictionTracker(element, offsetRange).getProgress().getWorkRemaining();
+  }
+
+  @NewTracker
+  public OffsetRangeTracker restrictionTracker(
+      @Element byte[] element, @Restriction OffsetRange restriction) {
+    return new OffsetRangeTracker(restriction);
+  }
+
+  @GetRestrictionCoder
+  public Coder<OffsetRange> restrictionCoder() {
+    return new OffsetRange.Coder();
+  }
+
+  // Need to do an unchecked cast from Object
+  // because org.apache.spark.streaming.receiver.ReceiverSupervisor accepts Object in push methods
+  @SuppressWarnings("unchecked")
+  private static class SparkConsumerWithOffset<V> implements SparkConsumer<V> {
+    private final Queue<V> recordsQueue;
+    private @Nullable Receiver<V> sparkReceiver;
+    private final Long startOffset;
+
+    SparkConsumerWithOffset(Long startOffset) {
+      this.startOffset = startOffset;
+      this.recordsQueue = new ConcurrentLinkedQueue<>();
+    }
+
+    @Override
+    public boolean hasRecords() {
+      return !recordsQueue.isEmpty();
+    }
+
+    @Override
+    public @Nullable V poll() {
+      return recordsQueue.poll();
+    }
+
+    @Override
+    public void start(Receiver<V> sparkReceiver) {
+      this.sparkReceiver = sparkReceiver;
+      try {
+        new WrappedSupervisor(
+            sparkReceiver,
+            new SparkConf(),
+            objects -> {
+              V record = (V) objects[0];
+              recordsQueue.offer(record);
+              return null;
+            });
+      } catch (Exception e) {
+        LOG.error("Can not init Spark Receiver!", e);
+      }
+      ((HasOffset) sparkReceiver).setStartOffset(startOffset);
+      sparkReceiver.supervisor().startReceiver();
+      try {
+        TimeUnit.MILLISECONDS.sleep(START_POLL_TIMEOUT_MS);
+      } catch (InterruptedException e) {
+        LOG.error("SparkReceiver was interrupted before polling started", e);
+      }
+    }
+
+    @Override
+    public void stop() {
+      if (sparkReceiver != null) {
+        sparkReceiver.stop("SparkReceiver is stopped.");
+      }
+      recordsQueue.clear();

Review Comment:
   What happens to the existing records in the queue?
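   For illustration, a minimal sketch of a drain-instead-of-clear alternative. The onRemainingRecord hook is hypothetical and not part of the PR; the point is that poll() makes each buffered record observable (e.g. for checkpointing or re-emitting) before the queue is discarded:

       @Override
       public void stop() {
         if (sparkReceiver != null) {
           sparkReceiver.stop("SparkReceiver is stopped.");
         }
         V record;
         // Drain instead of clear(), so already-received records are not silently dropped.
         while ((record = recordsQueue.poll()) != null) {
           onRemainingRecord(record); // hypothetical hook: checkpoint or re-emit
         }
       }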
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java: ##########
+    @Override
+    public void start(Receiver<V> sparkReceiver) {
+      this.sparkReceiver = sparkReceiver;
+      try {
+        new WrappedSupervisor(
+            sparkReceiver,
+            new SparkConf(),
+            objects -> {
+              V record = (V) objects[0];
+              recordsQueue.offer(record);
+              return null;
+            });
+      } catch (Exception e) {
+        LOG.error("Can not init Spark Receiver!", e);

Review Comment:
   Should we be raising an exception here (or retrying) instead of just logging?
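   For illustration, a minimal fail-fast sketch (an assumed shape, not the PR's code; storeFn stands in for the push callback above):

       try {
         new WrappedSupervisor(sparkReceiver, new SparkConf(), storeFn);
       } catch (Exception e) {
         // Fail the bundle so the runner can retry, instead of continuing
         // with a receiver that has no supervisor attached.
         throw new IllegalStateException("Can not init Spark Receiver!", e);
       }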
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIO.java: ##########
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+import static org.apache.beam.sdk.util.Preconditions.checkStateNotNull;
+import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkArgument;
+
+import com.google.auto.value.AutoValue;
+import org.apache.beam.sdk.transforms.Impulse;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SerializableFunction;
+import org.apache.beam.sdk.values.PBegin;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.spark.streaming.receiver.Receiver;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import org.joda.time.Instant;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Streaming sources for Spark {@link Receiver}.
+ *
+ * <h3>Reading using {@link SparkReceiverIO}</h3>
+ *
+ * <p>You will need to pass a {@link ReceiverBuilder} which is responsible for instantiating new
+ * {@link Receiver} objects.
+ *
+ * <p>{@link Receiver} that will be used should implement {@link HasOffset} interface. You will need
+ * to pass {@code getOffsetFn} which is a {@link SerializableFunction} that defines how to get
+ * {@code Long offset} from {@code V record}.
+ *
+ * <p>Optionally you can pass {@code watermarkFn} which is a {@link SerializableFunction} that
+ * defines how to get {@code Instant watermark} from {@code V record}.
+ *
+ * <p>Example of {@link SparkReceiverIO#read()} usage:
+ *
+ * <pre>{@code
+ * Pipeline p = ...; // Create pipeline.
+ *
+ * // Create ReceiverBuilder for CustomReceiver
+ * ReceiverBuilder<String, CustomReceiverWithOffset> receiverBuilder =
+ *     new ReceiverBuilder<>(CustomReceiver.class).withConstructorArgs();
+ *
+ * //Read from CustomReceiver
+ * p.apply("Spark Receiver Read",
+ *     SparkReceiverIO.Read<String> reader =
+ *         SparkReceiverIO.<String>read()
+ *             .withGetOffsetFn(Long::valueOf)
+ *             .withWatermarkFn(Instant::parse)
+ *             .withSparkReceiverBuilder(receiverBuilder);
+ * }</pre>
+ */
+public class SparkReceiverIO {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SparkReceiverIO.class);
+
+  public static <V> Read<V> read() {
+    return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
+  }
+
+  /** A {@link PTransform} to read from Spark {@link Receiver}. */
+  @AutoValue
+  @AutoValue.CopyAnnotations
+  public abstract static class Read<V> extends PTransform<PBegin, PCollection<V>> {
+
+    abstract @Nullable ReceiverBuilder<V, ? extends Receiver<V>> getSparkReceiverBuilder();
+
+    abstract @Nullable SerializableFunction<V, Long> getGetOffsetFn();
+
+    abstract @Nullable SerializableFunction<V, Instant> getWatermarkFn();
+
+    abstract Builder<V> toBuilder();
+
+    @AutoValue.Builder
+    abstract static class Builder<V> {
+
+      abstract Builder<V> setSparkReceiverBuilder(
+          ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder);
+
+      abstract Builder<V> setGetOffsetFn(SerializableFunction<V, Long> getOffsetFn);
+
+      abstract Builder<V> setWatermarkFn(SerializableFunction<V, Instant> watermarkFn);
+
+      abstract Read<V> build();
+    }
+
+    /** Sets {@link ReceiverBuilder} with value and custom Spark {@link Receiver} class. */
+    public Read<V> withSparkReceiverBuilder(
+        ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder) {
+      checkArgument(sparkReceiverBuilder != null, "Spark receiver builder can not be null");
+      return toBuilder().setSparkReceiverBuilder(sparkReceiverBuilder).build();
+    }
+
+    /** A function to get offset in order to start {@link Receiver} from it. */
+    public Read<V> withGetOffsetFn(SerializableFunction<V, Long> getOffsetFn) {
+      checkArgument(getOffsetFn != null, "Get offset function can not be null");
+      return toBuilder().setGetOffsetFn(getOffsetFn).build();
+    }
+
+    /** A function to calculate watermark after a record. */
+    public Read<V> withWatermarkFn(SerializableFunction<V, Instant> watermarkFn) {

Review Comment:
   This should probably be renamed to getTimestampFn or similar (and the Javadoc updated as well).
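   Under the suggested rename, the builder call from the Javadoc example above would read roughly as follows (a hypothetical API shape, assuming the rename is adopted; receiverBuilder is the builder from that example):

       SparkReceiverIO.Read<String> reader =
           SparkReceiverIO.<String>read()
               .withGetOffsetFn(Long::valueOf)
               .withTimestampFn(Instant::parse) // hypothetical rename of withWatermarkFn
               .withSparkReceiverBuilder(receiverBuilder);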
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/SparkReceiverIO.java: ##########
+      checkArgument(watermarkFn != null, "Watermark function can not be null");
+      return toBuilder().setWatermarkFn(watermarkFn).build();
+    }
+
+    @Override
+    public PCollection<V> expand(PBegin input) {
+      validateTransform();
+      return input.apply(new ReadFromSparkReceiverViaSdf<>(this));
+    }
+
+    public void validateTransform() {
+      ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder = getSparkReceiverBuilder();
+      checkStateNotNull(sparkReceiverBuilder, "withSparkReceiverBuilder() is required");
+      checkStateNotNull(getGetOffsetFn(), "withGetOffsetFn() is required");
+    }
+  }
+
+  static class ReadFromSparkReceiverViaSdf<V> extends PTransform<PBegin, PCollection<V>> {
+
+    private final Read<V> sparkReceiverRead;
+
+    ReadFromSparkReceiverViaSdf(Read<V> sparkReceiverRead) {
+      this.sparkReceiverRead = sparkReceiverRead;
+    }
+
+    @Override
+    public PCollection<V> expand(PBegin input) {
+      final ReceiverBuilder<V, ? extends Receiver<V>> sparkReceiverBuilder =
+          sparkReceiverRead.getSparkReceiverBuilder();
+      checkStateNotNull(sparkReceiverBuilder, "withSparkReceiverBuilder() is required");
+      if (!HasOffset.class.isAssignableFrom(sparkReceiverBuilder.getSparkReceiverClass())) {
+        throw new UnsupportedOperationException(
+            String.format(
+                "Given Spark Receiver class %s doesn't implement HasOffset interface,"
+                    + " therefore it is not supported!",
+                sparkReceiverBuilder.getSparkReceiverClass().getName()));
+      } else {
+        LOG.info("{} started reading", ReadFromSparkReceiverWithOffsetDoFn.class.getSimpleName());
+        return input
+            .apply(Impulse.create())
+            .apply(ParDo.of(new ReadFromSparkReceiverWithOffsetDoFn<>(sparkReceiverRead)));

Review Comment:
   It seems we don't do any kind of splitting here, which will limit the reading to a single worker. Is it possible to split data from the SparkReceiver across multiple workers (for example, Kafka does this by creating splits for different partitions)?
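   For comparison, a rough sketch of how an SDF can fan work out via @SplitRestriction, under the assumption that the receiver's offset space could be divided into known sub-ranges (Kafka-style partitions). This is illustrative only; the PR's single [0, Long.MAX_VALUE) restriction does not currently admit such a split:

       // Illustrative only: emit one sub-range per hypothetical partition so the
       // runner can distribute the sub-ranges across workers.
       @SplitRestriction
       public void splitRestriction(
           @Restriction OffsetRange restriction, OutputReceiver<OffsetRange> receiver) {
         int numPartitions = 4; // hypothetical, e.g. one per source partition
         long size = (restriction.getTo() - restriction.getFrom()) / numPartitions;
         for (int i = 0; i < numPartitions; i++) {
           long from = restriction.getFrom() + i * size;
           long to = (i == numPartitions - 1) ? restriction.getTo() : from + size;
           receiver.output(new OffsetRange(from, to));
         }
       }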
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java: ##########
+    @Override
+    public void start(Receiver<V> sparkReceiver) {
+      this.sparkReceiver = sparkReceiver;
+      try {
+        new WrappedSupervisor(
+            sparkReceiver,
+            new SparkConf(),
+            objects -> {
+              V record = (V) objects[0];
+              recordsQueue.offer(record);
+              return null;

Review Comment:
   Was "return null" intended here?

########## sdks/java/io/sparkreceiver/src/test/java/org/apache/beam/sdk/io/sparkreceiver/CustomReceiverWithOffset.java: ##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.sparkreceiver;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.spark.storage.StorageLevel;
+import org.apache.spark.streaming.receiver.Receiver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Imitation of Spark {@link Receiver} that implements {@link HasOffset} interface. Used to test
+ * {@link SparkReceiverIO#read()}.
+ */
+public class CustomReceiverWithOffset extends Receiver<String> implements HasOffset {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CustomReceiverWithOffset.class);
+  private static final int TIMEOUT_MS = 500;
+  private static final List<String> STORED_RECORDS = new ArrayList<>();
+  private static final int RECORDS_COUNT = 20;
+  private Long startOffset;
+
+  CustomReceiverWithOffset() {
+    super(StorageLevel.MEMORY_AND_DISK_2());
+  }
+
+  @Override
+  public void setStartOffset(Long startOffset) {
+    if (startOffset != null) {
+      this.startOffset = startOffset;
+    }
+  }
+
+  @Override
+  @SuppressWarnings("FutureReturnValueIgnored")
+  public void onStart() {
+    Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().build()).submit(this::receive);
+  }
+
+  @Override
+  public void onStop() {}
+
+  @Override
+  public Long getEndOffset() {
+    return Long.MAX_VALUE;
+  }
+
+  private void receive() {
+    Long currentOffset = startOffset;
+    while (!isStopped()) {

Review Comment:
   This part of the test does not seem to be thread-safe currently (the source might read while a batch is being loaded).
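   One way to make the hand-off atomic, sketched under the assumption that the test can buffer records per batch (BATCH_SIZE is hypothetical): Spark's Receiver#store(Iterator) hands a whole batch over in a single call, so a reader never observes a partially loaded batch.

       private void receive() {
         Long currentOffset = startOffset;
         while (!isStopped() && currentOffset < RECORDS_COUNT) {
           List<String> batch = new ArrayList<>();
           for (int i = 0; i < BATCH_SIZE && currentOffset < RECORDS_COUNT; i++) {
             batch.add(String.valueOf(currentOffset++));
           }
           store(batch.iterator()); // hand the whole batch to Spark in one call
         }
       }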
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java: ##########
+    SerializableFunction<V, Long> getOffsetFn = transform.getGetOffsetFn();
+    checkStateNotNull(getOffsetFn, "Get offset fn can't be null!");
+    this.getOffsetFn = getOffsetFn;
+
+    SerializableFunction<V, Instant> getWatermarkFn = transform.getWatermarkFn();

Review Comment:
   This should probably be renamed to timestampFn, since it basically provides the timestamp for a given record.
########## sdks/java/io/sparkreceiver/src/main/java/org/apache/beam/sdk/io/sparkreceiver/ReadFromSparkReceiverWithOffsetDoFn.java: ##########
+      ((HasOffset) sparkReceiver).setStartOffset(startOffset);
+      sparkReceiver.supervisor().startReceiver();
+      try {
+        TimeUnit.MILLISECONDS.sleep(START_POLL_TIMEOUT_MS);
+      } catch (InterruptedException e) {
+        LOG.error("SparkReceiver was interrupted before polling started", e);

Review Comment:
   Ditto (raise the exception, or retry, here as well instead of just logging).
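   A minimal sketch of the conventional handling (illustrative, not the PR's code): restore the interrupt flag and propagate, so the worker does not continue with a receiver that never became ready.

       try {
         TimeUnit.MILLISECONDS.sleep(START_POLL_TIMEOUT_MS);
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt(); // preserve the thread's interrupt status
         throw new IllegalStateException(
             "SparkReceiver was interrupted before polling started", e);
       }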
