Repository: incubator-beam Updated Branches: refs/heads/master 7c2124ba4 -> 135790bc9
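Most of the hunks in this patch replace Javadoc blocks written as <pre><code> ... </code></pre> with <pre>{@code ... }</pre>. Inside a {@code ...} tag the javadoc tool treats the body as literal text, so generic types such as PCollection<KV<K, Integer>> can be written with plain angle brackets; with the old <code> form the brackets are parsed as HTML and would have to be escaped as &lt; and &gt;. A minimal sketch of the convention, using an illustrative class that is not part of this patch:

/**
 * Example of use:
 * <pre>{@code
 * PCollection<KV<String, Long>> counts =
 *     words.apply(Count.<String>perElement());
 * }</pre>
 */
public class JavadocCodeBlockExample {
  // The {@code ...} block above keeps the generics readable in the source and
  // renders them literally; no &lt;/&gt; escaping is needed.
}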
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java index 6f05993..229b1d2 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineFns.java @@ -67,7 +67,7 @@ public class CombineFns { * <p>The same {@link TupleTag} cannot be used in a composition multiple times. * * <p>Example: - * <pre><code> + * <pre>{@code * PCollection<KV<K, Integer>> latencies = ...; * * TupleTag<Integer> maxLatencyTag = new TupleTag<Integer>(); @@ -97,7 +97,7 @@ public class CombineFns { * c.output(...some T...); * } * })); - * </code></pre> + * }</pre> */ public static ComposeKeyedCombineFnBuilder composeKeyed() { return new ComposeKeyedCombineFnBuilder(); @@ -110,7 +110,7 @@ public class CombineFns { * <p>The same {@link TupleTag} cannot be used in a composition multiple times. * * <p>Example: - * <pre><code> + * <pre>{@code * PCollection<Integer> globalLatencies = ...; * * TupleTag<Integer> maxLatencyTag = new TupleTag<Integer>(); @@ -140,7 +140,7 @@ public class CombineFns { * c.output(...some T...); * } * })); - * </code></pre> + * }</pre> */ public static ComposeCombineFnBuilder compose() { return new ComposeCombineFnBuilder(); http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java index 9722360..3dd4fe2 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/CombineWithContext.java @@ -65,7 +65,7 @@ public class CombineWithContext { * A combine function that has access to {@code PipelineOptions} and side inputs through * {@code CombineWithContext.Context}. * - * See the equivalent {@link CombineFn} for details about combine functions. + * <p>See the equivalent {@link CombineFn} for details about combine functions. */ public abstract static class CombineFnWithContext<InputT, AccumT, OutputT> extends CombineFnBase.AbstractGlobalCombineFn<InputT, AccumT, OutputT> @@ -182,7 +182,7 @@ public class CombineWithContext { * A keyed combine function that has access to {@code PipelineOptions} and side inputs through * {@code CombineWithContext.Context}. * - * See the equivalent {@link KeyedCombineFn} for details about keyed combine functions. + * <p>See the equivalent {@link KeyedCombineFn} for details about keyed combine functions.
*/ public abstract static class KeyedCombineFnWithContext<K, InputT, AccumT, OutputT> extends CombineFnBase.AbstractPerKeyCombineFn<K, InputT, AccumT, OutputT> http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java index 3a3da65..eaf68b2 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/GroupByKey.java @@ -63,7 +63,7 @@ import org.apache.beam.sdk.values.PCollection.IsBounded; * {@code Coder} of the values of the input. * * <p>Example of use: - * <pre><code> + * <pre>{@code * PCollection<KV<String, Doc>> urlDocPairs = ...; * PCollection<KV<String, Iterable<Doc>>> urlToDocs = * urlDocPairs.apply(GroupByKey.<String, Doc>create()); @@ -75,7 +75,7 @@ import org.apache.beam.sdk.values.PCollection.IsBounded; * Iterable<Doc> docsWithThatUrl = c.element().getValue(); * ... process all docs having that url ... * }})); - * </code></pre> + * }</pre> * * <p>{@code GroupByKey} is a key primitive in data-parallel * processing, since it is the main way to efficiently bring http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java index 7f13649..83cceca 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/Latest.java @@ -36,15 +36,15 @@ import org.apache.beam.sdk.values.TimestampedValue; * in a {@link PCollection}. * * <p>Example 1: compute the latest value for each session: - * <pre><code> + * <pre>{@code * PCollection<Long> input = ...; * PCollection<Long> sessioned = input * .apply(Window.<Long>into(Sessions.withGapDuration(Duration.standardMinutes(5))); * PCollection<Long> latestValues = sessioned.apply(Latest.<Long>globally()); - * </code></pre> + * }</pre> * * <p>Example 2: track a latest computed value in an aggregator: - * <pre><code> + * <pre>{@code * class MyDoFn extends DoFn<String, String> { * private Aggregator<TimestampedValue<Double>, Double> latestValue = * createAggregator("latestValue", new Latest.LatestFn<Double>()); @@ -56,7 +56,7 @@ import org.apache.beam.sdk.values.TimestampedValue; * // .. * } * } - * </code></pre> + * }</pre> * * <p>For elements with the same timestamp, the element chosen for output is arbitrary. */ @@ -135,8 +135,8 @@ public class Latest { } /** - * Returns a {@link PTransform} that takes as input a {@link PCollection<T>} and returns a - * {@link PCollection<T>} whose contents is the latest element according to its event time, or + * Returns a {@link PTransform} that takes as input a {@code PCollection<T>} and returns a + * {@code PCollection<T>} whose contents is the latest element according to its event time, or * {@literal null} if there are no elements. * * @param <T> The type of the elements being combined. 
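The Latest.java hunk above, and similar ones later in this patch for JmsIO and KafkaIO, also rewrite Javadoc references of the form {@link PCollection<T>}. A {@link} reference has to name a raw type or member to resolve (parameterized references typically fail resolution and are flagged by Java 8 doclint), so the patch either switches to plain {@code PCollection<T>} text or keeps the link with an explicit label, as in {@link UnboundedSource UnboundedSource<JmsRecord, ?>}. A short illustrative sketch (hypothetical class and method, not part of this patch):

import org.apache.beam.sdk.values.PCollection;

/** Illustrative only: Javadoc linking styles used after this patch. */
public class JavadocLinkExample {
  /**
   * Returns its input unchanged.
   *
   * <p>Use {@code PCollection<String>} for plain monospace text with type
   * parameters, and {@link PCollection} (the raw type) when an actual link
   * is wanted.
   */
  public static PCollection<String> identity(PCollection<String> input) {
    return input;
  }
}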
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java index 474efef..87c7095 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/OldDoFn.java @@ -299,7 +299,7 @@ public abstract class OldDoFn<InputT, OutputT> implements Serializable, HasDispl * timestamps can only be shifted forward to future. For infinite * skew, return {@code Duration.millis(Long.MAX_VALUE)}. * - * <p> Note that producing an element whose timestamp is less than the + * <p>Note that producing an element whose timestamp is less than the * current timestamp may result in late data, i.e. returning a non-zero * value here does not impact watermark calculations used for firing * windows. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java index 4a58141..2544a27 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/PTransform.java @@ -275,9 +275,9 @@ public abstract class PTransform<InputT extends PInput, OutputT extends POutput> * Returns the default {@code Coder} to use for the output of this * single-output {@code PTransform} when applied to the given input. * - * @throws CannotProvideCoderException if none can be inferred. - * * <p>By default, always throws. + * + * @throws CannotProvideCoderException if none can be inferred. */ protected Coder<?> getDefaultOutputCoder(@SuppressWarnings("unused") InputT input) throws CannotProvideCoderException { @@ -288,9 +288,9 @@ public abstract class PTransform<InputT extends PInput, OutputT extends POutput> * Returns the default {@code Coder} to use for the given output of * this single-output {@code PTransform} when applied to the given input. * - * @throws CannotProvideCoderException if none can be inferred. - * * <p>By default, always throws. + * + * @throws CannotProvideCoderException if none can be inferred. 
*/ public <T> Coder<T> getDefaultOutputCoder( InputT input, @SuppressWarnings("unused") TypedPValue<T> output) http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java index f9cb557..9d4c9a7 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ParDo.java @@ -84,14 +84,14 @@ import org.apache.beam.sdk.values.TypedPValue; * provided, will be called on the discarded instance.</li> * </ol> * - * Each of the calls to any of the {@link DoFn DoFn's} processing + * <p>Each of the calls to any of the {@link DoFn DoFn's} processing * methods can produce zero or more output elements. All of the * of output elements from all of the {@link DoFn} instances * are included in the output {@link PCollection}. * * <p>For example: * - * <pre><code> + * <pre>{@code * PCollection<String> lines = ...; * PCollection<String> words = * lines.apply(ParDo.of(new DoFn<String, String>() { @@ -110,7 +110,7 @@ import org.apache.beam.sdk.values.TypedPValue; * Integer length = word.length(); * c.output(length); * }})); - * </code></pre> + * }</pre> * * <p>Each output element has the same timestamp and is in the same windows * as its corresponding input element, and the output {@code PCollection} @@ -146,7 +146,7 @@ import org.apache.beam.sdk.values.TypedPValue; * the {@link DoFn} operations via {@link DoFn.ProcessContext#sideInput sideInput}. * For example: * - * <pre><code> + * <pre>{@code * PCollection<String> words = ...; * PCollection<Integer> maxWordLengthCutOff = ...; // Singleton PCollection * final PCollectionView<Integer> maxWordLengthCutOffView = @@ -162,7 +162,7 @@ import org.apache.beam.sdk.values.TypedPValue; * c.output(word); * } * }})); - * </code></pre> + * }</pre> * * <h2>Side Outputs</h2> * @@ -179,7 +179,7 @@ import org.apache.beam.sdk.values.TypedPValue; * {@link DoFn.Context#output}, while an element is added to a side output * {@link PCollection} using {@link DoFn.Context#sideOutput}. For example: * - * <pre><code> + * <pre>{@code * PCollection<String> words = ...; * // Select words whose length is below a cut off, * // plus the lengths of words that are above the cut off.
@@ -230,7 +230,7 @@ import org.apache.beam.sdk.values.TypedPValue; * results.get(wordLengthsAboveCutOffTag); * PCollection<String> markedWords = * results.get(markedWordsTag); - * </code></pre> + * }</pre> * * <h2>Properties May Be Specified In Any Order</h2> * http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java index 2744b14..709aa4a 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/RemoveDuplicates.java @@ -105,7 +105,7 @@ public class RemoveDuplicates<T> extends PTransform<PCollection<T>, * A {@link RemoveDuplicates} {@link PTransform} that uses a {@link SerializableFunction} to * obtain a representative value for each input element. * - * Construct via {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}. + * <p>Construct via {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)}. * * @param <T> the type of input and output element * @param <IdT> the type of representative values used to dedup @@ -143,7 +143,8 @@ public class RemoveDuplicates<T> extends PTransform<PCollection<T>, * Return a {@code WithRepresentativeValues} {@link PTransform} that is like this one, but with * the specified output type descriptor. * - * Required for use of {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)} + * <p>Required for use of + * {@link RemoveDuplicates#withRepresentativeValueFn(SerializableFunction)} * in Java 8 with a lambda as the fn. * * @param type a {@link TypeDescriptor} describing the representative type of this http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java index 767e58e..981d047 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/ViewFn.java @@ -34,7 +34,7 @@ import org.apache.beam.sdk.values.PCollectionView; * available in the SDK. 
* * @param <PrimitiveViewT> the type of the underlying primitive view, provided by the runner - * <ViewT> the type of the value(s) accessible via this {@link PCollectionView} + * {@code <ViewT>} the type of the value(s) accessible via this {@link PCollectionView} */ public abstract class ViewFn<PrimitiveViewT, ViewT> implements Serializable { /** http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java index 8b061f6..de28ecb 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/WithKeys.java @@ -99,7 +99,7 @@ public class WithKeys<K, V> extends PTransform<PCollection<V>, /** * Return a {@link WithKeys} that is like this one with the specified key type descriptor. * - * For use with lambdas in Java 8, either this method must be called with an appropriate type + * <p>For use with lambdas in Java 8, either this method must be called with an appropriate type * descriptor or {@link PCollection#setCoder(Coder)} must be called on the output * {@link PCollection}. */ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java index 0b92d9f..394666b 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/display/DisplayData.java @@ -163,7 +163,7 @@ public class DisplayData implements Serializable { * } * </code></pre> * - * Using {@code include(subcomponent)} will associate each of the registered items with the + * <p>Using {@code include(subcomponent)} will associate each of the registered items with the * namespace of the {@code subcomponent} being registered. To register display data in the * current namespace, such as from a base class implementation, use * {@code subcomponent.populateDisplayData(builder)} instead. @@ -224,7 +224,7 @@ public class DisplayData implements Serializable { /** * The key for the display item. Each display item is created with a key and value - * via {@link DisplayData#item). + * via {@link DisplayData#item}. */ @JsonGetter("key") public abstract String getKey(); @@ -254,8 +254,8 @@ public class DisplayData implements Serializable { * value. For example, the {@link #getValue() value} for {@link Type#JAVA_CLASS} items contains * the full class name with package, while the short value contains just the class name. * - * A {@link #getValue() value} will be provided for each display item, and some types may also - * provide a short-value. If a short value is provided, display data consumers may + * <p>A {@link #getValue() value} will be provided for each display item, and some types may + * also provide a short-value. If a short value is provided, display data consumers may * choose to display it instead of or in addition to the {@link #getValue() value}. 
*/ @JsonGetter("shortValue") http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java index 9de6759..eb6961c 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/reflect/DoFnInvoker.java @@ -22,7 +22,7 @@ import org.apache.beam.sdk.transforms.DoFn; /** * Interface for invoking the {@code DoFn} processing methods. * - * Instantiating a {@link DoFnInvoker} associates it with a specific {@link DoFn} instance, + * <p>Instantiating a {@link DoFnInvoker} associates it with a specific {@link DoFn} instance, * referred to as the bound {@link DoFn}. */ public interface DoFnInvoker<InputT, OutputT> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java index 9690be8..e2463d8 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/AfterWatermark.java @@ -30,7 +30,7 @@ import org.apache.beam.sdk.util.TimeDomain; import org.joda.time.Instant; /** - * <p>{@code AfterWatermark} triggers fire based on progress of the system watermark. This time is a + * {@code AfterWatermark} triggers fire based on progress of the system watermark. This time is a * lower-bound, sometimes heuristically established, on event times that have been fully processed * by the pipeline. * @@ -54,7 +54,7 @@ import org.joda.time.Instant; * * <p>The watermark is the clock that defines {@link TimeDomain#EVENT_TIME}. * - * Additionaly firings before or after the watermark can be requested by calling + * <p>Additionally, firings before or after the watermark can be requested by calling * {@code AfterWatermark.pastEndOfWindow.withEarlyFirings(OnceTrigger)} or * {@code AfterWatermark.pastEndOfWindow.withEarlyFirings(OnceTrigger)}. */ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java index e1f5d4d..5f20465 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Never.java @@ -25,8 +25,7 @@ import org.joda.time.Instant; /** * A trigger which never fires. * - * <p> - * Using this trigger will only produce output when the watermark passes the end of the + * <p>Using this trigger will only produce output when the watermark passes the end of the * {@link BoundedWindow window} plus the {@link Window#withAllowedLateness allowed * lateness}.
*/ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java index 727a492..7e712b2 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/PaneInfo.java @@ -94,7 +94,7 @@ public final class PaneInfo { * And a {@code LATE} pane cannot contain locally on-time elements. * </ol> * - * However, note that: + * <p>However, note that: * <ol> * <li> An {@code ON_TIME} pane may contain locally late elements. It may even contain only * locally late elements. Provided a locally late element finds its way into an {@code ON_TIME} @@ -256,7 +256,7 @@ public final class PaneInfo { /** * The zero-based index of this trigger firing among non-speculative panes. * - * <p> This will return 0 for the first non-{@link Timing#EARLY} timer firing, 1 for the next one, + * <p>This will return 0 for the first non-{@link Timing#EARLY} timer firing, 1 for the next one, * etc. * * <p>Always -1 for speculative data. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java index ec21723..1eb56f7 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/SlidingWindows.java @@ -186,8 +186,7 @@ public class SlidingWindows extends NonMergingWindowFn<Object, IntervalWindow> { /** * Ensures that later sliding windows have an output time that is past the end of earlier windows. * - * <p> - * If this is the earliest sliding window containing {@code inputTimestamp}, that's fine. + * <p>If this is the earliest sliding window containing {@code inputTimestamp}, that's fine. * Otherwise, we pick the earliest time that doesn't overlap with earlier windows. */ @Experimental(Kind.OUTPUT_TIME) http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java index 52b7858..57f7716 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/transforms/windowing/Window.java @@ -40,11 +40,11 @@ import org.joda.time.Duration; * {@link org.apache.beam.sdk.transforms.GroupByKey GroupByKeys}, * including one within composite transforms, will group by the combination of * keys and windows. - + * * <p>See {@link org.apache.beam.sdk.transforms.GroupByKey} * for more information about how grouping with windows works. 
* - * <h2> Windowing </h2> + * <h2>Windowing</h2> * * <p>Windowing a {@code PCollection} divides the elements into windows based * on the associated event time for each element. This is especially useful @@ -58,13 +58,13 @@ import org.joda.time.Duration; * The following example demonstrates how to use {@code Window} in a pipeline * that counts the number of occurrences of strings each minute: * - * <pre> {@code + * <pre>{@code * PCollection<String> items = ...; * PCollection<String> windowed_items = items.apply( * Window.<String>into(FixedWindows.of(Duration.standardMinutes(1)))); * PCollection<KV<String, Long>> windowed_counts = windowed_items.apply( * Count.<String>perElement()); - * } </pre> + * }</pre> * * <p>Let (data, timestamp) denote a data element along with its timestamp. * Then, if the input to this pipeline consists of @@ -83,7 +83,7 @@ import org.joda.time.Duration; * <p>Additionally, custom {@link WindowFn}s can be created, by creating new * subclasses of {@link WindowFn}. * - * <h2> Triggers </h2> + * <h2>Triggers</h2> * * <p>{@link Window.Bound#triggering(Trigger)} allows specifying a trigger to control when * (in processing time) results for the given window can be produced. If unspecified, the default @@ -103,7 +103,7 @@ import org.joda.time.Duration; * (The use of watermark time to stop processing tends to be more robust if the data source is slow * for a few days, etc.) * - * <pre> {@code + * <pre>{@code * PCollection<String> items = ...; * PCollection<String> windowed_items = items.apply( * Window.<String>into(FixedWindows.of(Duration.standardMinutes(1))) @@ -114,12 +114,12 @@ import org.joda.time.Duration; * .withAllowedLateness(Duration.standardDays(1))); * PCollection<KV<String, Long>> windowed_counts = windowed_items.apply( * Count.<String>perElement()); - * } </pre> + * }</pre> * * <p>On the other hand, if we wanted to get early results every minute of processing * time (for which there were new elements in the given window) we could do the following: * - * <pre> {@code + * <pre>{@code * PCollection<String> windowed_items = items.apply( * Window.<String>into(FixedWindows.of(Duration.standardMinutes(1)) * .triggering( @@ -128,7 +128,7 @@ import org.joda.time.Duration; * .withEarlyFirings(AfterProcessingTime * .pastFirstElementInPane().plusDelayOf(Duration.standardMinutes(1)))) * .withAllowedLateness(Duration.ZERO)); - * } </pre> + * }</pre> * * <p>After a {@link org.apache.beam.sdk.transforms.GroupByKey} the trigger is set to * a trigger that will preserve the intent of the upstream trigger. See http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java index 9ee55ad..45bbe75 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/BaseExecutionContext.java @@ -37,12 +37,12 @@ import org.apache.beam.sdk.values.TupleTag; * <p>BaseExecutionContext is generic to allow implementing subclasses to return a concrete subclass * of {@link StepContext} from {@link #getOrCreateStepContext(String, String)} and * {@link #getAllStepContexts()} without forcing each subclass to override the method, e.g. 
- * <pre> + * <pre>{@code * @Override * StreamingModeExecutionContext.StepContext getOrCreateStepContext(...) { * return (StreamingModeExecutionContext.StepContext) super.getOrCreateStepContext(...); * } - * </pre> + * }</pre> * * <p>When a subclass of {@code BaseExecutionContext} has been downcast, the return types of * {@link #createStepContext(String, String)}, http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java index 5a98f84..e2c7e42 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/ExposedByteArrayOutputStream.java @@ -60,6 +60,7 @@ public class ExposedByteArrayOutputStream extends ByteArrayOutputStream { * Write {@code b} to the stream and take the ownership of {@code b}. * If the stream is empty, {@code b} itself will be used as the content of the stream and * no content copy will be involved. + * * <p><i>Note: After passing any byte array to this method, it must not be modified again.</i> * * @throws IOException http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java index 0f2ecd0..a2a6e17 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/GatherAllPanes.java @@ -31,8 +31,7 @@ import org.apache.beam.sdk.values.TypeDescriptor; /** * Gathers all panes of each window into exactly one output. * - * <p> - * Note that this will delay the output of a window until the garbage collection time (when the + * <p>Note that this will delay the output of a window until the garbage collection time (when the * watermark passes the end of the window plus allowed lateness) even if the upstream triggers * closed the window earlier. */ @@ -41,10 +40,9 @@ public class GatherAllPanes<T> /** * Gathers all panes of each window into a single output element. * - * <p> - * This will gather all output panes into a single element, which causes them to be colocated on a - * single worker. As a result, this is only suitable for {@link PCollection PCollections} where - * all of the output elements for each pane fit in memory, such as in tests. + * <p>This will gather all output panes into a single element, which causes them to be colocated + * on a single worker. As a result, this is only suitable for {@link PCollection PCollections} + * where all of the output elements for each pane fit in memory, such as in tests. 
*/ public static <T> GatherAllPanes<T> globally() { return new GatherAllPanes<>(); http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java index c537eb3..35d0f2d 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PerKeyCombineFnRunners.java @@ -50,7 +50,7 @@ public class PerKeyCombineFnRunners { /** * An implementation of {@link PerKeyCombineFnRunner} with {@link KeyedCombineFn}. * - * It forwards functions calls to the {@link KeyedCombineFn}. + * <p>It forwards function calls to the {@link KeyedCombineFn}. */ private static class KeyedCombineFnRunner<K, InputT, AccumT, OutputT> implements PerKeyCombineFnRunner<K, InputT, AccumT, OutputT> { @@ -146,7 +146,7 @@ public class PerKeyCombineFnRunners { /** * An implementation of {@link PerKeyCombineFnRunner} with {@link KeyedCombineFnWithContext}. * - * It forwards functions calls to the {@link KeyedCombineFnWithContext}. + * <p>It forwards function calls to the {@link KeyedCombineFnWithContext}. */ private static class KeyedCombineFnWithContextRunner<K, InputT, AccumT, OutputT> implements PerKeyCombineFnRunner<K, InputT, AccumT, OutputT> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java index bb6aa93..1ac5511 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubClient.java @@ -86,6 +86,7 @@ public abstract class PubsubClient implements Closeable { /** * Return the timestamp (in ms since unix epoch) to use for a Pubsub message with {@code * attributes} and {@code pubsubTimestamp}. + * * <p>If {@code timestampLabel} is non-{@literal null} then the message attributes must contain * that label, and the value of that label will be taken as the timestamp. * Otherwise the timestamp will be taken from the Pubsub publish timestamp {@code @@ -299,6 +300,7 @@ public abstract class PubsubClient implements Closeable { /** * A message to be sent to Pubsub. + * * <p>NOTE: This class is {@link Serializable} only to support the {@link PubsubTestClient}. * Java serialization is never used for non-test clients. */ @@ -357,6 +359,7 @@ public abstract class PubsubClient implements Closeable { /** * A message received from Pubsub. + * * <p>NOTE: This class is {@link Serializable} only to support the {@link PubsubTestClient}. * Java serialization is never used for non-test clients.
*/ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java index 6e5ba46..3fab151 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubTestClient.java @@ -43,7 +43,7 @@ public class PubsubTestClient extends PubsubClient { /** * Mimic the state of the simulated Pubsub 'service'. * - * Note that the {@link PubsubTestClientFactory} is serialized/deserialized even when running + * <p>Note that the {@link PubsubTestClientFactory} is serialized/deserialized even when running * test pipelines. Meanwhile it is valid for multiple {@link PubsubTestClient}s to be created * from the same client factory and run in parallel. Thus we can't enforce aliasing of the * following data structures over all clients and must resort to a static. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java index dd3b773..161037d 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/TimerInternals.java @@ -108,7 +108,7 @@ public interface TimerInternals { * <li>However will never be behind the global input watermark for any following computation. * </ol> * - * <p> In pictures: + * <p>In pictures: * <pre> * | | | | | * | | D | C | B | A http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java index 98987cd..1b3791d 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/PInput.java @@ -46,7 +46,7 @@ public interface PInput { public Collection<? extends PValue> expand(); /** - * <p>After building, finalizes this {@code PInput} to make it ready for + * After building, finalizes this {@code PInput} to make it ready for * being used as an input to a {@link org.apache.beam.sdk.transforms.PTransform}. 
* * <p>Automatically invoked whenever {@code apply()} is invoked on http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java index 498c3d0..7a78131 100644 --- a/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java +++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/values/TypeDescriptors.java @@ -180,8 +180,8 @@ public class TypeDescriptors { * <pre> * new TypeDescriptor<KV<K,V>>() {}; * </pre> - * <p> - * Example of use: + * + * <p>Example of use: * <pre> * {@code * PCollection<String> words = ...; @@ -211,8 +211,8 @@ public class TypeDescriptors { * <pre> * new TypeDescriptor<Set<E>>() {}; * </pre> - * <p> - * Example of use: + * + * <p>Example of use: * <pre> * {@code * PCollection<String> words = ...; @@ -239,8 +239,8 @@ public class TypeDescriptors { * <pre> * new TypeDescriptor<List<E>>() {}; * </pre> - * <p> - * Example of use: + * + * <p>Example of use: * <pre> * {@code * PCollection<String> words = ...; @@ -267,8 +267,8 @@ public class TypeDescriptors { * <pre> * new TypeDescriptor<Iterable<E>>() {}; * </pre> - * <p> - * Example of use: + * + * <p>Example of use: * <pre> * {@code * PCollection<String> words = ...; http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java index 810b6f1..2a321ec 100644 --- a/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java +++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/testing/SystemNanoTimeSleeper.java @@ -28,7 +28,7 @@ import java.util.concurrent.locks.LockSupport; * href="https://blogs.oracle.com/dholmes/entry/inside_the_hotspot_vm_clocks"> * article</a> goes into further detail about this issue. * - * This {@link Sleeper} uses {@link System#nanoTime} + * <p>This {@link Sleeper} uses {@link System#nanoTime} * as the timing source and {@link LockSupport#parkNanos} as the wait method. * Note that usage of this sleeper may impact performance because * of the relatively more expensive methods being invoked when compared to http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java ---------------------------------------------------------------------- diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java index f208488..ac76b2e 100644 --- a/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java +++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/transforms/DoFnTesterTest.java @@ -365,7 +365,7 @@ public class DoFnTesterTest { /** * A {@link DoFn} that adds values to an aggregator and converts input to String in - * {@link OldDoFn#processElement). + * {@link OldDoFn#processElement}. 
*/ private static class CounterDoFn extends DoFn<Long, String> { Aggregator<Long, Long> agg = createAggregator("ctr", new Sum.SumLongFn()); http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java index 7800108..6a9ea6b 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryAvroUtils.java @@ -84,7 +84,7 @@ class BigQueryAvroUtils { /** * Utility function to convert from an Avro {@link GenericRecord} to a BigQuery {@link TableRow}. * - * See <a href="https://cloud.google.com/bigquery/exporting-data-from-bigquery#config"> + * <p>See <a href="https://cloud.google.com/bigquery/exporting-data-from-bigquery#config"> * "Avro format"</a> for more information. */ static TableRow convertGenericRecordToTableRow(GenericRecord record, TableSchema schema) { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java index 5aa952c..5914ba2 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java @@ -134,6 +134,7 @@ import org.slf4j.LoggerFactory; * <a href="https://developers.google.com/bigquery/">BigQuery</a> tables. * * <h3>Table References</h3> + * * <p>A fully-qualified BigQuery table name consists of three components: * <ul> * <li>{@code projectId}: the Cloud project id (defaults to @@ -155,6 +156,7 @@ import org.slf4j.LoggerFactory; * </ul> * * <h3>Reading</h3> + * * <p>To read from a BigQuery table, apply a {@link BigQueryIO.Read} transformation. * This produces a {@link PCollection} of {@link TableRow TableRows} as output: * <pre>{@code @@ -177,6 +179,7 @@ import org.slf4j.LoggerFactory; * Pipeline construction will fail with a validation error if neither or both are specified. * * <h3>Writing</h3> + * * <p>To write to a BigQuery table, apply a {@link BigQueryIO.Write} transformation. * This consumes a {@link PCollection} of {@link TableRow TableRows} as input. * <pre>{@code @@ -200,6 +203,7 @@ import org.slf4j.LoggerFactory; * {@link Write.WriteDisposition#WRITE_APPEND}. * * <h3>Sharding BigQuery output tables</h3> + * * <p>A common use case is to dynamically generate BigQuery table names based on * the current window. To support this, * {@link BigQueryIO.Write#to(SerializableFunction)} @@ -224,6 +228,7 @@ import org.slf4j.LoggerFactory; * <p>Per-window tables are not yet supported in batch mode. * * <h3>Permissions</h3> + * * <p>Permission requirements depend on the {@link PipelineRunner} that is used to execute the * Dataflow job. 
Please refer to the documentation of corresponding {@link PipelineRunner}s for * more details. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java index ca7e491..07dc06e 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServices.java @@ -104,7 +104,7 @@ interface BigQueryServices extends Serializable { /** * Gets the specified {@link Job} by the given {@link JobReference}. * - * Returns null if the job is not found. + * <p>Returns null if the job is not found. */ Job getJob(JobReference jobRef) throws IOException, InterruptedException; } http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java index c50c23a..635e222 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreIO.java @@ -20,7 +20,7 @@ package org.apache.beam.sdk.io.gcp.datastore; import org.apache.beam.sdk.annotations.Experimental; /** - * <p>{@link DatastoreIO} provides an API for reading from and writing to + * {@link DatastoreIO} provides an API for reading from and writing to * <a href="https://developers.google.com/datastore/">Google Cloud Datastore</a> over different * versions of the Cloud Datastore Client libraries. * http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java index 45871f1..45b2d6f 100644 --- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java +++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java @@ -87,7 +87,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * <p>{@link DatastoreV1} provides an API to Read, Write and Delete {@link PCollection PCollections} + * {@link DatastoreV1} provides an API to Read, Write and Delete {@link PCollection PCollections} * of <a href="https://developers.google.com/datastore/">Google Cloud Datastore</a> version v1 * {@link Entity} objects. 
* http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java index 72ab7c2..4dd1608 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/SplitQueryFnIT.java @@ -35,13 +35,13 @@ import org.junit.runners.JUnit4; /** * Integration tests for {@link DatastoreV1.Read.SplitQueryFn}. * - * <p> It is hard to mock the exact behavior of Cloud Datastore, especially for the statistics + * <p>It is hard to mock the exact behavior of Cloud Datastore, especially for the statistics * queries. Also the fact that DatastoreIO falls back gracefully when querying statistics fails, * makes it hard to catch these issues in production. This test here ensures we interact with * the Cloud Datastore directly, query the actual stats and verify that the SplitQueryFn generates * the expected number of query splits. * - * <p> These tests are brittle as they rely on statistics data in Cloud Datastore. If the data + * <p>These tests are brittle as they rely on statistics data in Cloud Datastore. If the data * gets lost or changes then they will begin failing and this test should be disabled. * At the time of writing, the Cloud Datastore has the following statistics, * <ul> http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java index 9998833..37ad064 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1ReadIT.java @@ -61,7 +61,7 @@ public class V1ReadIT { /** * An end-to-end test for {@link DatastoreV1.Read}. * - * Write some test entities to datastore and then run a dataflow pipeline that + * <p>Write some test entities to datastore and then run a dataflow pipeline that * reads and counts the total number of entities. Verify that the count matches the * number of entities written. 
*/ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java index fa7c140..e97e80a 100644 --- a/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java +++ b/sdks/java/io/google-cloud-platform/src/test/java/org/apache/beam/sdk/io/gcp/datastore/V1WriteIT.java @@ -54,7 +54,7 @@ public class V1WriteIT { /** * An end-to-end test for {@link DatastoreV1.Write}. * - * Write some test entities to datastore through a dataflow pipeline. + * <p>Write some test entities to Cloud Datastore. * Read and count all the entities. Verify that the count matches the * number of entities written. */ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java index c71a58c..5b0c5b6 100644 --- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java +++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSource.java @@ -85,7 +85,8 @@ import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; * } * </pre> * - * Implementation note: Since Hadoop's {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat} + * <p>Implementation note: Since Hadoop's + * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat} * determines the input splits, this class extends {@link BoundedSource} rather than * {@link org.apache.beam.sdk.io.OffsetBasedSource}, since the latter * dictates input splits. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java index f3569ea..96ba87a 100644 --- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java +++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/WritableCoder.java @@ -35,7 +35,7 @@ import org.apache.hadoop.io.Writable; /** * A {@code WritableCoder} is a {@link Coder} for a Java class that implements {@link Writable}. 
* - * <p> To use, specify the coder type on a PCollection: + * <p>To use, specify the coder type on a PCollection: * <pre> * {@code * PCollection<MyRecord> records = http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java index d37ced9..547413f 100644 --- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java +++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthAvroHDFSFileSource.java @@ -31,7 +31,7 @@ import org.apache.hadoop.mapreduce.InputSplit; /** * Source for Avros on Hadoop/HDFS with Simple Authentication. * - * Allows to set arbitrary username as HDFS user, which is used for reading Avro from HDFS. + * <p>Allows to set arbitrary username as HDFS user, which is used for reading Avro from HDFS. */ public class SimpleAuthAvroHDFSFileSource<T> extends AvroHDFSFileSource<T> { // keep this field to pass Hadoop user between workers http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java index e2c2c90..28accfa 100644 --- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java +++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSink.java @@ -30,7 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation; * A {@code Sink} for writing records to a Hadoop filesystem using a Hadoop file-based output * format with Simple Authentication. * - * Allows arbitrary username as HDFS user, which is used for writing to HDFS. + * <p>Allows arbitrary username as HDFS user, which is used for writing to HDFS. * * @param <K> The type of keys to be written to the sink. * @param <V> The type of values to be written to the sink. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java index d2cab57..22191f0 100644 --- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java +++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/simpleauth/SimpleAuthHDFSFileSource.java @@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; /** * Source for Hadoop/HDFS with Simple Authentication. * - * Allows to set arbitrary username as HDFS user, which is used for reading from HDFS. + * <p>Allows to set arbitrary username as HDFS user, which is used for reading from HDFS. 
*/ public class SimpleAuthHDFSFileSource<K, V> extends HDFSFileSource<K, V> { private final String username; http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java b/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java index 1c35f6e..00b91ad 100644 --- a/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java +++ b/sdks/java/io/jms/src/main/java/org/apache/beam/sdk/io/jms/JmsIO.java @@ -81,7 +81,7 @@ import org.slf4j.LoggerFactory; * * <h3>Writing to a JMS destination</h3> * - * JmsIO sink supports writing text messages to a JMS destination on a broker. + * <p>JmsIO sink supports writing text messages to a JMS destination on a broker. * To configure a JMS sink, you must specify a {@link javax.jms.ConnectionFactory} and a * {@link javax.jms.Destination} name. * For instance: @@ -175,7 +175,7 @@ public class JmsIO { * that they can be stored in all JNDI naming contexts. In addition, it is recommended that * these implementations follow the JavaBeansTM design patterns." * - * So, a {@link ConnectionFactory} implementation is serializable. + * <p>So, a {@link ConnectionFactory} implementation is serializable. */ protected ConnectionFactory connectionFactory; @Nullable @@ -201,8 +201,8 @@ public class JmsIO { } /** - * Creates an {@link UnboundedSource<JmsRecord, ?>} with the configuration in - * {@link Read}. Primary use case is unit tests, should not be used in an + * Creates an {@link UnboundedSource UnboundedSource<JmsRecord, ?>} with the configuration + * in {@link Read}. Primary use case is unit tests, should not be used in an * application. */ @VisibleForTesting http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java index 6769b31..e26f7c5 100644 --- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java +++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java @@ -108,7 +108,7 @@ import org.slf4j.LoggerFactory; * <p>Although most applications consumer single topic, the source can be configured to consume * multiple topics or even a specific set of {@link TopicPartition}s. * - * <p> To configure a Kafka source, you must specify at the minimum Kafka <tt>bootstrapServers</tt> + * <p>To configure a Kafka source, you must specify at the minimum Kafka <tt>bootstrapServers</tt> * and one or more topics to consume. The following example illustrates various options for * configuring the source : * @@ -157,7 +157,7 @@ import org.slf4j.LoggerFactory; * * <h3>Writing to Kafka</h3> * - * KafkaIO sink supports writing key-value pairs to a Kafka topic. Users can also write + * <p>KafkaIO sink supports writing key-value pairs to a Kafka topic. Users can also write * just the values. To configure a Kafka sink, you must specify at the minimum Kafka * <tt>bootstrapServers</tt> and the topic to write to. 
The following example illustrates various * options for configuring the sink: @@ -179,7 +179,7 @@ import org.slf4j.LoggerFactory; * ); * }</pre> * - * Often you might want to write just values without any keys to Kafka. Use {@code values()} to + * <p>Often you might want to write just values without any keys to Kafka. Use {@code values()} to * write records with default empty(null) key: * * <pre>{@code @@ -499,8 +499,8 @@ public class KafkaIO { } /** - * Creates an {@link UnboundedSource<KafkaRecord<K, V>, ?>} with the configuration in - * {@link TypedRead}. Primary use case is unit tests, should not be used in an + * Creates an {@link UnboundedSource UnboundedSource<KafkaRecord<K, V>, ?>} with the + * configuration in {@link TypedRead}. Primary use case is unit tests, should not be used in an * application. */ @VisibleForTesting @@ -633,7 +633,7 @@ public class KafkaIO { * {@code min(desiredNumSplits, totalNumPartitions)}, though better not to depend on the exact * count. * - * <p> It is important to assign the partitions deterministically so that we can support + * <p>It is important to assign the partitions deterministically so that we can support * resuming a split from last checkpoint. The Kafka partitions are sorted by * {@code <topic, partition>} and then assigned to splits in round-robin order. */ @@ -1297,8 +1297,8 @@ public class KafkaIO { } /** - * Same as Write<K, V> without a Key. Null is used for key as it is the convention is Kafka - * when there is no key specified. Majority of Kafka writers don't specify a key. + * Same as {@code Write<K, V>} without a Key. Null is used for key as it is the convention is + * Kafka when there is no key specified. Majority of Kafka writers don't specify a key. */ private static class KafkaValueWrite<V> extends PTransform<PCollection<V>, PDone> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java index 4515f38..4317a59 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/CustomOptional.java @@ -19,7 +19,7 @@ package org.apache.beam.sdk.io.kinesis; import java.util.NoSuchElementException; -/*** +/** * Similar to Guava {@code Optional}, but throws {@link NoSuchElementException} for missing element. 
*/ abstract class CustomOptional<T> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java index c0f00de..5a34d7d 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/GetKinesisRecordsResult.java @@ -24,7 +24,7 @@ import com.google.common.base.Function; import java.util.List; import javax.annotation.Nullable; -/*** +/** * Represents the output of 'get' operation on Kinesis stream. */ class GetKinesisRecordsResult { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java index 36c8953..c7fd7f6 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisClientProvider.java @@ -23,7 +23,7 @@ import java.io.Serializable; /** * Provides instances of {@link AmazonKinesis} interface. * - * Please note, that any instance of {@link KinesisClientProvider} must be + * <p>Please note, that any instance of {@link KinesisClientProvider} must be * {@link Serializable} to ensure it can be sent to worker machines. */ interface KinesisClientProvider extends Serializable { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java index acff33f..945eff6 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisIO.java @@ -71,13 +71,13 @@ import org.joda.time.Instant; * (for example if you're using more sophisticated authorization methods like Amazon STS, etc.) * you can do it by implementing {@link KinesisClientProvider} class: * - * <pre>{@code} + * <pre>{@code * public class MyCustomKinesisClientProvider implements KinesisClientProvider { * @Override * public AmazonKinesis get() { * // set up your client here * } - * } + * }} * </pre> * * Usage is pretty straightforward: @@ -105,7 +105,7 @@ import org.joda.time.Instant; * */ public final class KinesisIO { - /*** + /** * A {@link PTransform} that reads from a Kinesis stream. */ public static final class Read { @@ -118,7 +118,7 @@ public final class KinesisIO { this.initialPosition = checkNotNull(initialPosition, "initialPosition"); } - /*** + /** * Specify reading from streamName at some initial position. 
*/ public static Read from(String streamName, InitialPositionInStream initialPosition) { @@ -126,7 +126,7 @@ public final class KinesisIO { checkNotNull(initialPosition, "initialPosition"))); } - /*** + /** * Specify reading from streamName beginning at given {@link Instant}. * This {@link Instant} must be in the past, i.e. before {@link Instant#now()}. */ @@ -135,7 +135,7 @@ public final class KinesisIO { checkNotNull(initialTimestamp, "initialTimestamp"))); } - /*** + /** * Allows to specify custom {@link KinesisClientProvider}. * {@link KinesisClientProvider} provides {@link AmazonKinesis} instances which are later * used for communication with Kinesis. @@ -149,7 +149,7 @@ public final class KinesisIO { initialPosition)); } - /*** + /** * Specify credential details and region to be used to read from Kinesis. * If you need more sophisticated credential protocol, then you should look at * {@link Read#using(KinesisClientProvider)}. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java index 219a705..2138094 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReader.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/*** +/** * Reads data from multiple kinesis shards in a single thread. * It uses simple round robin algorithm when fetching data from shards. */ @@ -52,7 +52,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> { this.source = source; } - /*** + /** * Generates initial checkpoint and instantiates iterators for shards. */ @Override @@ -74,7 +74,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> { return advance(); } - /*** + /** * Moves to the next record in one of the shards. * If current shard iterator can be move forward (i.e. there's a record present) then we do it. * If not, we iterate over shards in a round-robin manner. @@ -106,7 +106,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> { return currentRecord.get(); } - /*** + /** * When {@link KinesisReader} was advanced to the current record. * We cannot use approximate arrival timestamp given for each record by Kinesis as it * is not guaranteed to be accurate - this could lead to mark some records as "late" @@ -121,7 +121,7 @@ class KinesisReader extends UnboundedSource.UnboundedReader<KinesisRecord> { public void close() throws IOException { } - /*** + /** * Current time. 
* We cannot give better approximation of the watermark with current semantics of * {@link KinesisReader#getCurrentTimestamp()}, because we don't know when the next http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java index 663ba44..f0fa45d 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisReaderCheckpoint.java @@ -30,7 +30,7 @@ import java.util.List; import javax.annotation.Nullable; import org.apache.beam.sdk.io.UnboundedSource; -/*** +/** * Checkpoint representing a total progress in a set of shards in single stream. * The set of shards covered by {@link KinesisReaderCheckpoint} may or may not be equal to set of * all shards present in the stream. @@ -59,7 +59,7 @@ class KinesisReaderCheckpoint implements Iterable<ShardCheckpoint>, UnboundedSou })); } - /*** + /** * Splits given multi-shard checkpoint into partitions of approximately equal size. * * @param desiredNumSplits - upper limit for number of partitions to generate. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java index 5b13e31..fc087b5 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisRecordCoder.java @@ -29,7 +29,7 @@ import org.apache.beam.sdk.coders.StringUtf8Coder; import org.apache.beam.sdk.coders.VarLongCoder; import org.joda.time.Instant; -/*** +/** * A {@link Coder} for {@link KinesisRecord}. */ class KinesisRecordCoder extends AtomicCoder<KinesisRecord> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java index 62cba08..45e0b51 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/KinesisSource.java @@ -29,7 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/*** +/** * Represents source for single stream in Kinesis. */ class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoint> { @@ -50,7 +50,7 @@ class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoi validate(); } - /*** + /** * Generate splits for reading from the stream. * Basically, it'll try to evenly split set of shards in the stream into * {@code desiredNumSplits} partitions. Each partition is then a split. 
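For readers unfamiliar with the splitting behaviour described in the KinesisReaderCheckpoint and KinesisSource Javadoc above, the "evenly split" step amounts to distributing the stream's shards round-robin over at most desiredNumSplits partitions, each partition later backing one split. The following is a minimal, self-contained sketch of that idea only; the class and method names are illustrative and are not the actual Beam KinesisSource / KinesisReaderCheckpoint code:

    import java.util.ArrayList;
    import java.util.List;

    final class ShardPartitioningSketch {
      /**
       * Distributes shard identifiers over at most desiredNumSplits partitions in
       * round-robin order, so partition sizes differ by at most one element.
       */
      static List<List<String>> splitIntoPartitions(List<String> shardIds, int desiredNumSplits) {
        List<List<String>> partitions = new ArrayList<>();
        if (shardIds.isEmpty() || desiredNumSplits <= 0) {
          return partitions; // nothing to split
        }
        int numPartitions = Math.min(desiredNumSplits, shardIds.size());
        for (int i = 0; i < numPartitions; i++) {
          partitions.add(new ArrayList<String>());
        }
        for (int i = 0; i < shardIds.size(); i++) {
          partitions.get(i % numPartitions).add(shardIds.get(i));
        }
        return partitions;
      }
    }

With 5 shards and desiredNumSplits = 3 this yields partitions of sizes 2, 2 and 1, which matches the "approximately equal size" wording in the Javadoc.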
@@ -71,7 +71,7 @@ class KinesisSource extends UnboundedSource<KinesisRecord, KinesisReaderCheckpoi return sources; } - /*** + /** * Creates reader based on given {@link KinesisReaderCheckpoint}. * If {@link KinesisReaderCheckpoint} is not given, then we use * {@code initialCheckpointGenerator} to generate new checkpoint. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java index 4c7f39a..40e65fc 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RecordFilter.java @@ -24,9 +24,9 @@ import java.util.List; /** * Filters out records, which were already processed and checkpointed. - * <p> - * We need this step, because we can get iterators from Kinesis only with "sequenceNumber" accuracy, - * not with "subSequenceNumber" accuracy. + * + * <p>We need this step, because we can get iterators from Kinesis only with "sequenceNumber" + * accuracy, not with "subSequenceNumber" accuracy. */ class RecordFilter { public List<KinesisRecord> apply(List<KinesisRecord> records, ShardCheckpoint checkpoint) { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java index 7adae4b..e4ff541 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/RoundRobin.java @@ -23,7 +23,7 @@ import static com.google.common.collect.Queues.newArrayDeque; import java.util.Deque; import java.util.Iterator; -/*** +/** * Very simple implementation of round robin algorithm. */ class RoundRobin<T> implements Iterable<T> { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java index 9920aca..6aa3504 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardCheckpoint.java @@ -31,7 +31,7 @@ import java.io.Serializable; import org.joda.time.Instant; -/*** +/** * Checkpoint mark for single shard in the stream. * Current position in the shard is determined by either: * <ul> @@ -96,7 +96,7 @@ class ShardCheckpoint implements Serializable { this.timestamp = timestamp; } - /*** + /** * Used to compare {@link ShardCheckpoint} object to {@link KinesisRecord}. Depending * on the the underlying shardIteratorType, it will either compare the timestamp or the * {@link ExtendedSequenceNumber}. 
@@ -151,7 +151,7 @@ class ShardCheckpoint implements Serializable { return shardIteratorType == AFTER_SEQUENCE_NUMBER && subSequenceNumber != null; } - /*** + /** * Used to advance checkpoint mark to position after given {@link Record}. * * @param record http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java index d17996a..872f604 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/ShardRecordsIterator.java @@ -25,7 +25,7 @@ import java.util.Deque; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/*** +/** * Iterates over records in a single shard. * Under the hood records are retrieved from Kinesis in batches and stored in the in-memory queue. * Then the caller of {@link ShardRecordsIterator#next()} can read from queue one by one. @@ -56,7 +56,7 @@ class ShardRecordsIterator { shardIterator = checkpoint.getShardIterator(kinesis); } - /*** + /** * Returns record if there's any present. * Returns absent() if there are no new records at this time in the shard. */ http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java index 96267d1..3e3984a 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/SimplifiedKinesisClient.java @@ -36,7 +36,7 @@ import java.util.List; import java.util.concurrent.Callable; import org.joda.time.Instant; -/*** +/** * Wraps {@link AmazonKinesis} class providing much simpler interface and * proper error handling. */ @@ -91,7 +91,7 @@ class SimplifiedKinesisClient { }); } - /*** + /** * Gets records from Kinesis and deaggregates them if needed. * * @return list of deaggregated records @@ -102,7 +102,7 @@ class SimplifiedKinesisClient { return getRecords(shardIterator, streamName, shardId, null); } - /*** + /** * Gets records from Kinesis and deaggregates them if needed. * * @return list of deaggregated records @@ -126,7 +126,7 @@ class SimplifiedKinesisClient { }); } - /*** + /** * Wraps Amazon specific exceptions into more friendly format. * * @throws TransientKinesisException - in case of recoverable situation, i.e. 
http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java index b7ee917..d8842c4 100644 --- a/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java +++ b/sdks/java/io/kinesis/src/main/java/org/apache/beam/sdk/io/kinesis/StartingPoint.java @@ -26,7 +26,7 @@ import java.io.Serializable; import java.util.Objects; import org.joda.time.Instant; -/*** +/** * Denotes a point at which the reader should start reading from a Kinesis stream. * It can be expressed either as an {@link InitialPositionInStream} enum constant or a timestamp, * in which case the reader will start reading at the specified point in time. http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java index 65a7605..324de46 100644 --- a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java +++ b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisTestOptions.java @@ -21,7 +21,7 @@ import org.apache.beam.sdk.options.Default; import org.apache.beam.sdk.options.Description; import org.apache.beam.sdk.testing.TestPipelineOptions; -/*** +/** * Options for Kinesis integration tests. */ public interface KinesisTestOptions extends TestPipelineOptions { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java index b1c212b..7518ff7 100644 --- a/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java +++ b/sdks/java/io/kinesis/src/test/java/org/apache/beam/sdk/io/kinesis/KinesisUploader.java @@ -32,7 +32,7 @@ import com.google.common.collect.Lists; import java.nio.ByteBuffer; import java.util.List; -/*** +/** * Sends records to Kinesis in reliable way. */ public class KinesisUploader { http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java index bdf0e53..8c9a65c 100644 --- a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java +++ b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbGridFSIO.java @@ -66,19 +66,17 @@ import org.joda.time.Instant; * and the bucket name. 
If unspecified, the default values from the GridFS driver are used.</p> * * <p>The following example illustrates various options for configuring the - * source:</p> + * source: * * <pre>{@code - * * pipeline.apply(MongoDbGridFSIO.<String>read() * .withUri("mongodb://localhost:27017") * .withDatabase("my-database") * .withBucket("my-bucket")) - * * }</pre> * * <p>The source also accepts an optional configuration: {@code withQueryFilter()} allows you to - * define a JSON filter to get subset of files in the database.</p> + * define a JSON filter to get subset of files in the database. * * <p>There is also an optional {@code Parser} (and associated {@code Coder}) that can be * specified that can be used to parse the InputStream into objects usable with Beam. By default, http://git-wip-us.apache.org/repos/asf/incubator-beam/blob/9e30a989/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java ---------------------------------------------------------------------- diff --git a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java index d5659e9..20b9265 100644 --- a/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java +++ b/sdks/java/io/mongodb/src/main/java/org/apache/beam/sdk/io/mongodb/MongoDbIO.java @@ -51,11 +51,11 @@ import org.slf4j.LoggerFactory; * <h3>Reading from MongoDB</h3> * * <p>MongoDbIO source returns a bounded collection of String as {@code PCollection<String>}. - * The String is the JSON form of the MongoDB Document.</p> + * The String is the JSON form of the MongoDB Document. * * <p>To configure the MongoDB source, you have to provide the connection URI, the database name * and the collection name. The following example illustrates various options for configuring the - * source:</p> + * source: * * <pre>{@code * @@ -282,9 +282,8 @@ public class MongoDbIO { * <li>_id: 109</li> * <li>_id: 256</li> * </ul> - * </p> * - * This method will generate a list of range filters performing the following splits: + * <p>This method will generate a list of range filters performing the following splits: * <ul> * <li>from the beginning of the collection up to _id 56, so basically data with * _id lower than 56</li>