markap14 commented on code in PR #10964:
URL: https://github.com/apache/nifi/pull/10964#discussion_r2906908197


##########
nifi-extension-bundles/nifi-aws-bundle/nifi-aws-kinesis/src/main/java/org/apache/nifi/processors/aws/kinesis/ConsumeKinesis.java:
##########
@@ -30,154 +29,132 @@
 import org.apache.nifi.components.DescribedValue;
 import org.apache.nifi.components.PropertyDescriptor;
 import org.apache.nifi.components.Validator;
-import org.apache.nifi.controller.NodeTypeProvider;
 import org.apache.nifi.flowfile.FlowFile;
 import org.apache.nifi.logging.ComponentLog;
 import org.apache.nifi.migration.PropertyConfiguration;
-import org.apache.nifi.migration.ProxyServiceMigration;
+import org.apache.nifi.migration.RelationshipConfiguration;
 import org.apache.nifi.processor.AbstractProcessor;
 import org.apache.nifi.processor.DataUnit;
 import org.apache.nifi.processor.ProcessContext;
 import org.apache.nifi.processor.ProcessSession;
 import org.apache.nifi.processor.Relationship;
 import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.io.OutputStreamCallback;
 import org.apache.nifi.processor.util.StandardValidators;
 import org.apache.nifi.processors.aws.credentials.provider.AwsCredentialsProviderService;
-import org.apache.nifi.processors.aws.kinesis.MemoryBoundRecordBuffer.Lease;
-import org.apache.nifi.processors.aws.kinesis.ReaderRecordProcessor.ProcessingResult;
-import org.apache.nifi.processors.aws.kinesis.RecordBuffer.ShardBufferId;
-import org.apache.nifi.processors.aws.kinesis.converter.InjectMetadataRecordConverter;
-import org.apache.nifi.processors.aws.kinesis.converter.KinesisRecordConverter;
-import org.apache.nifi.processors.aws.kinesis.converter.ValueRecordConverter;
-import org.apache.nifi.processors.aws.kinesis.converter.WrapperRecordConverter;
 import org.apache.nifi.processors.aws.region.RegionUtil;
 import org.apache.nifi.proxy.ProxyConfiguration;
-import org.apache.nifi.proxy.ProxyConfigurationService;
 import org.apache.nifi.proxy.ProxySpec;
+import org.apache.nifi.schema.access.SchemaNotFoundException;
+import org.apache.nifi.serialization.MalformedRecordException;
+import org.apache.nifi.serialization.RecordReader;
 import org.apache.nifi.serialization.RecordReaderFactory;
+import org.apache.nifi.serialization.RecordSetWriter;
 import org.apache.nifi.serialization.RecordSetWriterFactory;
+import org.apache.nifi.serialization.SimpleRecordSchema;
+import org.apache.nifi.serialization.record.MapRecord;
+import org.apache.nifi.serialization.record.RecordField;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.serialization.record.RecordSchema;
 import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
-import software.amazon.awssdk.http.Protocol;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.http.SdkHttpClient;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
 import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
-import software.amazon.awssdk.http.nio.netty.Http2Configuration;
 import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.regions.Region;
-import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
-import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
+import software.amazon.awssdk.services.dynamodb.DynamoDbClientBuilder;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
 import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;
-import software.amazon.kinesis.common.ConfigsBuilder;
-import software.amazon.kinesis.common.InitialPositionInStream;
-import software.amazon.kinesis.common.InitialPositionInStreamExtended;
-import software.amazon.kinesis.coordinator.Scheduler;
-import software.amazon.kinesis.coordinator.WorkerStateChangeListener;
-import software.amazon.kinesis.lifecycle.events.InitializationInput;
-import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
-import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
-import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
-import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput;
-import software.amazon.kinesis.metrics.LogMetricsFactory;
-import software.amazon.kinesis.metrics.MetricsFactory;
-import software.amazon.kinesis.metrics.NullMetricsFactory;
-import software.amazon.kinesis.processor.ShardRecordProcessor;
-import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
-import software.amazon.kinesis.processor.SingleStreamTracker;
-import software.amazon.kinesis.retrieval.KinesisClientRecord;
-import software.amazon.kinesis.retrieval.RetrievalSpecificConfig;
-import software.amazon.kinesis.retrieval.fanout.FanOutConfig;
-import software.amazon.kinesis.retrieval.polling.PollingConfig;
-
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.KinesisClientBuilder;
+import software.amazon.awssdk.services.kinesis.model.Shard;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.math.BigInteger;
+import java.net.Proxy;
 import java.net.URI;
-import java.nio.channels.Channels;
-import java.nio.channels.WritableByteChannel;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.time.Instant;
-import java.util.Date;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.APPROXIMATE_ARRIVAL_TIMESTAMP;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.FIRST_SEQUENCE_NUMBER;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.FIRST_SUB_SEQUENCE_NUMBER;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.LAST_SEQUENCE_NUMBER;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.LAST_SUB_SEQUENCE_NUMBER;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.MIME_TYPE;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.PARTITION_KEY;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.RECORD_COUNT;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.RECORD_ERROR_MESSAGE;
-import static org.apache.nifi.processors.aws.kinesis.ConsumeKinesisAttributes.SHARD_ID;
+import java.util.concurrent.TimeUnit;
+
 import static org.apache.nifi.processors.aws.region.RegionUtil.CUSTOM_REGION;
 import static org.apache.nifi.processors.aws.region.RegionUtil.REGION;
 
 @InputRequirement(InputRequirement.Requirement.INPUT_FORBIDDEN)
 @Tags({"amazon", "aws", "kinesis", "consume", "stream", "record"})
 @CapabilityDescription("""
-        Consumes data from the specified AWS Kinesis stream and outputs a FlowFile for every processed Record (raw)
-        or a FlowFile for a batch of processed records if a Record Reader and Record Writer are configured.
-        The processor may take a few minutes on the first start and several seconds on subsequent starts
-        to initialize before starting to fetch data.
-        Uses DynamoDB for check pointing and coordination, and (optional) CloudWatch for metrics.
-        """)
+        Consumes records from an Amazon Kinesis Data Stream. Uses \
+        DynamoDB-based checkpointing for reliable resumption after restarts.
+
+        Note: when a shard is split or multiple shards are merged, this processor will consume from \
+        child and parent shards concurrently. It does not wait for parent shards to be fully consumed \
+        before reading child shards, so record ordering is not guaranteed across a split or merge \
+        boundary.""")
 @WritesAttributes({
-        @WritesAttribute(attribute = ConsumeKinesisAttributes.STREAM_NAME,
-                description = "The name of the Kinesis Stream from which all Kinesis Records in the FlowFile were read"),
-        @WritesAttribute(attribute = SHARD_ID,
-                description = "Shard ID from which all Kinesis Records in the FlowFile were read"),
-        @WritesAttribute(attribute = PARTITION_KEY,
+        @WritesAttribute(attribute = "aws.kinesis.stream.name",
+                description = "The name of the Kinesis Stream from which records were read"),
+        @WritesAttribute(attribute = "aws.kinesis.shard.id",
+                description = "Shard ID from which records were read"),
+        @WritesAttribute(attribute = "aws.kinesis.partition.key",
                 description = "Partition key of the last Kinesis Record in the FlowFile"),
-        @WritesAttribute(attribute = FIRST_SEQUENCE_NUMBER,
-                description = "A Sequence Number of the first Kinesis Record in the FlowFile"),
-        @WritesAttribute(attribute = FIRST_SUB_SEQUENCE_NUMBER,
-                description = "A SubSequence Number of the first Kinesis Record in the FlowFile. Generated by KPL when aggregating records into a single Kinesis Record"),
-        @WritesAttribute(attribute = LAST_SEQUENCE_NUMBER,
-                description = "A Sequence Number of the last Kinesis Record in the FlowFile"),
-        @WritesAttribute(attribute = LAST_SUB_SEQUENCE_NUMBER,
-                description = "A SubSequence Number of the last Kinesis Record in the FlowFile. Generated by KPL when aggregating records into a single Kinesis Record"),
-        @WritesAttribute(attribute = APPROXIMATE_ARRIVAL_TIMESTAMP,
+        @WritesAttribute(attribute = "aws.kinesis.first.sequence.number",
+                description = "Sequence Number of the first Kinesis Record in the FlowFile"),
+        @WritesAttribute(attribute = "aws.kinesis.first.subsequence.number",
+                description = "Sub-Sequence Number of the first Kinesis Record in the FlowFile"),
+        @WritesAttribute(attribute = "aws.kinesis.last.sequence.number",
+                description = "Sequence Number of the last Kinesis Record in the FlowFile"),
+        @WritesAttribute(attribute = "aws.kinesis.last.subsequence.number",
+                description = "Sub-Sequence Number of the last Kinesis Record in the FlowFile"),
+        @WritesAttribute(attribute = "aws.kinesis.approximate.arrival.timestamp.ms",
                 description = "Approximate arrival timestamp of the last Kinesis Record in the FlowFile"),
-        @WritesAttribute(attribute = MIME_TYPE,
+        @WritesAttribute(attribute = "mime.type",
                 description = "Sets the mime.type attribute to the MIME Type specified by the Record Writer (if configured)"),
-        @WritesAttribute(attribute = RECORD_COUNT,
-                description = "Number of records written to the FlowFiles by the Record Writer (if configured)"),
-        @WritesAttribute(attribute = RECORD_ERROR_MESSAGE,
-                description = "This attribute provides on failure the error message encountered by the Record Reader or Record Writer (if configured)")
+        @WritesAttribute(attribute = "record.count",
+                description = "Number of records written to the FlowFile"),
+        @WritesAttribute(attribute = "record.error.message",
+                description = "Error message encountered by the Record Reader or Record Writer (if configured)"),
+        @WritesAttribute(attribute = "kinesis.millis.behind",
+                description = "How far behind the stream tail we are, in milliseconds")
 })
 @DefaultSettings(yieldDuration = "100 millis")
 @SystemResourceConsideration(resource = SystemResource.CPU, description = """
-        The processor uses additional CPU resources when consuming data from Kinesis.
-        The consumption is started immediately after this Processor is scheduled. The consumption ends only when the Processor is stopped.""")
+        The processor uses additional CPU resources when consuming data from Kinesis.""")
 @SystemResourceConsideration(resource = SystemResource.NETWORK, description = """
-        The processor will continually poll for new Records,
-        requesting up to a maximum number of Records/bytes per call. This can result in sustained network usage.""")
+        The processor will continually poll for new Records.""")
 @SystemResourceConsideration(resource = SystemResource.MEMORY, description = """
-        ConsumeKinesis buffers Kinesis Records in memory until they can be processed.
-        The maximum size of the buffer is controlled by the 'Max Bytes to Buffer' property.
-        In addition, the processor may cache some amount of data for each shard when the processor's buffer is full.""")
+        ConsumeKinesis buffers Kinesis Records in memory until they can be processed. \
+        The maximum size of the buffer is controlled by the 'Max Batch Size' property.
 public class ConsumeKinesis extends AbstractProcessor {
 
-    private static final Duration HTTP_CLIENTS_CONNECTION_TIMEOUT = Duration.ofSeconds(30);
-    private static final Duration HTTP_CLIENTS_READ_TIMEOUT = Duration.ofMinutes(3);
-
-    private static final int KINESIS_HTTP_CLIENT_WINDOW_SIZE_BYTES = 512 * 1024; // 512 KiB
-    private static final Duration KINESIS_HTTP_HEALTH_CHECK_PERIOD = Duration.ofMinutes(1);
-
-    /**
-     * How long to wait for a Scheduler initialization to complete in the OnScheduled method.
-     * If the initialization takes longer than this, the processor will continue initialization checks in the onTrigger method.
-     */
-    private static final Duration KINESIS_SCHEDULER_ON_SCHEDULED_INITIALIZATION_TIMEOUT = Duration.ofSeconds(30);
-    private static final Duration KINESIS_SCHEDULER_GRACEFUL_SHUTDOWN_TIMEOUT = Duration.ofMinutes(3);
+    static final String ATTR_STREAM_NAME = "aws.kinesis.stream.name";
+    static final String ATTR_SHARD_ID = "aws.kinesis.shard.id";
+    static final String ATTR_FIRST_SEQUENCE = "aws.kinesis.first.sequence.number";
+    static final String ATTR_LAST_SEQUENCE = "aws.kinesis.last.sequence.number";
+    static final String ATTR_FIRST_SUBSEQUENCE = "aws.kinesis.first.subsequence.number";
+    static final String ATTR_LAST_SUBSEQUENCE = "aws.kinesis.last.subsequence.number";
+    static final String ATTR_PARTITION_KEY = "aws.kinesis.partition.key";
+    static final String ATTR_ARRIVAL_TIMESTAMP = "aws.kinesis.approximate.arrival.timestamp.ms";
+    static final String ATTR_MILLIS_BEHIND = "kinesis.millis.behind";
+
+    private static final long QUEUE_POLL_TIMEOUT_MILLIS = 100;
+    private static final Duration API_CALL_TIMEOUT = Duration.ofSeconds(30);
+    private static final Duration API_CALL_ATTEMPT_TIMEOUT = Duration.ofSeconds(10);
+    private static final byte[] NEWLINE_DELIMITER = new byte[] {'\n'};

Review Comment:
   No. The separator should not depend on the OS of the host.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to