zhaijack commented on a change in pull request #1103: PIP-13-1/3: Provide `TopicsConsumer` to consume from several topics under same namespace
URL: https://github.com/apache/incubator-pulsar/pull/1103#discussion_r167264860
##########
File path: pulsar-client/src/main/java/org/apache/pulsar/client/impl/TopicsConsumerImpl.java
##########
@@ -0,0 +1,881 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.client.impl;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+
+import com.google.common.collect.Lists;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.pulsar.client.api.Consumer;
+import org.apache.pulsar.client.api.ConsumerConfiguration;
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.MessageId;
+import org.apache.pulsar.client.api.PulsarClientException;
+import org.apache.pulsar.client.api.SubscriptionType;
+import org.apache.pulsar.client.util.ConsumerName;
+import org.apache.pulsar.client.util.FutureUtil;
+import org.apache.pulsar.common.api.proto.PulsarApi.CommandAck.AckType;
+import org.apache.pulsar.common.naming.DestinationName;
+import org.apache.pulsar.common.naming.NamespaceName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TopicsConsumerImpl extends ConsumerBase {
+
+    // All topics should be in the same namespace
+    protected NamespaceName namespaceName;
+
+    // Map <topic+partition, consumer>; when an ACK is handled, the consumer is looked up by topic name
+    private final ConcurrentHashMap<String, ConsumerImpl> consumers;
+
+    // Map <topic, partitionNumber>, stores the partition number for each topic
+    private final ConcurrentHashMap<String, Integer> topics;
+
+    // Queue of partition consumers on which we have stopped calling receiveAsync() because the
+    // shared incoming queue was full
+    private final ConcurrentLinkedQueue<ConsumerImpl> pausedConsumers;
+
+    // Threshold for the shared queue. When the size of the shared queue goes below the threshold, we are going to
+    // resume receiving from the paused consumer partitions
+    private final int sharedQueueResumeThreshold;
+
+    // Sum of topic partitions: a non-partitioned topic counts as 1, a partitioned topic counts as its partition number.
+    AtomicInteger numberTopicPartitions;
+
+    private final ReadWriteLock lock = new ReentrantReadWriteLock();
+    private final ConsumerStats stats;
+    private final UnAckedMessageTracker unAckedMessageTracker;
+    private final ConsumerConfiguration internalConfig;
+
+    TopicsConsumerImpl(PulsarClientImpl client, Collection<String> topics, String subscription,
+                       ConsumerConfiguration conf, ExecutorService listenerExecutor,
+                       CompletableFuture<Consumer> subscribeFuture) {
+        super(client, "TopicsConsumerFakeTopicName" + ConsumerName.generateRandomName(), subscription,
+                conf, Math.max(2, conf.getReceiverQueueSize()), listenerExecutor,
+                subscribeFuture);
+
+        checkArgument(conf.getReceiverQueueSize() > 0,
+            "Receiver queue size needs to be greater than 0 for Topics Consumer");
+
+        this.topics = new ConcurrentHashMap<>();
+        this.consumers = new ConcurrentHashMap<>();
+        this.pausedConsumers = new ConcurrentLinkedQueue<>();
+        this.sharedQueueResumeThreshold = maxReceiverQueueSize / 2;
+        this.numberTopicPartitions = new AtomicInteger(0);
+
+        if (conf.getAckTimeoutMillis() != 0) {
+            this.unAckedMessageTracker = new UnAckedMessageTracker(client, this, conf.getAckTimeoutMillis());
+        } else {
+            this.unAckedMessageTracker = UnAckedMessageTracker.UNACKED_MESSAGE_TRACKER_DISABLED;
+        }
+
+        this.internalConfig = getInternalConsumerConfig();
+        this.stats = client.getConfiguration().getStatsIntervalSeconds() > 0 ? new ConsumerStats() : null;
+
+        if (topics.isEmpty()) {
+            this.namespaceName = null;
+            setState(State.Ready);
+            // We have successfully created N consumers, so we can start receiving messages now

Review comment:
   Thanks. This is an obsolete comment; I will remove it.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

With regards,
Apache Git Services
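For context on the diff above, the pause/resume flow control described by the `pausedConsumers` and `sharedQueueResumeThreshold` field comments can be summarized with a minimal stand-alone sketch. The names below (SharedQueueSketch, PartitionReader, requestNext, onMessage) are hypothetical and are not part of this PR or of the Pulsar client API; the sketch only mirrors the idea of parking per-partition consumers while the shared incoming queue is full and resuming them once it drains to half of its capacity.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Hypothetical, self-contained sketch of the pause/resume flow control.
// None of these types exist in the Pulsar client; they are stand-ins.
class SharedQueueSketch<T> {

    interface PartitionReader {
        // Stands in for asking one per-partition consumer for its next
        // message (receiveAsync() in the real client).
        void requestNext();
    }

    private final Queue<T> sharedQueue = new ConcurrentLinkedQueue<>();
    private final Queue<PartitionReader> pausedReaders = new ConcurrentLinkedQueue<>();
    private final int maxQueueSize;
    private final int resumeThreshold;

    SharedQueueSketch(int maxQueueSize) {
        this.maxQueueSize = maxQueueSize;
        // Same choice as the diff: resume once the queue drains to half capacity.
        this.resumeThreshold = maxQueueSize / 2;
    }

    // Called when a per-partition reader delivers a message.
    void onMessage(T message, PartitionReader source) {
        sharedQueue.add(message);
        if (sharedQueue.size() >= maxQueueSize) {
            // Shared queue is full: park this reader instead of requesting more.
            pausedReaders.add(source);
        } else {
            source.requestNext();
        }
    }

    // Called when the application drains the next message from the shared queue.
    T poll() {
        T message = sharedQueue.poll();
        if (sharedQueue.size() <= resumeThreshold) {
            // Back below the threshold: resume every reader that was paused.
            PartitionReader reader;
            while ((reader = pausedReaders.poll()) != null) {
                reader.requestNext();
            }
        }
        return message;
    }
}

In TopicsConsumerImpl itself the same bookkeeping is done with ConsumerImpl instances, the consumer's shared incoming message queue, and receiveAsync(), as the field comments in the diff describe.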