Added: kafka/site/083/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html URL: http://svn.apache.org/viewvc/kafka/site/083/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html?rev=1659720&view=auto ============================================================================== --- kafka/site/083/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html (added) +++ kafka/site/083/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html Fri Feb 13 23:54:00 2015 @@ -0,0 +1,1002 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> +<!-- NewPage --> +<html lang="en"> +<head> +<!-- Generated by javadoc (version 1.7.0_51) on Fri Feb 13 15:47:44 PST 2015 --> +<title>KafkaConsumer (clients 0.8.3-SNAPSHOT API)</title> +<meta name="date" content="2015-02-13"> +<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> +</head> +<body> +<script type="text/javascript"><!-- + if (location.href.indexOf('is-external=true') == -1) { + parent.document.title="KafkaConsumer (clients 0.8.3-SNAPSHOT API)"; + } +//--> +</script> +<noscript> +<div>JavaScript is disabled on your browser.</div> +</noscript> +<!-- ========= START OF TOP NAVBAR ======= --> +<div class="topNav"><a name="navbar_top"> +<!-- --> +</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> +<!-- --> +</a> +<ul class="navList" title="Navigation"> +<li><a href="../../../../../overview-summary.html">Overview</a></li> +<li><a href="package-summary.html">Package</a></li> +<li class="navBarCell1Rev">Class</li> +<li><a href="package-tree.html">Tree</a></li> +<li><a href="../../../../../deprecated-list.html">Deprecated</a></li> +<li><a href="../../../../../index-all.html">Index</a></li> +<li><a href="../../../../../help-doc.html">Help</a></li> +</ul> +</div> +<div class="subNav"> +<ul class="navList"> +<li><a 
href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecords.html" title="class in org.apache.kafka.clients.consumer"><span class="strong">Prev Class</span></a></li> +<li><a href="../../../../../org/apache/kafka/clients/consumer/MockConsumer.html" title="class in org.apache.kafka.clients.consumer"><span class="strong">Next Class</span></a></li> +</ul> +<ul class="navList"> +<li><a href="../../../../../index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html" target="_top">Frames</a></li> +<li><a href="KafkaConsumer.html" target="_top">No Frames</a></li> +</ul> +<ul class="navList" id="allclasses_navbar_top"> +<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li> +</ul> +<div> +<script type="text/javascript"><!-- + allClassesLink = document.getElementById("allclasses_navbar_top"); + if(window==top) { + allClassesLink.style.display = "block"; + } + else { + allClassesLink.style.display = "none"; + } + //--> +</script> +</div> +<div> +<ul class="subNavList"> +<li>Summary: </li> +<li>Nested | </li> +<li>Field | </li> +<li><a href="#constructor_summary">Constr</a> | </li> +<li><a href="#method_summary">Method</a></li> +</ul> +<ul class="subNavList"> +<li>Detail: </li> +<li>Field | </li> +<li><a href="#constructor_detail">Constr</a> | </li> +<li><a href="#method_detail">Method</a></li> +</ul> +</div> +<a name="skip-navbar_top"> +<!-- --> +</a></div> +<!-- ========= END OF TOP NAVBAR ========= --> +<!-- ======== START OF CLASS DATA ======== --> +<div class="header"> +<div class="subTitle">org.apache.kafka.clients.consumer</div> +<h2 title="Class KafkaConsumer" class="title">Class KafkaConsumer<K,V></h2> +</div> +<div class="contentContainer"> +<ul class="inheritance"> +<li>java.lang.Object</li> +<li> +<ul class="inheritance"> +<li>org.apache.kafka.clients.consumer.KafkaConsumer<K,V></li> +</ul> +</li> +</ul> +<div class="description"> +<ul class="blockList"> +<li class="blockList"> +<dl> +<dt>All Implemented Interfaces:</dt> 
+<dd>java.io.Closeable, java.lang.AutoCloseable, <a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><K,V></dd> +</dl> +<hr> +<br> +<pre>public class <span class="strong">KafkaConsumer<K,V></span> +extends java.lang.Object +implements <a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><K,V></pre> +<div class="block">A Kafka client that consumes records from a Kafka cluster. + <p> + It will transparently handle the failure of servers in the Kafka cluster, and transparently adapt as partitions of + data it subscribes to migrate within the cluster. This client also interacts with the server to allow groups of + consumers to load balance consumption using consumer groups (as described below). + <p> + The consumer maintains TCP connections to the necessary brokers to fetch data for the topics it subscribes to. + Failure to close the consumer after use will leak these connections. + <p> + The consumer is thread safe but generally will be used only from within a single thread. The consumer client has no + threads of it's own, all work is done in the caller's thread when calls are made on the various methods exposed. + + <h3>Offsets and Consumer Position</h3> + Kafka maintains a numerical offset for each record in a partition. This offset acts as a kind of unique identifier of + a record within that partition, and also denotes the position of the consumer in the partition. That is, a consumer + which has position 5 has consumed records with offsets 0 through 4 and will next receive record with offset 5. There + are actually two notions of position relevant to the user of the consumer. 
+ <p> + The <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#position(org.apache.kafka.common.TopicPartition)"><code>position</code></a> of the consumer gives the offset of the next record that will be given + out. It will be one larger than the highest offset the consumer has seen in that partition. It automatically advances + every time the consumer receives data calls <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll(long)</code></a> and receives messages. + <p> + The <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(org.apache.kafka.clients.consumer.CommitType)"><code>committed position</code></a> is the last offset that has been saved securely. Should the + process fail and restart, this is the offset that it will recover to. The consumer can either automatically commit + offsets periodically, or it can choose to control this committed position manually by calling + <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(org.apache.kafka.clients.consumer.CommitType)"><code>commit</code></a>. + <p> + This distinction gives the consumer control over when a record is considered consumed. It is discussed in further + detail below. + + <h3>Consumer Groups</h3> + + Kafka uses the concept of <i>consumer groups</i> to allow a pool of processes to divide up the work of consuming and + processing records. These processes can either be running on the same machine or, as is more likely, they can be + distributed over many machines to provide additional scalability and fault tolerance for processing. + <p> + Each Kafka consumer must specify a consumer group that it belongs to. Kafka will deliver each message in the + subscribed topics to one process in each consumer group. This is achieved by balancing the partitions in the topic + over the consumer processes in each group. 
So if there is a topic with four partitions, and a consumer group with two + processes, each process would consume from two partitions. This group membership is maintained dynamically: if a + process fails the partitions assigned to it will be reassigned to other processes in the same group, and if a new + process joins the group, partitions will be moved from existing consumers to this new process. + <p> + So if two processes subscribe to a topic both specifying different groups they will each get all the records in that + topic; if they both specify the same group they will each get about half the records. + <p> + Conceptually you can think of a consumer group as being a single logical subscriber that happens to be made up of + multiple processes. As a multi-subscriber system, Kafka naturally supports having any number of consumer groups for a + given topic without duplicating data (additional consumers are actually quite cheap). + <p> + This is a slight generalization of the functionality that is common in messaging systems. To get semantics similar to + a queue in a traditional messaging system all processes would be part of a single consumer group and hence record + delivery would be balanced over the group like with a queue. Unlike a traditional messaging system, though, you can + have multiple such groups. To get semantics similar to pub-sub in a traditional messaging system each process would + have its own consumer group, so each process would subscribe to all the records published to the topic. + <p> + In addition, when offsets are committed they are always committed for a given consumer group. + <p> + It is also possible for the consumer to manually specify the partitions it subscribes to, which disables this dynamic + partition balancing. + + <h3>Usage Examples</h3> + The consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to + demonstrate how to use them. 
+ + <h4>Simple Processing</h4> + This example demonstrates the simplest usage of Kafka's consumer api. + + <pre> + Properties props = new Properties(); + props.put("metadata.broker.list", "localhost:9092"); + props.put("group.id", "test"); + props.put("enable.auto.commit", "true"); + props.put("auto.commit.interval.ms", "1000"); + props.put("session.timeout.ms", "30000"); + props.put("key.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + props.put("value.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props); + consumer.subscribe("foo", "bar"); + while (true) { + ConsumerRecords<String, String> records = consumer.poll(100); + for (ConsumerRecord<String, String> record : records) + System.out.printf("offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value()); + } + </pre> + + Setting <code>enable.auto.commit</code> means that offsets are committed automatically with a frequency controlled by + the config <code>auto.commit.interval.ms</code>. + <p> + The connection to the cluster is bootstrapped by specifying a list of one or more brokers to contact using the + configuration <code>metadata.broker.list</code>. This list is just used to discover the rest of the brokers in the + cluster and need not be an exhaustive list of servers in the cluster (though you may want to specify more than one in + case there are servers down when the client is connecting). + <p> + In this example the client is subscribing to the topics <i>foo</i> and <i>bar</i> as part of a group of consumers + called <i>test</i> as described above. + <p> + The broker will automatically detect failed processes in the <i>test</i> group by using a heartbeat mechanism. The + consumer will automatically ping the cluster periodically, which let's the cluster know that it is alive. 
As long as + the consumer is able to do this it is considered alive and retains the right to consume from the partitions assigned + to it. If it stops heartbeating for a period of time longer than <code>session.timeout.ms</code> then it will be + considered dead and its partitions will be assigned to another process. + <p> + The serializers settings specify how to turn the objects the user provides into bytes. By specifying the string + serializers we are saying that our record's key and value will just be simple strings. + + <h4>Controlling When Messages Are Considered Consumed</h4> + + In this example we will consume a batch of records and batch them up in memory, when we have sufficient records + batched we will insert them into a database. If we allowed offsets to auto commit as in the previous example messages + would be considered consumed after they were given out by the consumer, and it would be possible that our process + could fail after we have read messages into our in-memory buffer but before they had been inserted into the database. + To avoid this we will manually commit the offsets only once the corresponding messages have been inserted into the + database. This gives us exact control of when a message is considered consumed. This raises the opposite possibility: + the process could fail in the interval after the insert into the database but before the commit (even though this + would likely just be a few milliseconds, it is a possibility). In this case the process that took over consumption + would consume from last committed offset and would repeat the insert of the last batch of data. Used in this way + Kafka provides what is often called "at-least once delivery" guarantees, as each message will likely be delivered one + time but in failure cases could be duplicated. 
+ + <pre> + Properties props = new Properties(); + props.put("metadata.broker.list", "localhost:9092"); + props.put("group.id", "test"); + props.put("enable.auto.commit", "false"); + props.put("auto.commit.interval.ms", "1000"); + props.put("session.timeout.ms", "30000"); + props.put("key.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + props.put("value.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props); + consumer.subscribe("foo", "bar"); + int commitInterval = 200; + List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>(); + while (true) { + ConsumerRecords<String, String> records = consumer.poll(100); + for (ConsumerRecord<String, String> record : records) { + buffer.add(record); + if (buffer.size() >= commitInterval) { + insertIntoDb(buffer); + consumer.commit(CommitType.SYNC); + buffer.clear(); + } + } + } + </pre> + + <h4>Subscribing To Specific Partitions</h4> + + In the previous examples we subscribed to the topics we were interested in and let Kafka give our particular process + a fair share of the partitions for those topics. This provides a simple load balancing mechanism so multiple + instances of our program can divided up the work of processing records. + <p> + In this mode the consumer will just get the partitions it subscribes to and if the consumer instance fails no attempt + will be made to rebalance partitions to other instances. + <p> + There are several cases where this makes sense: + <ul> + <li>The first case is if the process is maintaining some kind of local state associated with that partition (like a + local on-disk key-value store) and hence it should only get records for the partition it is maintaining on disk. 
+ <li>Another case is if the process itself is highly available and will be restarted if it fails (perhaps using a + cluster management framework like YARN, Mesos, or AWS facilities, or as part of a stream processing framework). In + this case there is no need for Kafka to detect the failure and reassign the partition, rather the consuming process + will be restarted on another machine. + </ul> + <p> + This mode is easy to specify, rather than subscribing to the topic, the consumer just subscribes to particular + partitions: + + <pre> + String topic = "foo"; + TopicPartition partition0 = new TopicPartition(topic, 0); + TopicPartition partition1 = new TopicPartition(topic, 1); + consumer.subscribe(partition0); + consumer.subscribe(partition1); + </pre> + + The group that the consumer specifies is still used for committing offsets, but now the set of partitions will only + be changed if the consumer specifies new partitions, and no attempt at failure detection will be made. + <p> + It isn't possible to mix both subscription to specific partitions (with no load balancing) and to topics (with load + balancing) using the same consumer instance. + + <h4>Managing Your Own Offsets</h4> + + The consumer application need not use Kafka's built-in offset storage, it can store offsets in a store of it's own + choosing. The primary use case for this is allowing the application to store both the offset and the results of the + consumption in the same system in a way that both the results and offsets are stored atomically. This is not always + possible, but when it is it will make the consumption fully atomic and give "exactly once" semantics that are + stronger than the default "at-least once" semantics you get with Kafka's offset commit functionality. 
+ <p> + Here are a couple of examples of this type of usage: + <ul> + <li>If the results of the consumption are being stored in a relational database, storing the offset in the database + as well can allow committing both the results and offset in a single transaction. Thus either the transaction will + succeed and the offset will be updated based on what was consumed or the result will not be stored and the offset + won't be updated. + <li>If the results are being stored in a local store it may be possible to store the offset there as well. For + example a search index could be built by subscribing to a particular partition and storing both the offset and the + indexed data together. If this is done in a way that is atomic, it is often possible to have it be the case that even + if a crash occurs that causes unsync'd data to be lost, whatever is left has the corresponding offset stored as well. + This means that in this case the indexing process that comes back having lost recent updates just resumes indexing + from what it has ensuring that no updates are lost. + </ul> + + Each record comes with it's own offset, so to manage your own offset you just need to do the following: + <ol> + <li>Configure <code>enable.auto.commit=false</code> + <li>Use the offset provided with each <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecord.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerRecord</code></a> to save your position. + <li>On restart restore the position of the consumer using <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)"><code>seek(TopicPartition, long)</code></a>. + </ol> + + This type of usage is simplest when the partition assignment is also done manually (this would be likely in the + search index use case described above). 
If the partition assignment is done automatically special care will also be + needed to handle the case where partition assignments change. This can be handled using a special callback specified + using <code>rebalance.callback.class</code>, which specifies an implementation of the interface + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a>. When partitions are taken from a consumer the consumer will want to commit its + offset for those partitions by implementing + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html#onPartitionsRevoked(org.apache.kafka.clients.consumer.Consumer, java.util.Collection)"><code>ConsumerRebalanceCallback.onPartitionsRevoked(Consumer, Collection)</code></a>. When partitions are assigned to a + consumer, the consumer will want to look up the offset for those new partitions and correctly initialize the consumer + to that position by implementing <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html#onPartitionsAssigned(org.apache.kafka.clients.consumer.Consumer, java.util.Collection)"><code>ConsumerRebalanceCallback.onPartitionsAssigned(Consumer, Collection)</code></a>. + <p> + Another common use for <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a> is to flush any caches the application maintains for + partitions that are moved elsewhere. + + <h4>Controlling The Consumer's Position</h4> + + In most use cases the consumer will simply consume records from beginning to end, periodically committing its + position (either automatically or manually). However Kafka allows the consumer to manually control its position, + moving forward or backwards in a partition at will. 
This means a consumer can re-consume older records, or skip to + the most recent records without actually consuming the intermediate records. + <p> + There are several instances where manually controlling the consumer's position can be useful. + <p> + One case is for time-sensitive record processing: it may make sense for a consumer that falls far enough behind to not + attempt to catch up processing all records, but rather just skip to the most recent records. + <p> + Another use case is for a system that maintains local state as described in the previous section. In such a system + the consumer will want to initialize its position on start-up to whatever is contained in the local store. Likewise + if the local state is destroyed (say because the disk is lost) the state may be recreated on a new machine by + reconsuming all the data and recreating the state (assuming that Kafka is retaining sufficient history). + + Kafka allows specifying the position using <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)"><code>seek(TopicPartition, long)</code></a> to specify the new position. Special + methods for seeking to the earliest and latest offset the server maintains are also available ( + <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToBeginning(org.apache.kafka.common.TopicPartition...)"><code>seekToBeginning(TopicPartition...)</code></a> and <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToEnd(org.apache.kafka.common.TopicPartition...)"><code>seekToEnd(TopicPartition...)</code></a> respectively). + + <h3>Multithreaded Processing</h3> + + The Kafka consumer is threadsafe but coarsely synchronized. All network I/O happens in the thread of the application + making the call. We have intentionally avoided implementing a particular threading model for processing. 
+ <p> + This leaves several options for implementing multi-threaded processing of records. + + <h4>1. One Consumer Per Thread</h4> + + A simple option is to give each thread it's own consumer instance. Here are the pros and cons of this approach: + <ul> + <li><b>PRO</b>: It is the easiest to implement + <li><b>PRO</b>: It is often the fastest as no inter-thread co-ordination is needed + <li><b>PRO</b>: It makes in-order processing on a per-partition basis very easy to implement (each thread just + processes messages in the order it receives them). + <li><b>CON</b>: More consumers means more TCP connections to the cluster (one per thread). In general Kafka handles + connections very efficiently so this is generally a small cost. + <li><b>CON</b>: Multiple consumers means more requests being sent to the server and slightly less batching of data + which can cause some drop in I/O throughput. + <li><b>CON</b>: The number of total threads across all processes will be limited by the total number of partitions. + </ul> + + <h4>2. Decouple Consumption and Processing</h4> + + Another alternative is to have one or more consumer threads that do all data consumption and hands off + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecords.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerRecords</code></a> instances to a blocking queue consumed by a pool of processor threads that actually handle + the record processing. + + This option likewise has pros and cons: + <ul> + <li><b>PRO</b>: This option allows independently scaling the number of consumers and processors. This makes it + possible to have a single consumer that feeds many processor threads, avoiding any limitation on partitions. 
+ <li><b>CON</b>: Guaranteeing order across the processors requires particular care as the threads will execute + independently an earlier chunk of data may actually be processed after a later chunk of data just due to the luck of + thread execution timing. For processing that has no ordering requirements this is not a problem. + <li><b>CON</b>: Manually committing the position becomes harder as it requires that all threads co-ordinate to ensure + that processing is complete for that partition. + </ul> + + There are many possible variations on this approach. For example each processor thread can have it's own queue, and + the consumer threads can hash into these queues using the TopicPartition to ensure in-order consumption and simplify + commit.</div> +</li> +</ul> +</div> +<div class="summary"> +<ul class="blockList"> +<li class="blockList"> +<!-- ======== CONSTRUCTOR SUMMARY ======== --> +<ul class="blockList"> +<li class="blockList"><a name="constructor_summary"> +<!-- --> +</a> +<h3>Constructor Summary</h3> +<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> +<caption><span>Constructors</span><span class="tabEnd"> </span></caption> +<tr> +<th class="colOne" scope="col">Constructor and Description</th> +</tr> +<tr class="altColor"> +<td class="colOne"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#KafkaConsumer(java.util.Map)">KafkaConsumer</a></strong>(java.util.Map<java.lang.String,java.lang.Object> configs)</code> +<div class="block">A consumer is instantiated by providing a set of key-value pairs as configuration.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colOne"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#KafkaConsumer(java.util.Map, org.apache.kafka.clients.consumer.ConsumerRebalanceCallback, org.apache.kafka.common.serialization.Deserializer, 
org.apache.kafka.common.serialization.Deserializer)">KafkaConsumer</a></strong>(java.util.Map<java.lang.String,java.lang.Object> configs, + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer">ConsumerRebalanceCallback</a> callback, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>> keyDeserializer, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>> valueDeserializer)</code> +<div class="block">A consumer is instantiated by providing a set of key-value pairs as configuration, a + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a> implementation, a key and a value <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colOne"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#KafkaConsumer(java.util.Properties)">KafkaConsumer</a></strong>(java.util.Properties properties)</code> +<div class="block">A consumer is instantiated by providing a <code>Properties</code> object as configuration.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colOne"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#KafkaConsumer(java.util.Properties, 
org.apache.kafka.clients.consumer.ConsumerRebalanceCallback, org.apache.kafka.common.serialization.Deserializer, org.apache.kafka.common.serialization.Deserializer)">KafkaConsumer</a></strong>(java.util.Properties properties, + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer">ConsumerRebalanceCallback</a> callback, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>> keyDeserializer, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>> valueDeserializer)</code> +<div class="block">A consumer is instantiated by providing a <code>Properties</code> object as configuration and a + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a> implementation, a key and a value <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>.</div> +</td> +</tr> +</table> +</li> +</ul> +<!-- ========== METHOD SUMMARY =========== --> +<ul class="blockList"> +<li class="blockList"><a name="method_summary"> +<!-- --> +</a> +<h3>Method Summary</h3> +<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> +<caption><span>Methods</span><span class="tabEnd"> </span></caption> +<tr> +<th class="colFirst" 
scope="col">Modifier and Type</th> +<th class="colLast" scope="col">Method and Description</th> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#close()">close</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(org.apache.kafka.clients.consumer.CommitType)">commit</a></strong>(<a href="../../../../../org/apache/kafka/clients/consumer/CommitType.html" title="enum in org.apache.kafka.clients.consumer">CommitType</a> commitType)</code> +<div class="block">Commits offsets returned on the last <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll()</code></a> for the subscribed list of topics and partitions.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(java.util.Map, org.apache.kafka.clients.consumer.CommitType)">commit</a></strong>(java.util.Map<<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>,java.lang.Long> offsets, + <a href="../../../../../org/apache/kafka/clients/consumer/CommitType.html" title="enum in org.apache.kafka.clients.consumer">CommitType</a> commitType)</code> +<div class="block">Commits the specified offsets for the specified list of topics and partitions to Kafka.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>long</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#committed(org.apache.kafka.common.TopicPartition)">committed</a></strong>(<a 
href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition)</code> +<div class="block">Fetches the last committed offset for the given partition (whether the commit happened by this process or + another).</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>java.util.Map<<a href="../../../../../org/apache/kafka/common/MetricName.html" title="class in org.apache.kafka.common">MetricName</a>,? extends <a href="../../../../../org/apache/kafka/common/Metric.html" title="interface in org.apache.kafka.common">Metric</a>></code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#metrics()">metrics</a></strong>()</code> +<div class="block">Get the metrics kept by the consumer</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>java.util.List<<a href="../../../../../org/apache/kafka/common/PartitionInfo.html" title="class in org.apache.kafka.common">PartitionInfo</a>></code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#partitionsFor(java.lang.String)">partitionsFor</a></strong>(java.lang.String topic)</code> +<div class="block">Get metadata about the partitions for a given topic.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecords.html" title="class in org.apache.kafka.clients.consumer">ConsumerRecords</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)">poll</a></strong>(long timeout)</code> +<div 
class="block">Fetches data for the topics or partitions specified using one of the subscribe APIs.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>long</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#position(org.apache.kafka.common.TopicPartition)">position</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition)</code> +<div class="block">Returns the offset of the <i>next record</i> that will be fetched (if a record with that offset exists).</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)">seek</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition, + long offset)</code> +<div class="block">Overrides the fetch offsets that the consumer will use on the next <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll(timeout)</code></a>.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToBeginning(org.apache.kafka.common.TopicPartition...)">seekToBeginning</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</code> +<div class="block">Seek to the first offset for each of the given partitions</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToEnd(org.apache.kafka.common.TopicPartition...)">seekToEnd</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... partitions)</code> +<div class="block">Seek to the last offset for each of the given partitions</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(java.lang.String...)">subscribe</a></strong>(java.lang.String... topics)</code> +<div class="block">Incrementally subscribes to the given list of topics and uses the consumer's group management functionality</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(org.apache.kafka.common.TopicPartition...)">subscribe</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</code> +<div class="block">Incrementally subscribes to a specific topic partition and does not use the consumer's group management + functionality.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>java.util.Set<<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>></code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscriptions()">subscriptions</a></strong>()</code> +<div class="block">The set of partitions currently assigned to this consumer.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#unsubscribe(java.lang.String...)">unsubscribe</a></strong>(java.lang.String... topics)</code> +<div class="block">Unsubscribe from the specific topics.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#unsubscribe(org.apache.kafka.common.TopicPartition...)">unsubscribe</a></strong>(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</code> +<div class="block">Unsubscribe from the specific topic partitions.</div> +</td> +</tr> +</table> +<ul class="blockList"> +<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object"> +<!-- --> +</a> +<h3>Methods inherited from class java.lang.Object</h3> +<code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li> +</ul> +</li> +</ul> +</li> +</ul> +</div> +<div class="details"> +<ul class="blockList"> +<li class="blockList"> +<!-- ========= CONSTRUCTOR DETAIL ======== --> +<ul class="blockList"> +<li class="blockList"><a name="constructor_detail"> +<!-- --> +</a> +<h3>Constructor Detail</h3> +<a name="KafkaConsumer(java.util.Map)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>KafkaConsumer</h4> +<pre>public KafkaConsumer(java.util.Map<java.lang.String,java.lang.Object> configs)</pre> +<div class="block">A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings + are documented <a href="http://kafka.apache.org/documentation.html#consumerconfigs" >here</a>. Values can be + either strings or objects of the appropriate type (for example a numeric configuration would accept either the + string "42" or the integer 42). 
+ <p> + Valid configuration strings are documented at <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerConfig.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerConfig</code></a></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>configs</code> - The consumer configs</dd></dl> +</li> +</ul> +<a name="KafkaConsumer(java.util.Map, org.apache.kafka.clients.consumer.ConsumerRebalanceCallback, org.apache.kafka.common.serialization.Deserializer, org.apache.kafka.common.serialization.Deserializer)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>KafkaConsumer</h4> +<pre>public KafkaConsumer(java.util.Map<java.lang.String,java.lang.Object> configs, + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer">ConsumerRebalanceCallback</a> callback, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>> keyDeserializer, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>> valueDeserializer)</pre> +<div class="block">A consumer is instantiated by providing a set of key-value pairs as configuration, a + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a> implementation, a key and a value <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in 
org.apache.kafka.common.serialization"><code>Deserializer</code></a>. + <p> + Valid configuration strings are documented at <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerConfig.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerConfig</code></a></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>configs</code> - The consumer configs</dd><dd><code>callback</code> - A callback interface that the user can implement to manage customized offsets on the start and + end of every rebalance operation.</dd><dd><code>keyDeserializer</code> - The deserializer for key that implements <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>. The configure() method + won't be called in the consumer when the deserializer is passed in directly.</dd><dd><code>valueDeserializer</code> - The deserializer for value that implements <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>. The configure() method + won't be called in the consumer when the deserializer is passed in directly.</dd></dl> +</li> +</ul> +<a name="KafkaConsumer(java.util.Properties)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>KafkaConsumer</h4> +<pre>public KafkaConsumer(java.util.Properties properties)</pre> +<div class="block">A consumer is instantiated by providing a <code>Properties</code> object as configuration. 
Valid configuration strings are documented at + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerConfig.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerConfig</code></a></div> +</li> +</ul> +<a name="KafkaConsumer(java.util.Properties, org.apache.kafka.clients.consumer.ConsumerRebalanceCallback, org.apache.kafka.common.serialization.Deserializer, org.apache.kafka.common.serialization.Deserializer)"> +<!-- --> +</a> +<ul class="blockListLast"> +<li class="blockList"> +<h4>KafkaConsumer</h4> +<pre>public KafkaConsumer(java.util.Properties properties, + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer">ConsumerRebalanceCallback</a> callback, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>> keyDeserializer, + <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization">Deserializer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>> valueDeserializer)</pre> +<div class="block">A consumer is instantiated by providing a <code>Properties</code> object as configuration and a + <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRebalanceCallback.html" title="interface in org.apache.kafka.clients.consumer"><code>ConsumerRebalanceCallback</code></a> implementation, a key and a value <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>. 
+ <p> + Valid configuration strings are documented at <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerConfig.html" title="class in org.apache.kafka.clients.consumer"><code>ConsumerConfig</code></a></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>properties</code> - The consumer configuration properties</dd><dd><code>callback</code> - A callback interface that the user can implement to manage customized offsets on the start and + end of every rebalance operation.</dd><dd><code>keyDeserializer</code> - The deserializer for key that implements <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>. The configure() method + won't be called in the consumer when the deserializer is passed in directly.</dd><dd><code>valueDeserializer</code> - The deserializer for value that implements <a href="../../../../../org/apache/kafka/common/serialization/Deserializer.html" title="interface in org.apache.kafka.common.serialization"><code>Deserializer</code></a>. The configure() method + won't be called in the consumer when the deserializer is passed in directly.</dd></dl> +</li> +</ul> +</li> +</ul> +<!-- ============ METHOD DETAIL ========== --> +<ul class="blockList"> +<li class="blockList"><a name="method_detail"> +<!-- --> +</a> +<h3>Method Detail</h3> +<a name="subscriptions()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>subscriptions</h4> +<pre>public java.util.Set<<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>> subscriptions()</pre> +<div class="block">The set of partitions currently assigned to this consumer. 
If subscription happened by directly subscribing to + partitions using <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(org.apache.kafka.common.TopicPartition...)"><code>subscribe(TopicPartition...)</code></a> then this will simply return the list of partitions that + were subscribed to. If subscription was done by specifying only the topic using <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(java.lang.String...)"><code>subscribe(String...)</code></a> + then this will give the set of topics currently assigned to the consumer (which may be none if the assignment + hasn't happened yet, or the partitions are in the process of getting reassigned).</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#subscriptions()">subscriptions</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscriptions()"><code>subscriptions()</code></a></dd></dl> +</li> +</ul> +<a name="subscribe(java.lang.String...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>subscribe</h4> +<pre>public void subscribe(java.lang.String... 
topics)</pre> +<div class="block">Incrementally subscribes to the given list of topics and uses the consumer's group management functionality + <p> + As part of group management, the consumer will keep track of the list of consumers that belong to a particular + group and will trigger a rebalance operation if one of the following events trigger - + <ul> + <li>Number of partitions change for any of the subscribed list of topics + <li>Topic is created or deleted + <li>An existing member of the consumer group dies + <li>A new member is added to an existing consumer group via the join API + </ul></div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#subscribe(java.lang.String...)">subscribe</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>topics</code> - A variable list of topics that the consumer wants to subscribe to</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(java.lang.String...)"><code>subscribe(String...)</code></a></dd></dl> +</li> +</ul> +<a name="subscribe(org.apache.kafka.common.TopicPartition...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>subscribe</h4> +<pre>public void subscribe(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</pre> +<div class="block">Incrementally subscribes to a specific topic partition and does not use the consumer's group management + functionality. As such, there will be no rebalance operation triggered when group membership or cluster and topic + metadata change. + <p></div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#subscribe(org.apache.kafka.common.TopicPartition...)">subscribe</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>partitions</code> - Partitions to incrementally subscribe to</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#subscribe(org.apache.kafka.common.TopicPartition...)"><code>subscribe(TopicPartition...)</code></a></dd></dl> +</li> +</ul> +<a name="unsubscribe(java.lang.String...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>unsubscribe</h4> +<pre>public void unsubscribe(java.lang.String... topics)</pre> +<div class="block">Unsubscribe from the specific topics. 
This will trigger a rebalance operation and records for this topic will not + be returned from the next <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll()</code></a> onwards</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#unsubscribe(java.lang.String...)">unsubscribe</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>topics</code> - Topics to unsubscribe from</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#unsubscribe(java.lang.String...)"><code>unsubscribe(String...)</code></a></dd></dl> +</li> +</ul> +<a name="unsubscribe(org.apache.kafka.common.TopicPartition...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>unsubscribe</h4> +<pre>public void unsubscribe(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... partitions)</pre> +<div class="block">Unsubscribe from the specific topic partitions. 
Records for these partitions will not be returned from the next + <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll()</code></a> onwards</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#unsubscribe(org.apache.kafka.common.TopicPartition...)">unsubscribe</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>partitions</code> - Partitions to unsubscribe from</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#unsubscribe(org.apache.kafka.common.TopicPartition...)"><code>unsubscribe(TopicPartition...)</code></a></dd></dl> +</li> +</ul> +<a name="poll(long)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>poll</h4> +<pre>public <a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecords.html" title="class in org.apache.kafka.clients.consumer">ConsumerRecords</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>> poll(long timeout)</pre> +<div class="block">Fetches data for the topics or partitions specified using one of the subscribe APIs. It is an error to not have + subscribed to any topics or partitions before polling for data. 
+ <p> + The offset used for fetching the data is governed by whether or not <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)"><code>seek(TopicPartition, long)</code></a> is used. + If <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)"><code>seek(TopicPartition, long)</code></a> is used, it will use the specified offsets on startup and on every + rebalance, to consume data from that offset sequentially on every poll. If not, it will use the last checkpointed + offset using <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(java.util.Map, org.apache.kafka.clients.consumer.CommitType)"><code>commit(offsets, sync)</code></a> for the subscribed list of partitions.</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#poll(long)">poll</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>timeout</code> - The time, in milliseconds, spent waiting in poll if data is not available. If 0, waits + indefinitely. 
Must not be negative</dd> +<dt><span class="strong">Returns:</span></dt><dd>map of topic to records since the last fetch for the subscribed list of topics and partitions</dd> +<dt><span class="strong">Throws:</span></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html" title="class in org.apache.kafka.clients.consumer">NoOffsetForPartitionException</a></code> - If there is no stored offset for a subscribed partition and no automatic + offset reset policy has been configured.</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll(long)</code></a></dd></dl> +</li> +</ul> +<a name="commit(java.util.Map, org.apache.kafka.clients.consumer.CommitType)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>commit</h4> +<pre>public void commit(java.util.Map<<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>,java.lang.Long> offsets, + <a href="../../../../../org/apache/kafka/clients/consumer/CommitType.html" title="enum in org.apache.kafka.clients.consumer">CommitType</a> commitType)</pre> +<div class="block">Commits the specified offsets for the specified list of topics and partitions to Kafka. + <p> + This commits offsets to Kafka. The offsets committed using this API will be used on the first fetch after every + rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API + should not be used. + <p> + A non-blocking commit will attempt to commit offsets asynchronously. No error will be thrown if the commit fails. + A blocking commit will wait for a response acknowledging the commit. 
In the event of an error it will retry until + the commit succeeds.</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#commit(java.util.Map, org.apache.kafka.clients.consumer.CommitType)">commit</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>offsets</code> - The list of offsets per partition that should be committed to Kafka.</dd><dd><code>commitType</code> - Control whether the commit is blocking</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(java.util.Map, org.apache.kafka.clients.consumer.CommitType)"><code>commit(Map, CommitType)</code></a></dd></dl> +</li> +</ul> +<a name="commit(org.apache.kafka.clients.consumer.CommitType)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>commit</h4> +<pre>public void commit(<a href="../../../../../org/apache/kafka/clients/consumer/CommitType.html" title="enum in org.apache.kafka.clients.consumer">CommitType</a> commitType)</pre> +<div class="block">Commits offsets returned on the last <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll()</code></a> for the subscribed list of topics and partitions. + <p> + This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after + every rebalance and also on startup. 
As such, if you need to store offsets in anything other than Kafka, this API + should not be used.</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#commit(org.apache.kafka.clients.consumer.CommitType)">commit</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>commitType</code> - Whether or not the commit should block until it is acknowledged.</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#commit(org.apache.kafka.clients.consumer.CommitType)"><code>commit(CommitType)</code></a></dd></dl> +</li> +</ul> +<a name="seek(org.apache.kafka.common.TopicPartition, long)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>seek</h4> +<pre>public void seek(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition, + long offset)</pre> +<div class="block">Overrides the fetch offsets that the consumer will use on the next <a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#poll(long)"><code>poll(timeout)</code></a>. If this API + is invoked for the same partition more than once, the latest offset will be used on the next poll(). 
Note that + you may lose data if this API is arbitrarily used in the middle of consumption, to reset the fetch offsets</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#seek(org.apache.kafka.common.TopicPartition, long)">seek</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seek(org.apache.kafka.common.TopicPartition, long)"><code>seek(TopicPartition, long)</code></a></dd></dl> +</li> +</ul> +<a name="seekToBeginning(org.apache.kafka.common.TopicPartition...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>seekToBeginning</h4> +<pre>public void seekToBeginning(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</pre> +<div class="block">Seek to the first offset for each of the given partitions</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#seekToBeginning(org.apache.kafka.common.TopicPartition...)">seekToBeginning</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToBeginning(org.apache.kafka.common.TopicPartition...)"><code>seekToBeginning(TopicPartition...)</code></a></dd></dl> +</li> +</ul> +<a name="seekToEnd(org.apache.kafka.common.TopicPartition...)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>seekToEnd</h4> +<pre>public void seekToEnd(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a>... 
partitions)</pre> +<div class="block">Seek to the last offset for each of the given partitions</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#seekToEnd(org.apache.kafka.common.TopicPartition...)">seekToEnd</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#seekToEnd(org.apache.kafka.common.TopicPartition...)"><code>seekToEnd(TopicPartition...)</code></a></dd></dl> +</li> +</ul> +<a name="position(org.apache.kafka.common.TopicPartition)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>position</h4> +<pre>public long position(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition)</pre> +<div class="block">Returns the offset of the <i>next record</i> that will be fetched (if a record with that offset exists).</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#position(org.apache.kafka.common.TopicPartition)">position</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" 
title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>partition</code> - The partition to get the position for</dd> +<dt><span class="strong">Returns:</span></dt><dd>The offset</dd> +<dt><span class="strong">Throws:</span></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html" title="class in org.apache.kafka.clients.consumer">NoOffsetForPartitionException</a></code> - If a position hasn't been set for a given partition, and no reset policy is + available.</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#position(org.apache.kafka.common.TopicPartition)"><code>position(TopicPartition)</code></a></dd></dl> +</li> +</ul> +<a name="committed(org.apache.kafka.common.TopicPartition)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>committed</h4> +<pre>public long committed(<a href="../../../../../org/apache/kafka/common/TopicPartition.html" title="class in org.apache.kafka.common">TopicPartition</a> partition)</pre> +<div class="block">Fetches the last committed offset for the given partition (whether the commit happened by this process or + another). This offset will be used as the position for the consumer in the event of a failure. 
+ <p> + This call may block to do a remote call if the partition in question isn't assigned to this consumer or if the + consumer hasn't yet initialized it's cache of committed offsets.</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#committed(org.apache.kafka.common.TopicPartition)">committed</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>partition</code> - The partition to check</dd> +<dt><span class="strong">Returns:</span></dt><dd>The last committed offset or null if no offset has been committed</dd> +<dt><span class="strong">Throws:</span></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/NoOffsetForPartitionException.html" title="class in org.apache.kafka.clients.consumer">NoOffsetForPartitionException</a></code> - If no offset has ever been committed by any process for the given + partition.</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#committed(org.apache.kafka.common.TopicPartition)"><code>committed(TopicPartition)</code></a></dd></dl> +</li> +</ul> +<a name="metrics()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>metrics</h4> +<pre>public java.util.Map<<a href="../../../../../org/apache/kafka/common/MetricName.html" title="class in org.apache.kafka.common">MetricName</a>,? 
extends <a href="../../../../../org/apache/kafka/common/Metric.html" title="interface in org.apache.kafka.common">Metric</a>> metrics()</pre> +<div class="block">Get the metrics kept by the consumer</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#metrics()">metrics</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#metrics()"><code>metrics()</code></a></dd></dl> +</li> +</ul> +<a name="partitionsFor(java.lang.String)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>partitionsFor</h4> +<pre>public java.util.List<<a href="../../../../../org/apache/kafka/common/PartitionInfo.html" title="class in org.apache.kafka.common">PartitionInfo</a>> partitionsFor(java.lang.String topic)</pre> +<div class="block">Get metadata about the partitions for a given topic. 
This method will issue a remote call to the server if it + does not already have any metadata about the given topic.</div> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#partitionsFor(java.lang.String)">partitionsFor</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">Parameters:</span></dt><dd><code>topic</code> - The topic to get partition metadata for</dd> +<dt><span class="strong">Returns:</span></dt><dd>The list of partitions</dd><dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#partitionsFor(java.lang.String)"><code>partitionsFor(String)</code></a></dd></dl> +</li> +</ul> +<a name="close()"> +<!-- --> +</a> +<ul class="blockListLast"> +<li class="blockList"> +<h4>close</h4> +<pre>public void close()</pre> +<dl> +<dt><strong>Specified by:</strong></dt> +<dd><code>close</code> in interface <code>java.io.Closeable</code></dd> +<dt><strong>Specified by:</strong></dt> +<dd><code>close</code> in interface <code>java.lang.AutoCloseable</code></dd> +<dt><strong>Specified by:</strong></dt> +<dd><code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html#close()">close</a></code> in interface <code><a href="../../../../../org/apache/kafka/clients/consumer/Consumer.html" title="interface in org.apache.kafka.clients.consumer">Consumer</a><<a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">K</a>,<a 
href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html" title="type parameter in KafkaConsumer">V</a>></code></dd> +<dt><span class="strong">See Also:</span></dt><dd><a href="../../../../../org/apache/kafka/clients/consumer/KafkaConsumer.html#close()"><code>close()</code></a></dd></dl> +</li> +</ul> +</li> +</ul> +</li> +</ul> +</div> +</div> +<!-- ========= END OF CLASS DATA ========= --> +<!-- ======= START OF BOTTOM NAVBAR ====== --> +<div class="bottomNav"><a name="navbar_bottom"> +<!-- --> +</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> +<!-- --> +</a> +<ul class="navList" title="Navigation"> +<li><a href="../../../../../overview-summary.html">Overview</a></li> +<li><a href="package-summary.html">Package</a></li> +<li class="navBarCell1Rev">Class</li> +<li><a href="package-tree.html">Tree</a></li> +<li><a href="../../../../../deprecated-list.html">Deprecated</a></li> +<li><a href="../../../../../index-all.html">Index</a></li> +<li><a href="../../../../../help-doc.html">Help</a></li> +</ul> +</div> +<div class="subNav"> +<ul class="navList"> +<li><a href="../../../../../org/apache/kafka/clients/consumer/ConsumerRecords.html" title="class in org.apache.kafka.clients.consumer"><span class="strong">Prev Class</span></a></li> +<li><a href="../../../../../org/apache/kafka/clients/consumer/MockConsumer.html" title="class in org.apache.kafka.clients.consumer"><span class="strong">Next Class</span></a></li> +</ul> +<ul class="navList"> +<li><a href="../../../../../index.html?org/apache/kafka/clients/consumer/KafkaConsumer.html" target="_top">Frames</a></li> +<li><a href="KafkaConsumer.html" target="_top">No Frames</a></li> +</ul> +<ul class="navList" id="allclasses_navbar_bottom"> +<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li> +</ul> +<div> +<script type="text/javascript"><!-- + allClassesLink = document.getElementById("allclasses_navbar_bottom"); + if(window==top) { + 
allClassesLink.style.display = "block"; + } + else { + allClassesLink.style.display = "none"; + } + //--> +</script> +</div> +<div> +<ul class="subNavList"> +<li>Summary: </li> +<li>Nested | </li> +<li>Field | </li> +<li><a href="#constructor_summary">Constr</a> | </li> +<li><a href="#method_summary">Method</a></li> +</ul> +<ul class="subNavList"> +<li>Detail: </li> +<li>Field | </li> +<li><a href="#constructor_detail">Constr</a> | </li> +<li><a href="#method_detail">Method</a></li> +</ul> +</div> +<a name="skip-navbar_bottom"> +<!-- --> +</a></div> +<!-- ======== END OF BOTTOM NAVBAR ======= --> +</body> +</html>