Hi - I've enabled SSL for Kafka and I'm trying to publish messages using the console producer.
The error is shown below - any ideas?

/usr/hdp/2.5.3.0-37/kafka/bin/kafka-console-producer.sh --broker-list nwk2-bdp-kafka-05.gdcs-qa.apple.com:6668,nwk2-bdp-kafka-04.gdcs-qa.apple.com:6668,nwk2-bdp-kafka-06.gdcs-qa.apple.com:6668 --topic sslTopic1 --producer.config /tmp/ssl-kafka/client-ssl.properties --security-protocol SSL
hi
[2017-07-25 19:10:54,750] ERROR Error when sending message to topic sslTopic1 with key: null, value: 2 bytes with error: (org.apache.kafka.clients.producer.internals.ErrorLoggingCallback)
org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.

client-ssl.properties:

security.protocol=SSL
ssl.truststore.location=/tmp/ssl-kafka/client.truststore.jks
ssl.truststore.password=changeit
ssl.keystore.location=/tmp/ssl-kafka/client.keystore.jks
ssl.keystore.password=changeit
ssl.key.password=changeit
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.keystore.type=JKS
ssl.truststore.type=JKS

A couple of sanity checks against the SSL listener are sketched after the server.properties.

Attaching the server.properties:
# Generated by Apache Ambari. Tue Jul 25 17:59:28 2017
auto.create.topics.enable=true
auto.leader.rebalance.enable=true
compression.type=producer
controlled.shutdown.enable=true
controlled.shutdown.max.retries=3
controlled.shutdown.retry.backoff.ms=5000
controller.message.queue.size=10
controller.socket.timeout.ms=30000
default.replication.factor=1
delete.topic.enable=false
external.kafka.metrics.exclude.prefix=kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec
external.kafka.metrics.include.prefix=kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request
fetch.purgatory.purge.interval.requests=10000
kafka.ganglia.metrics.group=kafka
kafka.ganglia.metrics.host=localhost
kafka.ganglia.metrics.port=8671
kafka.ganglia.metrics.reporter.enabled=true
kafka.metrics.reporters=org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter
kafka.timeline.metrics.host=nwk2-bdp-hadoop-07.gdcs-qa.apple.com
kafka.timeline.metrics.maxRowCacheSize=10000
kafka.timeline.metrics.port=6188
kafka.timeline.metrics.protocol=http
kafka.timeline.metrics.reporter.enabled=true
kafka.timeline.metrics.reporter.sendInterval=5900
kafka.timeline.metrics.truststore.password=bigdata
kafka.timeline.metrics.truststore.path=/etc/security/clientKeys/all.jks
kafka.timeline.metrics.truststore.type=jks
leader.imbalance.check.interval.seconds=300
leader.imbalance.per.broker.percentage=10
listeners=SSL://nwk2-bdp-kafka-04.gdcs-qa.apple.com:6668,PLAINTEXT://nwk2-bdp-kafka-04.gdcs-qa.apple.com:6667
log.cleanup.interval.mins=10
log.dirs=/kafka-logs
log.index.interval.bytes=4096
log.index.size.max.bytes=10485760
log.retention.bytes=-1
log.retention.hours=168
log.roll.hours=168
log.segment.bytes=1073741824
message.max.bytes=1000000
min.insync.replicas=1
num.io.threads=8
num.network.threads=3
num.partitions=1
num.recovery.threads.per.data.dir=1
num.replica.fetchers=1
offset.metadata.max.bytes=4096
offsets.commit.required.acks=-1
offsets.commit.timeout.ms=5000
offsets.load.buffer.size=5242880
offsets.retention.check.interval.ms=600000
offsets.retention.minutes=86400000
offsets.topic.compression.codec=0
offsets.topic.num.partitions=50
offsets.topic.replication.factor=3
offsets.topic.segment.bytes=104857600
port=6667
producer.purgatory.purge.interval.requests=10000
queued.max.requests=500
replica.fetch.max.bytes=1048576
replica.fetch.min.bytes=1
replica.fetch.wait.max.ms=500
replica.high.watermark.checkpoint.interval.ms=5000
replica.lag.max.messages=4000
replica.lag.time.max.ms=10000
replica.socket.receive.buffer.bytes=65536
replica.socket.timeout.ms=30000
security.inter.broker.protocol=SSL
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
socket.send.buffer.bytes=102400
ssl.client.auth=none
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.endpoint.identification.algorithm=HTTPS
ssl.key.password=changeit
ssl.keystore.location=/tmp/ssl-kafka/server.keystore.jks
ssl.keystore.password=changeit
ssl.keystore.type=JKS
ssl.secure.random.implementation=SHA1PRNG
ssl.truststore.location=/tmp/ssl-kafka/server.truststore.jks
ssl.truststore.password=changeit
ssl.truststore.type=JKS
zookeeper.connect=nwk2-bdp-kafka-05.gdcs-qa.apple.com:2181,nwk2-bdp-kafka-04.gdcs-qa.apple.com:2181,nwk2-bdp-kafka-06.gdcs-qa.apple.com:2181
zookeeper.connection.timeout.ms=25000
zookeeper.session.timeout.ms=30000
zookeeper.sync.time.ms=2000
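In case it helps narrow this down, one quick way to confirm the broker is actually answering SSL on port 6668 (hostname and port taken from the command above; this is just a sketch, run it from the same machine as the producer) is:

# check the SSL handshake and print the certificate chain the broker presents
openssl s_client -connect nwk2-bdp-kafka-04.gdcs-qa.apple.com:6668 </dev/null

If the handshake completes, this prints the broker's certificate chain; if it hangs or shows no certificate, the SSL listener isn't reachable on that port, which would match the producer timing out while fetching metadata as in the error above.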
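The trust relationship can be sanity-checked with keytool as well - the client truststore should contain the CA (or the certificate itself) that signed the broker's keystore certificate. This is only a sketch, assuming the paths and passwords shown in client-ssl.properties and server.properties above, and the second command has to run on the broker host where server.keystore.jks lives:

# list what the client trusts
keytool -list -v -keystore /tmp/ssl-kafka/client.truststore.jks -storepass changeit

# on the broker: list the certificate the broker presents
keytool -list -v -keystore /tmp/ssl-kafka/server.keystore.jks -storepass changeit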