Hi Experts,

I am getting the error below when running the console consumer
(kafka-console-consumer.sh).

I am using the new version 0.9.0.1.
Topic name: test


[2015-12-28 06:13:34,409] WARN
[console-consumer-61657_localhost-1451283204993-5512891d-leader-finder-thread],
Failed to find leader for Set([test,0])
(kafka.consumer.ConsumerFetcherManager$LeaderFinderThread)
kafka.common.BrokerEndPointNotAvailableException: End point PLAINTEXT not
found for broker 0
        at kafka.cluster.Broker.getBrokerEndPoint(Broker.scala:136)
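
For reference, I am starting the consumer roughly as follows (reconstructed
here from memory, so the exact flags may differ slightly):

    bin/kafka-console-consumer.sh --zookeeper localhost:2181 \
        --topic test \
        --consumer.config config/consumer.properties \
        --from-beginning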


Please find my current configuration below.


[root@localhost config]# grep -v "^#" consumer.properties
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=60000
group.id=test-consumer-group
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name="kafka"
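
For completeness: the clients are started with a JAAS file passed through
KAFKA_OPTS. The keytab path and principal below are placeholders, not my
actual values.

    export KAFKA_OPTS="-Djava.security.auth.login.config=/path/to/kafka_client_jaas.conf"

    // kafka_client_jaas.conf (placeholder keytab/principal)
    KafkaClient {
        com.sun.security.auth.module.Krb5LoginModule required
        useKeyTab=true
        storeKey=true
        keyTab="/etc/security/keytabs/kafka_client.keytab"
        principal="kafkaclient/localhost@EXAMPLE.COM";
    };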


[root@localhost config]# grep -v "^#" producer.properties
metadata.broker.list=localhost:9094,localhost:9095
producer.type=sync
compression.codec=none
serializer.class=kafka.serializer.DefaultEncoder
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name="kafka"
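
On the producer side I am running something like this (again from memory,
so the option spelling may be slightly off):

    bin/kafka-console-producer.sh --broker-list localhost:9094,localhost:9095 \
        --topic test \
        --producer.config config/producer.properties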

[root@localhost config]# grep -v "^#" server1.properties

broker.id=0
listeners=SASL_PLAINTEXT://localhost:9094
delete.topic.enable=true
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka_2.11-0.9.0.0/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=false
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=60000
inter.broker.protocol.version=0.9.0.0
security.inter.broker.protocol=SASL_PLAINTEXT
allow.everyone.if.no.acl.found=true
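
(Side note: the broker registration that the consumer is complaining about
can be inspected in ZooKeeper with the command below; I can paste its output
if that helps.)

    bin/zookeeper-shell.sh localhost:2181 get /brokers/ids/0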


[root@localhost config]# grep -v "^#" server4.properties
broker.id=1
listeners=SASL_PLAINTEXT://localhost:9095
delete.topic.enable=true
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka_2.11-0.9.0.0/kafka-logs-1
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=false
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=60000
inter.broker.protocol.version=0.9.0.0
security.inter.broker.protocol=SASL_PLAINTEXT
zookeeper.sasl.client=zkclient
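
Both brokers are started with a server-side JAAS file along these lines
(keytab and principal are placeholders for the real values):

    // kafka_server_jaas.conf (placeholder keytab/principal)
    KafkaServer {
        com.sun.security.auth.module.Krb5LoginModule required
        useKeyTab=true
        storeKey=true
        keyTab="/etc/security/keytabs/kafka.keytab"
        principal="kafka/localhost@EXAMPLE.COM";
    };

    // used by the broker to connect to ZooKeeper
    Client {
        com.sun.security.auth.module.Krb5LoginModule required
        useKeyTab=true
        storeKey=true
        keyTab="/etc/security/keytabs/kafka.keytab"
        principal="kafka/localhost@EXAMPLE.COM";
    };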

[root@localhost config]# grep -v "^#" zookeeper.properties
dataDir=/data/kafka_2.11-0.9.0.0/zookeeper
clientPort=2181
maxClientCnxns=0
requireClientAuthScheme=sasl
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
jaasLoginRenew=3600000
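
ZooKeeper itself is started (via zookeeper-server-start.sh with KAFKA_OPTS
pointing at its JAAS file) with a Server section roughly like this; the
keytab and principal are again placeholders:

    // zookeeper_jaas.conf (placeholder keytab/principal)
    Server {
        com.sun.security.auth.module.Krb5LoginModule required
        useKeyTab=true
        storeKey=true
        keyTab="/etc/security/keytabs/zookeeper.keytab"
        principal="zookeeper/localhost@EXAMPLE.COM";
    };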


I would really appreciate your valuable inputs on this issue.
-- 
Regards,

Prabhu.V
