Spark in Production (11): Installing and Deploying ZooKeeper and Kafka Clusters on a 5-Node Distributed Cluster
http://www.apache.org/dyn/closer.cgi/zookeeper/
http://apache.fayea.com/zookeeper/zookeeper-3.4.10/
Version installed: zookeeper-3.4.10
https://www.apache.org/dyn/closer.cgi?path=/kafka/0.8.2.0/kafka_2.11-0.8.2.0.tgz
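The archives can be downloaded straight into the staging directory on master, for example with wget; this is only a sketch, and the mirror paths are assumptions based on the links above, so adjust them to a live mirror:

# Sketch: fetch the ZooKeeper and Kafka archives (mirror URLs are assumptions)
cd /usr/local/rhzf_spark_setupTools
wget http://apache.fayea.com/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz
wget https://archive.apache.org/dist/kafka/0.8.2.0/kafka_2.11-0.8.2.0.tgz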
[root@master ~]# cd /usr/local
[root@master local]# ls
bin games include lib libexec rhzf_spark_setupTools scala-2.11.8 spark-2.1.0-bin-hadoop2.6
etc hadoop-2.6.5 jdk1.8.0_121 lib64 rhzf_setup_scripts sbin share src
[root@master local]# cd rhzf_spark_setupTools
[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz jdk-8u121-linux-x64.tar.gz kafka_2.11-0.8.2.0.tar scala-2.11.8.zip spark-2.1.0-bin-hadoop2.6.tgz zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]# ls -ltr
total 642028
-rw-r--r--. 1 root root 28729349 Sep 9 2016 scala-2.11.8.zip
-rw-r--r--. 1 root root 183246769 Apr 10 10:37 jdk-8u121-linux-x64.tar.gz
-rw-r--r--. 1 root root 199635269 Apr 19 10:55 hadoop-2.6.5.tar.gz
-rw-r--r--. 1 root root 193281941 Apr 19 12:19 spark-2.1.0-bin-hadoop2.6.tgz
-rw-r--r--. 1 root root 17489920 Apr 28 10:25 kafka_2.11-0.8.2.0.tar
-rw-r--r--. 1 root root 35042811 Apr 28 10:42 zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]#
[root@master rhzf_spark_setupTools]# tar -zxvf zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz jdk-8u121-linux-x64.tar.gz kafka_2.11-0.8.2.0.tar scala-2.11.8.zip spark-2.1.0-bin-hadoop2.6.tgz zookeeper-3.4.10 zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]# mv zookeeper-3.4.10 /usr/local
[root@master rhzf_spark_setupTools]# cd /usr/local
[root@master local]# ls
bin games include lib libexec rhzf_spark_setupTools scala-2.11.8 spark-2.1.0-bin-hadoop2.6 zookeeper-3.4.10
etc hadoop-2.6.5 jdk1.8.0_121 lib64 rhzf_setup_scripts sbin share src
[root@master local]#
[root@master zookeeper-3.4.10]# pwd
/usr/local/zookeeper-3.4.10
[root@master zookeeper-3.4.10]# vi /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_121
export SCALA_HOME=/usr/local/scala-2.11.8
export HADOOP_HOME=/usr/local/hadoop-2.6.5
export SPARK_HOME=/usr/local/spark-2.1.0-bin-hadoop2.6
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.10
export PATH=.:$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$ZOOKEEPER_HOME/bin
"/etc/profile" 101L, 2104C written
[root@master zookeeper-3.4.10]# source /etc/profile
[root@master zookeeper-3.4.10]#
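A quick sanity check that the updated PATH is in effect (not part of the original session):

which zkServer.sh    # should print /usr/local/zookeeper-3.4.10/bin/zkServer.sh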
[root@master conf]# mv zoo_sample.cfg zoo.cfg
[root@master conf]# vi zoo.cfg
dataDir=/usr/local/zookeeper-3.4.10/tmp/zkdata
dataLogDir=/usr/local/zookeeper-3.4.10/tmp/zkdatalog
server.1=master:2888:3888
server.2=worker01:2888:3888
server.3=worker02:2888:3888
[root@master conf]# cat zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
dataDir=/usr/local/zookeeper-3.4.10/tmp/zkdata
dataLogDir=/usr/local/zookeeper-3.4.10/tmp/zkdatalog
server.1=master:2888:3888
server.2=worker01:2888:3888
server.3=worker02:2888:3888
[root@master conf]#
"/etc/profile" 101L, 2104C written
[root@master zookeeper-3.4.10]# source /etc/profile
[root@master zookeeper-3.4.10]#
[root@master zookeeper-3.4.10]# mkdir tmp
[root@master zookeeper-3.4.10]# ls
bin conf dist-maven ivysettings.xml lib NOTICE.txt README.txt src zookeeper-3.4.10.jar zookeeper-3.4.10.jar.md5
build.xml contrib docs ivy.xml LICENSE.txt README_packaging.txt recipes tmp zookeeper-3.4.10.jar.asc zookeeper-3.4.10.jar.sha1
[root@master zookeeper-3.4.10]# cd tmp
[root@master tmp]# ls
[root@master tmp]# mkdir zkdata
[root@master tmp]# mkdir zkdatalog
[root@master tmp]# ls
zkdata zkdatalog
[root@master tmp]# cd zkdata
[root@master zkdata]# vi myid
1
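The myid file can also be written without opening vi; a minimal sketch, where the value must match the server.N line in zoo.cfg for that host (1 on master, 2 on worker01, 3 on worker02):

mkdir -p /usr/local/zookeeper-3.4.10/tmp/zkdata
echo 1 > /usr/local/zookeeper-3.4.10/tmp/zkdata/myid   # use 2 on worker01, 3 on worker02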
[root@master local]# ls
bin games include lib libexec rhzf_spark_setupTools scala-2.11.8 spark-2.1.0-bin-hadoop2.6 zookeeper-3.4.10
etc hadoop-2.6.5 jdk1.8.0_121 lib64 rhzf_setup_scripts sbin share src
[root@master local]# cd rhzf_setup_scripts
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh rhzf_hosts_scp.sh rhzf_jdk.sh rhzf_scala.sh rhzf_spark.sh rhzf_ssh.sh
[root@master rhzf_setup_scripts]# vi rhzf_zookeeper.sh
#!/bin/sh
# Copy the ZooKeeper installation and the updated /etc/profile to worker01 (.238) and worker02 (.239)
for i in 238 239
do
    scp -rq /usr/local/zookeeper-3.4.10 root@10.*.*.$i:/usr/local/zookeeper-3.4.10
    scp -rq /etc/profile root@10.*.*.$i:/etc/profile
    ssh root@10.*.*.$i source /etc/profile
done
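Note that running ssh root@... source /etc/profile only sources the file in that one non-interactive session; later login shells on the workers pick up the copied /etc/profile on their own. A hypothetical spot check that the copy landed, shown here for worker01 (10.*.*.238):

ssh root@10.*.*.238 "ls /usr/local/zookeeper-3.4.10 && grep ZOOKEEPER_HOME /etc/profile"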
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh rhzf_hosts_scp.sh rhzf_jdk.sh rhzf_scala.sh rhzf_spark.sh rhzf_ssh.sh rhzf_zookeeper.sh
[root@master rhzf_setup_scripts]# chmod u+x rhzf_zookeeper.sh
[root@master rhzf_setup_scripts]# pwd
/usr/local/rhzf_setup_scripts
[root@master rhzf_setup_scripts]# ./rhzf_zookeeper.sh
Last login: Thu Apr 27 12:08:33 2017 from 132.150.75.19
[root@worker01 ~]# cd /usr/local/zookeeper-3.4.10/tmp/zkdata
[root@worker01 zkdata]# ls
myid
[root@worker01 zkdata]# vi myid
2
[root@worker01 zkdata]# ls
myid
[root@worker01 zkdata]# cat myid
2
[root@worker01 zkdata]#
Last login: Tue Apr 25 14:58:04 2017 from 132.150.75.19
[root@worker02 ~]# cd /usr/local/zookeeper-3.4.10/tmp/zkdata
[root@worker02 zkdata]# ls
myid
[root@worker02 zkdata]# vi myid
3
[root@worker02 zkdata]# ls
myid
[root@worker02 zkdata]# cat myid
3
[root@worker02 zkdata]#
Start ZooKeeper on master; it starts normally and, once the quorum forms, runs as a follower:
[root@master bin]# jps
32195 NameNode
32421 HistoryServer
1701 SparkSubmit
31670 Master
32518 SecondaryNameNode
1911 Jps
[root@master bin]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@master bin]# jps
32195 NameNode
32421 HistoryServer
1701 SparkSubmit
31670 Master
32518 SecondaryNameNode
1975 Jps
1945 QuorumPeerMain
[root@master bin]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
[root@master bin]#
[root@worker01 zkdata]# ls
myid
[root@worker01 zkdata]# cat myid
2
[root@worker01 zkdata]# jps
32081 DataNode
15507 Jps
31146 Worker
[root@worker01 zkdata]# jps
32081 DataNode
31146 Worker
15535 Jps
[root@worker01 zkdata]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@worker01 zkdata]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: leader
[root@worker01 zkdata]# jps
32081 DataNode
15573 QuorumPeerMain
15754 Jps
31146 Worker
[root@worker01 zkdata]#
[root@worker02 zkdata]# ls
myid
[root@worker02 zkdata]# cat myid
3
[root@worker02 zkdata]# jps
29760 Worker
30822 DataNode
14159 Jps
[root@worker02 zkdata]# jps
29760 Worker
14179 Jps
30822 DataNode
[root@worker02 zkdata]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@worker02 zkdata]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
[root@worker02 zkdata]# jps
29760 Worker
14402 Jps
30822 DataNode
14217 QuorumPeerMain
[root@worker02 zkdata]#
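With all three servers up, the quorum can also be probed with ZooKeeper's four-letter-word commands from any node; a sketch assuming nc is installed and the commands are enabled (the default in 3.4.x):

for h in master worker01 worker02
do
    echo "$h: $(echo ruok | nc $h 2181)"      # expect "imok" from each server
    echo stat | nc $h 2181 | grep Mode        # shows leader or follower
done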
ZooKeeper is not installed on worker03 or worker04, so zkServer.sh is not found there:
[root@worker03 local]# zkServer.sh start
bash: zkServer.sh: command not found...
[root@worker03 local]# zkServer.sh status
bash: zkServer.sh: command not found...
[root@worker03 local]#
[root@worker04 ~]# zkServer.sh start
bash: zkServer.sh: command not found...
[root@worker04 ~]# zkServer.sh status
bash: zkServer.sh: command not found...
[root@worker04 ~]#
[root@master rhzf_spark_setupTools]# tar -xf kafka_2.11-0.8.2.0.tar
[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz jdk-8u121-linux-x64.tar.gz kafka_2.11-0.8.2.0 kafka_2.11-0.8.2.0.tar scala-2.11.8.zip spark-2.1.0-bin-hadoop2.6.tgz zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]# mv kafka_2.11-0.8.2.0 /usr/local
[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz jdk-8u121-linux-x64.tar.gz kafka_2.11-0.8.2.0.tar scala-2.11.8.zip spark-2.1.0-bin-hadoop2.6.tgz zookeeper-3.4.10.tar.gz
[root@master rhzf_spark_setupTools]# cd ..
[root@master local]# ls
bin games include kafka_2.11-0.8.2.0 lib64 rhzf_setup_scripts sbin share src
etc hadoop-2.6.5 jdk1.8.0_121 lib libexec rhzf_spark_setupTools scala-2.11.8 spark-2.1.0-bin-hadoop2.6 zookeeper-3.4.10
[root@master local]# cd kafka_2.11-0.8.2.0
[root@master kafka_2.11-0.8.2.0]# pwd
/usr/local/kafka_2.11-0.8.2.0
[root@master kafka_2.11-0.8.2.0]# vi /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_121
export SCALA_HOME=/usr/local/scala-2.11.8
export HADOOP_HOME=/usr/local/hadoop-2.6.5
export SPARK_HOME=/usr/local/spark-2.1.0-bin-hadoop2.6
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.10
export KAFKA_HOME=/usr/local/kafka_2.11-0.8.2.0
export PATH=.:$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$ZOOKEEPER_HOME/bin:$KAFKA_HOME/bin
[root@master kafka_2.11-0.8.2.0]# source /etc/profile
[root@master config]# pwd
/usr/local/kafka_2.11-0.8.2.0/config
[root@master config]# vi server.properties
host.name=10.*.*.237
zookeeper.connect=10.*.*.237:2181,10.*.*.238:2181,10.*.*.239:2181
[root@master config]# cat server.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The port the socket server listens on
port=9092
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
#host.name=localhost
host.name=10.*.*.237
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
#advertised.host.name=
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=
# The number of threads handling network requests
num.network.threads=3
# The number of threads doing disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma seperated list of directories under which to store log files
log.dirs=/tmp/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
#zookeeper.connect=localhost:2181
zookeeper.connect=10.*.*.237:2181,10.*.*.238:2181,10.*.*.239:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
[root@master config]#
[root@master rhzf_setup_scripts]# vi rhzf_kafka.sh
#!/bin/sh
# Copy the Kafka installation and the updated /etc/profile to worker01 (.238) and worker02 (.239)
for i in 238 239
do
    scp -rq /usr/local/kafka_2.11-0.8.2.0 root@10.*.*.$i:/usr/local/kafka_2.11-0.8.2.0
    scp -rq /etc/profile root@10.*.*.$i:/etc/profile
    ssh root@10.*.*.$i source /etc/profile
done
[root@master rhzf_setup_scripts]# chmod u+X rhzf_kafka.sh
[root@master rhzf_setup_scripts]# ./rhzf_kafka.sh
-bash: ./rhzf_kafka.sh: Permission denied
(chmod u+X was a typo: capital X only grants execute on directories or on files that already have an execute bit, so the script stayed non-executable; the lowercase u+x below fixes it.)
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh rhzf_hosts_scp.sh rhzf_jdk.sh rhzf_kafka.sh rhzf_scala.sh rhzf_spark.sh rhzf_ssh.sh rhzf_zookeeper.sh
[root@master rhzf_setup_scripts]# chmod u+x rhzf_kafka.sh
[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh rhzf_hosts_scp.sh rhzf_jdk.sh rhzf_kafka.sh rhzf_scala.sh rhzf_spark.sh rhzf_ssh.sh rhzf_zookeeper.sh
[root@master rhzf_setup_scripts]# ./rhzf_kafka.sh
[root@master rhzf_setup_scripts]#
[root@worker01 kafka_2.11-0.8.2.0]# ls
bin config kafka_2.11-0.8.2.0 libs LICENSE NOTICE
[root@worker01 kafka_2.11-0.8.2.0]# cd config
[root@worker01 config]# ls
consumer.properties log4j.properties producer.properties server.properties test-log4j.properties tools-log4j.properties zookeeper.properties
[root@worker01 config]# vi server.properties
broker.id=1
[root@worker02 ~]# cd /usr/local
[root@worker02 local]# cd kafka_2.11-0.8.2.0
[root@worker02 kafka_2.11-0.8.2.0]# ls
bin config kafka_2.11-0.8.2.0 libs LICENSE NOTICE
[root@worker02 kafka_2.11-0.8.2.0]# cd config
[root@worker02 config]# ls
consumer.properties log4j.properties producer.properties server.properties test-log4j.properties tools-log4j.properties zookeeper.properties
[root@worker02 config]# vi server.properties
broker.id=2
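The per-node edits to server.properties can also be scripted; a minimal sketch using sed, with ID and IP set per node (0/10.*.*.237 on master, 1/10.*.*.238 on worker01, 2/10.*.*.239 on worker02). This helper is not part of the original rhzf scripts:

ID=1; IP=10.*.*.238                                            # values for worker01 in this example
CFG=/usr/local/kafka_2.11-0.8.2.0/config/server.properties
sed -i "s/^broker.id=.*/broker.id=$ID/" $CFG                   # unique broker id per node
sed -i "s/^host.name=.*/host.name=$IP/" $CFG                   # bind to the node's own address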
[root@master bin]# nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties &
[1] 4112
[root@master bin]# nohup: ignoring input and appending output to 'nohup.out'
[root@master bin]# jps
4192 Jps
4112 Kafka
32195 NameNode
32421 HistoryServer
1701 SparkSubmit
31670 Master
32518 SecondaryNameNode
1945 QuorumPeerMain
[root@master bin]# jps
4112 Kafka
32195 NameNode
4243 Jps
32421 HistoryServer
1701 SparkSubmit
31670 Master
32518 SecondaryNameNode
1945 QuorumPeerMain
[root@master bin]#
[root@worker01 config]# nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties &
[1] 16843
[root@worker01 config]# nohup: ignoring input and appending output to 'nohup.out'
[1]+ Exit 1 nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties
[root@worker01 config]# jps
32081 DataNode
15573 QuorumPeerMain
31146 Worker
16893 Jps
[root@worker02 config]# nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties &
[1] 15529
[root@worker02 config]# nohup: ignoring input and appending output to 'nohup.out'
[1]+ Exit 1 nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties
[root@worker02 config]# jps
29760 Worker
15571 Jps
30822 DataNode
14217 QuorumPeerMain
worker01 and worker02 still need one more change.
On worker01, change the bind address in /usr/local/kafka_2.11-0.8.2.0/config/server.properties:
host.name=10.*.*.238
On worker02, change the bind address in /usr/local/kafka_2.11-0.8.2.0/config/server.properties:
host.name=10.*.*.239
Restart the Kafka cluster on the workers; this time everything comes up cleanly.
[root@worker01 config]# nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties &
[1] 17250
[root@worker01 config]# nohup: ignoring input and appending output to 'nohup.out'
[root@worker01 config]# jps
32081 DataNode
17313 Jps
17250 Kafka
15573 QuorumPeerMain
31146 Worker
[root@worker01 config]#
[root@worker02 config]# nohup /usr/local/kafka_2.11-0.8.2.0/bin/kafka-server-start.sh /usr/local/kafka_2.11-0.8.2.0/config/server.properties &
[1] 15875
[root@worker02 config]# nohup: ignoring input and appending output to 'nohup.out'
[root@worker02 config]# jps
29760 Worker
15875 Kafka
15939 Jps
30822 DataNode
14217 QuorumPeerMain
[root@worker02 config]#
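Once all three brokers are running they register themselves under /brokers/ids in ZooKeeper, which can be checked with zkCli.sh (a sketch, not from the original session):

zkCli.sh -server master:2181 ls /brokers/ids    # expect the broker ids [0, 1, 2]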
6. Test the Kafka cluster
[root@master bin]# kafka-topics.sh --create --zookeeper master:2181,worker1:2181,worker2:2181 --replication-factor 1 --partitions 1 --topic AdClickedTest
Exception in thread "main" org.I0Itec.zkclient.exception.ZkException: Unable to connect to master:2181,worker1:2181,worker2:2181
at org.I0Itec.zkclient.ZkConnection.connect(ZkConnection.java:66)
at org.I0Itec.zkclient.ZkClient.connect(ZkClient.java:876)
at org.I0Itec.zkclient.ZkClient.<init>(ZkClient.java:98)
at org.I0Itec.zkclient.ZkClient.<init>(ZkClient.java:84)
at kafka.admin.TopicCommand$.main(TopicCommand.scala:51)
at kafka.admin.TopicCommand.main(TopicCommand.scala)
Caused by: java.net.UnknownHostException: worker1: Name or service not known
at java.net.Inet6AddressImpl.lookupAllHostAddr(Native Method)
at java.net.InetAddress$2.lookupAllHostAddr(InetAddress.java:928)
at java.net.InetAddress.getAddressesFromNameService(InetAddress.java:1323)
at java.net.InetAddress.getAllByName0(InetAddress.java:1276)
at java.net.InetAddress.getAllByName(InetAddress.java:1192)
at java.net.InetAddress.getAllByName(InetAddress.java:1126)
at org.apache.zookeeper.client.StaticHostProvider.<init>(StaticHostProvider.java:61)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:445)
at org.apache.zookeeper.ZooKeeper.<init>(ZooKeeper.java:380)
at org.I0Itec.zkclient.ZkConnection.connect(ZkConnection.java:64)
... 5 more
worker1 was mistyped; after correcting it to worker01 the command succeeds and a new topic is created:
[root@master config]# kafka-topics.sh --create --zookeeper master:2181,worker01:2181,worker02:2181 --replication-factor 1 --partitions 1 --topic AdClickedTest
Created topic "AdClickedTest".
[root@master config]# kafka-topics.sh --describe --zookeeper master:2181,worker01:2181,worker02:2181
Topic:AdClickedTest PartitionCount:1 ReplicationFactor:1 Configs:
Topic: AdClickedTest Partition: 0 Leader: 2 Replicas: 2 Isr: 2
[root@master config]#
Produce messages on the master node and consume them on worker01:
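The producer session on master is not captured above; it would look roughly like the following console-producer sketch, with the broker addresses assumed from the cluster layout:

kafka-console-producer.sh --broker-list 10.*.*.237:9092,10.*.*.238:9092,10.*.*.239:9092 --topic AdClickedTest
# then type the messages that the consumer below receives, e.g. "This is a message"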
[root@worker01 config]# kafka-console-consumer.sh --zookeeper master:2181,worker01:2181,worker02:2181 --topic AdClickedTest
This is a message
This is another message
welcome to Beijing!
This verifies that the Kafka deployment is successful.
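For completeness, when the cluster has to be brought down, stop the Kafka brokers first and ZooKeeper last (a sketch using the stock scripts):

kafka-server-stop.sh        # on master, worker01, worker02
zkServer.sh stop            # afterwards, on the same three nodes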