Installing ZooKeeper
We can register ZooKeeper as a system service by adding an /etc/init.d/zookeeper script. First download and unpack ZooKeeper:
cd /opt
wget http://apache.fayea.com/apache-mirror/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
tar zxvf zookeeper-3.4.6.tar.gz
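Note that zkServer.sh expects a conf/zoo.cfg, which the tarball ships only as a sample; for a single-node demo, copying the sample is enough:
cp zookeeper-3.4.6/conf/zoo_sample.cfg zookeeper-3.4.6/conf/zoo.cfg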
Then create the service script:
vi /etc/init.d/zookeeper
Copy the contents of https://raw.githubusercontent.com/apache/zookeeper/trunk/src/packages/rpm/init.d/zookeeper
into this file, then change the user that runs ZooKeeper and the path to the ZooKeeper directory. The relevant fragment after editing:
......
start() {
    echo -n $"Starting $desc (zookeeper): "
    daemon --user root /opt/zookeeper-3.4.6/bin/zkServer.sh start
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/zookeeper
    return $RETVAL
}

stop() {
    echo -n $"Stopping $desc (zookeeper): "
    daemon --user root /opt/zookeeper-3.4.6/bin/zkServer.sh stop
    RETVAL=$?
    sleep 5
    echo
    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/zookeeper $PIDFILE
}
......
chmod 755 /etc/init.d/zookeeper
service zookeeper start
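To start ZooKeeper on boot as well, register the script with chkconfig (assuming the copied script keeps its chkconfig header):
chkconfig --add zookeeper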
If you would rather not register it as a service, you can also run ZooKeeper directly:
/opt/zookeeper-3.4.6/bin/zkServer.sh start
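Either way, you can verify that ZooKeeper is answering with its four-letter-word health check; a healthy server replies imok:
echo ruok | nc localhost 2181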
Installing Kafka
wget http://apache.01link.hk/kafka/0.8.2-beta/kafka_2.9.1-0.8.2-beta.tgz
tar zxvf kafka_2.9.1-0.8.2-beta.tgz
cd kafka_2.9.1-0.8.2-beta
With the ZooKeeper instance from the previous step still running (config/server.properties points at localhost:2181 by default), start the broker:
bin/kafka-server-start.sh config/server.properties
Then create a test topic:
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
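Before moving to Java, the console clients bundled with Kafka are a quick way to verify the broker end to end; type a few lines into the producer and watch the consumer echo them:
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning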
Sending messages with the native Kafka producer API
package com.colobu.spring_kafka_demo;

import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class NativeProducer {
    public static void main(String[] args) {
        String topic = "test";
        long events = 100;
        Random rand = new Random();

        Properties props = new Properties();
        props.put("metadata.broker.list", "localhost:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);

        Producer<String, String> producer = new Producer<String, String>(config);
        for (long nEvents = 0; nEvents < events; nEvents++) {
            String msg = "NativeMessage-" + rand.nextInt();
            // key is the event number, value is the message body
            KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, nEvents + "", msg);
            producer.send(data);
        }
        producer.close();
    }
}
java -cp target/lib/*:target/spring-kafka-demo-0.2.0-SNAPSHOT.jar com.colobu.spring_kafka_demo.NativeProducer
Receiving messages with the Kafka high-level consumer API
package com.colobu.spring_kafka_demo;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
public class NativeConsumer {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;

    public NativeConsumer(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null)
            consumer.shutdown();
        if (executor != null)
            executor.shutdown();
    }

    public void run(int a_numThreads) {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // now launch all the threads
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber));
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }

    public static void main(String[] args) {
        String zooKeeper = "localhost:2181";
        String groupId = "mygroup";
        String topic = "test";
        int threads = 1;
        NativeConsumer example = new NativeConsumer(zooKeeper, groupId, topic);
        example.run(threads);
        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
        }
        //example.shutdown();
    }
}

class ConsumerTest implements Runnable {
    private KafkaStream<byte[], byte[]> m_stream;
    private int m_threadNumber;

    public ConsumerTest(KafkaStream<byte[], byte[]> a_stream, int a_threadNumber) {
        m_threadNumber = a_threadNumber;
        m_stream = a_stream;
    }

    public void run() {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        while (it.hasNext())
            System.out.println("Thread " + m_threadNumber + ": " + new String(it.next().message()));
        System.out.println("Shutting down Thread: " + m_threadNumber);
    }
}
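Run the consumer the same way as the producer (assuming the same jar layout as the NativeProducer command above):
java -cp target/lib/*:target/spring-kafka-demo-0.2.0-SNAPSHOT.jar com.colobu.spring_kafka_demo.NativeConsumer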
Sending messages with Spring Integration Kafka
The producer simply writes to a Spring Integration channel; the wiring to Kafka lives in context.xml:
package com.colobu.spring_kafka_demo;

import java.util.Random;

import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.MessageChannel;

public class Producer {
    private static final String CONFIG = "/context.xml";
    private static Random rand = new Random();

    public static void main(String[] args) {
        final ClassPathXmlApplicationContext ctx = new ClassPathXmlApplicationContext(CONFIG, Producer.class);
        ctx.start();
        final MessageChannel channel = ctx.getBean("inputToKafka", MessageChannel.class);
        for (int i = 0; i < 100; i++) {
            // the "topic" and "messageKey" headers tell the outbound adapter where to send the payload
            channel.send(MessageBuilder.withPayload("Message-" + rand.nextInt())
                    .setHeader("messageKey", String.valueOf(i))
                    .setHeader("topic", "test").build());
        }
        // give the poller time to drain the queue channel before shutting down
        try {
            Thread.sleep(100000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        ctx.close();
    }
}
Spring configuration file (context.xml):
Only fragments of the XML survived the reposting; the four values left behind (3600000, 5, kafka.serializer.StringEncoder, 1) correspond to the producer-properties bean of the spring-integration-kafka sample configuration:

<bean id="producerProperties" class="org.springframework.beans.factory.config.PropertiesFactoryBean">
    <property name="properties">
        <props>
            <prop key="topic.metadata.refresh.interval.ms">3600000</prop>
            <prop key="message.send.max.retries">5</prop>
            <prop key="serializer.class">kafka.serializer.StringEncoder</prop>
            <prop key="request.required.acks">1</prop>
        </props>
    </property>
</bean>
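The rest of context.xml did not survive. Below is a minimal sketch of the missing wiring, modeled on the spring-integration-kafka sample configuration: the channel name inputToKafka and the topic test come from the code above, while the ids, poller settings, and the producer-properties attribute are assumptions (exact attribute names vary across early spring-integration-kafka releases). Namespace declarations for beans, int, and int-kafka are omitted; the producer-configuration attributes used here are documented in the list that follows.

<!-- hypothetical remainder of context.xml; ids and poller settings are assumptions -->
<int:channel id="inputToKafka">
    <int:queue/>
</int:channel>

<int-kafka:outbound-channel-adapter id="kafkaOutboundChannelAdapter"
        kafka-producer-context-ref="kafkaProducerContext"
        channel="inputToKafka">
    <int:poller fixed-delay="1000" time-unit="MILLISECONDS" receive-timeout="0"/>
</int-kafka:outbound-channel-adapter>

<int-kafka:producer-context id="kafkaProducerContext" producer-properties="producerProperties">
    <int-kafka:producer-configurations>
        <int-kafka:producer-configuration broker-list="localhost:9092"
                topic="test"
                key-class-type="java.lang.String"
                value-class-type="java.lang.String"/>
    </int-kafka:producer-configurations>
</int-kafka:producer-context>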
The attributes of the int-kafka:producer-configuration element, as documented by spring-integration-kafka:
broker-list: comma-separated list of brokers this producer connects to.
topic: topic name, or a Java regex pattern matching topic names.
compression-codec: compression method to use. Supported codecs are gzip and snappy; anything else (including the default) results in no compression.
value-encoder: serializer used for encoding messages.
key-encoder: serializer used for encoding the partition key.
key-class-type: type of the key class; ignored if no key-encoder is provided.
value-class-type: type of the value class; ignored if no value-encoder is provided.
partitioner: custom implementation of the Kafka Partitioner interface.
async: true/false, default false; when true the adapter uses Kafka's async producer.
batch-num-messages: number of messages to batch at the producer; takes effect only when async is true.
Receiving messages with Spring Integration Kafka
The consumer polls messages off a channel that an inbound adapter fills; a sketch of the consumer_context.xml it loads follows the code:
package com.colobu.spring_kafka_demo;

import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.slf4j.LoggerFactory;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.messaging.Message;

import ch.qos.logback.classic.Level;
public class Consumer {
    private static final String CONFIG = "/consumer_context.xml";
    private static Random rand = new Random();

    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static void main(String[] args) {
        ch.qos.logback.classic.Logger rootLogger = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(ch.qos.logback.classic.Logger.ROOT_LOGGER_NAME);
        rootLogger.setLevel(Level.toLevel("info"));
        final ClassPathXmlApplicationContext ctx = new ClassPathXmlApplicationContext(CONFIG, Consumer.class);
        ctx.start();
        final QueueChannel channel = ctx.getBean("inputFromKafka", QueueChannel.class);
        Message msg;
        // each received payload is a map: topic -> (partition -> list of raw messages)
        while ((msg = channel.receive()) != null) {
            HashMap map = (HashMap) msg.getPayload();
            Set<Map.Entry> set = map.entrySet();
            for (Map.Entry entry : set) {
                String topic = (String) entry.getKey();
                System.out.println("Topic:" + topic);
                ConcurrentHashMap<Integer, List<byte[]>> messages = (ConcurrentHashMap<Integer, List<byte[]>>) entry.getValue();
                Collection<List<byte[]>> values = messages.values();
                for (Iterator<List<byte[]>> iterator = values.iterator(); iterator.hasNext();) {
                    List<byte[]> list = iterator.next();
                    for (byte[] object : list) {
                        String message = new String(object);
                        System.out.println("\tMessage: " + message);
                    }
                }
            }
        }
        try {
            Thread.sleep(100000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        ctx.close();
    }
}
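The post never shows consumer_context.xml. Here is a minimal sketch, assuming the element names of the spring-integration-kafka sample configuration; only the channel inputFromKafka, the topic test, the group mygroup, and localhost:2181 are taken from the code above, and everything else (ids, timeouts, streams and max-messages values) is an illustrative assumption:

<!-- hypothetical consumer_context.xml; ids and timeout values are assumptions -->
<int-kafka:zookeeper-connect id="zookeeperConnect" zk-connect="localhost:2181"
        zk-connection-timeout="6000" zk-session-timeout="6000" zk-sync-time="2000"/>

<int:channel id="inputFromKafka">
    <int:queue/>
</int:channel>

<int-kafka:inbound-channel-adapter id="kafkaInboundChannelAdapter"
        kafka-consumer-context-ref="consumerContext"
        channel="inputFromKafka">
    <int:poller fixed-delay="10" time-unit="MILLISECONDS" max-messages-per-poll="5"/>
</int-kafka:inbound-channel-adapter>

<int-kafka:consumer-context id="consumerContext"
        consumer-timeout="4000" zookeeper-connect="zookeeperConnect">
    <int-kafka:consumer-configurations>
        <int-kafka:consumer-configuration group-id="mygroup" max-messages="200">
            <int-kafka:topic id="test" streams="1"/>
        </int-kafka:consumer-configuration>
    </int-kafka:consumer-configurations>
</int-kafka:consumer-context>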
Reposted from: http://www.aboutyun.com/forum.php?mod=viewthread&tid=10321