always_fly posted on 2018-3-6 16:14:24

Testing single-node Kafka with Java

My system is CentOS 7 (64-bit).

Java environment:



Kafka installation directory:



You need to modify server.properties in the config directory:

host.name=192.168.3.224    (this machine's IP)

log.dirs=/opt/local/kafka-0.8.1.1-src/logs    (log path; set it to whatever you prefer)



Then start ZooKeeper and Kafka:  bin/zookeeper-server-start.sh config/zookeeper.properties &

                  bin/kafka-server-start.sh config/server.properties &

To check whether they started successfully, verify that ports 2181 (ZooKeeper) and 9092 (Kafka) are listening.
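For example, one quick check (using ss, which ships with CentOS 7; netstat -tnlp also works if net-tools is installed):

Check ports: ss -tnlp | grep -E '2181|9092'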



Create the test topic: bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
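To confirm the topic was created, the same tool can list every topic registered in ZooKeeper:

List topics: bin/kafka-topics.sh --list --zookeeper localhost:2181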

Start the console producer: bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

Start the console consumer: bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

Whatever you type into the producer shows up in the consumer immediately.

Below is the Java code for sending and consuming messages.

Java producer:
import java.util.Date;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class TestProducer {
    public static void main(String[] args) {
   
      // Set up configuration properties
      Properties props = new Properties();
      props.put("metadata.broker.list","192.168.3.224:9092");
      props.put("serializer.class", "kafka.serializer.StringEncoder");
      // key.serializer.class defaults to serializer.class
      props.put("key.serializer.class", "kafka.serializer.StringEncoder");
      // Optional; if not set, the default partitioner is used
//      props.put("partitioner.class", "com.catt.kafka.demo.PartitionerDemo");
      // Enable the acknowledgement mechanism; otherwise sends are fire-and-forget and data can be lost.
      // Valid values are 0, 1, -1; see
      // http://kafka.apache.org/08/configuration.html
      props.put("request.required.acks", "1");
      ProducerConfig config = new ProducerConfig(props);
   
      // Create the producer
      Producer<String, String> producer = new Producer<String, String>(config);
      // Build and send a message
      long start=System.currentTimeMillis();
      long runtime = new Date().getTime();
      String ip = "192.168.3.224" ;   //rnd.nextInt(255);
      String msg = runtime + "小张666777" + ip;
      // If the topic does not exist, it is created automatically with replication-factor 1 and the broker's default number of partitions (num.partitions)
      KeyedMessage<String, String> data = new KeyedMessage<String, String>(
                "test456", ip, msg);
      producer.send(data);
      System.out.println("耗时:" + (System.currentTimeMillis() - start));
      // Close the producer
      producer.close();
    }
}
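To compile and run the producer outside an IDE, one option is to put the jars shipped with Kafka on the classpath. This is only a sketch: the libs path below is illustrative, and it assumes a Kafka 0.8.x distribution whose libs directory contains the kafka, scala-library, zkclient, metrics and slf4j/log4j jars.

javac -cp "/opt/local/kafka-0.8.1.1-src/libs/*" TestProducer.java
java  -cp ".:/opt/local/kafka-0.8.1.1-src/libs/*" TestProducer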

Java consumer:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class Consumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;
    private final String name;

    public Consumer(String name, String topic) {
      consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(createConsumerConfig());
      this.topic = topic;
      this.name = name;
    }

    private static ConsumerConfig createConsumerConfig() {
      Properties props = new Properties();
      props.put("zookeeper.connect","192.168.3.224:2181");
      props.put("group.id","jd-group");
      props.put("zookeeper.session.timeout.ms", "60000");
      props.put("zookeeper.sync.time.ms", "2000");
      props.put("auto.commit.interval.ms", "1000");
      // Minimum number of bytes the broker should return for a fetch request; default is 1
      // props.put("fetch.min.bytes", "1024");
      // Maximum time the broker will wait for fetch.min.bytes of data before answering; default is 100 ms
      // props.put("fetch.wait.max.ms", "600000");
      return new ConsumerConfig(props);
    }

    public void run() {
      Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
      topicCountMap.put(topic, new Integer(1));
      Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                .createMessageStreams(topicCountMap);
      KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
      ConsumerIterator<byte[], byte[]> it = stream.iterator();
      while (it.hasNext()) {
            System.out.println("************" + name + "    gets    "
                  + new String(it.next().message()));
      }
    }
}
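The class above never stops the connector. If you want to shut the consumer down cleanly, a minimal sketch of an extra method for the Consumer class (the high-level consumer's ConsumerConnector provides shutdown() for this) could look like:

    public void shutdown() {
      // Stop the underlying connector; the blocking iterator is released and,
      // with auto-commit enabled, the last consumed offsets are committed.
      if (consumer != null) {
            consumer.shutdown();
      }
    }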
public class KafkaConsumerDemo {
    public static void main(String[] args) {
         Consumer consumerThread1 = new Consumer("Consumer1","test456"); // same topic as TestProducer above

         consumerThread1.start();
    }
}
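One caveat when running the two programs together: the 0.8 high-level consumer starts a new consumer group at the latest offset by default (auto.offset.reset=largest), so start KafkaConsumerDemo first, keep it pointed at the same topic as TestProducer, and then run the producer; otherwise the consumer may print nothing.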

海海豚 posted on 2018-3-6 17:01:38

Thanks for sharing~