《日子》.分布式-Kafka集群安裝

1、zookeeper集群安裝

  參考《日子》.分布式之開篇-Zookeeper集群安裝

  zookeeper 三臺

       192.168.0.70

       192.168.0.71

       192.168.0.72

2、kafka集群安裝

   部署機器三臺:

    192.168.0.73

    192.168.0.74

    192.168.0.75

? ? java 環(huán)境安裝:

? ? ? jdk包 jdk-7u51-linux-x64.rpm 上傳至目錄 /cluster/install/jdk-7u51-linux-x64.rpm

? ? ? [root@localhost #] rpm -ivh /cluster/install/jdk-7u51-linux-x64.rpm

  ?準備開發(fā)包:kafka_2.10-0.8.2.0.tgz 上傳至目錄 /cluster/install/kafka_2.10-0.8.2.0.tgz

? ? ?解壓縮包:[root@localhost #] tar zxvf /cluster/install/kafka_2.10-0.8.2.0.tgz

     復制到cluster目錄下[root@localhost #] cp -r /cluster/install/kafka_2.10-0.8.2.0 /cluster

? ? ?修改server.properties文件

  [root@localhost #] vi /cluster/kafka_2.10-0.8.2.0/config/server.properties

? ? broker.id=73 --分別對應每個機器IP末尾數

  host.name=192.168.0.73
? ? zookeeper.connect=192.168.0.70:2181,192.168.0.71:2181,192.168.0.72:2181

  復制到其他機器[root@localhost #] scp -r /cluster root@192.168.0.74:/

                          [root@localhost #] scp -r /cluster root@192.168.0.75:/

? ?更改相應的broker.id 、host.name

  啟動: [root@localhost bin#]./kafka-server-start.sh /cluster/kafka_2.10-0.8.2.0/config/server.properties

? ?忍不住來張圖,剛啟動時74是leader,嘗試了下down掉74,現在轉為73了

192.168.0.73,192.168.0.74,192.168.0.75,開啟端口 9092

[root@localhost #]/sbin/iptables -I INPUT -p tcp --dport 9092 -j ACCEPT

[root@localhost #]/etc/rc.d/init.d/iptables save #將更改進行保存

[root@localhost #]/etc/init.d/iptables restart #重啟防火墻以便改動生效

3、代碼示例

   最關鍵的部分,所有程序員最心切的期待開始

屬性配制:

/**
 * Shared configuration constants for the Kafka demo: ZooKeeper quorum,
 * broker list, topic names, and client tuning values.
 *
 * <p>NOTE(review): a "constant interface" is generally an anti-pattern
 * (Effective Java, Item 22); it is kept as an interface here so existing
 * references such as {@code KafkaProperties.topic} remain source-compatible.
 * Interface fields are implicitly {@code public static final}, so the
 * redundant {@code final static} modifiers were removed.
 */
public interface KafkaProperties {

    /** ZooKeeper connection string: comma-separated host:port pairs. */
    String zkConnect = "192.168.0.70:2181,192.168.0.71:2181,192.168.0.72:2181";

    /** Consumer group id shared by all demo consumers. */
    String groupId = "group1";

    /** Primary demo topic. */
    String topic = "topic1";

    /** Comma-separated Kafka broker hosts (port given separately below). */
    String kafkaServerURL = "192.168.0.73,192.168.0.74,192.168.0.75";

    /** Broker port opened in the firewall during installation (step 2). */
    int kafkaServerPort = 9092;

    /** Producer socket buffer size in bytes (64 KiB). */
    int kafkaProducerBufferSize = 64 * 1024;

    /** Connection timeout in milliseconds. */
    int connectionTimeOut = 20000;

    /** Reconnect interval in milliseconds. */
    int reconnectInterval = 10000;

    /** Additional demo topics. */
    String topic2 = "topic2";

    String topic3 = "topic3";

    /** Client id used by the SimpleConsumer demo. */
    String clientId = "SimpleConsumerDemoClient";
}

消息生產者

KafkaProducer

import java.util.Properties;

import kafka.producer.KeyedMessage;

import kafka.producer.ProducerConfig;

public class KafkaProducer extends Thread

{

   private final kafka.javaapi.producer.Producerproducer;

   private final String topic;

   private final Properties props = new Properties();

   public KafkaProducer(String topic)

   {

    props.put("serializer.class", "kafka.serializer.StringEncoder");

    props.put("metadata.broker.list","192.168.0.73:9092,192.168.0.74:9092,192.168.0.75:9092");

    producer = new kafka.javaapi.producer.Producer(new ProducerConfig(props));

    this.topic = topic;

   }

   @Override

   public void run() {

    int messageNo = 1;

    Long start=new java.util.Date().getTime();

    while (true)

    {

     String messageStr = new String("Message_" + messageNo);

     System.out.println("Send:" + messageStr);

     producer.send(new KeyedMessage(topic,messageStr));

     messageNo++;

     try {

      sleep(3000);

      } catch (InterruptedException e) {

       e.printStackTrace();

      }

   }

  }

}

消息消費者

import java.util.HashMap;

import java.util.List;

import java.util.Map;

import java.util.Properties;

import kafka.consumer.ConsumerConfig;

import kafka.consumer.ConsumerIterator;

import kafka.consumer.KafkaStream;

import

kafka.javaapi.consumer.ConsumerConnector;

/**

*@author leicui bourne_cui@163.com

*/

public class KafkaConsumer extends Thread

{

    private final ConsumerConnector consumer;

    private final String topic;

    public KafkaConsumer(String topic)

    {

      consumer = kafka.consumer.Consumer.createJavaConsumerConnector(

      createConsumerConfig());

      this.topic = topic;

    }

    private static ConsumerConfig createConsumerConfig()

    {

      Properties props = new Properties();

      props.put("zookeeper.connect", KafkaProperties.zkConnect);

      props.put("group.id", KafkaProperties.groupId);

      props.put("zookeeper.session.timeout.ms", "40000");

      props.put("zookeeper.sync.time.ms", "200");

      props.put("auto.commit.interval.ms", "1000");

      return new ConsumerConfig(props);

     }

     @Override

     public void run() {

      Map topicCountMap = new HashMap();

      topicCountMap.put(topic, new Integer(1));

      Map<String,List<KafkaSteam<byte[],byte[]>>> consumerMap = ? ? ? ? ? ? ? ? ?consumer.createMessageStreams(topicCountMap);

? ? ? ? ? ? ? ? KafkaStream<byte[],byte[]> stream =consumerMap.get(topic).get(0);

? ? ? ? ? ? ? ? ConsumerIterator<byte[],byte[]> it = stream.iterator();

? ? ? ? ? ? ? ? while (it.hasNext()) {

? ? ? ? ? ? ? ? System.out.println("receive:" + new

? ? ? ? ? ? ? ? String(it.next().message()));

? ? ? ? ? ? ? ?try {

? ? ? ? ? ? ? ? ?sleep(3000);

? ? ? ? ? ? ? ? ?} catch (InterruptedException e) {

? ? ? ? ? ? ? ? ? ?e.printStackTrace();

? ? ? ? ? ? ? ? ? }

? ? ? ? ? ? }

? ? ? ?}

}

運行入口:

public class KafkaConsumerProducerDemo {

? ?public static void main(String[] args)

? ?{

? ? ? ?KafkaProducer producerThread = new KafkaProducer(KafkaProperties.topic);

? ? ? ? producerThread.start();

? ? ? ? ?KafkaConsumer consumerThread = new KafkaConsumer(KafkaProperties.topic);

? ? ? ?consumerThread.start();

? ? }

}

最后編輯于
?著作權歸作者所有,轉載或內容合作請聯系作者
【社區(qū)內容提示】社區(qū)部分內容疑似由AI輔助生成,瀏覽時請結合常識與多方信息審慎甄別。
平臺聲明:文章內容(如有圖片或視頻亦包括在內)由作者上傳并發(fā)布,文章內容僅代表作者本人觀點,簡書系信息發(fā)布平臺,僅提供信息存儲服務。

相關閱讀更多精彩內容

友情鏈接更多精彩內容