Configuring Kafka + ZooKeeper with Docker, and Operating Kafka from Golang


References:
https://www.jianshu.com/p/97fa257622ee
https://segmentfault.com/a/1190000021746086

Environment:
A CentOS 8 server
Docker and docker-compose installed

First, open the required ports (2181 and 9092 here) to the outside world, for example in the Alibaba Cloud security group.

mkdir -p /usr/local/kafka
vim docker-compose.yml

Write the following into the file:

version: '3.7'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    restart: always
    volumes:
      - ./data:/data
    ports:
      - 2181:2181

  kafka:
    image: wurstmeister/kafka
    restart: always
    ports:
      - 9092:9092
    environment:
      KAFKA_BROKER_ID: 0
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.10.10.140:9092
      #KAFKA_ADVERTISED_HOST_NAME: 182.92.234.23 
      KAFKA_CREATE_TOPICS: "test:2:1"   # on startup, create a topic named test with 2 partitions and 1 replica (format topic:partitions:replicas; the replication factor must be at least 1)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_HEAP_OPTS: "-Xmx256M -Xms128M"
    volumes:
      - ./kafka-logs:/kafka
    depends_on:
      - zookeeper

On the difference between Kafka's listeners and advertised.listeners settings: KAFKA_LISTENERS is the address the broker binds to inside the container, while KAFKA_ADVERTISED_LISTENERS is the address it hands back to clients in its metadata, so it must be reachable from wherever the client runs. See:

https://blog.csdn.net/weixin_38251332/article/details/105638535
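
If the advertised address is wrong, clients can open the initial connection but then fail once they are redirected to the broker's published address. Here is a minimal Go sketch to check that the advertised listener is reachable from outside the container; it assumes the same sarama library and broker address used later in this post, so swap in your own address:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// Connects to the broker through its advertised address and prints the
// brokers and topics it reports back.
func main() {
	client, err := sarama.NewClient([]string{"182.92.234.24:9092"}, sarama.NewConfig())
	if err != nil {
		fmt.Println("failed to connect:", err)
		return
	}
	defer client.Close()

	topics, err := client.Topics()
	if err != nil {
		fmt.Println("failed to list topics:", err)
		return
	}
	fmt.Println("brokers:", len(client.Brokers()), "topics:", topics)
}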

Start the services

docker-compose up -d

Window A: list the existing topics and produce messages to web_log

# enter the kafka_kafka_1 container
docker exec -it kafka_kafka_1 bash

# list the topics that were already created (from docker-compose.yml)
$KAFKA_HOME/bin/kafka-topics.sh --list --zookeeper kafka_zookeeper_1:2181 

# produce messages
$KAFKA_HOME/bin/kafka-console-producer.sh --topic=web_log --broker-list kafka_kafka_1:9092
>1
>2
>3
>

Window B: consume the messages from web_log

# open another terminal and enter the kafka_kafka_1 container
docker exec -it kafka_kafka_1 bash
# consume messages
$KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server  kafka_kafka_1:9092 --from-beginning --topic web_log

1
2
3

Stop the services

docker-compose down

Operating Kafka from Go

Producer

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

var Topic = "web_log" // topic name

// A Kafka producer client based on the third-party sarama library
func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll          // wait for the leader and all in-sync replicas to acknowledge
	config.Producer.Partitioner = sarama.NewRandomPartitioner // choose a random partition for each message
	config.Producer.Return.Successes = true                   // delivered messages are reported on the Successes channel (required by SyncProducer)


	// connect to Kafka
	client, err := sarama.NewSyncProducer([]string{"182.92.234.24:9092"}, config)
	if err != nil {
		fmt.Println("producer closed, err:", err)
		return
	}
	defer client.Close()
	// Example 1: send a single message
	// build a message
	msg := &sarama.ProducerMessage{}
	msg.Topic = Topic
	content := "this is a test log"
	send01(client, msg, content)

	// Example 2: send several messages
	for _, word := range []string{"Welcome11", "to", "the", "Confluent", "Kafka", "Golang", "client"} {
		send01(client, msg, word)
	}
}

// send a message with the given sync producer and print where it landed
func send01(client sarama.SyncProducer, msg *sarama.ProducerMessage, content string) {
	msg.Value = sarama.StringEncoder(content)

	// send the message
	pid, offset, err := client.SendMessage(msg)
	if err != nil {
		fmt.Println("send msg failed, err:", err)
		return
	}
	fmt.Printf("pid:%v offset:%v\n", pid, offset)

}
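
The SyncProducer above blocks on every SendMessage call. sarama also provides an AsyncProducer that queues messages on a channel and reports results on its Successes()/Errors() channels. Below is a minimal sketch of that variant, reusing the same broker address and topic as above; the word list is just for illustration:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// Async variant of the producer: messages go in through Input() and
// acknowledgements come back on the Successes()/Errors() channels.
func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true // acks are delivered on Successes()

	producer, err := sarama.NewAsyncProducer([]string{"182.92.234.24:9092"}, config)
	if err != nil {
		fmt.Println("producer create failed, err:", err)
		return
	}

	words := []string{"async", "producer", "example"}
	for _, word := range words {
		producer.Input() <- &sarama.ProducerMessage{Topic: "web_log", Value: sarama.StringEncoder(word)}
	}

	// Collect exactly one acknowledgement (success or error) per message sent.
	for i := 0; i < len(words); i++ {
		select {
		case msg := <-producer.Successes():
			fmt.Printf("sent partition:%d offset:%d\n", msg.Partition, msg.Offset)
		case err := <-producer.Errors():
			fmt.Println("send failed:", err)
		}
	}
	producer.AsyncClose()
}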

Consumer

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// kafka consumer

var Topic = "web_log" // topic name

func main() {
	consumer, err := sarama.NewConsumer([]string{"182.92.234.24:9092"}, nil)
	if err != nil {
		fmt.Printf("fail to start consumer, err:%v\n", err)
		return
	}
	partitionList, err := consumer.Partitions(Topic) // get all partitions of the topic
	if err != nil {
		fmt.Printf("fail to get list of partition:err%v\n", err)
		return
	}
	fmt.Println(partitionList)
	for _, partition := range partitionList { // iterate over all of the partitions
		// create a dedicated partition consumer for each partition
		pc, err := consumer.ConsumePartition(Topic, partition, sarama.OffsetNewest)
		if err != nil {
			fmt.Printf("failed to start consumer for partition %d,err:%v\n", partition, err)
			return
		}
		defer pc.AsyncClose()
		// consume messages from each partition asynchronously
		go func(pc sarama.PartitionConsumer) {
			for msg := range pc.Messages() {
				fmt.Printf("Partition:%d Offset:%d Key:%v Value:%v\n", msg.Partition, msg.Offset, msg.Key, string(msg.Value))
			}
		}(pc)
	}
	select {} // block forever so the partition consumers keep running
}
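
The partition-consumer approach above manages partitions and offsets by hand. For most applications a consumer group is simpler: Kafka assigns partitions to group members and tracks committed offsets per group. Here is a minimal sketch using sarama's consumer-group API; the group id web_log_group and the Kafka version setting are assumptions, so adjust them for your cluster:

package main

import (
	"context"
	"fmt"

	"github.com/Shopify/sarama"
)

// handler implements sarama.ConsumerGroupHandler.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

// ConsumeClaim is called once per assigned partition and drains its messages.
func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		fmt.Printf("Partition:%d Offset:%d Value:%s\n", msg.Partition, msg.Offset, string(msg.Value))
		sess.MarkMessage(msg, "") // mark the offset as processed
	}
	return nil
}

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_0_0_0 // consumer groups require the broker version to be set
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	group, err := sarama.NewConsumerGroup([]string{"182.92.234.24:9092"}, "web_log_group", config)
	if err != nil {
		fmt.Printf("fail to start consumer group, err:%v\n", err)
		return
	}
	defer group.Close()

	for {
		// Consume returns after a rebalance, so call it in a loop.
		if err := group.Consume(context.Background(), []string{"web_log"}, handler{}); err != nil {
			fmt.Printf("consume error:%v\n", err)
			return
		}
	}
}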

Using Kafka together with the Gin framework

package main

import (
	"fmt"
	"github.com/Shopify/sarama"
	"github.com/gin-gonic/gin"
	"sync"
	"time"
)

var Topic = "web_log" // topic name
var kafkaIp = "182.92.234.24:9092"

// Test is a spare handler that just returns some JSON (it is not registered in main)
func Test(ctx *gin.Context) {
	ctx.JSON(200, gin.H{
		"data": "product",
	})
}

func main() {

	// start the consumer in the background
	go InitConsumer()

	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	r.GET("/send", SendMessage)  //http://localhost:8082/send

	r.Run("0.0.0.0:8082") // listen and serve on 0.0.0.0:8082
}

// SendMessage produces a few test messages to Kafka
func SendMessage(ctx *gin.Context) {
	fmt.Println("SendMessage")
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll          // wait for the leader and all in-sync replicas to acknowledge
	config.Producer.Partitioner = sarama.NewRandomPartitioner // choose a random partition for each message
	config.Producer.Return.Successes = true                   // delivered messages are reported on the Successes channel (required by SyncProducer)


	// connect to Kafka
	client, err := sarama.NewSyncProducer([]string{kafkaIp}, config)
	if err != nil {
		fmt.Println("producer closed, err:", err)
		return
	}
	defer client.Close()
	// Example 1: send a single message
	// build a message
	msg := &sarama.ProducerMessage{}
	msg.Topic = Topic
	content := "this is a test log"
	sendTokafka(client, msg, content)

	// Example 2: send several messages
	for _, word := range []string{"Welcome11", "to", "the", "Confluent", "Kafka", "Golang", "client"} {
		sendTokafka(client, msg, word)
	}
}

// sendTokafka sends a message with the given sync producer and prints where it landed
func sendTokafka(client sarama.SyncProducer, msg *sarama.ProducerMessage, content string) {
	msg.Value = sarama.StringEncoder(content)

	// send the message
	pid, offset, err := client.SendMessage(msg)
	if err != nil {
		fmt.Println("send msg failed, err:", err)
		return
	}
	fmt.Printf("pid:%v offset:%v\n", pid, offset)

}


// InitConsumer consumes every partition of the topic and prints the messages
func InitConsumer() {
	time.Sleep(time.Second * 3) // give the broker a moment to come up before connecting
	fmt.Println("init consumer success")

	var wg sync.WaitGroup
	consumer, err := sarama.NewConsumer([]string{kafkaIp}, nil)
	if err != nil {
		fmt.Printf("fail to start consumer, err:%v\n", err)
		return
	}
	partitionList, err := consumer.Partitions(Topic) // get all partitions of the topic
	if err != nil {
		fmt.Printf("fail to get list of partition:err%v\n", err)
		return
	}
	fmt.Println(partitionList)


	for _, partition := range partitionList { // iterate over all of the partitions
		wg.Add(1)
		// create a dedicated partition consumer for each partition
		pc, err := consumer.ConsumePartition(Topic, partition, sarama.OffsetNewest)
		if err != nil {
			fmt.Printf("failed to start consumer for partition %d,err:%v\n", partition, err)
			return
		}
		// consume messages from each partition asynchronously
		go func(pc sarama.PartitionConsumer) {
			defer wg.Done()
			defer pc.AsyncClose()
			for msg := range pc.Messages() {
				fmt.Printf("Partition:%d Offset:%d Key:%v Value:%v\n", msg.Partition, msg.Offset, msg.Key, string(msg.Value))
			}
		}(pc)
	}
	//select {} // alternatively, block forever instead of waiting on the WaitGroup
	wg.Wait()
	consumer.Close()
}

Visit:
http://localhost:8082/send

Output:

SendMessage
pid:0 offset:250
Partition:0 Offset:250 Key:[] Value:this is a test log
Partition:0 Offset:251 Key:[] Value:Welcome11
pid:0 offset:251
Partition:0 Offset:252 Key:[] Value:to
pid:0 offset:252
pid:0 offset:253
Partition:0 Offset:253 Key:[] Value:the
Partition:0 Offset:254 Key:[] Value:Confluent
pid:0 offset:254
Partition:0 Offset:255 Key:[] Value:Kafka
pid:0 offset:255
pid:0 offset:256
Partition:0 Offset:256 Key:[] Value:Golang
Partition:0 Offset:257 Key:[] Value:client
pid:0 offset:257
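
One caveat with the handler above: SendMessage creates and closes a new SyncProducer on every HTTP request. sarama's SyncProducer is safe for concurrent use, so a common pattern is to create it once at startup and share it across handlers. A minimal sketch of that layout, under the same broker address, topic, and port as above (the route body is simplified):

package main

import (
	"fmt"
	"net/http"

	"github.com/Shopify/sarama"
	"github.com/gin-gonic/gin"
)

var producer sarama.SyncProducer // created once in main, shared by all handlers

func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true

	var err error
	producer, err = sarama.NewSyncProducer([]string{"182.92.234.24:9092"}, config)
	if err != nil {
		fmt.Println("producer closed, err:", err)
		return
	}
	defer producer.Close()

	r := gin.Default()
	r.GET("/send", func(ctx *gin.Context) {
		// Reuse the shared producer instead of dialing Kafka on every request.
		msg := &sarama.ProducerMessage{Topic: "web_log", Value: sarama.StringEncoder("this is a test log")}
		pid, offset, err := producer.SendMessage(msg)
		if err != nil {
			ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		ctx.JSON(http.StatusOK, gin.H{"partition": pid, "offset": offset})
	})
	r.Run("0.0.0.0:8082")
}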

