Kafka Cluster Installation

카이도스 2024. 2. 26. 16:56

Sharing how to install a Kafka cluster across five servers.


Environment
Ubuntu 22.04
kafka_2.13-3.6.1
apache-zookeeper-3.9.1


Base installation

# Configure /etc/hosts
sudo su -
cat <<EOT>> /etc/hosts

# kafka
10.10.x.161 kafka1
10.10.x.162 kafka2
10.10.x.163 kafka3
10.10.x.164 kafka4
10.10.x.165 kafka5
EOT
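
To catch typos in the hosts file early, it helps to confirm that every entry resolves and responds before moving on. A minimal sketch (assumes ICMP is allowed between the nodes):

# Check that every kafka host resolves and answers
for h in kafka1 kafka2 kafka3 kafka4 kafka5; do
  ping -c 1 -W 1 "$h" > /dev/null && echo "$h OK" || echo "$h FAILED"
done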

# Update and upgrade apt packages
sudo apt update && sudo apt upgrade -y

# jdk install
sudo apt install -y default-jdk
java --version
openjdk 11.0.21 2023-10-17
OpenJDK Runtime Environment (build 11.0.21+9-post-Ubuntu-0ubuntu122.04)
OpenJDK 64-Bit Server VM (build 11.0.21+9-post-Ubuntu-0ubuntu122.04, mixed mode, sharing)

# Configure resource limits (ulimit)
cat <<EOT>> /etc/security/limits.conf
*         hard    nofile     1048576
*         soft    nofile     1048576
*         hard    nproc      unlimited
*         soft    nproc      unlimited
*         hard    stack      unlimited
*         soft    stack      unlimited
*         hard    core       unlimited
*         soft    core       unlimited
EOT
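
The limits above only apply to new login sessions. A quick way to confirm them after logging back in (or after the reboot below):

# Confirm the new limits in a fresh session
ulimit -n        # expect 1048576
ulimit -u        # expect unlimited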

# Configure kernel parameters (sysctl)
cat <<EOT>> /etc/sysctl.conf
vm.swappiness=1

net.core.netdev_max_backlog=250000
net.ipv4.tcp_max_syn_backlog=30000

net.ipv4.tcp_syn_retries=6
net.ipv4.tcp_retries1=3

net.core.somaxconn=65535
vm.max_map_count=262144
net.ipv4.ip_local_port_range=1024 61000
net.ipv4.tcp_max_tw_buckets=5000000
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_fin_timeout=15
EOT
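
/etc/sysctl.conf is read at boot, so the reboot below picks these values up. To apply them immediately and spot-check a few, a sketch:

# Apply the sysctl settings now and verify a few values
sudo sysctl -p
sysctl vm.swappiness net.core.somaxconn vm.max_map_count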

# Reboot to apply the settings
reboot

ZooKeeper cluster setup

# Install
cd /data
sudo wget https://dlcdn.apache.org/zookeeper/zookeeper-3.9.1/apache-zookeeper-3.9.1-bin.tar.gz
sudo tar -xzvf apache-zookeeper-3.9.1-bin.tar.gz && sudo rm -rf apache-zookeeper-3.9.1-bin.tar.gz
ln -s apache-zookeeper-3.9.1-bin zookeeper

# Per-user environment variables
echo 'export ZOOKEEPER_HOME=/data/zookeeper' >> ~/.bashrc
source ~/.bashrc

# Verify it was applied
env | grep ZOOKEEPER

# Edit the configuration
cd $ZOOKEEPER_HOME
sudo cp ./conf/zoo_sample.cfg ./conf/zoo.cfg
sudo vi $ZOOKEEPER_HOME/conf/zoo.cfg
--------------------------------
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper/data
dataLogDir=/data/zookeeper/logs
clientPort=2181
server.1=kafka1:2888:3888
server.2=kafka2:2888:3888
server.3=kafka3:2888:3888
server.4=kafka4:2888:3888
server.5=kafka5:2888:3888

# Grant ownership to the user first, so the directories below can be created without sudo
sudo chown -R $USER:$USER /data/apache-zookeeper-3.9.1-bin

# Create the data and logs directories
mkdir -p /data/zookeeper/data
mkdir -p /data/zookeeper/logs

# Write this node's ID into myid (1 on kafka1, 2 on kafka2, 3 on kafka3, 4 on kafka4, 5 on kafka5)
vi /data/zookeeper/data/myid
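
If you would rather script this than edit myid by hand on each server, the ID can be derived from the hostname. A minimal sketch, assuming the hostnames are literally kafka1 through kafka5:

# Derive this node's myid from its hostname (kafka1 -> 1, ..., kafka5 -> 5)
echo "${HOSTNAME##kafka}" > /data/zookeeper/data/myid
cat /data/zookeeper/data/myid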

# Create the systemd unit
sudo su -
cat <<EOT>> /etc/systemd/system/zookeeper-server.service
[Unit]
Description=zookeeper-server
After=network.target

[Service]
Type=forking
User=ubuntu
Group=ubuntu
SyslogIdentifier=zookeeper-server
WorkingDirectory=/data/zookeeper
Restart=always
RestartSec=0s
ExecStart=/data/zookeeper/bin/zkServer.sh start
ExecStop=/data/zookeeper/bin/zkServer.sh stop

[Install]
WantedBy=default.target
EOT

# Enable and check the service
sudo systemctl daemon-reload
sudo systemctl enable zookeeper-server.service
sudo systemctl start zookeeper-server.service
sudo systemctl status zookeeper-server.service

jps
8038 QuorumPeerMain
8092 Jps

# Check which node is the leader and which are followers
cd /data/zookeeper/bin
./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /data/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
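
Besides zkServer.sh status, the srvr four-letter command (whitelisted by default; other commands need 4lw.commands.whitelist in zoo.cfg) gives a quick view of each node's role:

# Query the local ZooKeeper instance directly (requires netcat)
echo srvr | nc localhost 2181
# Check the "Mode:" line; exactly one node should report leader, the rest follower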

Broker cluster setup

# Install the Kafka package
cd /data
wget https://archive.apache.org/dist/kafka/3.6.1/kafka_2.13-3.6.1.tgz
tar zxvf kafka_2.13-3.6.1.tgz && rm -rf kafka_2.13-3.6.1.tgz
ln -s kafka_2.13-3.6.1 kafka
sudo chown -R $USER:$USER kafka_2.13-3.6.1

# Per-user environment variables
echo 'export KAFKA_HOME=/data/kafka' >> ~/.bashrc
source ~/.bashrc

# Verify it was applied
env | grep KAFKA

# Kafka broker settings
vi /data/kafka/config/server.properties
broker.id=10                                            # assign 10, 20, 30, 40, 50 to the brokers in order
advertised.listeners=PLAINTEXT://10.10.X.161:9092       # this broker's IP address

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=104857600
socket.receive.buffer.bytes=104857600
queued.max.requests=100

log.dirs=/data/kafka/logs
num.partitions=5
offsets.topic.replication.factor=2

log.retention.hours=-1

zookeeper.connect=10.10.X.161:2181,10.10.X.162:2181,10.10.X.163:2181,10.10.X.164:2181,10.10.X.165:2181
zookeeper.connection.timeout.ms=6000

delete.topic.enable=true

acks=all                                                # note: acks is a producer-side setting and has no effect in server.properties
min.insync.replicas=1
message.max.bytes=209715200
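
broker.id and advertised.listeners are the only per-node differences; the rest of server.properties is identical on all five brokers. A per-host sketch for filling in those two values, assuming the edited server.properties above is already copied to each broker, the hostnames are kafka1 through kafka5, and hostname -I lists the 10.10.x.x address first:

# Set broker.id and advertised.listeners from this host's name and IP
N="${HOSTNAME##kafka}"                      # 1..5
IP="$(hostname -I | awk '{print $1}')"
sed -i "s/^broker.id=.*/broker.id=${N}0/" /data/kafka/config/server.properties
sed -i "s#^advertised.listeners=.*#advertised.listeners=PLAINTEXT://${IP}:9092#" /data/kafka/config/server.properties
grep -E '^(broker.id|advertised.listeners)=' /data/kafka/config/server.properties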

# Kafka log4j settings
vi /data/kafka/config/log4j.properties
log4j.logger.kafka=ERROR

# Set MaxFileSize and MaxBackupIndex on each appender
log4j.appender.kafkaAppender.MaxFileSize=256MB
log4j.appender.kafkaAppender.MaxBackupIndex=30

# Change the application log path
vi /data/kafka/bin/kafka-run-class.sh

# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
  LOG_DIR="/data/kafka/logs"          # around line 224
fi

# Create the data directory
mkdir -p /data/kafka/logs

# Set the Java heap size
vi /data/kafka/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx6G -Xms6G"       # around line 29
fi

# Create the Kafka systemd unit
sudo su -
cat <<EOT>> /etc/systemd/system/kafka.service
[Unit]
Requires=network.target remote-fs.target
After=network.target remote-fs.target

[Service]
Type=simple
User=ubuntu
ExecStart=/bin/sh -c '/data/kafka/bin/kafka-server-start.sh /data/kafka/config/server.properties'
ExecStop=/data/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal

[Install]
WantedBy=multi-user.target
EOT
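
If you prefer not to edit kafka-server-start.sh for the heap size, the same effect can be had with a systemd drop-in, since the script only sets KAFKA_HEAP_OPTS when the variable is empty. A sketch (the drop-in file name heap.conf is arbitrary):

# Optional: set the heap via a systemd drop-in instead of editing the start script
sudo mkdir -p /etc/systemd/system/kafka.service.d
cat <<EOT | sudo tee /etc/systemd/system/kafka.service.d/heap.conf
[Service]
Environment="KAFKA_HEAP_OPTS=-Xmx6G -Xms6G"
EOT
sudo systemctl daemon-reload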

# Start the service
sudo systemctl daemon-reload
sudo systemctl enable kafka
sudo systemctl start kafka
sudo systemctl status kafka

# Check the Kafka cluster status
cd /data/kafka/bin
./zookeeper-shell.sh 10.10.X.161:2181 ls /brokers/ids                  # healthy if every broker ID is listed
Connecting to 10.10.X.161:2181

WATCHER::

WatchedEvent state:SyncConnected type:None path:null
[10, 20, 30, 40, 50]

# Check the Kafka logs
cat /data/kafka/logs/kafka*
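
For an end-to-end check, a short produce/consume round trip confirms the brokers actually serve traffic. A sketch (smoke-test is just a throwaway topic name):

# Create a test topic, produce one message, read it back, then clean up
cd /data/kafka/bin
./kafka-topics.sh --bootstrap-server kafka1:9092 --create --topic smoke-test --partitions 5 --replication-factor 3
echo "hello kafka" | ./kafka-console-producer.sh --bootstrap-server kafka1:9092 --topic smoke-test
./kafka-console-consumer.sh --bootstrap-server kafka1:9092 --topic smoke-test --from-beginning --max-messages 1
./kafka-topics.sh --bootstrap-server kafka1:9092 --delete --topic smoke-test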

 


Kafka UI

# Install Docker and relocate its data directory
curl -fsSL https://get.docker.com | sh

# Check Docker info: client/server, Docker Root Dir, Registry
docker info

# Check the Docker version: Docker Engine - Community
docker version

# Stop the Docker service and socket
sudo systemctl stop docker.service
sudo systemctl stop docker.socket
sudo systemctl status docker.service
sudo systemctl status docker.socket

# Copy the existing Docker directory
cd /var/lib
sudo cp -av docker/ /data/

# Point Docker at the new data directory (written as root)
sudo tee /etc/docker/daemon.json <<EOT
{
        "data-root": "/data/docker"
}
EOT

# Restart and verify
sudo systemctl start docker
sudo systemctl status docker.service
sudo systemctl status docker.socket

# Verify the new data directory
sudo docker info | grep "Docker Root Dir"
Docker Root Dir: /data/docker
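
After confirming that images and containers work from /data/docker, the old copy under /var/lib can be removed to free space; a cautious sketch:

# Only after verifying Docker works from the new root directory
sudo mv /var/lib/docker /var/lib/docker.bak
# ...and once everything still looks good for a while:
sudo rm -rf /var/lib/docker.bak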
----------------------------------------------------------------------
# git clone 
cd /data
git clone https://github.com/provectus/kafka-ui.git
sudo chown -R $USER:$USER /data/kafka-ui/

# Configuration
vi /data/kafka-ui/docker-compose.yaml
version: '2'
services:
  kafka-ui:
    image: provectuslabs/kafka-ui
    container_name: kafka-ui
    ports:
      - "80:8080"
    restart: always
    environment:
      - KAFKA_CLUSTERS_0_NAME=xg-kafka
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka1:9092,kafka2:9092,kafka3:9092,kafka4:9092,kafka5:9092
      - KAFKA_CLUSTERS_0_ZOOKEEPER=kafka1:2181,kafka2:2181,kafka3:2181,kafka4:2181,kafka5:2181
    cpus: ".20"
    mem_limit: "16g"

# Run
sudo su -
cd /data/kafka-ui
docker compose up -d

# Check the logs
docker logs kafka-ui
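
Once the container reports started, the UI should answer on port 80 of the host; a quick reachability check (an HTTP 200 from the root path is expected):

# Check that the UI responds on port 80
curl -s -o /dev/null -w '%{http_code}\n' http://localhost/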

# Tear down
docker compose down
