Setting Up a Zookeeper and Kafka Cluster
System Environment Configuration for the Zookeeper and Kafka Cluster
Configure IP Addresses
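# Regenerate each node's machine-id (useful when the VMs were cloned) and reboot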
ssh root@192.168.1.190 "rm -rf /etc/machine-id; systemd-machine-id-setup;reboot"
ssh root@192.168.1.192 "rm -rf /etc/machine-id; systemd-machine-id-setup;reboot"
ssh root@192.168.1.194 "rm -rf /etc/machine-id; systemd-machine-id-setup;reboot"
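# Delete the old connection profile and recreate ens160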
ssh root@192.168.1.190 "nmcli con delete uuid d1141403-18c6-3149-907c-ed5f09663a7f;nmcli con add type ethernet ifname ens160 con-name ens160;nmcli con up ens160"
ssh root@192.168.1.192 "nmcli con delete uuid d1141403-18c6-3149-907c-ed5f09663a7f;nmcli con add type ethernet ifname ens160 con-name ens160;nmcli con up ens160"
ssh root@192.168.1.194 "nmcli con delete uuid d1141403-18c6-3149-907c-ed5f09663a7f;nmcli con add type ethernet ifname ens160 con-name ens160;nmcli con up ens160"
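# Assign a static IPv4 address, gateway, and DNS on each node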
ssh root@192.168.1.190 "nmcli con mod ens160 ipv4.addresses 192.168.1.61/24; nmcli con mod ens160 ipv4.gateway 192.168.1.1; nmcli con mod ens160 ipv4.method manual; nmcli con mod ens160 ipv4.dns "8.8.8.8"; nmcli con up ens160"
ssh root@192.168.1.192 "nmcli con mod ens160 ipv4.addresses 192.168.1.62/24; nmcli con mod ens160 ipv4.gateway 192.168.1.1; nmcli con mod ens160 ipv4.method manual; nmcli con mod ens160 ipv4.dns "8.8.8.8"; nmcli con up ens160"
ssh root@192.168.1.194 "nmcli con mod ens160 ipv4.addresses 192.168.1.63/24; nmcli con mod ens160 ipv4.gateway 192.168.1.1; nmcli con mod ens160 ipv4.method manual; nmcli con mod ens160 ipv4.dns "8.8.8.8"; nmcli con up ens160"
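# Add an IPv6 address on each node (the hosts are now reached via their new IPv4 addresses)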
ssh root@192.168.1.61 "nmcli con mod ens160 ipv6.addresses fc00::61/8; nmcli con up ens160"
ssh root@192.168.1.62 "nmcli con mod ens160 ipv6.addresses fc00::62/8; nmcli con up ens160"
ssh root@192.168.1.63 "nmcli con mod ens160 ipv6.addresses fc00::63/8; nmcli con up ens160"
[root@localhost ~]# cat /etc/NetworkManager/system-connections/ens160.nmconnection
[connection]
id=ens160
uuid=94c63fdf-cd5a-427c-9846-5a447de2a4f5
type=ethernet
interface-name=ens160
timestamp=1744436596
[ethernet]
[ipv4]
address1=192.168.1.61/24,192.168.1.1
dns=192.168.1.99;
method=manual
[ipv6]
addr-gen-mode=default
address1=fc00::61/8
method=auto
[proxy]
Set Hostnames
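# Run the matching command on each node (zk-1 on 192.168.1.61, zk-2 on 192.168.1.62, zk-3 on 192.168.1.63)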
hostnamectl set-hostname zk-1
hostnamectl set-hostname zk-2
hostnamectl set-hostname zk-3
Disable the Firewall and SELinux
# Disable the firewall
systemctl disable --now firewalld
# Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
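An optional quick check that both changes took effect (expect "inactive" and "Permissive", or "Disabled" after a reboot):
# Verify firewall and SELinux state
systemctl is-active firewalld
getenforce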
Install the JDK
# Check whether a JDK is already installed; if one is, uninstall it first
rpm -qa|grep jdk
# Download the JDK from:
https://www.oracle.com/java/technologies/downloads/#java8
# Unpack the JDK
tar -xvf jdk-8u441-linux-x64.tar.gz
# Move it to the install directory
mv jdk1.8.0_441/ /usr/local/
# Edit the environment variables
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_441
export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
# Reload the environment variables
source /etc/profile
# Check the version
[root@zk-1 ~]# java -version
java version "1.8.0_441"
Java(TM) SE Runtime Environment (build 1.8.0_441-b07)
Java HotSpot(TM) 64-Bit Server VM (build 25.441-b07, mixed mode)
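The JDK must be present on all three nodes. A minimal sketch for copying the unpacked JDK from node 1, assuming the root SSH access used above (the /etc/profile additions still have to be repeated on each node):
# Copy the unpacked JDK to the other two nodes
for ip in 192.168.1.62 192.168.1.63; do
  scp -r /usr/local/jdk1.8.0_441 root@${ip}:/usr/local/
done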
Install the Zookeeper Cluster
# Download Zookeeper
# https://archive.apache.org/dist/zookeeper/
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.9.3/apache-zookeeper-3.9.3-bin.tar.gz
# Create the application directory
mkdir -vp /app/zookeeper-cluster
cd /app/zookeeper-cluster
# Unpack and install
mv /root/apache-zookeeper-3.9.3-bin.tar.gz .
tar xvf apache-zookeeper-3.9.3-bin.tar.gz
mv apache-zookeeper-3.9.3-bin zk
Modify the Zookeeper Configuration
# Create directories for data and logs
cd zk
mkdir data logs
# Copy the sample configuration
cd conf
cp zoo_sample.cfg zoo.cfg
# Edit the configuration file
vi zoo.cfg
# Set the data and log directories
dataDir=/app/zookeeper-cluster/zk/data
dataLogDir=/app/zookeeper-cluster/zk/logs
# Add the cluster node entries
server.1=192.168.1.61:2888:3888
server.2=192.168.1.62:2888:3888
server.3=192.168.1.63:2888:3888
# Add SASL settings
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
jaasLoginRenew=3600000
requireClientAuthScheme=sasl
zookeeper.sasl.client=true
My complete configuration
[root@zk-1 conf]# cat zoo.cfg | grep -Ev '^$|#'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/app/zookeeper-cluster/zk/data
dataLogDir=/app/zookeeper-cluster/zk/logs
clientPort=2181
server.1=192.168.1.61:2888:3888
server.2=192.168.1.62:2888:3888
server.3=192.168.1.63:2888:3888
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
jaasLoginRenew=3600000
requireClientAuthScheme=sasl
zookeeper.sasl.client=true
Configure SASL Authentication
# Configure SASL authentication
cat <<EOF | tee /app/zookeeper-cluster/zk/conf/zk_jaas.conf
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_cby="Cby123..";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="cby"
password="Cby123..";
};
EOF
# Modify the ZooKeeper environment script
# Append the variable at the end of bin/zkEnv.sh
vim /app/zookeeper-cluster/zk/bin/zkEnv.sh
export JVMFLAGS="-Djava.security.auth.login.config=/app/zookeeper-cluster/zk/conf/zk_jaas.conf ${JVMFLAGS}"
# My result
[root@zk-1 bin]# cat zkEnv.sh | grep JVMFLAGS
export SERVER_JVMFLAGS="-Xmx${ZK_SERVER_HEAP}m $SERVER_JVMFLAGS"
export CLIENT_JVMFLAGS="-Xmx${ZK_CLIENT_HEAP}m $CLIENT_JVMFLAGS"
export JVMFLAGS="-Djava.security.auth.login.config=/app/zookeeper-cluster/zk/conf/zk_jaas.conf ${JVMFLAGS}"
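The same /app/zookeeper-cluster/zk tree (with the zoo.cfg, zk_jaas.conf, and zkEnv.sh changes above) must exist on all three nodes. A minimal sketch for replicating it from node 1, assuming root SSH access:
# Copy the configured ZooKeeper installation to the other two nodes
for ip in 192.168.1.62 192.168.1.63; do
  ssh root@${ip} "mkdir -p /app/zookeeper-cluster"
  scp -r /app/zookeeper-cluster/zk root@${ip}:/app/zookeeper-cluster/
done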
Configure Startup
# Create the myid file in the data directory
# On node 1
echo 1 >/app/zookeeper-cluster/zk/data/myid
# On node 2
echo 2 >/app/zookeeper-cluster/zk/data/myid
# On node 3
echo 3 >/app/zookeeper-cluster/zk/data/myid
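Equivalently, the three myid files can be written from one node over SSH; the value must match the server.N entry for that node's IP in zoo.cfg. A sketch:
# Write a unique myid on each node
id=1
for ip in 192.168.1.61 192.168.1.62 192.168.1.63; do
  ssh root@${ip} "echo ${id} > /app/zookeeper-cluster/zk/data/myid"
  id=$((id+1))
done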
# Create the systemd unit file
cat <<EOF | tee /usr/lib/systemd/system/zookeeper.service
[Unit]
Description=zookeeper
After=network.target
[Service]
Type=forking
Environment=JAVA_HOME=/usr/local/jdk1.8.0_441
ExecStart=/app/zookeeper-cluster/zk/bin/zkServer.sh start
ExecStop=/app/zookeeper-cluster/zk/bin/zkServer.sh stop
PIDFile=/app/zookeeper-cluster/zk/data/zookeeper_server.pid
KillMode=none
User=root
Group=root
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
Start and Stop
# Reload systemd
systemctl daemon-reload
# Start
systemctl start zookeeper
# Stop
systemctl stop zookeeper
# Enable at boot and start
systemctl enable --now zookeeper
# Check status
systemctl status zookeeper
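After all three nodes are up, check each node's role; a healthy ensemble reports one leader and two followers:
# Run on every node
/app/zookeeper-cluster/zk/bin/zkServer.sh status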
Usage Test
# Log in to each node and configure the SASL ACLs
/app/zookeeper-cluster/zk/bin/zkCli.sh -server 192.168.1.61:2181
/app/zookeeper-cluster/zk/bin/zkCli.sh -server 192.168.1.62:2181
/app/zookeeper-cluster/zk/bin/zkCli.sh -server 192.168.1.63:2181
# Grant the SASL user read/write permissions on the root znode
setAcl / sasl:cby:crdwa
# Create a znode and verify the ACL:
addauth digest super:cby
create /cby
getAcl /
...(output omitted)...
[zk: 192.168.1.63:2181(CONNECTED) 0] setAcl / sasl:cby:crdwa
[zk: 192.168.1.63:2181(CONNECTED) 1]
[zk: 192.168.1.63:2181(CONNECTED) 1] addauth digest super:cby
[zk: 192.168.1.63:2181(CONNECTED) 2] create /cby
Node already exists: /cby
[zk: 192.168.1.63:2181(CONNECTED) 3] getAcl /
'sasl,'cby
: cdrwa
[zk: 192.168.1.63:2181(CONNECTED) 4]
Install the Kafka Cluster
# Download Kafka
# http://kafka.apache.org/downloads
# wget https://dlcdn.apache.org/kafka/3.9.0/kafka_2.13-3.9.0.tgz
wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.9.0/kafka_2.13-3.9.0.tgz
# Create the application directory
mkdir -vp /app/kafka-cluster
cd /app/kafka-cluster
# Unpack and install
mv /root/kafka_2.13-3.9.0.tgz .
tar xvf kafka_2.13-3.9.0.tgz
mv kafka_2.13-3.9.0 kafka
mkdir -p /app/kafka-cluster/kafka/kafka-logs
Modify the Configuration
# Edit the broker configuration
cd /app/kafka-cluster/kafka/config
vim server.properties
# Change broker.id, log.dirs, zookeeper.connect, listeners, and advertised.listeners, and add the settings below
# broker.id must be different on each of the three brokers
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
allow.everyone.if.no.acl.found=false
super.users=User:cby
My configuration
# My configuration
[root@zk-1 config]# cat server.properties | grep -Ev '^$|#'
broker.id=1
listeners=SASL_PLAINTEXT://:9092
advertised.listeners=SASL_PLAINTEXT://192.168.1.61:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/app/kafka-cluster/kafka/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.1.61:2181,192.168.1.62:2181,192.168.1.63:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
allow.everyone.if.no.acl.found=false
super.users=User:cby
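Nodes 2 and 3 use the same file with only broker.id and advertised.listeners changed. A sketch of the per-node overrides, assuming the file above was copied to the other brokers as-is:
# On node 2 (192.168.1.62)
sed -i -e 's/^broker.id=1/broker.id=2/' \
       -e 's#//192.168.1.61:9092#//192.168.1.62:9092#' /app/kafka-cluster/kafka/config/server.properties
# On node 3 (192.168.1.63)
sed -i -e 's/^broker.id=1/broker.id=3/' \
       -e 's#//192.168.1.61:9092#//192.168.1.63:9092#' /app/kafka-cluster/kafka/config/server.properties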
# Edit the producer configuration
cd /app/kafka-cluster/kafka/config
vim producer.properties
[root@zk-1 config]# cat producer.properties | grep -Ev '^$|#'
bootstrap.servers=192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092
compression.type=none
Server Authentication Configuration
# Configure server-side login authentication
cat <<EOF | tee /app/kafka-cluster/kafka/config/kafka_server_jaas.conf
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="cby"
password="Cby123.."
user_cby="Cby123..";
};
Client {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="cby"
password="Cby123..";
};
EOF
Client Authentication Configuration
# Configure client-side login authentication
cat <<EOF | tee /app/kafka-cluster/kafka/config/kafka_client_jaas.conf
KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="cby"
password="Cby123..";
};
EOF
Configure Broker Startup Authentication
# Edit bin/kafka-server-start.sh and append -Djava.security.auth.login.config=/app/kafka-cluster/kafka/config/kafka_server_jaas.conf to the KAFKA_HEAP_OPTS line
vim /app/kafka-cluster/kafka/bin/kafka-server-start.sh
[root@zk-3 bin]# cat kafka-server-start.sh | grep -Ev '^$|#'
then
echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G -Djava.security.auth.login.config=/app/kafka-cluster/kafka/config/kafka_server_jaas.conf"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
[root@zk-3 bin]#
Configure Producer/Consumer Authentication
# Configure login authentication for the console producer and consumer
# After export KAFKA_HEAP_OPTS="-Xmx512M, append -Djava.security.auth.login.config=/app/kafka-cluster/kafka/config/kafka_client_jaas.conf
vim /app/kafka-cluster/kafka/bin/kafka-console-consumer.sh
[root@zk-1 bin]# cat kafka-console-consumer.sh | grep -Ev '^$|#'
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M -Djava.security.auth.login.config=/app/kafka-cluster/kafka/config/kafka_client_jaas.conf"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.consumer.ConsoleConsumer "$@"
[root@zk-1 bin]#
vim /app/kafka-cluster/kafka/bin/kafka-console-producer.sh
[root@zk-1 bin]# cat kafka-console-producer.sh | grep -Ev '^$|#'
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M -Djava.security.auth.login.config=/app/kafka-cluster/kafka/config/kafka_client_jaas.conf"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
[root@zk-1 bin]#
Configure Start and Stop
# Create the systemd unit file
cat <<EOF | tee /usr/lib/systemd/system/kafka.service
[Unit]
Description=kafka-node01
After=network.target
[Service]
Type=simple
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/jdk1.8.0_441/bin"
User=root
Group=root
LimitNOFILE=100000
ExecStart=/app/kafka-cluster/kafka/bin/kafka-server-start.sh /app/kafka-cluster/kafka/config/server.properties
ExecStop=/app/kafka-cluster/kafka/bin/kafka-server-stop.sh
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
Start and Stop
# Reload systemd
systemctl daemon-reload
# Start
systemctl start kafka
# Stop
systemctl stop kafka
# Enable at boot and start
systemctl enable --now kafka
# Check status
systemctl status kafka
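Once Kafka is running on all three nodes, a quick health check is to list the broker registrations in ZooKeeper (the zkCli session authenticates through the JAAS file configured earlier):
/app/zookeeper-cluster/zk/bin/zkCli.sh -server 192.168.1.61:2181 ls /brokers/ids
# Expect the three broker ids, e.g. [1, 2, 3]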
Configure Credentials
# Because SASL authentication is enabled, the admin CLI tools need a client properties file with the credentials
cat <<EOF | tee /app/kafka-cluster/kafka/config/admin.conf
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="cby" password="Cby123..";
EOF
Test Usage
# Run the following from /app/kafka-cluster/kafka/bin
# Create a topic
./kafka-topics.sh --create --topic cby --replication-factor 3 --partitions 3 --bootstrap-server 192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092 --command-config ../config/admin.conf
# List topics
./kafka-topics.sh --list --bootstrap-server 192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092 --command-config ../config/admin.conf
# Produce messages
./kafka-console-producer.sh --broker-list 192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092 --topic cby --producer.config ../config/admin.conf
>123
>321
# Consume the messages
./kafka-console-consumer.sh --bootstrap-server 192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092 --topic cby --from-beginning --consumer.config ../config/admin.conf
123
321
# Delete the topic
./kafka-topics.sh --delete --topic cby --bootstrap-server 192.168.1.61:9092,192.168.1.62:9092,192.168.1.63:9092 --command-config ../config/admin.conf
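Since allow.everyone.if.no.acl.found=false, any account other than the super user cby needs explicit ACLs before it can produce or consume. A sketch for a hypothetical user named reader (such a user would also need credentials added to kafka_server_jaas.conf on every broker):
# Hypothetical example: allow "reader" to consume from topic cby with any consumer group
./kafka-acls.sh --bootstrap-server 192.168.1.61:9092 --command-config ../config/admin.conf \
  --add --allow-principal User:reader --operation Read --topic cby --group '*'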
About
https://www.oiox.cn/index.php/start-page.html
CSDN, GitHub, Zhihu, OSChina, SegmentFault, Juejin, Jianshu, Huawei Cloud, Alibaba Cloud, Tencent Cloud, Bilibili, Toutiao, Sina Weibo, personal blog
Search for "小陈运维" on any of these platforms.