author:JevonWei

版权声明:原创作品


Keepalive实战之LVS-DR

实验目的:构建LVS-DR架构,为了达到LVS的高可用目的,故在LVS-DR的Director端做Keepalive集群,在Director-A上做keepalive-A,在Director上做keepalive-B,LVS-RS1和LVS-RS2为后端的两台web服务器,通过在Director上做keepalive集群实现高可用的目的

网络拓扑图

实验环境(keepalive节点同时作为LVS的director节点)

keepalive-A(Director-A) 172.16.253.108
keepalive-B(Director-B) 172.16.253.105
LVS-RS1 172.16.250.127
LVS-RS2 172.16.253.193
VIP 172.16.253.150
client 172.16.253.177

LVS-RS web集群

为了更好的观察实验结果,故在此将RS1和RS2的web页面内容设置不一致,以致可以更清晰的区分RS1服务端和RS2服务端

LVS-RS1

[root@LVS-RS1 ~]# systemctl restart chronyd  \\多台服务器时间同步
[root@LVS-RS1 ~]# iptables -F
[root@LVS-RS1 ~]# setenforce 0
[root@LVS-RS1 ~]# yum -y install nginx
[root@LVS-RS1 ~]# vim /usr/share/nginx/html/index.html
<h1> Web RS1 </h1>
[root@LVS-RS1 ~]# systemctl start nginx

修改内核参数并添加VIP地址
[root@LVS-RS1 ~]# vim lvs_dr.sh
#!/bin/bash
#
# lvs_dr.sh — prepare this real server for LVS-DR.
# Usage: lvs_dr.sh start|stop
#   start: bind the VIP on loopback alias lo:0 (host route, /32) and tighten
#          the kernel ARP parameters so this host never answers or announces
#          ARP for the VIP — only the Director may own it on the LAN.
#   stop:  restore default ARP behaviour and remove the VIP.
vip=172.16.253.150
mask=255.255.255.255
iface="lo:0"

case "$1" in
start)
  # VIP lives on loopback with a /32 mask so it is never routed out.
  ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
  route add -host "$vip" dev "$iface"
  # arp_ignore=1: only reply to ARP if the target IP is configured on the
  # interface the request arrived on (lo is not, so the VIP stays silent).
  echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  # arp_announce=2: always use the best local address as ARP source,
  # never the VIP, when this host sends ARP requests.
  echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
  ;;
stop)
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
  ifconfig "$iface" down
  ;;
*)
  echo "Usage:$(basename "$0") start|stop"
  exit 1
  ;;
esac
[root@LVS-RS1 ~]# bash lvs_dr.sh start
[root@LVS-RS1 ~]# ifconfig
lo:0: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 172.16.253.150 netmask 255.255.255.255
loop txqueuelen 1 (Local Loopback)

LVS-RS2

[root@LVS-RS2 ~]# systemctl restart chronyd  \\多台服务器时间同步
[root@LVS-RS2 ~]# iptables -F
[root@LVS-RS2 ~]# setenforce 0
[root@LVS-RS2 ~]# yum -y install nginx
[root@LVS-RS2 ~]# vim /usr/share/nginx/html/index.html
<h1> Web RS2 </h1>
[root@LVS-RS2 ~]# systemctl start nginx

修改内核参数并添加VIP地址
[root@LVS-RS2 ~]# vim lvs_dr.sh
#!/bin/bash
#
# lvs_dr.sh — prepare this real server for LVS-DR.
# Usage: lvs_dr.sh start|stop
#   start: bind the VIP on loopback alias lo:0 (host route, /32) and tighten
#          the kernel ARP parameters so this host never answers or announces
#          ARP for the VIP — only the Director may own it on the LAN.
#   stop:  restore default ARP behaviour and remove the VIP.
vip=172.16.253.150
mask=255.255.255.255
iface="lo:0"

case "$1" in
start)
  # VIP lives on loopback with a /32 mask so it is never routed out.
  ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
  route add -host "$vip" dev "$iface"
  # arp_ignore=1: only reply to ARP if the target IP is configured on the
  # interface the request arrived on (lo is not, so the VIP stays silent).
  echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  # arp_announce=2: always use the best local address as ARP source,
  # never the VIP, when this host sends ARP requests.
  echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
  ;;
stop)
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
  echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
  echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
  ifconfig "$iface" down
  ;;
*)
  echo "Usage:$(basename "$0") start|stop"
  exit 1
  ;;
esac
[root@LVS-RS1 ~]# bash lvs_dr.sh start
[root@LVS-RS1 ~]# ifconfig
lo:0: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 172.16.253.150 netmask 255.255.255.255
loop txqueuelen 1 (Local Loopback)

Keepalive集群

Director节点搭建

keepalive-A

[root@keepaliveA ~]# systemctl restart chronyd  \\多台服务器时间同步
[root@keepaliveA ~]# yum -y install ipvsadm

keepalive-B

[root@keepaliveB ~]# systemctl restart chronyd  \\多台服务器时间同步
[root@keepaliveB ~]# yum -y install ipvsadm

keepalive上配置web的sorry server

keepalive-A

[root@keepaliveA ~]# yum -y install nginx
[root@keepaliveA ~]# vim /usr/share/nginx/html/index.html
</h1> sorry from Director-A(keepalive-A) </h1>
[root@keepaliveA ~]# systemctl start nginx

keepalive-B

[root@keepalive-B ~]# yum -y install nginx
[root@keepalive-B ~]# vim /usr/share/nginx/html/index.html
</h1> sorry from Director-B(keepalive-B) </h1>
[root@keepaliveB ~]# systemctl start nginx

keepalive-A配置keepalive

keepalive-A

[root@keepalive-A ~]# iptables -F
[root@keepalive-A ~]# yum -y install keepalived
[root@keepaliveA ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    notification_email {                 # alert mail recipients
        jevon@danran.com
    }
    notification_email_from ka_admin@danran.com   # sender address
    smtp_server 127.0.0.1                # SMTP relay used for alerts
    smtp_connect_timeout 30              # SMTP connect timeout (seconds)
    router_id keepaliveA                 # node identifier, unique per host
    vrrp_mcast_group4 224.103.5.5        # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100                         # higher than the BACKUP node (95)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}

virtual_server 172.16.253.150 80 {
    delay_loop 6                         # health-check interval (seconds)
    lb_algo rr                           # scheduling algorithm
    lb_kind DR                           # LVS forwarding mode
    protocol TCP
    sorry_server 127.0.0.1 80            # local fallback page when all RS are down

    real_server 172.16.250.127 80 {
        weight 1
        # Backends serve plain HTTP on port 80, so use HTTP_GET
        # (SSL_GET would attempt a TLS handshake and always fail).
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # checksum of a healthy response body
                status_code 200          # status code considered healthy
            }
            connect_timeout 3            # connect timeout (seconds)
            nb_get_retry 3               # retries before marking the RS down
            delay_before_retry 1         # delay between retries (seconds)
        }
    }
    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}
[root@keepaliveA ~]# systemctl start keepalived
[root@keepaliveA ~]# ip a l
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff
inet 172.16.253.150/32 scope global ens33
valid_lft forever preferred_lft forever
[root@keepaliveA ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.253.150:80 rr
-> 172.16.250.127:80 Route 1 0 0
-> 172.16.253.193:80 Route 1 0 0

keepalive-B配置keepalive

keepalive-B

[root@keepalive-B ~]# iptables -F
[root@keepalive-B ~]# yum -y install keepalived
[root@keepaliveB ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    notification_email {                 # alert mail recipients
        jevon@danran.com
    }
    notification_email_from ka_admin@danran.com   # sender address
    smtp_server 127.0.0.1                # SMTP relay used for alerts
    smtp_connect_timeout 30              # SMTP connect timeout (seconds)
    router_id keepaliveB                 # node identifier, must differ from node A
    vrrp_mcast_group4 224.103.5.5        # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 95                          # lower than the MASTER node (100)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}

virtual_server 172.16.253.150 80 {
    delay_loop 6                         # health-check interval (seconds)
    lb_algo rr                           # scheduling algorithm
    lb_kind DR                           # LVS forwarding mode
    protocol TCP
    sorry_server 127.0.0.1 80            # local fallback page when all RS are down

    real_server 172.16.250.127 80 {
        weight 1
        # Backends serve plain HTTP on port 80, so use HTTP_GET
        # (SSL_GET would attempt a TLS handshake and always fail).
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # checksum of a healthy response body
                status_code 200          # status code considered healthy
            }
            connect_timeout 3            # connect timeout (seconds)
            nb_get_retry 3               # retries before marking the RS down
            delay_before_retry 1         # delay between retries (seconds)
        }
    }
    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}
[root@keepaliveB ~]# systemctl start keepalived
[root@keepalive-B ~]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.253.150:http rr
-> 172.16.250.127:http Route 1 0 0
-> 172.16.253.193:http Route 1 0 0

访问测试

client测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>

当keepalive-A故障时

[root@keepaliveA ~]# systemctl stop keepalived

keepalive-B自动成为MASTER主节点,则LVS的director调度服务器切换为keepalive-B上,LVS-RS1和LVS-RS2的web服务正常使用

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>

当keepalive-A恢复正常时,keepalive-A再次成为MASTER主节点

[root@keepaliveA ~]# systemctl start keepalived
[root@keepaliveA ~]# ip a l
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff
inet 172.16.253.150/32 scope global ens33
valid_lft forever preferred_lft forever

当LVS-RS1的web服务故障时

[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT

client访问

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
<h1> Web RS2 </h1>
<h1> Web RS2 </h1>
<h1> Web RS2 </h1>
<h1> Web RS2 </h1>

当LVS-RS1和LVS-RS2的web服务全部故障时

[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT
[root@LVS-RS2 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT

client访问到的是sorry server服务器,且sorry server服务器为keepalive-A

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
</h1> sorry from Director-A(keepalive-A) </h1>
</h1> sorry from Director-A(keepalive-A) </h1>
</h1> sorry from Director-A(keepalive-A) </h1>
</h1> sorry from Director-A(keepalive-A) </h1>
</h1> sorry from Director-A(keepalive-A) </h1>

当keepalive-A故障时

[root@keepaliveA ~]# systemctl stop keepalived.service

client访问sorry server服务页面,且sorry server服务器为keepalive-B

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
</h1> sorry from Director-B(keepalive-B) </h1>
</h1> sorry from Director-B(keepalive-B) </h1>
</h1> sorry from Director-B(keepalive-B) </h1>
</h1> sorry from Director-B(keepalive-B) </h1>
</h1> sorry from Director-B(keepalive-B) </h1>

LVS-RS1的web服务恢复正常后

[root@LVS-RS1 ~]# iptables -F

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
<h1> Web RS1 </h1>
<h1> Web RS1 </h1>
<h1> Web RS1 </h1>
<h1> Web RS1 </h1>
<h1> Web RS1 </h1>

LVS-RS1和LVS-RS2的web服务全部恢复正常后

[root@LVS-RS1 ~]# iptables -F  [root@LVS-RS2 ~]# iptables -F

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>
<h1> Web RS1 </h1>
<h1> Web RS2 </h1>

保存及重载规则

保存:建议保存至/etc/sysconfig/ipvsadm

ipvsadm-save > /PATH/TO/IPVSADM_FILE
ipvsadm -S > /PATH/TO/IPVSADM_FILE
systemctl stop ipvsadm.service

重载:

ipvsadm-restore < /PATH/FROM/IPVSADM_FILE
ipvsadm -R < /PATH/FROM/IPVSADM_FILE
systemctl restart ipvsadm.service

keepalive节点通过DNS域名解析指向实现

获取web主页面内容的校验码

[root@keepaliveA ~]# genhash -s 172.16.250.127 -p 80 -u /

keepalive之LVS-DR架构的更多相关文章

  1. [svc]高并发场景 LVS DR +KeepAlive高可用实现及ka的persistence_timeout参数

    LVS-DR+keepalived模式是一种非常经典的常用生产组合 高可用场景及LVS架构 一般都用一(负载)拖多(Server Array)方式 使用LVS架设的服务器集群系统有三个部分组成: (1 ...

  2. Linux centosVMware Linux集群架构LVS DR模式搭建、keepalived + LVS

    一.LVS DR模式搭建 三台机器 分发器,也叫调度器(简写为dir) davery :1.101 rs1 davery01:1.106 rs2 davery02:11.107 vip 133.200 ...

  3. Keepalived+LVS DR模式高可用架构实践

    Keepalived最初是为LVS设计,专门监控各服务器节点的状态(LVS不带健康检查功能,所以使用keepalived进行健康检查),后来加入了VRRP(虚拟路由热备协议(Virtual Route ...

  4. centos LB负载均衡集群 三种模式区别 LVS/NAT 配置 LVS/DR 配置 LVS/DR + keepalived配置 nginx ip_hash 实现长连接 LVS是四层LB 注意down掉网卡的方法 nginx效率没有LVS高 ipvsadm命令集 测试LVS方法 第三十三节课

    centos   LB负载均衡集群 三种模式区别 LVS/NAT 配置  LVS/DR 配置  LVS/DR + keepalived配置  nginx ip_hash 实现长连接  LVS是四层LB ...

  5. 一个公网地址部署LVS/DR模式

    http://blog.chinaunix.net/uid-7411781-id-3436142.html 一个公网地址部署LVS/DR模式   网上看了很多关于LVS的文章,在选取2种模式LVS/D ...

  6. Linux下部署LVS(DR)+keepalived+Nginx负载均衡

    架构部署 LVS/keepalived(master):192.168.21.3  LVS/keepalived(Slave):192.168.21.6  Nginx1:192.168.21.4  N ...

  7. LVS DR模式搭建 keepalived lvs

    LVS DR模式搭建• 三台机器 • 分发器,也叫调度器(简写为dir)172.16.161.130 • rs1 172.16.161.131 • rs2 172.16.161.132 • vip 1 ...

  8. lvs dr 模式请求过程

    一. lvs dr 模式请求过程 1.整个请求过程如下: client在发起请求之前,会发一个arp广播的包,在网络中找"谁是vip",由于所有的服务器,lvs和rs都有vip,为 ...

  9. lvs/dr配置

    lvs/dr Director server : DIP:192.168.1.100/24  eth0 VIP:192.168.1.101/24  eth0:0 Real server: Real1: ...

  10. LVS DR模式 负载均衡服务搭建

    LVS 负载均衡 最近在研究服务器负载均衡,阅读了网上的一些资料,发现主要的软件负载均衡方案有nginx(针对HTTP服务的负载均衡),LVS(针对IP层,MAC层的负载均衡).LVS模式工作在网络层 ...

随机推荐

  1. [COGS 1752] 摩基亚Mokia

    照例先上题面 1752. [BOI2007]摩基亚Mokia 输入文件:mokia.in   输出文件:mokia.out 时间限制:1.5 s   内存限制:128 MB [题目描述] 摩尔瓦多的移 ...

  2. ECMAScript6新特性之let、const

    第一次在博客园写博客,想把自己每一天学习到的知识点记录下来,心里有点紧张(PS:不知道自己能不能写好......嘿嘿).言归正传,咱们先来说说"ECMAScript"这到底是啥玩意 ...

  3. [js高手之路] es6系列教程 - 箭头函数详解

    箭头函数是es6新增的非常有意思的特性,初次写起来,可能会觉得别扭,习惯之后,会发现很精简. 什么是箭头函数? 箭头函数是一种使用箭头( => )定义函数的新语法, 主要有以下特性: 不能通过n ...

  4. Round #427 A. Key races(Div.2)

      time limit per test 1 second memory limit per test 256 megabytes input standard input output stand ...

  5. 快速排序算法的C语言实现

    #include<stdio.h> int partition(int a[],int low,int high) { int key=a[low]; while(low<high) ...

  6. Open-Falcon第一步环境准备(小米开源互联网企业级监控系统)

    1.环境安装 本文采取rpm安装方式,大家也可以用源码包安装. wget http://download.fedoraproject.org/pub/epel/6/i386/epel-release- ...

  7. 一次和matplotlib和numpy的初识及简单的异常值清理

    1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111 ...

  8. 【机器学习笔记之三】CART 分类与回归树

    本文结构: CART算法有两步 回归树的生成 分类树的生成 剪枝 CART - Classification and Regression Trees 分类与回归树,是二叉树,可以用于分类,也可以用于 ...

  9. 使用sed命令向文件中追加可变字符串

    1.如何向文件追加可变字符串,有如下两种方法 sed -i '1a '$s'' filename sed -i "1a $s" filename 注意: 以上命令是假定向文件fil ...

  10. Oracle execute and call

    --execute和call的区别 -------------------------2014/01/14 EXEC is a sqlplus command that put its argumen ...