两台主机:

192.168.2.163

192.168.2.165

# yum安装haproxy
yum install haproxy
# cat /etc/haproxy/haproxy.cfg 实际使用的:
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
user haproxy
group haproxy
daemon
nbproc 4
maxconn 100000
tune.ssl.default-dh-param 2048

defaults
log global
option httplog
option forwardfor
option abortonclose
option dontlognull
retries 2
maxconn 100000
timeout connect 5s
timeout client 10m
timeout server 10m

listen admin_stats
mode http
bind *:8899
stats enable
stats refresh 30s
stats uri /stats
stats realm XingCloud\ Haproxy
stats auth admin:admin
stats hide-version

listen www
bind 0.0.0.0:8888 # 80端口被占用了,这里改用8888端口
mode http
balance roundrobin
server www1 192.168.2.162:8080 check inter 2000 rise 30 fall 15
server www2 192.168.2.164:8080 check inter 2000 rise 30 fall 15

#### 以下这些是参考的 ##########
global
log 127.0.0.1 local0
maxconn 100000
user haproxy
group haproxy
daemon
nbproc 4
tune.ssl.default-dh-param 2048
defaults
log global
mode http
#option httpclose
option redispatch
option forwardfor
option abortonclose
option dontlognull
retries 2
maxconn 100000
#balance source
timeout connect 10000
timeout client 100000
timeout server 100000

listen admin_stats
bind *:8899
mode http
option httplog
log 127.0.0.1 local0 err
maxconn 10
stats refresh 30s
stats uri /stats
stats realm XingCloud\ Haproxy
stats auth admin:admin
stats hide-version
listen redis
bind 0.0.0.0:6379
mode tcp
balance roundrobin
server node1 10.10.72.45:6379 minconn 4 maxconn 10000 check inter 2000 rise 2 fall 5
server node2 10.10.72.46:6379 minconn 4 maxconn 10000 check inter 2000 rise 2 fall 5

listen gxpt-dsqz
bind 0.0.0.0:52001
mode http
balance roundrobin
option httpchk GET /
server node1 10.10.72.29:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.10.72.30:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node3 10.10.72.31:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node4 10.10.72.32:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node5 10.10.72.33:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node6 10.10.72.34:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2

listen gxpt-dsqz-ssl
bind 0.0.0.0:54001 ssl crt /opt/cert/gxpt.pem verify none
mode http
balance roundrobin
option httpchk GET /
server node1 10.10.72.2:5001 ssl verify none minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.10.72.3:5001 ssl verify none minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
# 在client增加cookie
backend backend_www
option forwardfor
cookie SERVERID insert indirect nocache #插入session信息
option redispatch #当后端rs挂了,可立即切换,不会出现503错误
option httpchk HEAD / HTTP/1.0
balance roundrobin
server www1 192.168.1.198:80 cookie www1 check inter 2000 rise 30 fall 15
server www2 192.168.1.52:80 cookie www2 check inter 2000 rise 30 fall 15

# balance source 根据原ip,经过hash计算后,指定后端固定的rs
backend backend_www
option forwardfor
option httpchk HEAD / HTTP/1.0
balance source
server www1 192.168.1.198:80 check inter 2000 rise 30 fall 15
server www2 192.168.1.52:80 check inter 2000 rise 30 fall 15

frontend frontend_58001
bind 0.0.0.0:58001
mode http
option tcplog
acl fpcloud-yypt path_beg -i /fpcloud-yypt
use_backend fpcloud-yypt if fpcloud-yypt
acl fpcloud-web path_beg -i /fpcloud-web
use_backend fpcloud-web if fpcloud-web

backend fpcloud-web
mode http
balance leastconn
server node1 10.72.1.233:58001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.72.1.241:58001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2

backend fpcloud-yypt
mode http
balance leastconn
server node1 10.72.1.233:58002 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.72.1.241:58002 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2

# 启动haproxy
systemctl start haproxy.service
systemctl enable haproxy.service
2	配置haproxy 日志
# 编辑haproxy配置文件,这一步配置文件中已经写过了,这里不用再修改了
# vim haproxy.cfg
global
log 127.0.0.1 local2
#local2是设备,对应于/etc/rsyslog.conf中的配置,默认是info的日志级别
defaults
log global # 必须配置
option httplog # 配置

# 编辑系统日志配置
# 为haproxy创建一个独立的配置文件
# vim /etc/rsyslog.d/haproxy.conf
$ModLoad imudp
$UDPServerRun 514
local2.* /opt/var/logs/haproxy/haproxy.log
local2.warning /opt/var/logs/haproxy/haproxy_warn.log
# 如果不加下面的配置则除了在/opt/var/logs/haproxy/haproxy.log 中写入日志外,也会写入message文件

# vim /etc/rsyslog.conf
默认有下面的设置,会读取 /etc/rsyslog.d/*.conf目录下的配置文件
$IncludeConfig /etc/rsyslog.d/*.conf

# 禁止写入message
*.info;mail.none;authpriv.none;cron.none;local2.none /var/log/messages

# mkdir /opt/var/logs/haproxy/ -p

# 配置rsyslog的主配置文件,开启远程日志
# vim /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-c 2 -r -m 0"
#-c 2 使用兼容模式,默认是 -c 5
#-r 开启远程日志
#-m 0 标记时间戳。单位是分钟,为0时,表示禁用该功能

# 重启haproxy和rsyslog服务
# centos7
# systemctl restart rsyslog
# systemctl restart haproxy
# systemctl enable rsyslog
3 配置haproxy日志轮转
# vim /etc/logrotate.d/haproxy
/opt/var/logs/haproxy/haproxy*.log {
daily
rotate 7
create
missingok
notifempty
dateext
compress
sharedscripts
postrotate
# /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
# /bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
/etc/init.d/rsyslog restart
endscript
}

参考系统默认配置:
/opt/var/logs/haproxy/*.log {
daily
rotate 10
missingok
notifempty
compress
sharedscripts
postrotate
/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
/bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
endscript
}

# 强制轮转测试
# logrotate -vf /etc/logrotate.d/haproxy

安装keepalived

yum -y install epel-release

yum -y install keepalived


# 163主机操作,作为master
# vim /etc/keepalived/keepalived.conf
global_defs {
router_id haproxy_ha1
}
vrrp_script chk_maintaince_down {
script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"
interval 1
weight 2
}
vrrp_script chk_haproxy {
script "/etc/keepalived/scripts/haproxy_check.sh"
interval 2
timeout 2
fall 3
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 29
priority 100
authentication {
auth_type PASS
auth_pass 1e3459f77aba4ded
}
track_interface {
ens33
}
virtual_ipaddress {
192.168.2.250 dev ens33 label ens33:1
}
track_script {
chk_haproxy
}
notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}

165主机操作,作为back

# vim /etc/keepalived/keepalived.conf
global_defs {
router_id haproxy_ha1
}
vrrp_script chk_maintaince_down {
script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"
interval 1
weight 2
}
vrrp_script chk_haproxy {
script "/etc/keepalived/scripts/haproxy_check.sh"
interval 2
timeout 2
fall 3
}
vrrp_instance VI_1 {
state BACKUP # 与上面的不同(注意:合法取值只有 MASTER/BACKUP,写成 BACK 会导致 keepalived 解析失败)
interface ens33
virtual_router_id 29
priority 90 # 比上面的小
authentication {
auth_type PASS
auth_pass 1e3459f77aba4ded
}
track_interface {
ens33
}
virtual_ipaddress {
192.168.2.250 dev ens33 label ens33:1
}
track_script {
chk_haproxy
}
notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}

两台主机都需要做的操作:

mkdir -p /etc/keepalived/scripts
mkdir -p /opt/var/logs/keepalived/

# vim /etc/keepalived/scripts/haproxy_check.sh
#!/bin/bash
# Health-check script invoked by keepalived's vrrp_script (chk_haproxy).
# If haproxy has no running process, attempt one restart; exit non-zero
# only when the restart also fails, so keepalived drops the VIP and it
# fails over to the backup node.
LOGFILE="/opt/var/logs/keepalived/keepalived-haproxy-state.log"

# ps -C prints one line per haproxy process; 0 lines means it is down.
if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
  date >> "$LOGFILE"
  systemctl restart haproxy
  sleep 1   # give the service a moment to come up before re-checking
  if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
    echo "fail: check_haproxy status" >> "$LOGFILE"
    exit 1
  else
    echo "success: restart_haproxy status" >> "$LOGFILE"
    exit 0
  fi
else
  exit 0
fi

# vim /etc/keepalived/scripts/haproxy_master.sh
#!/bin/bash
# Invoked by keepalived's notify_master hook when this node takes over the VIP;
# records the transition in the shared state log.
LOGFILE="/opt/var/logs/keepalived/keepalived-haproxy-state.log"
echo "Being Master ..." >> "$LOGFILE"

# NOTE(review): in the original article the following command was fused onto the
# echo line above, which made chmod's arguments part of the log message and left
# the scripts non-executable. It is a separate command, run once on both hosts:
chmod a+x /etc/keepalived/scripts/haproxy_check.sh /etc/keepalived/scripts/haproxy_master.sh

两台主机启动keepalived

163主机网卡信息

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:3a:cc:20 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.163/24 brd 192.168.2.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.2.250/32 scope global ens33:1
valid_lft forever preferred_lft forever
inet6 fe80::8041:19f:b29:7354/64 scope link noprefixroute
valid_lft forever preferred_lft forever

165主机网卡信息

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:35:92:64 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.165/24 brd 192.168.2.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::7320:404e:a7f2:6fbf/64 scope link noprefixroute
valid_lft forever preferred_lft forever
inet6 fe80::6435:91f7:6c5:fa28/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::8ebe:5815:b0b3:d833/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever

haproxy故障漂移测试

目前脚本的作用是在keepalive vip 那台服务器 停止haproxy服务,会立刻再启动haproxy服务,除非这台主机关机,没法再启动haproxy服务,

此时keepalive vip 才会漂移到另外一台haproxy服务上。

当原有主机再次启动haproxy服务后,keepalive vip 又会回来。

问题:

1.在keepalive vip 漂移过程中会有短暂的服务访问缓慢的情况

2.haproxy中设置的是轮询,火狐浏览器上会看到效果,谷歌浏览器上效果不明显

haproxy + keeplived的更多相关文章

  1. 【原】基于 HAproxy 1.6.3 Keeplived 在 Centos 7 中实现mysql mariadb galera cluster 集群分发读写 —— 上篇

    前言 有一段时间没有写blogs,乘着周末开始整理下haproxy + keeplived 实现 mysql mariadb galera cluster 集群访问环境的搭建工作. 本文集中讲hapr ...

  2. 负载均衡之Haproxy配置详解(及httpd配置)

    下图描述了使用keepalived+Haproxy主从配置来达到能够针对前段流量进行负载均衡到多台后端web1.web2.web3.img1.img2.但是由于haproxy会存在单点故障问题,因此使 ...

  3. k8s Kubernetes v1.10 最简易安装 shell

    k8s Kubernetes v1.10 最简易安装 shell # Master 单节点快速安装 # 最简单的安装shell,只为快速部署k8s测试环境 #环境centos 7.4 #1 初始化环境 ...

  4. Percona XtraDB Cluster(PXC)-高可用架构设计说明

    Mycat+PXC高可用集群 一.架构图 架构说明: 1.mysql 集群高可用部分: l 针对业务场景选用Percona XtraDB Cluter(PXC)复制集群.两个片集群 PXC-dataN ...

  5. 记录一次k8s环境尝试过程(初始方案,现在已经做过很多完善,例如普罗米修斯)

    记录一次Team k8s环境搭建过程(初始方案,现在已经做过很多完善,例如普罗米修斯) span::selection, .CodeMirror-line > span > span::s ...

  6. RabbitMQ从零到集群高可用(.NetCore5.0) -高可用集群构建落地

    系列文章: RabbitMQ从零到集群高可用(.NetCore5.0) - RabbitMQ简介和六种工作模式详解 RabbitMQ从零到集群高可用(.NetCore5.0) - 死信队列,延时队列 ...

  7. HAProxy(三):Keeplived+HAProxy搭建高可用负载均衡动静分离架构基础配置示例

    一.安装环境 1.软件版本 HAProxy:1.5.18 Keepalived:1.3.5 Nginx:1.12.2 PHP:7.2 系统版本:CentOS 7.4 2.IP分配与架构图 3.安装软件 ...

  8. [Z]haproxy+keepalived高可用群集

    http://blog.51cto.com/13555423/2067131 Haproxy是目前比较流行的一种集群调度工具Haproxy 与LVS.Nginx的比较LVS性能最好,但是搭建相对复杂N ...

  9. Nginx + Keeplived双主测试

    Author: JinDate: 20130613Title: Nginx + Keeplived 双主测试 前言:一年多前做过一次测试,时间久了忘记了,现在又重新做一次 一.环境1.基本信息和规划p ...

随机推荐

  1. MIT 6.824 Lab2D Raft之日志压缩

    书接上文Raft Part C | MIT 6.824 Lab2C Persistence. 实验准备 实验代码:git://g.csail.mit.edu/6.824-golabs-2021/src ...

  2. Windows 远程连接后,自动断开,所有程序都自动关闭(待验证,待更新)

    win+r输入regedit打开注册表编辑SecurityLayer,将值改为2 计算机\HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Ter ...

  3. docker数据卷技术

    数据卷技术 数据卷手动挂载 数据卷容器 part1:数据卷挂载方式 数据卷手动挂载 -v 主机目录:容器目录 #核心参数 #示例 docker run -it --name=centos_test - ...

  4. css基础02

    熟练快捷键!方便,要多练!  css复合选择器 不会选孙子,有一个儿子和另一个儿子的孩子(也是孙子)同名了,但子选择器子选择儿子,同名的孙子不选.和后代选择器有一点不一样的. " ,&quo ...

  5. 在DELL服务器上安装windows2012 r2服务器系统

    主要过程: 1.准备安装光盘,开启服务器,当出现画面按F10进入服务器自带光盘系统安装向导.(若没有系统光盘,可以用软蝶通刻一个服务系统到+R的光盘).进入后选择设置和安装系统. 2.开始安装前,提示 ...

  6. 优雅退出在Golang中的实现

    背景 为什么需要优雅关停 在Linux下运行我们的go程序,通常有这样2种方式: 前台启动.打开终端,在终端中直接启动某个进程,此时终端被阻塞,按CTRL+C退出程序,可以输入其他命令,关闭终端后程序 ...

  7. React报错之Encountered two children with the same key

    正文从这开始~ 总览 当我们从map()方法返回的两个或两个以上的元素具有相同的key属性时,会产生"Encountered two children with the same key&q ...

  8. LuoguP2523 [HAOI2011]Problem c(概率DP)

    傻逼概率\(DP\),熊大坐这,熊二坐这,两熊体积从右往左挤,挤到\(FFF\)没座位了就不合理了 否则就向左歇斯底里爬,每个\(FFF\)编号就组合一下,完闭 #include <iostre ...

  9. MyBatis-Plus 代码生成

    MyBatis-Plus官网的代码生成器配置不是特别全,在此整理了较为完整的配置,供自己和大家查阅学习. // 代码生成器 AutoGenerator mpg = new AutoGenerator( ...

  10. DS队列----银行单队列多窗口模拟

    题目描述 假设银行有K个窗口提供服务,窗口前设一条黄线,所有顾客按到达时间在黄线后排成一条长龙.当有窗口空闲时,下一位顾客即去该窗口处理事务.当有多个窗口可选择时,假设顾客总是选择编号最小的窗口. 本 ...