#!/bin/bash
# Source: https://github.com/SILLKY/kubernetes-pro/tree/master/Master-HA
# Ships with several supporting files; written for Kubernetes 1.6.1.
#
# Hostnames and IP addresses of the three master nodes.
# (In the scraped original the shebang was fused into a comment line,
# which also commented out the `host=` assignment — fixed here.)
host=master.k8s
host1=master1.k8s
host2=master2.k8s
ip0=192.168.255.125
ip1=192.168.255.126
ip2=192.168.255.127
# Back up the live etcd data directory and serve the snapshot from a
# temporary etcd container (etcd_test) on ports 12379/12380, so the data
# can later be mirrored back into the rebuilt cluster by etcd_recover.
kube::etcd_backup()
{
    # Remove any leftover backup container from a previous run.
    docker stop etcd_test && docker rm etcd_test
    # Snapshot the current etcd data directory to /home/etcd.
    rm -rf /home/etcd && cp -r /var/lib/etcd /home
    # Serve the snapshot from a throwaway etcd instance.
    docker run --name etcd_test -d -p 12379:2379 -p 12380:2380 \
        -v /home/etcd:/data appcelerator/etcd:3.0.15
    echo '备份数据完毕'
}
# Copy the required files to the other master nodes.
# Distribute configuration to the two secondary masters: static-pod
# manifests, PKI material, kubeconfig files, per-host etcd/apiserver
# manifests, and the PKI-regeneration script.
# (The scraped original fused several scp commands onto single lines,
# producing broken multi-source scp invocations — split here.)
kube::copy_file()
{
    # Update the local etcd and apiserver static-pod manifests.
    cp etcd.yaml /etc/kubernetes/manifests/
    cp kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml

    # Make sure both secondary masters resolve the cluster hostnames.
    scp /etc/hosts root@$host1:/etc/
    scp /etc/hosts root@$host2:/etc/

    # --- master1 ---
    scp -r /etc/kubernetes/manifests root@$host1:/etc/kubernetes/
    scp -r /etc/kubernetes/pki root@$host1:/etc/kubernetes/
    scp /etc/kubernetes/controller-manager.conf root@$host1:/etc/kubernetes/
    scp /etc/kubernetes/scheduler.conf root@$host1:/etc/kubernetes/
    scp /etc/kubernetes/admin.conf root@$host1:/etc/kubernetes/
    scp etcd1.yaml root@$host1:/etc/kubernetes/manifests/etcd.yaml
    scp kube-apiserver1.yaml root@$host1:/etc/kubernetes/manifests/kube-apiserver.yaml
    scp create-pki1.sh root@$host1:/root/create-pki.sh

    # --- master2 ---
    scp -r /etc/kubernetes/manifests root@$host2:/etc/kubernetes/
    scp -r /etc/kubernetes/pki root@$host2:/etc/kubernetes/
    scp /etc/kubernetes/controller-manager.conf root@$host2:/etc/kubernetes/
    scp /etc/kubernetes/scheduler.conf root@$host2:/etc/kubernetes/
    scp /etc/kubernetes/admin.conf root@$host2:/etc/kubernetes/
    scp etcd2.yaml root@$host2:/etc/kubernetes/manifests/etcd.yaml
    scp kube-apiserver2.yaml root@$host2:/etc/kubernetes/manifests/kube-apiserver.yaml
    scp create-pki2.sh root@$host2:/root/create-pki.sh
}
# Adjust the per-host configuration in place.
# Substitute each master's own IP into its manifests and kubeconfig
# files, regenerate the PKI on the secondary masters, and wipe their
# stale etcd data directories so they join the new cluster cleanly.
# (The scraped original fused `rm -rf /var/lib/etcd` with the next ssh
# command on one line — a destructive bug fixed here. Remote file paths
# are moved inside the quoted ssh command; ssh concatenates its
# arguments anyway, so behavior is unchanged.)
kube::edit_file()
{
    # Local master: fill in its own IP and drop the old etcd data.
    sed -i "s/{{ip}}/$ip0/g" /etc/kubernetes/manifests/kube-apiserver.yaml
    rm -rf /var/lib/etcd

    # --- master1 ---
    ssh root@$host1 "sed -i \"s/{{ip}}/$ip1/g\" /etc/kubernetes/manifests/kube-apiserver.yaml"
    ssh root@$host1 "sed -i \"s/{{ip}}/$ip1/g\" /root/create-pki.sh"
    ssh root@$host1 "sed -i \"s/$ip0/$ip1/g\" /etc/kubernetes/admin.conf"
    ssh root@$host1 "sed -i \"s/$ip0/$ip1/g\" /etc/kubernetes/kubelet.conf"
    ssh root@$host1 "sed -i \"s/$ip0/$ip1/g\" /etc/kubernetes/controller-manager.conf"
    ssh root@$host1 "sed -i \"s/$ip0/$ip1/g\" /etc/kubernetes/scheduler.conf"
    ssh root@$host1 'cd /root && chmod +x create-pki.sh && ./create-pki.sh'
    ssh root@$host1 'rm -rf /var/lib/etcd'

    # --- master2 ---
    ssh root@$host2 "sed -i \"s/{{ip}}/$ip2/g\" /etc/kubernetes/manifests/kube-apiserver.yaml"
    ssh root@$host2 "sed -i \"s/{{ip}}/$ip2/g\" /root/create-pki.sh"
    ssh root@$host2 "sed -i \"s/$ip0/$ip2/g\" /etc/kubernetes/admin.conf"
    ssh root@$host2 "sed -i \"s/$ip0/$ip2/g\" /etc/kubernetes/kubelet.conf"
    ssh root@$host2 "sed -i \"s/$ip0/$ip2/g\" /etc/kubernetes/controller-manager.conf"
    ssh root@$host2 "sed -i \"s/$ip0/$ip2/g\" /etc/kubernetes/scheduler.conf"
    ssh root@$host2 'cd /root && chmod +x create-pki.sh && ./create-pki.sh'
    ssh root@$host2 'rm -rf /var/lib/etcd'
}
# Restart kubelet on all masters.
# Reload systemd units and restart kubelet on every master node
# (locally first, then on host2 and host1 over ssh).
kube::reset_kubelet()
{
    systemctl daemon-reload
    systemctl restart kubelet
    for node in "root@$host2" "root@$host1"; do
        ssh "$node" 'systemctl daemon-reload'
        ssh "$node" 'systemctl restart kubelet'
    done
}
# Restore the etcd data into the new cluster.
# Wait until all three etcd members report Healthy, then mirror the
# backed-up data from the temporary etcd_test container into the new
# cluster, and finally tear the backup container down.
# (The original relied on unquoted `echo $result` collapsing whitespace
# so a single-space grep pattern would match; replaced with explicit
# `grep -Eq` patterns that tolerate the column padding directly.)
kube::etcd_recover()
{
    # Poll until etcd-0/1/2 all show up as Healthy.
    while :
    do
        status=$(kubectl get componentstatuses)
        if echo "$status" | grep -Eq 'etcd-0 +Healthy +\{"health": "true"\}' &&
           echo "$status" | grep -Eq 'etcd-1 +Healthy +\{"health": "true"\}' &&
           echo "$status" | grep -Eq 'etcd-2 +Healthy +\{"health": "true"\}'
        then
            echo "etcd集群启动完毕"
            break
        else
            echo "等待etcd集群启动......"
        fi
        sleep 10
    done
    sleep 30
    echo '准备还原数据'
    # Mirror keys from the backup endpoint (:12379) into the live cluster (:2379).
    echo "export ETCDCTL_API=3&&etcdctl make-mirror $ip0:2379 --endpoints=$ip0:12379"
    docker exec etcd_test sh -c "export ETCDCTL_API=3&&etcdctl make-mirror $ip0:2379 --endpoints=$ip0:12379" &
    echo '等待还原数据.....'
    sleep 60   # give make-mirror time to copy everything
    docker stop etcd_test && docker rm etcd_test
}
# Finally switch kube-proxy to the load-balanced endpoint.
# Point kube-proxy at the load-balanced apiserver address instead of the
# first master's IP, then delete the flannel and kube-proxy pods so their
# DaemonSets recreate them with the new configuration.
kube::loadBalace()
{
    # In-cluster DNS name that load-balances across the apiservers.
    loadBalaceIp=kubernetes.default.svc
    kubectl get configmap/kube-proxy -n kube-system -o yaml > kube-proxy-configmap.yaml
    sed -i "s/$ip0/$loadBalaceIp/g" kube-proxy-configmap.yaml
    kubectl apply -f kube-proxy-configmap.yaml
    # Delete the flannel and kube-proxy pods so they are rebuilt automatically.
    kubectl delete pod -l app=flannel -n kube-system
    kubectl delete pod -l k8s-app=kube-proxy -n kube-system
}

# Orchestrate the whole master-HA setup end to end.
main()
{
    kube::etcd_backup
    kube::copy_file
    kube::edit_file
    kube::reset_kubelet
    kube::etcd_recover
    kube::loadBalace
    sleep 20
    echo "-------------------------------------------------------"
    echo "已经完成master HA"
    echo "-------------------------------------------------------"
    kubectl get pods -n kube-system -o wide
}

main "$@"

kubernetes master 高可用一键部署的更多相关文章

  1. 关于Kubernetes Master高可用的一些策略

    关于Kubernetes Master高可用的一些策略 Kubernetes高可用也许是完成了初步的技术评估,打算将生产环境迁移进Kubernetes集群之前普遍面临的问题. 为了减少因为服务器当机引 ...

  2. centos7.1使用kubeadm部署kubernetes 1.16.2的master高可用

    机器列表,配置域名解析 cat /etc/hosts192.168.200.210 k8s-master1192.168.200.211 k8s-master2192.168.200.212 k8s- ...

  3. kubernetes二进制高可用部署实战

    环境: 192.168.30.20 VIP(虚拟) 192.168.30.21 master1 192.168.30.22 master2 192.168.30.23 node1 192.168.30 ...

  4. 【葵花宝典】lvs+keepalived部署kubernetes(k8s)高可用集群

    一.部署环境 1.1 主机列表 主机名 Centos版本 ip docker version flannel version Keepalived version 主机配置 备注 lvs-keepal ...

  5. K8S集群Master高可用实践

    K8S集群Master高可用实践    https://blog.51cto.com/ylw6006/2164981 本文将在前文基础上介绍k8s集群的高可用实践,一般来讲,k8s集群高可用主要包含以 ...

  6. openstack pike 集群高可用 安装 部署 目录汇总

    # openstack pike 集群高可用 安装部署#安装环境 centos 7 史上最详细的openstack pike版 部署文档欢迎经验分享,欢迎笔记分享欢迎留言,或加QQ群663105353 ...

  7. Centos下SFTP双机高可用环境部署记录

    SFTP(SSH File Transfer Protocol),安全文件传送协议.有时也被称作 Secure File Transfer Protocol 或 SFTP.它和SCP的区别是它允许用户 ...

  8. Haproxy+Keepalived高可用环境部署梳理(主主和主从模式)

    Nginx.LVS.HAProxy 是目前使用最广泛的三种负载均衡软件,本人都在多个项目中实施过,通常会结合Keepalive做健康检查,实现故障转移的高可用功能. 1)在四层(tcp)实现负载均衡的 ...

  9. LVS+Keepalived 高可用环境部署记录(主主和主从模式)

    之前的文章介绍了LVS负载均衡-基础知识梳理, 下面记录下LVS+Keepalived高可用环境部署梳理(主主和主从模式)的操作流程: 一.LVS+Keepalived主从热备的高可用环境部署 1)环 ...

随机推荐

  1. TP3.2实例化复杂模型类

    1.表名:xxf_witkey_member_oauth M方法,直接实例化对象:M('member_oauth','xxf_witkey_'[,'db_config']); 具体解析:M方法三个参数 ...

  2. Pycharm中 import 引入同级文件失败问题

    Pycharm中 import 引入同级文件失败,如下所示:  “This inspection detects names that should resolve but don't. Due to ...

  3. 《JAVA与模式》之参考资料

    1.书籍 <JHead First 设计模式(中文版)> <JAVA与模式> <大话设计模式> 2.连接地址 http://blog.csdn.net/jason0 ...

  4. Python cos() 函数

    描述 cos() 返回x的弧度的余弦值. 语法 以下是 cos() 方法的语法: import math math.cos(x) 注意:cos()是不能直接访问的,需要导入 math 模块,然后通过 ...

  5. eclipse逆向生成实体类注解方式或者xml方式

    转载自:http://www.2cto.com/database/201501/372023.html http://blog.csdn.net/wangpeng047/article/details ...

  6. 成员函数的重载&amp;&amp;隐藏&amp;&amp;覆盖

    /* *成员函数的重载,覆盖,隐藏 *重载: *1.同样的范围(在同一个类中) *2.函数名同样 *3.參数不同 *4.virtualkeyword可有可无 *覆盖是指派生类覆盖基类的函数,特征是: ...

  7. log4net 存储到oracle 调试 Could not load type [log4net.Appender.OracleAppender]

    近期在弄webfrom oracle 调用 log4net 開始调试时不出数据,打开了log4net 自己的debug功能后发现: log4net: Logger [root] level set t ...

  8. java NIO中的buffer和channel

    缓冲区(Buffer):一,在 Java NIO 中负责数据的存取.缓冲区就是数组.用于存储不同数据类型的数据 根据数据类型不同(boolean 除外),提供了相应类型的缓冲区:ByteBufferC ...

  9. 改变mysql数据库用户的权限

    mysql> grant all on *.* to test@'%';Query OK, 0 rows affected (0.00 sec) mysql> flush privileg ...

  10. JAVA文件转换为Base64

    JAVA文件转换为Base64 import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream ...