一键安装基于dns的高可用k8s集群(3节点,etcd https)
在公司,使用dns切换,可能会比keepalived+haproxy更精简、更易维护。
毕竟,高可用只是偶尔切换,不是时时切换。
且dns解析在自己可控时,更不会影响k8s线上使用了。
(部分代码,由于担心太冗长,已使用xxx代替,这些xxx完全可以自己生成的)
使用步骤如下:
先在每个master上安装etcd,运行如下命令:
sh k8s.sh k8s.xxx.com.cn etcd
等每个master上的etcd安装好之后,运行如下命令:
sh k8s.sh k8s.xxx.com.cn master
最后,在每个node节点上,运行如下命令:
sh k8s.sh k8s.xxx.com.cn node
#! /usr/bin/env bash
set -e
set -u
set -x
# Keep the inherited PATH (the original `export PATH=$(echo $PATH)` was a
# no-op round-trip; exporting the variable directly is equivalent).
export PATH
# Usage:
#   sh k8s.sh k8s.xxx.com.cn etcd
#   sh k8s.sh k8s.xxx.com.cn master
#   sh k8s.sh k8s.xxx.com.cn node
# BUG FIX: validate the argument count BEFORE reading $1/$2 -- under
# `set -u` an unset positional parameter aborted the script before the
# usage message could ever be printed. Also list only the steps the
# dispatch case at the bottom actually implements.
if [ $# -ne 2 ]; then
  echo "wrong args."
  echo "usage $(basename $0) [K8S_DOMAIN] [etcd|etcd_reset|master|node]"
  exit 110
fi
K8S_DOMAIN=$1
OP_STEP=$2
# Per-cluster settings (must be edited for every new cluster);
# THIS_HOST must be one of HOST_1..HOST_3.
ETH=eth0
HOST_1=1.2.3.4
HOST_2=1.2.3.5
HOST_3=1.2.3.6
THIS_HOST=$(ip addr show ${ETH} |grep inet|grep -v inet6|awk '{print $2}'|awk -F '/' '{print $1}')
LOCAL_HOST=$(hostname)
# Lower-cased hostname, used in certificate SANs.
LOCAL_HOST_L=${LOCAL_HOST,,}
# Kubernetes version to install.
K8S_VER=v1.12.6
# Bare version string, used to detect an already-installed kubelet rpm.
KUBE_VER=${K8S_VER/v/}
# apiserver port.
K8S_API_PORT=6443
# Pre-generated bootstrap token used by `kubeadm join` (ttl is 0s below,
# so the token never expires and can be fixed here).
K8S_JOIN_TOKEN=xxxxxx.xxxxxxxxxabcdef
# Unprivileged account allowed to run kubectl.
General_user=k8s
# cfssl binaries used to mint certificates.
cs="./cfssl"
csj="./cfssljson"
# Where k8s expects its PKI material.
pki_dir="/etc/kubernetes/pki"
# Private registry mirror of k8s.gcr.io -- removes the need for internet access.
REGISTRY=harbor.demo.cn/3rd_part/k8s.gcr.io
# etcd version (ideally the 3.2.24 that k8s v1.12 expects -- TODO confirm).
ETCD_VERSION=3.2.18
# etcd ports; client traffic is TLS, peer traffic stays plain HTTP.
ETCD_CLI_PORT=2379
ETCD_CLU_PORT=2380
TOKEN=pa-k8s-etcd-token
CLUSTER_STATE=new
CLUSTER=${HOST_1}=http://${HOST_1}:${ETCD_CLU_PORT},${HOST_2}=http://${HOST_2}:${ETCD_CLU_PORT},${HOST_3}=http://${HOST_3}:${ETCD_CLU_PORT}
etcd_data_dir=/etcd/etcd-data
# Directory this script lives in.
CUR_DIR=$(cd $(dirname $0); pwd)
#判断eth0 IP是否在集群内
# Abort unless this machine's IP (THIS_HOST) is one of the three
# configured cluster hosts.
ip_in_cluster() {
  case "${THIS_HOST}" in
    "${HOST_1}" | "${HOST_2}" | "${HOST_3}")
      # IP belongs to the cluster -- nothing to do.
      ;;
    *)
      echo "ip not in the etcd cluster host."
      exit 110
      ;;
  esac
}
#每次全新生成文件
# Delete the file named by $1 if it exists, so callers can regenerate
# it from scratch.
function if_file_exist_del() {
  # BUG FIX: quote "$1" -- unquoted it word-splits and glob-expands,
  # breaking on paths with spaces or wildcard characters.
  if [ -e "$1" ]; then
    rm -f "$1"
  fi
}
#重置iptables
# Reset iptables to a clean state: ACCEPT policies on every built-in
# chain, then flush all rules and delete all user-defined chains in the
# filter, nat and mangle tables.
iptables_clear() {
  iptables -P INPUT ACCEPT
  iptables -P FORWARD ACCEPT
  iptables -P OUTPUT ACCEPT
  iptables -t nat -P PREROUTING ACCEPT
  iptables -t nat -P POSTROUTING ACCEPT
  iptables -t nat -P OUTPUT ACCEPT
  iptables -t mangle -P PREROUTING ACCEPT
  iptables -t mangle -P OUTPUT ACCEPT
  local tbl
  # Flush all rules per table (filter first, matching the original order).
  for tbl in filter nat mangle; do
    iptables -t "${tbl}" -F
  done
  # Then delete all user-defined chains per table.
  for tbl in filter nat mangle; do
    iptables -t "${tbl}" -X
  done
}
#服务器初始化,尽量标准化
# One-time node preparation: disable firewalld/SELinux/swap, set kernel
# parameters, load ipvs modules, create etcd/PKI directories, install
# the kube packages and start kubelet.
function system_init() {
  # Stop firewalld now and keep it off across reboots.
  systemctl stop firewalld.service
  systemctl disable firewalld.service
  # Reset iptables to an ACCEPT-all clean state.
  iptables_clear
  # Disable SELinux permanently (both config paths) and immediately.
  sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
  sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
  sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
  sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
  # setenforce fails if SELinux is already disabled -- tolerate that.
  set +e
  setenforce 0
  set -e
  # Disable swap: kubeadm/kubelet refuse to run with swap enabled.
  swapoff -a
  sed -i 's/.*swap.*/#&/' /etc/fstab
  # Kernel parameters required by k8s networking and ipvs.
  k8s_kernel_conf=/etc/sysctl.d/k8s.conf
  if_file_exist_del $k8s_kernel_conf
  cat<<EOF>$k8s_kernel_conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
vm.swappiness=0
EOF
  # Apply the settings.
  sysctl -p
  sysctl --system
  # Install helper tools and load br_netfilter plus the ipvs modules.
  yum install bridge-utils ipset ipvsadm sysstat libseccomp conntrack conntrack-tools socat -y
  modprobe br_netfilter
  modprobe -- ip_vs
  modprobe -- ipip
  modprobe -- tun
  modprobe -- ip_vs_rr
  modprobe -- ip_vs_wrr
  modprobe -- ip_vs_sh
  modprobe -- nf_conntrack_ipv4
  modprobe -- nf_conntrack_ipv6
  # Persist the module loads via rc.local (only append once).
  ipvs_no=$(cat /etc/rc.local|grep ip_vs|wc -l)
  if [ $ipvs_no -eq 0 ]; then
    echo "modprobe br_netfilter" >> /etc/rc.local
    echo "modprobe -- ip_vs" >> /etc/rc.local
    echo "modprobe -- ipip" >> /etc/rc.local
    echo "modprobe -- tun" >> /etc/rc.local
    echo "modprobe -- ip_vs_rr" >> /etc/rc.local
    echo "modprobe -- ip_vs_wrr" >> /etc/rc.local
    echo "modprobe -- ip_vs_sh" >> /etc/rc.local
    echo "modprobe -- nf_conntrack_ipv4" >> /etc/rc.local
    echo "modprobe -- nf_conntrack_ipv6" >> /etc/rc.local
  fi
  # Create and empty the etcd data and k8s PKI directories.
  mkdir -p ${pki_dir} && mkdir -p ${etcd_data_dir}
  # BUG FIX: the original ran `mkdir -p ${etcd_data_dir}/*`, which just
  # creates a literal '*' directory; the intent is to empty the data dir.
  rm -rf ${pki_dir}/* && rm -rf ${etcd_data_dir}/*
  # Use the portable ':' owner:group separator instead of deprecated '.'.
  chown -R docker:docker ${pki_dir} ${etcd_data_dir}
  chmod -R 755 ${pki_dir} ${etcd_data_dir}
  # Install kube packages, or just reset if the right version is present.
  kube_version=$(rpm -qa|grep kubelet)
  kube_version=${kube_version:=None}
  if [[ ${kube_version} =~ ${KUBE_VER} ]]; then
    kubeadm reset -f
  else
    # Removal may fail when packages are absent -- tolerate that.
    set +e
    yum remove kubeadm -y
    yum remove kubectl -y
    yum remove kubelet -y
    set -e
    yum localinstall *.rpm -y
    tar xf cni-plugins-amd64-v0.7.5.tgz -C /opt/cni/bin
    /bin/cp /usr/bin/kube* /usr/local/bin/
  fi
  # Allow the docker user to run the needed service/kube commands via sudo.
  k8s_sudoers_conf=/etc/sudoers.d/k8s_sudoers
  if_file_exist_del $k8s_sudoers_conf
  # BUG FIX: several original entries carried a stray trailing "'" which
  # would corrupt the sudoers file.
  cat<<EOF>$k8s_sudoers_conf
docker ALL = (root) NOPASSWD:/bin/systemctl restart docker
docker ALL = (root) NOPASSWD:/bin/systemctl reload docker
docker ALL = (root) NOPASSWD:/bin/systemctl daemon-reload
docker ALL = (root) NOPASSWD:/bin/systemctl start kubelet
docker ALL = (root) NOPASSWD:/bin/systemctl stop kubelet
docker ALL = (root) NOPASSWD:/bin/systemctl restart kubelet
docker ALL = (root) NOPASSWD:/bin/systemctl status kubelet
docker ALL = (root) NOPASSWD:/usr/local/bin/kubeadm
docker ALL = (root) NOPASSWD:/usr/local/bin/kubectl
EOF
  # Remove virtual interfaces left over from any previous k8s install.
  ifconfig -a|grep -vE '(^[[:space:]]|^$)'|grep -E '(veth|flannel|kube|cni|dummy)'|awk -F ":" '{print $1}'|awk '{for(i=1;i<=NF;i++){print "ip link set " $i " down";}}'|sh
  ifconfig -a|grep -vE '(^[[:space:]]|^$)'|grep -E '(veth|flannel|kube|cni|dummy)'|awk -F ":" '{print $1}'|awk '{for(i=1;i<=NF;i++){print "ip link delete " $i;}}'|sh
  modprobe -r ipip
  modprobe -r ip_gre
  modprobe ipip
  modprobe ip_gre
  # Point kubelet's pause image at the private registry mirror.
  kubelet_sysconfig=/etc/sysconfig/kubelet
  if_file_exist_del $kubelet_sysconfig
  # CONSISTENCY FIX: use ${REGISTRY} instead of a hardcoded second
  # registry host that contradicted the constant at the top of the file.
  cat >$kubelet_sysconfig<<EOF
KUBELET_EXTRA_ARGS="--pod-infra-container-image=${REGISTRY}/pause:3.1"
EOF
  # Enable and (re)start kubelet.
  systemctl daemon-reload
  systemctl enable kubelet && systemctl restart kubelet
  echo "================================="
  echo "k8s init system success"
}
#将相应的文件写入shell,摆脱对太多yaml文件或key文件的依赖
# Write the service-account public key (placeholder "xxx") to ./saen.pub.
saPub() {
  printf '%s\n' 'xxx' > saen.pub
}
# Write the service-account private key (placeholder "xxx") to ./saen.key.
saKey() {
  printf '%s\n' 'xxx' > saen.key
}
# Write the cluster CA certificate (placeholder "xxx") to ./caen.crt.
caCrt() {
  printf '%s\n' 'xxx' > caen.crt
}
# Write the cluster CA private key (placeholder "xxx") to ./caen.key.
caKey() {
  printf '%s\n' 'xxx' > caen.key
}
# Write the front-proxy CA certificate (placeholder body plus the PEM
# trailer left over from the original paste) to ./front-proxy-ca.crt.
frontProxyCrt() {
  printf '%s\n' 'xxx' '-----END CERTIFICATE-----' > front-proxy-ca.crt
}
# Write the front-proxy CA private key (placeholder "xxx") to ./front-proxy-ca.key.
frontProxyKey() {
  printf '%s\n' 'xxx' > front-proxy-ca.key
}
# Write the cfssl CSR for generating a self-signed kubernetes CA.
# Currently unused: the `$cs gencert -initca` call in cert_init is
# commented out in favor of the pre-baked CA material from caCrt/caKey.
function caCsr() {
# 438000h is roughly 50 years, to avoid in-cluster certificate expiry.
cat<<EOF>ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "438000h"
}
}
EOF
}
# Write the cfssl signing policy used by every gencert call in cert_init.
# Three profiles: "server" (server auth only), "client" (client auth
# only) and "peer" (both, for etcd member-to-member TLS). All use a
# 438000h (~50 year) expiry.
function caConfig() {
cat<<EOF>ca-config.json
{
"signing": {
"default": {
"expiry": "438000h"
},
"profiles": {
"server": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "438000h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
}
# Write the cfssl CSR for the etcd *server* certificate.
# The SAN list covers loopback, this host's name, the cluster DNS name
# and all three master IPs, so the same cert works on every member.
function etcdServer() {
cat<<EOF>etcd-server.json
{
"CN": "etcdServer",
"hosts": [
"127.0.0.1",
"localhost",
"${LOCAL_HOST_L}",
"${K8S_DOMAIN}",
"${HOST_1}",
"${HOST_2}",
"${HOST_3}"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"C": "CN",
"L": "ShangHai",
"ST": "ShangHai"
}
]
}
EOF
}
# Write the cfssl CSR for the etcd *peer* certificate (signed with the
# "peer" profile, i.e. both server and client auth). Same SAN list as
# the server cert so any member can use it.
function etcdPeer() {
cat<<EOF>etcd-peer.json
{
"CN": "etcdPeer",
"hosts": [
"127.0.0.1",
"localhost",
"${LOCAL_HOST_L}",
"${K8S_DOMAIN}",
"${HOST_1}",
"${HOST_2}",
"${HOST_3}"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"C": "CN",
"L": "ShangHai",
"ST": "ShangHai"
}
]
}
EOF
}
# Write the cfssl CSR for the etcd *client* certificate, used by the
# apiserver (apiserver-etcd-client) and the etcd healthcheck. Client
# certs need no "hosts" (SAN) list.
function etcdClient() {
cat<<EOF>etcd-client.json
{
"CN": "etcdClient",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"C": "CN",
"L": "ShangHai",
"ST": "ShangHai"
}
]
}
EOF
}
# Write the cfssl CSR for the front-proxy client certificate. It is
# signed by the *separate* front-proxy CA (must not be the cluster CA).
function frontProxyClient() {
cat<<EOF>front-proxy-client.json
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
}
# Write the cfssl CSR for the kube-apiserver serving certificate.
# SANs cover the in-cluster service names, the default service IP
# (10.96.0.1, first address of serviceSubnet), this host, the HA DNS
# name and all three master IPs.
function apiServer() {
cat<<EOF>apiserver.json
{
"CN": "kube-apiserver",
"hosts": [
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"10.96.0.1",
"${LOCAL_HOST_L}",
"${K8S_DOMAIN}",
"${HOST_1}",
"${HOST_2}",
"${HOST_3}"
],
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
}
# Write the cfssl CSR for the certificate the apiserver presents when
# connecting to kubelets; O=system:masters grants it full RBAC rights.
function apiKubClient() {
cat<<EOF>apiserver-kubelet-client.json
{
"CN": "kube-apiserver-kubelet-client",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "system:masters"
}
]
}
EOF
}
#########################################
#########################################
# Generate the kubeadm config file. This runs at script load time (for
# every OP_STEP); the file is consumed by master_install. etcd is
# "external" with TLS client certs laid down by cert_init.
kubeadm_conf=kubeadm-config.yaml
if_file_exist_del $kubeadm_conf
cat << EOF >$kubeadm_conf
apiVersion: kubeadm.k8s.io/v1alpha3
kind: InitConfiguration
apiEndpoint:
advertiseAddress: ${THIS_HOST}
bindPort: ${K8S_API_PORT}
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: ${K8S_JOIN_TOKEN}
ttl: 0s
usages:
- signing
- authentication
---
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
etcd:
external:
endpoints:
- https://${HOST_1}:${ETCD_CLI_PORT}
- https://${HOST_2}:${ETCD_CLI_PORT}
- https://${HOST_3}:${ETCD_CLI_PORT}
caFile: ${pki_dir}/etcd/ca.crt
certFile: ${pki_dir}/apiserver-etcd-client.crt
keyFile: ${pki_dir}/apiserver-etcd-client.key
imageRepository: ${REGISTRY}
kubernetesVersion: ${K8S_VER}
#controlPlaneEndpoint: ${K8S_DOMAIN}:${K8S_API_PORT}
networking:
podSubnet: 10.244.0.0/16
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
EOF
# Generate the RBAC objects that let nodes have their serving
# certificates auto-approved (node cert auto-renewal).
auto_cet_server=node-auto-cert-server.yaml
if_file_exist_del $auto_cet_server
cat << EOF >$auto_cet_server
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/selfnodeserver
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubeadm:node-autoapprove-certificate-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
EOF
# Generate the flannel CNI manifest.
flannel=flannel.yaml
if_file_exist_del $flannel
# The manifest is inlined here (placeholder "xxx") so the script does
# not depend on external yaml/key files.
cat << EOF >$flannel
xxx
EOF
#初始化证书,记得延长证书过期时间,及api的ca一定不能和front proxy的ca相同。
# Generate all certificates for etcd and the k8s control plane and copy
# them into ${pki_dir}. Note: the expiry is extended via ca-config.json
# (438000h ~= 50 years) and the API-server CA must differ from the
# front-proxy CA.
function cert_init() {
  # caCsr
  # Clean leftovers from a previous run. BUG FIX: `rm -f` never fails,
  # so no guard is needed -- the original `rm *.csr&&rm *.json&&...`
  # chain aborted under `set -e` on a fresh machine (no such files),
  # and it was wrapped in `set +x` (trace off), not `set +e`.
  rm -f *.csr *.json *.crt *.key *.pem *.pub
  saPub
  saKey
  caCrt
  caKey
  frontProxyCrt
  frontProxyKey
  caConfig
  etcdServer
  etcdPeer
  etcdClient
  frontProxyClient
  apiServer
  apiKubClient
  # BUG FIX: the writer functions emit saen.*/caen.* file names, but
  # every command below (and the final copies) expects sa.*/ca.* --
  # rename so the cfssl calls can actually find the CA material.
  mv saen.pub sa.pub && mv saen.key sa.key
  mv caen.crt ca.crt && mv caen.key ca.key
  # $cs gencert -initca ca-csr.json |$csj -bare ca
  # mv ca.pem ca.crt&&mv ca-key.pem ca.key
  $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=server etcd-server.json|$csj -bare server
  $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=client etcd-client.json|$csj -bare client
  $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=peer etcd-peer.json|$csj -bare peer
  $cs gencert -ca=front-proxy-ca.crt -ca-key=front-proxy-ca.key -config=ca-config.json -profile=client front-proxy-client.json|$csj -bare front-proxy-client
  $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=server apiserver.json|$csj -bare apiserver
  $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=client apiserver-kubelet-client.json|$csj -bare apiserver-kubelet-client
  mkdir -p $pki_dir/etcd
  # Install everything under ${pki_dir}. BUG FIX: two of the original
  # copy lines joined the pair with a single '&' (background job)
  # instead of '&&'; all pairs below use '&&'.
  cp server.pem $pki_dir/etcd/server.crt&&cp server-key.pem $pki_dir/etcd/server.key
  cp client.pem $pki_dir/etcd/healthcheck-client.crt&&cp client-key.pem $pki_dir/etcd/healthcheck-client.key
  cp client.pem $pki_dir/apiserver-etcd-client.crt&&cp client-key.pem $pki_dir/apiserver-etcd-client.key
  cp peer.pem $pki_dir/etcd/peer.crt&&cp peer-key.pem $pki_dir/etcd/peer.key
  cp ca.crt $pki_dir/etcd/ca.crt&&cp ca.key $pki_dir/etcd/ca.key
  cp front-proxy-ca.crt $pki_dir/front-proxy-ca.crt&&cp front-proxy-ca.key $pki_dir/front-proxy-ca.key
  cp front-proxy-client.pem $pki_dir/front-proxy-client.crt&&cp front-proxy-client-key.pem $pki_dir/front-proxy-client.key
  cp ca.crt $pki_dir/ca.crt&&cp ca.key $pki_dir/ca.key
  cp apiserver.pem $pki_dir/apiserver.crt&&cp apiserver-key.pem $pki_dir/apiserver.key
  cp apiserver-kubelet-client.pem $pki_dir/apiserver-kubelet-client.crt&&cp apiserver-kubelet-client-key.pem $pki_dir/apiserver-kubelet-client.key
  cp sa.pub $pki_dir/sa.pub&&cp sa.key $pki_dir/sa.key
  # Remove all generated material from the working directory.
  rm -f *.csr *.json *.crt *.key *.pem *.pub
  echo "================================="
  echo "all k8s cert(include etcd) create success"
}
# etcd容器化安装,最好需要在所有master节点上先安装好etcd
# Run etcd in a container on this master. Client traffic is TLS; peer
# traffic stays plain HTTP (see CLUSTER at the top of the script).
# Should be run on all three masters before the "master" step.
function etcd_install() {
  # Best-effort cleanup of any previous etcd container and its data.
  set +e
  docker stop etcd && docker rm etcd
  rm -rf ${etcd_data_dir}/*
  systemctl restart docker
  set -e
  docker run \
    -d \
    -p ${ETCD_CLI_PORT}:${ETCD_CLI_PORT} \
    -p ${ETCD_CLU_PORT}:${ETCD_CLU_PORT} \
    --volume=${etcd_data_dir}:${etcd_data_dir} \
    --volume=${pki_dir}:${pki_dir} \
    --name etcd ${REGISTRY}/etcd-amd64:${ETCD_VERSION} \
    /usr/local/bin/etcd \
    --data-dir=${etcd_data_dir} --name ${THIS_HOST} \
    --initial-advertise-peer-urls http://${THIS_HOST}:${ETCD_CLU_PORT} \
    --listen-peer-urls http://0.0.0.0:${ETCD_CLU_PORT} \
    --advertise-client-urls https://${THIS_HOST}:${ETCD_CLI_PORT} \
    --listen-client-urls https://0.0.0.0:${ETCD_CLI_PORT} \
    --initial-cluster ${CLUSTER} \
    --initial-cluster-state ${CLUSTER_STATE} \
    --initial-cluster-token ${TOKEN} \
    --cert-file=${pki_dir}/etcd/server.crt \
    --key-file=${pki_dir}/etcd/server.key \
    --trusted-ca-file=${pki_dir}/etcd/ca.crt
  # BUG FIX above: --data-dir was /etcd-data, a path *inside* the
  # container that is not host-mounted (only ${etcd_data_dir} is), so
  # etcd data vanished whenever the container was removed and
  # etcd_reset wiped an empty host directory. Point it at the mounted
  # ${etcd_data_dir} instead.
  echo "================================="
  echo "etcd start success"
}
# Tear down the local etcd container and wipe its data.
etcd_reset() {
  # Best-effort: each step may fail (container absent, dir empty), so
  # failures are ignored per command instead of toggling `set -e`.
  docker stop etcd || true
  rm -rf ${etcd_data_dir}/* || true
  docker rm etcd || true
}
#master安装及附带操作
# Install the control plane on this master, set up kubectl configs for
# root and the operator account, and deploy the cluster add-ons.
function master_install(){
  # Clear stale ipvs rules left by any previous kube-proxy.
  ipvsadm -C
  kubeadm init --config $kubeadm_conf
  # kubectl config for root.
  mkdir -p $HOME/.kube
  \cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
  chown $(id -u):$(id -g) $HOME/.kube/config
  # kubectl config for the unprivileged operator account.
  # BUG FIX: the original `cat /etc/passwd | grep -e ^${General_user}`
  # prefix-matched, so e.g. "k8sadmin" could shadow "k8s" and yield the
  # wrong home directory; getent looks up the exact account name.
  General_user_HOME=$(getent passwd "${General_user}" | awk -F: '{print $6}')
  mkdir -p ${General_user_HOME}/.kube
  \cp -f /etc/kubernetes/admin.conf ${General_user_HOME}/.kube/config
  chown -R $(id -u ${General_user}):$(id -g ${General_user}) ${General_user_HOME}/.kube
  chown $(id -u ${General_user}):$(id -g ${General_user}) ${General_user_HOME}/.kube/config
  # Apply the node-cert auto-approve RBAC and the flannel CNI, then
  # remove the generated manifests.
  kubectl apply -f $auto_cet_server
  kubectl apply -f $flannel
  rm $kubeadm_conf
  rm $auto_cet_server
  rm $flannel
  echo "================================="
  echo "master install success(include flannel and node cert auto approv)"
}
#node节点加入命令,由于设置了token为0s,所以token可以固定下来。
# Prepare this machine and join it to the cluster through the HA DNS
# name. The bootstrap token has ttl 0s (never expires), so it can be
# fixed in K8S_JOIN_TOKEN at the top of the script.
function node_join(){
system_init
# NOTE(review): --discovery-token-unsafe-skip-ca-verification skips
# validating the API server's CA during discovery; acceptable only on a
# trusted network -- otherwise prefer --discovery-token-ca-cert-hash.
kubeadm join ${K8S_DOMAIN}:${K8S_API_PORT} --token ${K8S_JOIN_TOKEN} --discovery-token-unsafe-skip-ca-verification
echo "================================="
echo "node join success"
}
#命令shell选项,产生分叉
# Dispatch on the requested step.
case "${OP_STEP}" in
  "etcd")
    ip_in_cluster
    system_init
    cert_init
    etcd_install
    ;;
  "etcd_reset")
    ip_in_cluster
    etcd_reset
    ;;
  "master")
    ip_in_cluster
    master_install
    ;;
  "node")
    node_join
    ;;
  *)
    # BUG FIX: list every implemented step (the original omitted
    # etcd_reset) and exit non-zero on an unknown step instead of
    # silently succeeding with status 0.
    echo "usage $(basename $0) [K8S_DOMAIN] [etcd|etcd_reset|master|node]"
    exit 110
    ;;
esac
一键安装基于dns的高可用k8s集群(3节点,etcd https)的更多相关文章
- kubespray -- 快速部署高可用k8s集群 + 扩容节点 scale.yaml
主机 系统版本 配置 ip Mater.Node,ansible CentOS 7.2 4 ...
- 企业运维实践-还不会部署高可用的kubernetes集群?使用kubeadm方式安装高可用k8s集群v1.23.7
关注「WeiyiGeek」公众号 设为「特别关注」每天带你玩转网络安全运维.应用开发.物联网IOT学习! 希望各位看友[关注.点赞.评论.收藏.投币],助力每一个梦想. 文章目录: 0x00 前言简述 ...
- 使用kubeadm部署一套高可用k8s集群
使用kubeadm部署一套高可用k8s集群 有疑问的地方可以看官方文档 准备环境 我的机器如下, 系统为ubuntu20.04, kubernetes版本1.21.0 hostname IP 硬件配置 ...
- 使用Docker Compose部署基于Sentinel的高可用Redis集群
使用Docker Compose部署基于Sentinel的高可用Redis集群 https://yq.aliyun.com/articles/57953 Docker系列之(五):使用Docker C ...
- 基于LevelDB的高可用ActiveMQ集群
基于LevelDB的高可用ActiveMQ集群 http://donald-draper.iteye.com/blog/2347913
- Kubeadm部署高可用K8S集群
一 基础环境 1.1 资源 节点名称 ip地址 VIP 192.168.12.150 master01 192.168.12.48 master02 192.168.12.242 master03 1 ...
- centos7使用kubeadm配置高可用k8s集群
CountingStars_ 关注 2018.08.12 09:06* 字数 464 阅读 88评论 0喜欢 0 简介 使用kubeadm配置多master节点,实现高可用. 安装 实验环境说明 实验 ...
- Rancher 2.2.2 - HA 部署高可用k8s集群
对于生产环境,需以高可用的配置安装 Rancher,确保用户始终可以访问 Rancher Server.当安装在Kubernetes集群中时,Rancher将与集群的 etcd 集成,并利用Kuber ...
- kubeadm部署高可用K8S集群(v1.14.2)
1. 简介 测试环境Kubernetes 1.14.2版本高可用搭建文档,搭建方式为kubeadm 2. 服务器版本和架构信息 系统版本:CentOS Linux release 7.6.1810 ( ...
随机推荐
- 用ethtool 命令解决Linux 网卡丢包【转】
转自:https://blog.csdn.net/chengxuyuanyonghu/article/details/73739516 生产中有一台Linux设备并发比较大,droped包比较多,尤其 ...
- Linux中error while loading shared libraries错误解决办法
默认情况下,编译器只会使用/lib和/usr/lib这两个目录下的库文件,通常通过源码包进行安装时,如果不指定--prefix,会将库安装在/usr/local/lib目录下:当运行程序需要链接动态库 ...
- jqueryui插件slider的简单使用
<!DOCTYPE html> <html> <head> <title>slider</title> <meta charset=& ...
- 【原创】大叔经验分享(31)CM金丝雀Canary报错
CM金丝雀Canary报错 1 HDFS 金丝雀Canary 测试无法为 /tmp/.cloudera_health_monitoring_canary_files 创建父目录. 2 Hive Met ...
- jqgrid获取数据条数
function getResult() {//获取结果结合的函数,可以通过此函数获取查询后匹配的所有数据行. var o = jQuery("#jqgrid"); ...
- tomcat占用cpu比较多
在Linux中当Tomcat启动后,我们只是去查看应用是否能够正常访问来判断Tomcat启动是否正常.一般情况下这样看是没有问题的,但是有时候我们会发现当Tomcat使用了一段时间后,开始出现CPU或 ...
- JavaScript 删除某个数组中指定的对象
返回对象在数组中的下标: _arr表示一个Array数组,里面包括了很多的对象如下图: _obj表示某一个数组对象 function getIndex (_arr,_obj) { var le ...
- 饿了么vue-cli3.0+cube-ui笔记
1.目录结构 模板文件是public里的index.html,运行项目的时候,会引用src/main.js(入口文件) 详细文档在这里:https://cli.vuejs.org/zh/config/ ...
- MySQL多表查询 三表查询 连接查询的套路
多表查询 * 当我们的一条记录 分散不同的表中时,就需要进行多表查询 例如 一对一 一对多 多对多 1.笛卡尔积查询 意思是将两个表中的所有数据 全部关联在一起 例如 a表 有2条 b表有3条 ...
- Django框架之第三篇模板语法(重要!!!)
一.什么是模板? 只要是在html里面有模板语法就不是html文件了,这样的文件就叫做模板. 二.模板语法分类 一.模板语法之变量:语法为 {{ }}: 在 Django 模板中遍历复杂数据结构的关键 ...