Preparation before setup:

Hostname configuration

cat >/etc/hosts<<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.19.61 k8s-api.virtual.local k8s-api
192.168.19.61 etcd-1
192.168.19.62 etcd-2
192.168.19.63 etcd-3
EOF

Environment variable configuration:

[root@k8s-api ~]# cat env.sh
# Token used for TLS bootstrapping; generate one with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="2f84230ea8fd1994946a994c7b458559"

# Use otherwise-unused address ranges for the service and pod networks
# Service CIDR: unroutable before deployment; reachable inside the cluster as IP:Port afterwards
SERVICE_CIDR="10.254.0.0/16"
# Pod CIDR (Cluster CIDR): unroutable before deployment; routable afterwards (flanneld takes care of this)
CLUSTER_CIDR="172.30.0.0/16"

# NodePort range
NODE_PORT_RANGE="30000-32766"

# etcd cluster endpoints
ETCD_ENDPOINTS="https://192.168.19.61:2379,https://192.168.19.62:2379,https://192.168.19.63:2379"

# etcd prefix for the flanneld network configuration
FLANNEL_ETCD_PREFIX="/kubernetes/network"

# kubernetes service IP (pre-allocated; normally the first IP of SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"

# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.254.0.2"

# cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."

# master API Server address
MASTER_URL="k8s-api.virtual.local"

Passwordless SSH between hosts:

Run this on every host, and copy the key to every other host:

ssh-keygen -t rsa

ssh-copy-id -i ~/.ssh/id_rsa.pub root@etcd-3

for host in $(cat remote-hosts)
do
#ssh $host "yum -y install wget"
#ssh $host "mkdir -p /etc/kubernetes/ssl"
#ssh $host "mkdir -p /usr/k8s/bin"
#scp env.sh $host:/usr/k8s/bin
#ssh $host "chmod +x /usr/k8s/bin/env.sh"
#ssh $host 'echo "export PATH=/usr/k8s/bin:\$PATH" >> /etc/rc.local'
scp /root/ssl/ca* $host:/etc/kubernetes/ssl
#scp token.csv $host:/etc/kubernetes/
#scp kubernetes-server-linux-amd64.tar.gz $host:kubernetes-server-linux-amd64.tar.gz
#scp /etc/kubernetes/*.kubeconfig $host:/etc/kubernetes/
#ssh $host "yum -y install etcd"
#ssh $host "mkdir -p /opt/etcd"
#ssh $host "chmod -R 777 /opt/etcd"
#ssh $host "yum -y install ntpdate;ntpdate cn.ntp.org.cn;ln -sf /usr/share/zoneinfo/Asia/Chongqing /etc/localtime"
#ssh $host "chmod -R 775 /etc/kubernetes/"
#ssh $host "yum install -y flannel"
#ssh $host "yum install docker -y"
done
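The loop above reads its targets from a remote-hosts file, one host per line. The file itself is not shown in this log, but it would presumably look like:

cat > remote-hosts <<EOF
etcd-2
etcd-3
EOF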

Create the CA certificate and key (run on every machine)

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
sudo mv cfssl_linux-amd64 /usr/k8s/bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
sudo mv cfssljson_linux-amd64 /usr/k8s/bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
sudo mv cfssl-certinfo_linux-amd64 /usr/k8s/bin/cfssl-certinfo

export PATH=/usr/k8s/bin:$PATH
mkdir ssl && cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json

cat >ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
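The step that actually generates the CA from ca-csr.json is missing from this log; it would presumably be:

cfssl gencert -initca ca-csr.json | cfssljson -bare ca   # produces ca.pem, ca-key.pem, ca.csr
sudo mkdir -p /etc/kubernetes/ssl
sudo cp ca* /etc/kubernetes/ssl    # ca* also matches ca-config.json, which later gencert calls reference there

This lines up with the scp /root/ssl/ca* distribution step in the loop above.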

export NODE_NAME=etcd01

export NODE_IP=192.168.19.61

export NODE_IPS="192.168.19.61 192.168.19.62 192.168.19.63"

export ETCD_NODES=etcd01=https://192.168.19.61:2380,etcd02=https://192.168.19.62:2380,etcd03=https://192.168.19.63:2380
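The etcd.service file moved into place below never appears in this part of the log. A sketch built from the exports above and from the per-node unit files near the end of this log (the binary path, certificate paths, and the etcd user are assumptions carried over from there):

cat > etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=${NODE_NAME} \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://${NODE_IP}:2380 \\
  --listen-peer-urls=https://${NODE_IP}:2380 \\
  --listen-client-urls=https://${NODE_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://${NODE_IP}:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=${ETCD_NODES} \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF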

sudo mv etcd.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable etcd

sudo systemctl restart etcd

sudo systemctl status etcd
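Once all three members are up, cluster health can be checked from any node; a sketch reusing the certificate paths of the health check near the end of this log:

etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  cluster-health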


ntpdate cn.ntp.org.cn
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Chongqing /etc/localtime

tar -xzvf kubernetes-client-linux-amd64.tar.gz

sudo cp kubernetes/client/bin/kube* /usr/k8s/bin/

sudo chmod a+x /usr/k8s/bin/kube*

export PATH=/usr/k8s/bin:$PATH


export NODE_IP=192.168.19.65

source /usr/k8s/bin/env.sh

cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

sudo mkdir -p /etc/flanneld/ssl

sudo mv flanneld*.pem /etc/flanneld/ssl

etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'

mkdir flannel

tar -xzvf flannel-v0.9.0-linux-amd64.tar.gz -C flannel

sudo cp flannel/{flanneld,mk-docker-opts.sh} /usr/k8s/bin

cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/k8s/bin/flanneld \\
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
  -etcd-certfile=/etc/flanneld/ssl/flanneld.pem \\
  -etcd-keyfile=/etc/flanneld/ssl/flanneld-key.pem \\
  -etcd-endpoints=${ETCD_ENDPOINTS} \\
  -etcd-prefix=${FLANNEL_ETCD_PREFIX}
ExecStartPost=/usr/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
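mk-docker-opts.sh turns the subnet flanneld leased into DOCKER_NETWORK_OPTIONS inside /run/flannel/docker; docker only picks it up if its own unit reads that file. An assumed [Service] fragment for docker.service (not shown in this log):

EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS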

sudo cp flanneld.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable flanneld

sudo systemctl start flanneld

systemctl status flanneld

ip addr

etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/config

etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  ls ${FLANNEL_ETCD_PREFIX}/subnets

etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/flanneld/ssl/flanneld.pem \
  --key-file=/etc/flanneld/ssl/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/subnets/172.30.66.0-24


export NODE_IP=192.168.19.63

source /usr/k8s/bin/env.sh

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

sudo cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /usr/k8s/bin/

cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "${NODE_IP}",
    "${MASTER_URL}",
    "${CLUSTER_KUBERNETES_SVC_IP}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
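The hosts list must cover every name and IP the apiserver certificate will be served under: the node IP, the load-balanced ${MASTER_URL}, the cluster service IP (the first address of SERVICE_CIDR), and the in-cluster DNS names of the kubernetes service; a client connecting by any name missing here would fail TLS verification.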

cat kubernetes-csr.json

cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

ls kubernetes*

sudo mkdir -p /etc/kubernetes/ssl/

sudo mv kubernetes*.pem /etc/kubernetes/ssl/

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

cat token.csv

sudo mv token.csv /etc/kubernetes/
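Each line of token.csv has the form token,user,uid,"group[,group,...]". kube-apiserver loads the file via --token-auth-file below, and kubelets present this token as the kubelet-bootstrap user during TLS bootstrapping.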

cat > kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/k8s/bin/kube-apiserver \\
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --advertise-address=${NODE_IP} \\
  --bind-address=0.0.0.0 \\
  --insecure-bind-address=${NODE_IP} \\
  --authorization-mode=Node,RBAC \\
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \\
  --kubelet-https=true \\
  --experimental-bootstrap-token-auth \\
  --token-auth-file=/etc/kubernetes/token.csv \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --service-node-port-range=${NODE_PORT_RANGE} \\
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \\
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --etcd-servers=${ETCD_ENDPOINTS} \\
  --enable-swagger-ui=true \\
  --allow-privileged=true \\
  --apiserver-count=2 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/lib/audit.log \\
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  --event-ttl=1h \\
  --logtostderr=true \\
  --v=6
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat > /etc/kubernetes/audit-policy.yaml << EOF
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]
  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]
  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]
  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"
  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]
  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]
  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.
  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
EOF

sudo cp kube-apiserver.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable kube-apiserver

sudo systemctl start kube-apiserver

sudo systemctl status kube-apiserver

cat > kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/k8s/bin/kube-controller-manager \\
  --address=127.0.0.1 \\
  --master=http://${MASTER_URL}:8080 \\
  --allocate-node-cidrs=true \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --cluster-cidr=${CLUSTER_CIDR} \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --leader-elect=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

sudo cp kube-controller-manager.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable kube-controller-manager

sudo systemctl start kube-controller-manager

sudo systemctl status kube-controller-manager

cat > kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/k8s/bin/kube-scheduler \\
  --address=127.0.0.1 \\
  --master=http://${MASTER_URL}:8080 \\
  --leader-elect=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

sudo cp kube-scheduler.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable kube-scheduler

sudo systemctl start kube-scheduler

sudo systemctl status kube-scheduler

kubectl get componentstatuses
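If the three control-plane components came up cleanly, the output should resemble the following (illustrative):

NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}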

sudo systemctl start haproxy

sudo systemctl enable haproxy

sudo systemctl status haproxy

systemctl start keepalived

systemctl enable keepalived
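haproxy and keepalived are started here, but their configuration files are not part of this log. Presumably haproxy load-balances TCP 6443 across the two apiservers (matching --apiserver-count=2 above) behind the address that k8s-api.virtual.local resolves to, with keepalived managing that virtual IP. An illustrative /etc/haproxy/haproxy.cfg fragment (the backend addresses are assumptions):

listen kube-api
    bind 0.0.0.0:6443         # front port for the API server VIP
    mode tcp                  # pass TLS through without terminating it
    balance roundrobin
    server master1 192.168.19.61:6443 check
    server master2 192.168.19.63:6443 check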

sudo systemctl daemon-reload

sudo systemctl stop firewalld

sudo systemctl disable firewalld

sudo iptables -F && sudo iptables -X && sudo iptables -F -t nat && sudo iptables -X -t nat

sudo systemctl enable docker

sudo systemctl start docker

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

tar -xzvf  kubernetes-src.tar.gz

sudo cp -r ./server/bin/{kube-proxy,kubelet} /usr/k8s/bin/

sudo cp kubelet.service /etc/systemd/system/kubelet.service

sudo systemctl daemon-reload

sudo systemctl enable kubelet

sudo systemctl start kubelet

systemctl status kubelet
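With TLS bootstrapping, the kubelet first requests a client certificate, and the node only registers after that CSR is approved on the master. The approval step is not in this log; it would presumably be:

kubectl get csr                           # the new node's request shows up as Pending
kubectl certificate approve <csr-name>    # <csr-name> copied from the previous output
kubectl get nodes                         # the node should appear shortly afterwards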

sudo cp kube-proxy.service /etc/systemd/system/

sudo systemctl daemon-reload

sudo systemctl enable kube-proxy

sudo systemctl start kube-proxy

systemctl status kube-proxy


What follows is an alternative record of a similar deployment on the same three nodes, using distribution packages (etcd and flannel from yum) and a CA named panjb-k8s, with binaries under /usr/local/bin and /usr/bin.

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
sudo mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

mkdir /root/ssl
cd /root/ssl
cfssl print-defaults csr > ca-csr.json
# Edit the CA signing request as follows:
tee ca-csr.json <<-'EOF'
{
  "CN": "panjb-k8s",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "SiChuan",
      "L": "chengdu",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca   # generate the CA certificate and private key
$ ls
ca.csr  ca-csr.json  ca-key.pem  ca.pem

cfssl print-defaults config > ca-config.json
tee ca-config.json <<-'EOF'
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "panjb-k8s": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

tee kubernetes-csr.json <<-'EOF'
{
  "CN": "panjb-k8s",
  "hosts": [
    "127.0.0.1",
    "192.168.19.61",
    "192.168.19.62",
    "192.168.19.63",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "SiChuan",
      "L": "chengdu",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s kubernetes-csr.json | cfssljson -bare kubernetes
tee admin-csr.json <<-'EOF'
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "SiChuan",
      "L": "chengdu",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s admin-csr.json | cfssljson -bare admin
tee kube-proxy-csr.json <<-'EOF'
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "SiChuan",
      "L": "chengdu",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s kube-proxy-csr.json | cfssljson -bare kube-proxy
openssl x509 -noout -text -in kubernetes.pem
cfssl-certinfo -cert kubernetes.pem

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

tar -xzvf kubernetes-client-linux-amd64.tar.gz
cp kubernetes/client/bin/kube* /usr/bin/
chmod a+x /usr/bin/kube*
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-1 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.61:2380 \\
  --listen-peer-urls=https://192.168.19.61:2380 \\
  --listen-client-urls=https://192.168.19.61:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.61:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-2 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.62:2380 \\
  --listen-peer-urls=https://192.168.19.62:2380 \\
  --listen-client-urls=https://192.168.19.62:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.62:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-3 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.63:2380 \\
  --listen-peer-urls=https://192.168.19.63:2380 \\
  --listen-client-urls=https://192.168.19.63:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.63:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
etcdctl \
  --endpoints=https://192.168.19.61:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  cluster-health
yum install -y flannel

cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld
EnvironmentFile=-/etc/sysconfig/docker-network
ExecStart=/usr/bin/flanneld-start \$FLANNEL_OPTIONS
ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
cat > /etc/sysconfig/flanneld << EOF
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="https://192.168.19.61:2379,https://192.168.19.62:2379,https://192.168.19.63:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/kube-centos/network"

# Any additional options that you want to pass
FLANNEL_OPTIONS="-etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"
EOF

mkdir -p /kube-centos/network

# Run on one node only:
etcdctl --endpoints=https://192.168.19.61:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  set /kube-centos/network/config '{"Network":"10.250.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
etcdctl --endpoints=https://192.168.19.61:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  ls /kube-centos/network/subnets
