#证书自签名脚本
root@k8s-master: ~/k8s/k8s-cert ::
$ cat k8s-cert.sh
# Self-signed certificate generation for the k8s master (cfssl).
# Produces: ca.pem/ca-key.pem, server.pem/server-key.pem,
#           admin.pem/admin-key.pem, kube-proxy.pem/kube-proxy-key.pem
# Requires: cfssl and cfssljson on PATH.
# NOTE: every heredoc "EOF" delimiter must sit alone on its own line.

# CA signing config: one "kubernetes" profile, valid 10 years (87600h),
# usable for both server auth and client auth.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF

# CA certificate signing request (2048-bit RSA key).
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# Generate the self-signed root CA.
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------
# hosts: SLB node IPs, master node IPs, then node IPs (list a few spare
# node IPs up front so the cert does not need reissuing when nodes are added).
cat > server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.1.63",
    "192.168.1.64",
    "192.168.1.65",
    "192.168.1.66",
    "192.168.1.60",
    "192.168.1.61",
    "192.168.1.62",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# apiserver (server) certificate signed by the CA above.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------
# admin client certificate; O=system:masters maps to the cluster-admin
# group under RBAC.
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------
# kube-proxy client certificate.
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
root@k8s-master: ~/k8s/k8s-cert ::
master节点需要部署三个组件:
1. kube-apiserver
2. kube-controller-manager
3. kube-scheduler
部署流程:配置文件 -> systemd管理组件 -> 启动
==================================================kube-apiserver==================================================

# Create the TLS Bootstrapping token that node kubelets present to the
# apiserver (consumed via --token-auth-file) to request client certificates.
# To generate a fresh random token instead of the fixed one below:
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

# token.csv format: token,user,uid,"group"
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Ensure the config directory exists before moving the file into place
# (the original assumed /opt/kubernetes/cfg had already been created).
mkdir -p /opt/kubernetes/cfg
mv token.csv /opt/kubernetes/cfg

部署kube-apiserver
(1)创建apiserver的文件存放目录
root@k8s-master: ~/soft ::
$ mkdir /opt/kubernetes/{bin,ssl,cfg} -p
(2)解压tar包,将核心组件复制到/opt/kubernetes/bin下:kube-apiserver,kube-controller-manager,kube-scheduler
root@k8s-master: ~/soft ::
$ tar zxvf kubernetes-server-linux-amd64.tar.gz
root@k8s-master: ~ ::
$ cd /root/soft/kubernetes/server/bin
root@k8s-master: ~/soft/kubernetes/server/bin ::
$ cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/
(3)kube-apiserver配置文件脚本
root@k8s-master: ~/k8s ::
$ cat apiserver.sh
#!/bin/bash
# Usage: ./apiserver.sh MASTER_ADDRESS ETCD_SERVERS
#   MASTER_ADDRESS - IP of this master node, e.g. 192.168.1.63
#   ETCD_SERVERS   - comma-separated etcd endpoints,
#                    e.g. https://192.168.1.63:2379,https://192.168.1.65:2379
MASTER_ADDRESS=$1
ETCD_SERVERS=$2

# Write the apiserver options file consumed by systemd via EnvironmentFile.
# NOTE: do not place '#' comment lines inside the quoted OPTS value — they
# would become literal text in the variable and break apiserver startup.
# --logtostderr=true  logs go to the journal (set false + --log-dir for files)
# --v=4               log verbosity; higher value = more detail
#                     (TODO confirm: value was lost in the source document)
# TLS files under /opt/kubernetes/ssl are the apiserver's own self-signed
# certs; the /opt/etcd/ssl files are etcd's certs (etcd is also HTTPS).
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--service-node-port-range=30000-50000 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF

# systemd unit managing kube-apiserver.
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
root@k8s-master: ~/k8s ::
$ ()将apiserver自签名证书移动到/opt/kubernetes/ssl
root@k8s-master: /opt/kubernetes/ssl ::
$ pwd
/opt/kubernetes/ssl
root@k8s-master: /opt/kubernetes/ssl ::
$ ls
ca-key.pem ca.pem server-key.pem server.pem
root@k8s-master: /opt/kubernetes/ssl ::
$ ()复制apiserver自签名证书到/opt/kubernetes/ssl
执行脚本
root@k8s-master: ~/k8s ::
$ ./apiserver.sh 192.168.1.63 https://192.168.1.63:2379,https://192.168.1.65:2379,https://192.168.1.66:2379
(4)验证apiserver是否启动成功
root@k8s-master: /opt/kubernetes/ssl ::
$ netstat -lntup |grep
tcp 127.0.0.1: 0.0.0.0:* LISTEN /kube-apiserver
root@k8s-master: /opt/kubernetes/ssl ::
root@k8s-master: /opt/kubernetes/ssl ::
$ ps -ef|grep kube
root : ? :: /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v= --etcd-servers=https://192.168.1.63:2379,https://192.168.1.65:2379,https://192.168.1.66:2379 --bind-address=192.168.1.63 --secure-port=6443 --advertise-address=192.168.1.63 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root : ? :: /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v= --master=127.0.0.1: --leader-elect
root : ? :: /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v= --master=127.0.0.1: --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.0.0.0/ --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root : pts/ :: grep --color=auto kube
root@k8s-master: /opt/kubernetes/ssl ::
$
####报错排查方式
$ source /opt/kubernetes/cfg/kube-apiserver
$ /opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS ==================================================kube-controller-manager================================================== root@k8s-master: ~/k8s ::
$ cat controller-manager.sh
#!/bin/bash
# Usage: ./controller-manager.sh MASTER_ADDRESS
#   MASTER_ADDRESS - IP of the master node running kube-apiserver
MASTER_ADDRESS=$1

# Options file consumed by systemd via EnvironmentFile.
# --master          apiserver insecure endpoint (port 8080) on the master
# --leader-elect    enables leader election for HA
# --address         bind locally only; the service just talks to apiserver
# The ca/ca-key files are used both to sign node certificates (CSR approval)
# and as the service-account key material.
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF

# systemd unit managing kube-controller-manager.
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
root@k8s-master: ~/k8s ::
$ ==================================================kube-scheduler================================================== root@k8s-master: ~/k8s ::
$ cat scheduler.sh
#!/bin/bash
# Usage: ./scheduler.sh MASTER_ADDRESS
#   MASTER_ADDRESS - IP of the master node running kube-apiserver
MASTER_ADDRESS=$1

# kube-scheduler only needs: logging, the apiserver address (insecure
# port 8080 on the master) and leader election for HA.
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF

# systemd unit managing kube-scheduler.
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

==================================================kubectl==================================================
部署完成验证
root@k8s-master: ~/k8s ::
$ cp /root/soft/kubernetes/server/bin/kubectl /usr/bin/
#检查当前集群节点的健康状态
$ kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd- Healthy {"health":"true"}
etcd- Healthy {"health":"true"}
etcd- Healthy {"health":"true"}
root@k8s-master: ~/k8s ::
$ ###ps: cs 为 componentstatuses 的缩写

k8s集群---apiserver,controller-manager,scheduler部署的更多相关文章

  1. 记录一个奇葩的问题:k8s集群中master节点上部署一个单节点的nacos,导致master节点状态不在线

    情况详细描述; k8s集群,一台master,两台worker 在master节点上部署一个单节点的nacos,导致master节点状态不在线(不论是否修改nacos的默认端口号都会导致master节 ...

  2. 还不会用 K8s 集群控制器?那你会用冰箱吗?(多图详解)

    作者 | 阿里云售后技术专家 声东 导读:当我们尝试去理解 K8s 集群工作原理的时候,控制器(Controller)肯定是一个难点.这是因为控制器有很多,具体实现大相径庭:且控制器的实现用到了一些较 ...

  3. centos7.8 安装部署 k8s 集群

    centos7.8 安装部署 k8s 集群 目录 centos7.8 安装部署 k8s 集群 环境说明 Docker 安装 k8s 安装准备工作 Master 节点安装 k8s 版本查看 安装 kub ...

  4. 在 Nebula K8s 集群中使用 nebula-spark-connector 和 nebula-algorithm

    本文首发于 Nebula Graph Community 公众号 解决思路 解决 K8s 部署 Nebula Graph 集群后连接不上集群问题最方便的方法是将 nebula-algorithm / ...

  5. K8s集群部署(二)------ Master节点部署

    Master节点要部署三个服务:API Server.Scheduler.Controller Manager. apiserver提供集群管理的REST API接口,包括认证授权.数据校验以 及集群 ...

  6. 菜鸟系列k8s——k8s集群部署(2)

    k8s集群部署 1. 角色分配 角色 IP 安装组件 k8s-master 10.0.0.170 kube-apiserver,kube-controller-manager,kube-schedul ...

  7. 二进制方法-部署k8s集群部署1.18版本

    二进制方法-部署k8s集群部署1.18版本 1. 前置知识点 1.1 生产环境可部署kubernetes集群的两种方式 目前生产部署Kubernetes集群主要有两种方式 kuberadm Kubea ...

  8. 5.基于二进制部署kubernetes(k8s)集群

    1 kubernetes组件 1.1 Kubernetes 集群图 官网集群架构图 1.2 组件及功能 1.2.1 控制组件(Control Plane Components) 控制组件对集群做出全局 ...

  9. Centos7 安装部署Kubernetes(k8s)集群

    目录 一.系统环境 二.前言 三.Kubernetes 3.1 概述 3.2 Kubernetes 组件 3.2.1 控制平面组件 3.2.2 Node组件 四.安装部署Kubernetes集群 4. ...

随机推荐

  1. 什么是Hessian协议呢?

    什么是Hessian协议呢? 目前,Web服务技术是解决异构平台系统的集成及互操作问题的主流技术. 它所基于的XML已经是Internet上交换数据的实际标准,基于通用的进程间通信协议和网络传输协议屏 ...

  2. @codeforces - 1214G@ Feeling Good

    目录 @description@ @solution@ @accepted code@ @details@ @description@ 给定一个 n*m 的 01 矩阵 A,一开始所有格子都为 0. ...

  3. @codeforces - 631E@ Product Sum

    目录 @desription@ @solution@ @accepted code@ @details@ @desription@ 给定一个序列 a,定义它的权值 \(c = \sum_{i=1}^{ ...

  4. codeforces 609B

    #include<bits/stdc++.h> using namespace std; ]; int main() { memset(num,,sizeof(num)); int n,m ...

  5. IDEA中安装activiti并使用

    1.IDEA中本身不带activiti,需要自己安装下载. 打开IDEA中File列表下的Settings 输入actiBPM,然后点击下面的Search...搜索 点击Install 下载 下载结束 ...

  6. 深入理解String、StringBuffer、StringBuilder(转)

    文章系转载,非原创,原地址: http://www.cnblogs.com/dolphin0520/p/3778589.html 相信String这个类是Java中使用得最频繁的类之一,并且又是各大公 ...

  7. servicemix-3.2.1 内置的服务引擎和绑定组件

    服务引擎: servicemix-bean servicemix-camel servicemix-cxf-se servicemix-drools servicemix-eip servicemix ...

  8. 【codeforces 761B】Dasha and friends

    time limit per test2 seconds memory limit per test256 megabytes inputstandard input outputstandard o ...

  9. win10下,cmd,power shell设置默认编码为‘UTF-8’?

    这个问题可以终结了,最新版 Windows 10 支持 UTF-8 了.打开这个选项,cmd 和 powershell 默认就是 UTF-8 了.在控制面板-时钟和区域-区域-管理-更改系统区域设置( ...

  10. SourceYard 制作源代码包

    本文带大家走进SourceYard开发之旅 在项目开发中,将一个大的项目拆为多个小项目解耦,减少模块之间的耦合.因为如果将代码放在一起,即使有团队的约束,但只要能写出的代码就会有小伙伴写出,很快就发现 ...