Deploying the apiserver

Create the JSON config file used to generate the CSR
[root@k8s-master1 ssl]# vim kubernetes-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.123",
    "192.168.0.124",
    "192.168.0.130",
    "10.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
Generate the kubernetes certificate and private key
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
Distribute the certificates
[root@k8s-master1 ssl]# cp kubernetes*.pem /opt/kubernetes/ssl/
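
Before pointing the apiserver at this certificate, it is worth confirming that every host from the CSR actually made it into the SAN list; a quick check with openssl (assuming openssl is available on the host):

[root@k8s-master1 ssl]# openssl x509 -noout -text -in /opt/kubernetes/ssl/kubernetes.pem | grep -A1 'Subject Alternative Name'
# expect the DNS names and IPs from kubernetes-csr.json, including the VIP 192.168.0.130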
Prepare the software packages

Download the binary package: https://github.com/kubernetes/kubernetes

cd /usr/local/src/
wget https://dl.k8s.io/v1.13.0/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-scheduler kube-controller-manager kubectl /opt/kubernetes/bin/
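
A quick sanity check that the copied binaries are the release we downloaded:

/opt/kubernetes/bin/kube-apiserver --version
# expected output: Kubernetes v1.13.0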

Create the client token file used by kube-apiserver
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
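Each line of token.csv is token,user,uid,"groups"; the token is 16 random bytes hex-encoded (32 characters). Verify the file was written correctly — the token value will differ on every run, the one below is only an example:

[root@k8s-master1 ~]# cat /opt/kubernetes/cfg/token.csv
0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"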
Create the kube-apiserver config file
[root@k8s-master1 ~]# vim /opt/kubernetes/cfg/kube-apiserver 

KUBE_APISERVER_OPTS="--logtostderr=false \
--v=4 \
--log-dir=/opt/kubernetes/log \
--etcd-servers=https://192.168.0.123:2379,https://192.168.0.125:2379,https://192.168.0.126:2379 \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.0.123 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/etcd.pem \
--etcd-keyfile=/opt/kubernetes/ssl/etcd-key.pem"
Parameter notes:

--logtostderr: log to stderr (set to false here so logs go to --log-dir)
--v: log verbosity level
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS secure port
--advertise-address: the address advertised to the rest of the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: virtual IP range for Services
--enable-admission-plugins: admission control plugins
--authorization-mode: authorization modes; enables RBAC authorization and node self-management
--enable-bootstrap-token-auth: enable TLS bootstrapping, covered later
--token-auth-file: token file
--service-node-port-range: default port range allocated to NodePort Services

Create the kube-apiserver systemd service
[root@k8s-master1 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Start the apiserver
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
systemctl status kube-apiserver
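
If the unit came up cleanly, the apiserver listens on the secure port 6443 on all interfaces and on the default insecure port 8080 on localhost; a quick check:

[root@k8s-master1 ~]# ss -tlnp | egrep '6443|8080'
# expect two LISTEN entries owned by kube-apiserver: 0.0.0.0:6443 and 127.0.0.1:8080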
Access the API endpoints with curl
[root@k8s-master1 ~]# curl -L --cacert /opt/kubernetes/ssl/ca.pem  https://192.168.0.123:6443/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.123:6443"
    }
  ]
}
[root@k8s-master1 ~]# curl -L http://127.0.0.1:8080/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.123:6443"
    }
  ]
}

Deploying the Controller Manager

Create the config file
[root@k8s-master1 ~]# vim /opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=4 \
--log-dir=/opt/kubernetes/log \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
Create the service file
[root@k8s-master1 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
Start the service
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
systemctl status kube-controller-manager
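
With --leader-elect=true only one controller-manager instance is active at a time. In 1.13 the election lock is kept as an annotation on an Endpoints object in kube-system, so it can be inspected through the local insecure port (a sketch; the exact annotation layout varies by version):

[root@k8s-master1 ~]# curl -s http://127.0.0.1:8080/api/v1/namespaces/kube-system/endpoints/kube-controller-manager | grep holderIdentity
# holderIdentity names the node currently holding the lease; the same check works for kube-scheduler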

Deploying the scheduler

Create the config file
[root@k8s-master1 ~]# vim /opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=4 \
--log-dir=/opt/kubernetes/log \
--master=127.0.0.1:8080 \
--leader-elect"

--master: connect to the local apiserver
--leader-elect: when multiple instances of this component run, elect a leader automatically (HA)

Create the service file
[root@k8s-master1 ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
Start the service
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler

Deploying master2

Copy master1's configuration over to master2, change the corresponding IP addresses, and start the services.

scp -r /opt/kubernetes 192.168.0.124:/opt/
scp /usr/lib/systemd/system/kube-* 192.168.0.124:/usr/lib/systemd/system/
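
Only the addresses that identify master2 itself need to change, chiefly --advertise-address in /opt/kubernetes/cfg/kube-apiserver; a sketch, assuming the files landed exactly as copied above:

[root@k8s-master2 ~]# sed -i 's/--advertise-address=192.168.0.123/--advertise-address=192.168.0.124/' /opt/kubernetes/cfg/kube-apiserver
[root@k8s-master2 ~]# systemctl daemon-reload
[root@k8s-master2 ~]# systemctl enable kube-apiserver kube-controller-manager kube-scheduler
[root@k8s-master2 ~]# systemctl restart kube-apiserver kube-controller-manager kube-scheduler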

Access the master2 API

[root@k8s-master2 ~]# curl -L --cacert /opt/kubernetes/ssl/ca.pem  https://192.168.0.124:6443/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.124:6443"
    }
  ]
}
[root@k8s-master2 ~]# curl -L http://127.0.0.1:8080/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.124:6443"
    }
  ]
}

Configuring apiserver high availability

Install keepalived
yum -y install keepalived

keepalived config file on master1

[root@k8s-master1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   # notification email recipients
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   # notification email sender
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id API_MASTER
}

vrrp_script check_api {
    script "/etc/keepalived/check_api.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 51   # VRRP router ID; must be unique per instance
    priority 100           # priority; set lower on the backup
    advert_int 1           # VRRP advertisement interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.130/24
    }
    track_script {
        check_api
    }
}

keepalived config file on master2

[root@k8s-master2 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   # notification email recipients
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   # notification email sender
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id API_MASTER
}

vrrp_script check_api {
    script "/etc/keepalived/check_api.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    virtual_router_id 51   # VRRP router ID; must match the master
    priority 90            # priority; lower than the master
    advert_int 1           # VRRP advertisement interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.130/24
    }
    track_script {
        check_api
    }
}
Prepare the apiserver health-check script
[root@k8s-master1 ~]# vim /etc/keepalived/check_api.sh
#!/bin/bash
count=$(ps -ef |grep kube-apiserver |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
[root@k8s-master1 ~]# chmod +x /etc/keepalived/check_api.sh
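Give the script a dry run before trusting it for failover; with the apiserver running the count is non-zero and nothing happens, and with it stopped the script stops keepalived so the VIP can move:

[root@k8s-master1 ~]# bash -x /etc/keepalived/check_api.sh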
Start keepalived
systemctl start keepalived
systemctl enable keepalived
systemctl status keepalived

Testing high availability

Check the IP addresses

[root@k8s-master1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:8a:2b:5f brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.123/24 brd 192.168.0.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 192.168.0.130/24 scope global secondary ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe8a:2b5f/64 scope link
       valid_lft forever preferred_lft forever

[root@k8s-master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:77:dc:9c brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.124/24 brd 192.168.0.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe77:dc9c/64 scope link
       valid_lft forever preferred_lft forever

The VIP is bound to master1; access the VIP

[root@k8s-master1 ~]# curl -L --cacert /opt/kubernetes/ssl/ca.pem  https://192.168.0.130:6443/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.123:6443"
    }
  ]
}

Stop the apiserver on master1, then access the VIP again

[root@k8s-master1 ~]# curl -L --cacert /opt/kubernetes/ssl/ca.pem  https://192.168.0.130:6443/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.0.124:6443"
    }
  ]
}

Check the IP addresses again; the VIP is now bound to master2

[root@k8s-master1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:8a:2b:5f brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.123/24 brd 192.168.0.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe8a:2b5f/64 scope link
       valid_lft forever preferred_lft forever

[root@k8s-master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:77:dc:9c brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.124/24 brd 192.168.0.255 scope global ens32
       valid_lft forever preferred_lft forever
    inet 192.168.0.130/24 scope global secondary ens32
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe77:dc9c/64 scope link
       valid_lft forever preferred_lft forever

Configuring the kubectl command-line tool

Create the admin certificate signing request

[root@k8s-master1 ssl]# vim admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

Generate the admin certificate and key

[root@k8s-master1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
[root@k8s-master1 ssl]# cp admin*.pem /opt/kubernetes/ssl/
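
kubectl gains cluster-admin rights because the certificate's O field maps to the system:masters group, which the RBAC bootstrap policy binds to the cluster-admin role; verify the subject (output format varies with the openssl version):

[root@k8s-master1 ssl]# openssl x509 -noout -subject -in /opt/kubernetes/ssl/admin.pem
subject= /C=CN/ST=BeiJing/L=BeiJing/O=system:masters/OU=System/CN=admin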

Set the cluster parameters

kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.0.130:6443

Set the client authentication parameters

kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/opt/kubernetes/ssl/admin-key.pem

Set the context parameters

kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin

Set the default context

kubectl config use-context kubernetes
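
The four commands above write to ~/.kube/config by default; confirm the merged result points at the VIP and embeds the admin credentials:

[root@k8s-master1 ~]# kubectl config view --minify
# server should be https://192.168.0.130:6443, with user admin carrying embedded certificate data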

Check the cluster status

[root@k8s-master1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}
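
At this point the control plane is healthy but no nodes have registered yet, so listing nodes comes back empty; joining nodes via TLS bootstrap is covered separately in the node deployment article:

[root@k8s-master1 ~]# kubectl get nodes
No resources found.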
