[root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
Error ETIMEDOUT: crush test failed with -110: timed out during smoke test (5 seconds)
// I don't know how to fix this error; it cleared up on its own after a short while, so simply retrying the command worked.
[root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
pool 'harbor' created
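Once the pool exists, it can help to tag it for RBD use and confirm the PG count took effect — a minimal sketch, assuming a Luminous-or-later cluster (the `osd pool application enable` subcommand does not exist on older releases):

# Tag the pool so 'ceph health' does not warn about an application-less pool.
ceph osd pool application enable harbor rbd
# Confirm the pool exists and check its placement-group count.
ceph osd lspools
ceph osd pool get harbor pg_num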
[root@bs-k8s-ceph ceph]# ceph auth get-or-create client.harbor mon 'allow r' osd 'allow class-read, allow rwx pool=harbor' -o ceph.client.harbor.keyring
[root@bs-k8s-ceph ceph]# ceph auth get client.harbor
exported keyring for client.harbor
[client.harbor]
key = AQDoCklen6e4NxAAVXmy/PG+R5iH8fNzMhk6Jg==
caps mon = "allow r"
caps osd = "allow class-read, allow rwx pool=harbor"
[root@bs-k8s-node01 ~]# ceph auth get-key client.admin | base64
QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
[root@bs-k8s-node01 ~]# ceph auth get-key client.harbor | base64
QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
[root@bs-k8s-master01 ~]# kubectl get nodes
The connection to the server 20.0.0.250:8443 was refused - did you specify the right host or port?
[root@bs-hk-hk01 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
Active: failed (Result: exit-code) since Sun 2020-02-16 17:16:43 CST; 12min ago
Process: 1168 ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid (code=exited, status=134)
Main PID: 1168 (code=exited, status=134)

Feb 15 20:22:54 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202254 (1184) : Server k8s_api_nodes...ue.
Feb 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1183) : Server k8s_api_nodes...ue.
Feb 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1184) : Server k8s_api_nodes...ue.
Feb 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1184) : Server k8s_api_nodes...ue.
Feb 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1183) : Server k8s_api_nodes...ue.
Feb 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1183) : Server k8s_api_nodes...ue.
Feb 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1184) : Server k8s_api_nodes...ue.
Feb 16 17:16:43 bs-hk-hk01 systemd[1]: haproxy.service: main process exited, code=exited, st...n/a
Feb 16 17:16:44 bs-hk-hk01 systemd[1]: Unit haproxy.service entered failed state.
Feb 16 17:16:44 bs-hk-hk01 systemd[1]: haproxy.service failed.
Hint: Some lines were ellipsized, use -l to show in full.
[root@bs-hk-hk01 ~]# systemctl start haproxy
[root@bs-hk-hk01 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2020-02-16 17:30:03 CST; 1s ago
Process: 4196 ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q (code=exited, status=0/SUCCESS)
Main PID: 4212 (haproxy)
CGroup: /system.slice/haproxy.service
├─4212 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
├─4216 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
└─4217 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....

Feb 16 17:30:00 bs-hk-hk01 systemd[1]: Starting HAProxy Load Balancer...
Feb 16 17:30:03 bs-hk-hk01 systemd[1]: Started HAProxy Load Balancer.
Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : 'option for...de.
Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : 'option for...de.
Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : Proxy 'stats': in mu...st.
Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #1 (4216) forked
Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #2 (4217) forked
Hint: Some lines were ellipsized, use -l to show in full.
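HAProxy is running again; before returning to kubectl, it is worth confirming that the API endpoint behind the VIP actually answers — a minimal sketch (20.0.0.250:8443 is taken from the earlier connection refusal; the /healthz path assumes a stock kube-apiserver behind the proxy):

# Confirm haproxy is listening on the API front end.
ss -lntp | grep -w 8443
# Hit the apiserver health endpoint through the load balancer
# (-k because the cluster certificate is not in curl's trust store).
curl -k https://20.0.0.250:8443/healthz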
[root@bs-hk-hk01 ~]# systemctl enable haproxy
[root@bs-k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
bs-k8s-master01 Ready master 7d6h v1.17.2
bs-k8s-master02 Ready master 7d6h v1.17.2
bs-k8s-master03 Ready master 7d6h v1.17.2
bs-k8s-node01 Ready <none> 7d6h v1.17.2
bs-k8s-node02 Ready <none> 7d6h v1.17.2
bs-k8s-node03 Ready <none> 7d6h v1.17.2
[root@bs-k8s-master01 ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default rbd-provisioner-75b85f85bd-8ftdm 1/1 Running 11 7d6h
kube-system calico-node-4jxbp 1/1 Running 4 7d6h
kube-system calico-node-7t9cj 1/1 Running 7 7d6h
kube-system calico-node-cchgl 1/1 Running 14 7d6h
kube-system calico-node-czj76 1/1 Running 6 7d6h
kube-system calico-node-lxb2s 1/1 Running 14 7d6h
kube-system calico-node-nmg9t 1/1 Running 8 7d6h
kube-system coredns-7f9c544f75-bwx9p 1/1 Running 4 7d6h
kube-system coredns-7f9c544f75-q58mr 1/1 Running 3 7d6h
kube-system dashboard-metrics-scraper-6b66849c9-qtwzx 1/1 Running 2 7d5h
kube-system etcd-bs-k8s-master01 1/1 Running 17 7d6h
kube-system etcd-bs-k8s-master02 1/1 Running 7 7d6h
kube-system etcd-bs-k8s-master03 1/1 Running 32 7d6h
kube-system kube-apiserver-bs-k8s-master01 1/1 Running 28 7d6h
kube-system kube-apiserver-bs-k8s-master02 1/1 Running 15 7d6h
kube-system kube-apiserver-bs-k8s-master03 1/1 Running 62 7d6h
kube-system kube-controller-manager-bs-k8s-master01 1/1 Running 32 7d6h
kube-system kube-controller-manager-bs-k8s-master02 1/1 Running 27 7d6h
kube-system kube-controller-manager-bs-k8s-master03 1/1 Running 31 7d6h
kube-system kube-proxy-26ffm 1/1 Running 3 7d6h
kube-system kube-proxy-298tr 1/1 Running 5 7d6h
kube-system kube-proxy-hzsmb 1/1 Running 3 7d6h
kube-system kube-proxy-jb4sq 1/1 Running 4 7d6h
kube-system kube-proxy-pt94r 1/1 Running 4 7d6h
kube-system kube-proxy-wljwv 1/1 Running 4 7d6h
kube-system kube-scheduler-bs-k8s-master01 1/1 Running 32 7d6h
kube-system kube-scheduler-bs-k8s-master02 1/1 Running 21 7d6h
kube-system kube-scheduler-bs-k8s-master03 1/1 Running 31 7d6h
kube-system kubernetes-dashboard-887cbd9c6-j7ptq 1/1 Running 22 7d5h
[root@bs-k8s-master01 harbor]# pwd
/data/k8s/harbor
[root@bs-k8s-master01 rbd]# kubectl apply -f ceph-harbor-namespace.yaml
namespace/harbor created
[root@bs-k8s-master01 rbd]# kubectl get namespaces
NAME STATUS AGE
default Active 7d8h
harbor Active 16s
kube-node-lease Active 7d8h
kube-public Active 7d8h
kube-system Active 7d8h
[root@bs-k8s-master01 rbd]# cat ceph-harbor-namespace.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-namespace.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
###########################################################################
apiVersion: v1
kind: Namespace
metadata:
  name: harbor
[root@bs-k8s-master01 rbd]# kubectl apply -f external-storage-rbd-provisioner.yaml
serviceaccount/rbd-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-provisioner unchanged
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner configured
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
[root@bs-k8s-master01 rbd]# kubectl get pods -n harbor -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
rbd-provisioner-75b85f85bd-dhnr4 1/1 Running 0 3m48s 10.209.46.84 bs-k8s-node01 <none> <none>
[root@bs-k8s-master01 rbd]# cat external-storage-rbd-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: harbor
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["kube-dns"]
  verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: harbor
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: harbor
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: harbor
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: harbor
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: harbor
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
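One detail worth calling out in this manifest: the PROVISIONER_NAME value, ceph.com/rbd, must match the provisioner field of the StorageClass created below, or claims will sit in Pending indefinitely. When a claim does hang, the provisioner's log is the fastest diagnostic — a minimal sketch:

# Tail the provisioner; Ceph auth failures and 'rbd create' errors surface here.
kubectl -n harbor logs deploy/rbd-provisioner --tail=50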
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-secret.yaml
secret/ceph-harbor-admin-secret created
secret/ceph-harbor-harbor-secret created
[root@bs-k8s-master01 harbor]# kubectl get secret -n harbor
NAME TYPE DATA AGE
ceph-harbor-admin-secret kubernetes.io/rbd 1 23s
ceph-harbor-harbor-secret kubernetes.io/rbd 1 23s
default-token-8k9gs kubernetes.io/service-account-token 3 8m49s
rbd-provisioner-token-mhl29 kubernetes.io/service-account-token 3 5m24s
[root@bs-k8s-master01 harbor]# cat ceph-harbor-secret.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-secret.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
###########################################################################
apiVersion: v1
kind: Secret
metadata:
  name: ceph-harbor-admin-secret
  namespace: harbor
data:
  key: QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
type: kubernetes.io/rbd
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-harbor-harbor-secret
  namespace: harbor
data:
  key: QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
type: kubernetes.io/rbd
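Both key values are simply the raw Ceph client keys from earlier, base64-encoded for the Secret's data field. A minimal sketch of producing and round-trip-checking them (the same get-key commands run on bs-k8s-node01 above):

# Encode the client keys for the Secrets.
ceph auth get-key client.admin | base64
ceph auth get-key client.harbor | base64
# Sanity check: decoding must return the exact key; get-key prints no
# trailing newline, so plain base64 round-trips cleanly.
echo QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ== | base64 -d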
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-storageclass.yaml
storageclass.storage.k8s.io/ceph-harbor created
[root@bs-k8s-master01 harbor]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
ceph-harbor ceph.com/rbd Retain Immediate false 11s
ceph-rbd ceph.com/rbd Retain Immediate false 25h
[root@bs-k8s-master01 harbor]# cat ceph-harbor-storageclass.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-storageclass.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
###########################################################################
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-harbor
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
reclaimPolicy: Retain
parameters:
  monitors: 20.0.0.206:6789,20.0.0.207:6789,20.0.0.208:6789
  adminId: admin
  adminSecretName: ceph-harbor-admin-secret
  adminSecretNamespace: harbor
  pool: harbor
  fsType: xfs
  userId: harbor
  userSecretName: ceph-harbor-harbor-secret
  imageFormat: "2"
  imageFeatures: "layering"
[root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-pvc.yaml
persistentvolumeclaim/pvc-ceph-harbor created
[root@bs-k8s-master01 harbor]# kubectl get pv -n harbor
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-494a130d-018c-4be3-9b31-e951cc4367a5 20Gi RWO Retain Bound default/wp-pv-claim ceph-rbd 23h
pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa 1Gi RWO Retain Bound harbor/pvc-ceph-harbor ceph-harbor 6s
pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16 1Gi RWO Retain Bound default/ceph-pvc ceph-rbd 26h
pvc-ac7d3a09-123e-4614-886c-cded8822a078 20Gi RWO Retain Bound default/mysql-pv-claim ceph-rbd 23h
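Note: PersistentVolumes are cluster-scoped, so the -n harbor flag on kubectl get pv above is a no-op — the same four volumes are listed without it. Only the claims, shown next, are namespaced.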
[root@bs-k8s-master01 harbor]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-pvc Bound pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16 1Gi RWO ceph-rbd 26h
mysql-pv-claim Bound pvc-ac7d3a09-123e-4614-886c-cded8822a078 20Gi RWO ceph-rbd 23h
wp-pv-claim Bound pvc-494a130d-018c-4be3-9b31-e951cc4367a5 20Gi RWO ceph-rbd 23h
[root@bs-k8s-master01 harbor]# kubectl get pvc -n harbor
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-ceph-harbor Bound pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa 1Gi RWO ceph-harbor 24s
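On the Ceph side, every bound claim should now correspond to an RBD image in the harbor pool — a quick check from a node holding the admin keyring (rbd-provisioner names images kubernetes-dynamic-pvc-<uuid>, so the exact name varies; substitute one from the ls output):

# List the images the provisioner created in the pool.
rbd ls -p harbor
# Inspect one of them; format 2 and the layering feature should match
# the StorageClass parameters.
rbd info harbor/kubernetes-dynamic-pvc-<uuid>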
[root@bs-k8s-master01 harbor]# cat ceph-harbor-pvc.yaml
##########################################################################
#Author: zisefeizhu
#QQ: 2********0
#Date: 2020-02-16
#FileName: ceph-harbor-pvc.yaml
#URL: https://www.cnblogs.com/zisefeizhu/
#Description: The test script
#Copyright (C): 2020 All rights reserved
###########################################################################
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-ceph-harbor
  namespace: harbor
spec:
  storageClassName: ceph-harbor
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
// At this point, dynamic PV provisioning under the harbor namespace is complete.
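As a final check that the claim is actually usable, any pod can mount it. A minimal sketch — the pod name and busybox image are arbitrary, not part of the original setup:

apiVersion: v1
kind: Pod
metadata:
  name: pvc-test
  namespace: harbor
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "df -h /data && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: pvc-ceph-harbor

If the pod reaches Running, the image was mapped and formatted as xfs per the StorageClass, and df shows roughly 1Gi mounted at /data.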
