当前位置: 代码迷 >> 综合 >> Kubernetes ceph rbd 基于 storageclass动态生成pv
  详细解决方案

Kubernetes ceph rbd 基于 storageclass动态生成pv

热度:66   发布时间:2023-09-30 13:00:27.0

基于storageclass动态生成pv


1、创建rbd的供应商  provisioner   ceph.com/rbd(环境变量里面的值)

#把rbd-provisioner.tar.gz上传,手动解压,这里面封装的是镜像

[root@master ceph]# docker load -i rbd-provisioner.tar.gz 
1d31b5806ba4: Loading layer [==================================================>]  208.3MB/208.3MB
499d93e0e038: Loading layer [==================================================>]  164.1MB/164.1MB
7c9bb3d61493: Loading layer [==================================================>]  44.52MB/44.52MB
Loaded image: quay.io/xianchao/external_storage/rbd-provisioner:v1
[root@master rbd-provisioner]# cat rbd-provisioner.yaml 
kind: ClusterRole   #定义了一个ClusterRole,可以对哪些资源做操作
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding  #定义了一个clusterrolebinding,将下面的serviceAccount: rbd-provisioner 绑定
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  selector:
    matchLabels:
      app: rbd-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: quay.io/xianchao/external_storage/rbd-provisioner:v1
        imagePullPolicy: IfNotPresent
        env:
        - name: PROVISIONER_NAME       #供应商名字
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
[root@master rbd-provisioner]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS   AGE
rbd-provisioner-6bbc95cd74-g6lcd          1/1     Running   0          9s

2、创建ceph-secret

#创建pool池

[root@master1-admin ceph]# ceph osd pool create k8stest1 56
pool 'k8stest1' created
[root@master1-admin ceph]# ceph osd pool ls
rbd
cephfs_data
cephfs_metadata
k8srbd1
k8stest
k8stest1
You have new mail in /var/spool/mail/root
[root@master1-admin ~]#  ceph auth get-key client.admin | base64
QVFDOWF4eGhPM0UzTlJBQUJZZnVCMlZISVJGREFCZHN0UGhMc3c9PQ==
[root@master rbd-provisioner]# cat ceph-secret-1.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-1
type: "ceph.com/rbd"
data:
  key: QVFDOWF4eGhPM0UzTlJBQUJZZnVCMlZISVJGREFCZHN0UGhMc3c9PQ==
[root@master rbd-provisioner]# kubectl get secret
NAME                                 TYPE                                  DATA   AGE
ceph-secret                          Opaque                                1      19h
ceph-secret-1                        ceph.com/rbd                          1      2m33s
default-token-cwbdx                  kubernetes.io/service-account-token   3      91d
nfs-client-provisioner-token-plww9   kubernetes.io/service-account-token   3      19d
qingcloud                            kubernetes.io/dockerconfigjson        1      91d
rbd-provisioner-token-82bql          kubernetes.io/service-account-token   3      10m

3、创建storageclass 

[root@master ceph]# cat rbd-provisioner/storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: k8s-rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.0.5:6789,192.168.0.6:6789,192.168.0.7:6789
  adminId: admin
  adminSecretName: ceph-secret-1
  pool: k8stest1
  userId: admin
  userSecretName: ceph-secret-1
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
#  pool: k8stest1 这个pool池子是之前创建的
[root@master rbd-provisioner]# kubectl apply -f storageclass.yaml 
storageclass.storage.k8s.io/k8s-rbd created
[root@master rbd-provisioner]# kubectl get sc
NAME                  PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
k8s-rbd               ceph.com/rbd                   Delete          Immediate              false                  7s

这个时候存储类在申请pv的时候就会找到ceph.com/rbd供应商,然后通过该供应商从集群里面去划分出来一个pv。

4、创建pvc

invalid AccessModes [ReadWriteMany]: only AccessModes [ReadWriteOnce ReadOnlyMany] are supported(可以看到rbd不支持ReadWriteMany)

[root@master rbd-provisioner]# kubectl get pvc
NAME                  STATUS    VOLUME       CAPACITY   ACCESS MODES   STORAGECLASS    AGE
ceph-pvc              Bound     ceph-pv      1Gi        RWX                            18h
example-local-claim   Bound     example-pv   5Gi        RWO            local-storage   63d
rbd-pvc               Pending                                          k8s-rbd         5s
[root@master rbd-provisioner]# kubectl describe pvc rbd-pvc    
Name:          rbd-pvc
Namespace:     default
StorageClass:  k8s-rbd
Status:        Pending
Volume:        
Labels:        <none>
Annotations:   volume.beta.kubernetes.io/storage-provisioner: ceph.com/rbd
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:      
Access Modes:  
VolumeMode:    Filesystem
Mounted By:    <none>
Events:
  Type     Reason                Age                From                                                                                Message
  ----     ------                ----               ----                                                                                -------
  Normal   Provisioning          10s (x2 over 25s)  ceph.com/rbd_rbd-provisioner-6bbc95cd74-g6lcd_ee50d24f-015f-11ec-a6a6-9e54668c01e4  External provisioner is provisioning volume for claim "default/rbd-pvc"
  Warning  ProvisioningFailed    10s (x2 over 25s)  ceph.com/rbd_rbd-provisioner-6bbc95cd74-g6lcd_ee50d24f-015f-11ec-a6a6-9e54668c01e4  failed to provision volume with StorageClass "k8s-rbd": invalid AccessModes [ReadWriteMany]: only AccessModes [ReadWriteOnce ReadOnlyMany] are supported
  Normal   ExternalProvisioning  10s (x3 over 25s)  persistentvolume-controller                                                         waiting for a volume to be created, either by external provisioner "ceph.com/rbd" or manually created by system administrator
[root@master rbd-provisioner]# kubectl delete -f 
ceph-secret-1.yaml    rbd-provisioner.yaml  rbd-pvc.yaml          storageclass.yaml     
[root@master rbd-provisioner]# kubectl delete -f rbd-pvc.yaml 
persistentvolumeclaim "rbd-pvc" deleted
[root@master rbd-provisioner]# vim rbd-pvc.yaml 
[root@master rbd-provisioner]# cat rbd-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: rbd-pvc
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: k8s-rbd
[root@master rbd-provisioner]# kubectl apply -f rbd-pvc.yaml 
persistentvolumeclaim/rbd-pvc created
[root@master rbd-provisioner]# kubectl get pvc
NAME                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
ceph-pvc              Bound    ceph-pv                                    1Gi        RWX                            19h
example-local-claim   Bound    example-pv                                 5Gi        RWO            local-storage   63d
rbd-pvc               Bound    pvc-5eef6286-c89f-421a-88d1-1a6593423087   1Gi        RWO            k8s-rbd         6s

5、创建pod,挂载pvc 

[root@master rbd-provisioner]# cat pod-sto.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: rbd-pod
  name: ceph-rbd-pod
spec:
  containers:
  - name: ceph-rbd-nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: ceph-rbd
      mountPath: /mnt
      readOnly: false
  volumes:
  - name: ceph-rbd
    persistentVolumeClaim:
      claimName: rbd-pvc
[root@master rbd-provisioner]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS   AGE
ceph-rbd-pod                              1/1     Running   0          26s
  相关解决方案