failed to create rbd image: executable file not found in $PATH, command output:
When creating a PVC in Kubernetes, the following error is reported:
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
storageclass-pvc01 Pending storageclass01 12h
[root@k8s1 k8s-ceph]# kubectl describe pvc Pending
Error from server (NotFound): persistentvolumeclaims "Pending" not found
[root@k8s1 k8s-ceph]# kubectl describe pvc storageclass-pvc01
Name: storageclass-pvc01
Namespace: default
StorageClass: storageclass01
Status: Pending
Volume:
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"storageclass-pvc01","namespace":"default"},"spec":{...
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Mounted By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 86s (x350 over 12h) persistentvolume-controller Failed to provision volume with StorageClass "storageclass01": failed to create rbd image: executable file not found in $PATH, command output:
[root@k8s1 k8s-ceph]#
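Before the step-by-step fix, a note on the root cause: with the in-tree kubernetes.io/rbd provisioner it is the kube-controller-manager process itself that shells out to the rbd CLI. On a kubeadm cluster the controller-manager runs as a static pod whose image does not ship the rbd binary, so the executable is "not found in $PATH" no matter what is installed on the host. A quick way to confirm this (a generic check, not part of the original session; the pod name is assumed from the kubeadm convention kube-controller-manager-<node>):

kubectl -n kube-system get pod -l component=kube-controller-manager
kubectl -n kube-system exec kube-controller-manager-k8s1 -- rbd --version    # expected to fail: executable file not found in $PATH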
Resolution process:
[root@k8s1 ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
storageclass-pvc01 Pending storageclass01 13h
[root@k8s1 ~]# kubectl delete pvc storageclass-pvc01
persistentvolumeclaim "storageclass-pvc01" deleted
[root@k8s1 ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
[root@k8s1 ~]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 13h
[root@k8s1 ~]# kubectl delete storageclass storageclass01
storageclass.storage.k8s.io "storageclass01" deleted
[root@k8s1 ~]# kubectl get storageclass
No resources found in default namespace.
[root@k8s1 ~]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 42h
ceph-secret-rbd kubernetes.io/rbd 1 25h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d19h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 40h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 40h
[root@k8s1 ~]# ls
anaconda-ks.cfg dashboard.crt dashboard.key k8s-ceph kube-flannel.yml kubernetes-dashboard.yaml.1.10 yml
calico.yaml dashboard.csr google kubeadm.yaml kubernetes-dashboard.yaml recommended.yaml
[root@k8s1 ~]# cd k8s-ceph/
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cat storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: ceph-secret-rbd
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: ceph-secret-rbd
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
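For reference: adminId/adminSecretName identify the Ceph user the provisioner authenticates as when creating images, userSecretName is the secret pods use when mapping the image (it must exist in every namespace that creates PVCs from this class), and the pool must already exist on the Ceph side; here it is literally named "kubernets".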
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/storageclass01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 10s
[root@k8s1 k8s-ceph]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s1 Ready master 8d v1.16.2
k8s2 NotReady <none> 8d v1.16.0
k8s3 Ready <none> 8d v1.16.2
[root@k8s1 k8s-ceph]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
example-app ClusterIP 10.103.107.104 <none> 8080/TCP 3d1h
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 40h
[root@k8s1 k8s-ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d17h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d17h
httpd-app-c77bb8b47-6q549 0/1 ContainerCreating 0 14s
httpd-app-c77bb8b47-bnf5t 1/1 Terminating 1 2d17h
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d17h
nginx-86c57db685-d25gf 1/1 Terminating 1 2d17h
nginx-86c57db685-n8csl 0/1 ContainerCreating 0 14s
nginx-86c57db685-tw9x5 1/1 Running 2 2d17h
nginx-86c57db685-w8pq9 0/1 ContainerCreating 0 14s
nginx-86c57db685-zjc9h 1/1 Terminating 1 2d17h
prometheus-operator-99dccdc56-92h97 0/1 Terminating 308 3d16h
prometheus-operator-99dccdc56-wwjt8 0/1 ContainerCreating 0 14s
rbd000new 1/1 Terminating 0 19h
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 80s
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
[root@k8s1 k8s-ceph]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 1Gi RWO Recycle Bound default/pvc01 18h
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc.yaml
persistentvolumeclaim/storageclass-pvc01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
storageclass-pvc01 Pending storageclass01 5s
[root@k8s1 k8s-ceph]# kubectl describe storageclass-pvc01
error: the server doesn't have a resource type "storageclass-pvc01"
[root@k8s1 k8s-ceph]# kubectl describe pvc storageclass-pvc01
Name: storageclass-pvc01
Namespace: default
StorageClass: storageclass01
Status: Pending
Volume:
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"storageclass-pvc01","namespace":"default"},"spec":{...
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Mounted By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 5s (x3 over 33s) persistentvolume-controller Failed to provision volume with StorageClass "storageclass01": failed to create rbd image: executable file not found in $PATH, command output:
[root@k8s1 k8s-ceph]# yum install -y ceph-common
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
Ceph | 2.9 kB 00:00:00
Ceph-noarch | 2.9 kB 00:00:00
base | 3.6 kB 00:00:00
ceph-source | 2.9 kB 00:00:00
docker-ce-stable | 3.5 kB 00:00:00
epel | 5.4 kB 00:00:00
extras | 2.9 kB 00:00:00
kubernetes/signature | 454 B 00:00:00
kubernetes/signature | 1.4 kB 00:00:01 !!!
updates | 2.9 kB 00:00:00
(1/2): kubernetes/primary | 59 kB 00:00:00
(2/2): docker-ce-stable/x86_64/primary_db | 37 kB 00:00:00
kubernetes 430/430
Package 2:ceph-common-14.2.4-0.el7.x86_64 already installed and latest version
Nothing to do
[root@k8s1 k8s-ceph]# kubectl describe pvc storageclass-pvc01
Name: storageclass-pvc01
Namespace: default
StorageClass: storageclass01
Status: Pending
Volume:
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"storageclass-pvc01","namespace":"default"},"spec":{...
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Mounted By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 9s (x5 over 67s) persistentvolume-controller Failed to provision volume with StorageClass "storageclass01": failed to create rbd image: executable file not found in $PATH, command output:
[root@k8s1 k8s-ceph]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s1 Ready master 8d v1.16.2
k8s2 NotReady <none> 8d v1.16.0
k8s3 Ready <none> 8d v1.16.2
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s1 Ready master 8d v1.16.2
k8s2 Ready <none> 8d v1.16.2
k8s3 Ready <none> 8d v1.16.2
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
storageclass-pvc01 Pending storageclass01 8m56s
[root@k8s1 k8s-ceph]# kubectl delete pvc storageclass-pvc01
persistentvolumeclaim "storageclass-pvc01" deleted
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl delete sc storageclass01
storageclass.storage.k8s.io "storageclass01" deleted
[root@k8s1 k8s-ceph]# kubectl get storageclass
No resources found in default namespace.
[root@k8s1 k8s-ceph]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 42h
ceph-secret-rbd kubernetes.io/rbd 1 25h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d19h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 40h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 40h
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/storageclass01 created
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc.yaml
persistentvolumeclaim/storageclass-pvc01 created
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 18h
storageclass-pvc01 Pending storageclass01 16s
[root@k8s1 k8s-ceph]# kubectl describe pvc storageclass-pvc01
Name: storageclass-pvc01
Namespace: default
StorageClass: storageclass01
Status: Pending
Volume:
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"storageclass-pvc01","namespace":"default"},"spec":{...
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Mounted By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 9s (x4 over 41s) persistentvolume-controller Failed to provision volume with StorageClass "storageclass01": failed to create rbd image: executable file not found in $PATH, command output:
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cat ceph-secret-rbd-1.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-rbd
type: "kubernetes.io/rbd"
data:
  key: QVFBZkxNRmRwdE9kTVJBQTI0enFGYng1a2FLd0xwdXJvUFYwVWc9PQo=
[root@k8s1 k8s-ceph]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
    key = AQAfLMFdptOdMRAA24zqFbx5kaKwLpuroPV0Ug==
    caps mds = "allow *"
    caps mgr = "allow *"
    caps mon = "allow *"
    caps osd = "allow *"
[root@k8s1 k8s-ceph]# echo "AQAfLMFdptOdMRAA24zqFbx5kaKwLpuroPV0Ug=="|base64
QVFBZkxNRmRwdE9kTVJBQTI0enFGYng1a2FLd0xwdXJvUFYwVWc9PQo=
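Note the trailing Qo= in that output: echo appends a newline, so this base64 value encodes the key plus a trailing \n. When a secret is generated this way, echo -n keeps the encoding exact:

[root@k8s1 k8s-ceph]# echo -n "AQAfLMFdptOdMRAA24zqFbx5kaKwLpuroPV0Ug=="|base64
QVFBZkxNRmRwdE9kTVJBQTI0enFGYng1a2FLd0xwdXJvUFYwVWc9PQ==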
[root@k8s1 k8s-ceph]# vim secret01.yml
"secret01.yml" [New] 8L, 136C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# cat secret01.yml
apiVersion: v1
kind: Secret
metadata:
  name: secret01
type: "kubernetes.io/rbd"
data:
  key: AQAfLMFdptOdMRAA24zqFbx5kaKwLpuroPV0Ug==
[root@k8s1 k8s-ceph]# kubectl apply -f secret01.yml
secret/secret01 created
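One caveat with secret01.yml: the raw key was placed under data:, which kubectl accepts only because a Ceph key happens to be valid base64 text; the value the cluster decodes will not be the original key bytes. The field meant for raw values is stringData, so a safer sketch of the same secret would be:

apiVersion: v1
kind: Secret
metadata:
  name: secret01
type: "kubernetes.io/rbd"
stringData:
  key: AQAfLMFdptOdMRAA24zqFbx5kaKwLpuroPV0Ug==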
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 43h
ceph-secret-rbd kubernetes.io/rbd 1 25h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d20h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 41h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 41h
secret01 kubernetes.io/rbd 1 10s
[root@k8s1 k8s-ceph]# kubectl get secret -n default
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 43h
ceph-secret-rbd kubernetes.io/rbd 1 25h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d20h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 41h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 41h
secret01 kubernetes.io/rbd 1 19s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 19h
storageclass-pvc01 Pending storageclass01 16m
[root@k8s1 k8s-ceph]# kubectl delete pvc storageclass-pvc01
persistentvolumeclaim "storageclass-pvc01" deleted
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 16m
[root@k8s1 k8s-ceph]# kubectl delete storageclass storageclass01
storageclass.storage.k8s.io "storageclass01" deleted
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
[root@k8s1 k8s-ceph]# vim storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: ceph-secret-rbd
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: ceph-secret-rbd
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
"storageclass.yaml" 16L, 388C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
[root@k8s1 k8s-ceph]# vim storageclass-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc01
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: storageclass01
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/storageclass01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc.yaml
persistentvolumeclaim/storageclass-pvc01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 18s
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 19h
storageclass-pvc01 Pending storageclass01 14s
[root@k8s1 k8s-ceph]# kubectl describe pvc storageclass-pvc01
Name: storageclass-pvc01
Namespace: default
StorageClass: storageclass01
Status: Pending
Volume:
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"storageclass-pvc01","namespace":"default"},"spec":{...
volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/rbd
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Mounted By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 7s (x3 over 32s) persistentvolume-controller Failed to provision volume with StorageClass "storageclass01": failed to create rbd image: executable file not found in $PATH, command output:
[root@k8s1 k8s-ceph]# which rbd
/usr/bin/rbd
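So rbd is on the node's PATH, but that PATH belongs to the host shell; the provisioning call still runs inside the containerized kube-controller-manager, which has no rbd binary, and installing ceph-common on the node therefore changes nothing. The practical fix is to move provisioning out of the controller-manager and run the external RBD provisioner from kubernetes-incubator/external-storage: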
[root@k8s1 k8s-ceph]# git clone https://github.com/kubernetes-incubator/external-storage.git
Cloning into 'external-storage'...
remote: Enumerating objects: 36, done.
remote: Counting objects: 100% (36/36), done.
remote: Compressing objects: 100% (35/35), done.
remote: Total 64165 (delta 11), reused 4 (delta 1), pack-reused 64129
Receiving objects: 100% (64165/64165), 113.76 MiB | 38.00 KiB/s, done.
Resolving deltas: 100% (29592/29592), done.
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# tree external-storage/ceph/rbd/deploy/
-bash: tree: command not found
[root@k8s1 k8s-ceph]# cd external-storage/
[root@k8s1 external-storage]# ls
aws code-of-conduct.md deploy.sh flex Gopkg.lock hack LICENSE Makefile nfs-client OWNERS RELEASE.md SECURITY_CONTACTS test.sh vendor
ceph CONTRIBUTING.md digitalocean gluster Gopkg.toml iscsi local-volume nfs openebs README.md repo-infra snapshot unittests.sh
[root@k8s1 external-storage]# cd ceph/
[root@k8s1 ceph]# ls
cephfs rbd
[root@k8s1 ceph]# cd rbd
[root@k8s1 rbd]# ls
CHANGELOG.md cmd deploy Dockerfile Dockerfile.release examples local-start.sh Makefile OWNERS pkg README.md
[root@k8s1 rbd]# cd deploy/
[root@k8s1 deploy]# ls
non-rbac rbac README.md
[root@k8s1 deploy]# cd rbac
[root@k8s1 rbac]# ls
clusterrolebinding.yaml clusterrole.yaml deployment.yaml rolebinding.yaml role.yaml serviceaccount.yaml
[root@k8s1 rbac]# cat clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@k8s1 rbac]# ls
clusterrolebinding.yaml clusterrole.yaml deployment.yaml rolebinding.yaml role.yaml serviceaccount.yaml
[root@k8s1 rbac]# cat clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
[root@k8s1 rbac]# ls
clusterrolebinding.yaml clusterrole.yaml deployment.yaml rolebinding.yaml role.yaml serviceaccount.yaml
[root@k8s1 rbac]# cd deployment.yaml
-bash: cd: deployment.yaml: Not a directory
[root@k8s1 rbac]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: "quay.io/external_storage/rbd-provisioner:latest"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccount: rbd-provisioner
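Note the PROVISIONER_NAME environment variable: this deployment registers itself as ceph.com/rbd, so a StorageClass must declare provisioner: ceph.com/rbd (not kubernetes.io/rbd) for this pod to serve its claims.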
[root@k8s1 rbac]# cd ..
[root@k8s1 deploy]# ls
non-rbac rbac README.md
[root@k8s1 deploy]# kubectl apply -f rbac/
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created
[root@k8s1 deploy]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 141m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 141m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 141m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 32 141m
rbd-provisioner-75b85f85bd-rtx6m 0/1 ContainerCreating 0 12s
[root@k8s1 deploy]# kubectl describe pod rbd-provisioner-75b85f85bd-rtx6m
Name: rbd-provisioner-75b85f85bd-rtx6m
Namespace: default
Priority: 0
Node: k8s2/10.83.35.71
Start Time: Fri, 15 Nov 2019 13:32:15 +0800
Labels: app=rbd-provisioner
pod-template-hash=75b85f85bd
Annotations: cni.projectcalico.org/podIP: 192.168.109.91/32
Status: Pending
IP:
IPs: <none>
Controlled By: ReplicaSet/rbd-provisioner-75b85f85bd
Containers:
rbd-provisioner:
Container ID:
Image: quay.io/external_storage/rbd-provisioner:latest
Image ID:
Port: <none>
Host Port: <none>
State: Waiting
Reason: ContainerCreating
Ready: False
Restart Count: 0
Environment:
PROVISIONER_NAME: ceph.com/rbd
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from rbd-provisioner-token-p7l7d (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
rbd-provisioner-token-p7l7d:
Type: Secret (a volume populated by a Secret)
SecretName: rbd-provisioner-token-p7l7d
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled <unknown> default-scheduler Successfully assigned default/rbd-provisioner-75b85f85bd-rtx6m to k8s2
Normal Pulling 46s kubelet, k8s2 Pulling image "quay.io/external_storage/rbd-provisioner:latest"
[root@k8s1 deploy]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 142m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 142m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 142m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 32 142m
rbd-provisioner-75b85f85bd-rtx6m 0/1 ContainerCreating 0 86s
[root@k8s1 deploy]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 143m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 143m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 143m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 32 143m
rbd-provisioner-75b85f85bd-rtx6m 0/1 ContainerCreating 0 2m20s
[root@k8s1 deploy]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 21h
storageclass-pvc01 Pending storageclass01 112m
[root@k8s1 deploy]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 1Gi RWO Recycle Bound default/pvc01 21h
[root@k8s1 deploy]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 147m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 147m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 147m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 33 147m
rbd-provisioner-75b85f85bd-rtx6m 0/1 ImagePullBackOff 0 6m55s
[root@k8s1 deploy]# kubectl describe pod rbd-provisioner-75b85f85bd-rtx6m
Name: rbd-provisioner-75b85f85bd-rtx6m
Namespace: default
Priority: 0
Node: k8s2/10.83.35.71
Start Time: Fri, 15 Nov 2019 13:32:15 +0800
Labels: app=rbd-provisioner
pod-template-hash=75b85f85bd
Annotations: cni.projectcalico.org/podIP: 192.168.109.93/32
Status: Pending
IP: 192.168.109.93
IPs:
IP: 192.168.109.93
Controlled By: ReplicaSet/rbd-provisioner-75b85f85bd
Containers:
rbd-provisioner:
Container ID:
Image: quay.io/external_storage/rbd-provisioner:latest
Image ID:
Port: <none>
Host Port: <none>
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
PROVISIONER_NAME: ceph.com/rbd
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from rbd-provisioner-token-p7l7d (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
rbd-provisioner-token-p7l7d:
Type: Secret (a volume populated by a Secret)
SecretName: rbd-provisioner-token-p7l7d
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled <unknown> default-scheduler Successfully assigned default/rbd-provisioner-75b85f85bd-rtx6m to k8s2
Normal Pulling 6m58s kubelet, k8s2 Pulling image "quay.io/external_storage/rbd-provisioner:latest"
Warning Failed 14s kubelet, k8s2 Failed to pull image "quay.io/external_storage/rbd-provisioner:latest": rpc error: code = Unknown desc = context canceled
Warning Failed 14s kubelet, k8s2 Error: ErrImagePull
Normal SandboxChanged 12s (x2 over 14s) kubelet, k8s2 Pod sandbox changed, it will be killed and re-created.
Normal BackOff 10s (x4 over 13s) kubelet, k8s2 Back-off pulling image "quay.io/external_storage/rbd-provisioner:latest"
Warning Failed 10s (x4 over 13s) kubelet, k8s2 Error: ImagePullBackOff
[root@k8s1 deploy]# docker pull quay.io/external_storage/rbd-provisioner:latest
latest: Pulling from external_storage/rbd-provisioner
Digest: sha256:94fd36b8625141b62ff1addfa914d45f7b39619e55891bad0294263ecd2ce09a
Status: Downloaded newer image for quay.io/external_storage/rbd-provisioner:latest
quay.io/external_storage/rbd-provisioner:latest
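Pulling on k8s1 only seeds the master's local image cache; the pod is scheduled on k8s2, so what actually unblocks it is the kubelet's own pull on that node completing, as the "Successfully pulled" event below confirms. To pre-seed the right node, the pull has to run there, for example (assuming ssh access to the node):

[root@k8s1 deploy]# ssh k8s2 docker pull quay.io/external_storage/rbd-provisioner:latest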
[root@k8s1 deploy]#
[root@k8s1 deploy]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 157m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 157m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 157m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 35 157m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 16m
[root@k8s1 deploy]#
[root@k8s1 deploy]# kubectl describe pod rbd-provisioner-75b85f85bd-rtx6m
Name: rbd-provisioner-75b85f85bd-rtx6m
Namespace: default
Priority: 0
Node: k8s2/10.83.35.71
Start Time: Fri, 15 Nov 2019 13:32:15 +0800
Labels: app=rbd-provisioner
pod-template-hash=75b85f85bd
Annotations: cni.projectcalico.org/podIP: 192.168.109.93/32
Status: Running
IP: 192.168.109.93
IPs:
IP: 192.168.109.93
Controlled By: ReplicaSet/rbd-provisioner-75b85f85bd
Containers:
rbd-provisioner:
Container ID: docker://a4ce6488bb446001ef5a118f8fe9fc8a36f922fa97a5220d1f95045d634e3e25
Image: quay.io/external_storage/rbd-provisioner:latest
Image ID: docker-pullable://quay.io/external_storage/rbd-provisioner@sha256:94fd36b8625141b62ff1addfa914d45f7b39619e55891bad0294263ecd2ce09a
Port: <none>
Host Port: <none>
State: Running
Started: Fri, 15 Nov 2019 13:46:10 +0800
Ready: True
Restart Count: 0
Environment:
PROVISIONER_NAME: ceph.com/rbd
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from rbd-provisioner-token-p7l7d (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
rbd-provisioner-token-p7l7d:
Type: Secret (a volume populated by a Secret)
SecretName: rbd-provisioner-token-p7l7d
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled <unknown> default-scheduler Successfully assigned default/rbd-provisioner-75b85f85bd-rtx6m to k8s2
Warning Failed 10m kubelet, k8s2 Failed to pull image "quay.io/external_storage/rbd-provisioner:latest": rpc error: code = Unknown desc = context canceled
Warning Failed 10m kubelet, k8s2 Error: ErrImagePull
Normal SandboxChanged 10m (x2 over 10m) kubelet, k8s2 Pod sandbox changed, it will be killed and re-created.
Normal BackOff 10m (x4 over 10m) kubelet, k8s2 Back-off pulling image "quay.io/external_storage/rbd-provisioner:latest"
Warning Failed 10m (x4 over 10m) kubelet, k8s2 Error: ImagePullBackOff
Normal Pulling 10m (x2 over 17m) kubelet, k8s2 Pulling image "quay.io/external_storage/rbd-provisioner:latest"
Normal Pulled 3m24s kubelet, k8s2 Successfully pulled image "quay.io/external_storage/rbd-provisioner:latest"
Normal Created 3m20s kubelet, k8s2 Created container rbd-provisioner
Normal Started 3m20s kubelet, k8s2 Started container rbd-provisioner
[root@k8s1 deploy]#
[root@k8s1 deploy]#
[root@k8s1 k8s-ceph]# vim storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: secret01
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: secret01
  fsType: ext4
  imageFormat: "2"
"storageclass.yaml" 16L, 383C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# cat storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: ceph.com/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: secret01
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: secret01
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
[root@k8s1 k8s-ceph]# kubectl get secrt
error: the server doesn't have a resource type "secrt"
[root@k8s1 k8s-ceph]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 45h
ceph-secret-rbd kubernetes.io/rbd 1 27h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d22h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 43h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 43h
rbd-provisioner-token-p7l7d kubernetes.io/service-account-token 3 24m
secret01 kubernetes.io/rbd 1 136m
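For reference, a kubernetes.io/rbd secret such as secret01 is typically created from the raw Ceph key; a sketch, assuming the client.admin key is the one being stored (adminId/userId are admin in the class above):

kubectl create secret generic secret01 --type="kubernetes.io/rbd" \
  --from-literal=key="$(ceph auth get-key client.admin)"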
Next the class is pointed at ceph-secret-rbd for both the admin and user secrets:
[root@k8s1 k8s-ceph]# vim storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: ceph.com/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: ceph-secret-rbd
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: ceph-secret-rbd
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
"storageclass.yaml" 16L, 397C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# cat storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: ceph.com/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: ceph-secret-rbd
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: ceph-secret-rbd
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
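A quick sanity check that the secret the class now references exists and carries a key (base64 -d undoes the Secret encoding):

kubectl get secret ceph-secret-rbd -o jsonpath='{.data.key}' | base64 -d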
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 21h
storageclass-pvc01 Pending storageclass01 135m
A StorageClass's provisioner and parameters are immutable after creation, so the stuck claim and the old class are deleted and recreated rather than edited in place:
[root@k8s1 k8s-ceph]# kubectl delete pvc storageclass-pvc01
persistentvolumeclaim "storageclass-pvc01" deleted
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 kubernetes.io/rbd 135m
[root@k8s1 k8s-ceph]# kubectl delete storageclass storageclass01
storageclass.storage.k8s.io "storageclass01" deleted
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 166m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 166m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 166m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 37 166m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 25m
[root@k8s1 k8s-ceph]# kubectl get seccrt
error: the server doesn't have a resource type "seccrt"
[root@k8s1 k8s-ceph]# kubectl get secret
NAME TYPE DATA AGE
ceph-secret-admin kubernetes.io/rbd 1 45h
ceph-secret-rbd kubernetes.io/rbd 1 28h
default-token-c9rjq kubernetes.io/service-account-token 3 8d
kubernetes-dashboard-certs Opaque 2 7d22h
prometheus-operator-token-hzhb4 kubernetes.io/service-account-token 3 43h
prometheus-token-4dnn6 kubernetes.io/service-account-token 3 43h
rbd-provisioner-token-p7l7d kubernetes.io/service-account-token 3 26m
secret01 kubernetes.io/rbd 1 139m
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cat storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: storageclass01
provisioner: ceph.com/rbd
parameters:
  monitors: 10.83.35.79:6789,10.83.35.80:6789,10.83.35.81:6789
  adminId: admin
  adminSecretName: ceph-secret-rbd
  adminSecretNamespace: default
  pool: kubernets
  userId: admin
  userSecretName: ceph-secret-rbd
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/storageclass01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get storageclass
NAME PROVISIONER AGE
storageclass01 ceph.com/rbd 11s
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cat storageclass-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc01
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: storageclass01
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc.yaml
persistentvolumeclaim/storageclass-pvc01 created
[root@k8s1 k8s-ceph]#
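Binding takes a few seconds while the provisioner creates the image; it can be watched rather than polled:

kubectl get pvc storageclass-pvc01 -w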
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 21h
storageclass-pvc01 Bound pvc-44d5b384-434d-4d55-8abe-61bbb51df072 1Gi RWO storageclass01 7s
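The bound PV records exactly which RBD image backs the claim; for an rbd-backed PV the pool and image sit under .spec.rbd:

kubectl get pv pvc-44d5b384-434d-4d55-8abe-61bbb51df072 \
  -o jsonpath='{.spec.rbd.pool}/{.spec.rbd.image}{"\n"}'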
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cat storageclass-pvc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: test-pod
  name: storageclass-pvc01-pod01
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: storageclass-pvc01
      mountPath: /mnt/foo
      readOnly: false
  volumes:
  - name: storageclass-pvc01
    persistentVolumeClaim:
      claimName: storageclass-pvc01
The pod is then edited to mount the claim at /mnt/ceph-dyn-rbd-pvc/nginx, with the container, volume, and label renamed to match (the describe output below reflects these values):
[root@k8s1 k8s-ceph]# vim storageclass-pvc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: rbd-dyn-pvc-pod
  name: storageclass-pvc01-pod01
spec:
  containers:
  - name: storageclass-pvc01-pod01
    image: nginx
    volumeMounts:
    - name: storageclass-pvc01-pod01
      mountPath: /mnt/ceph-dyn-rbd-pvc/nginx
      readOnly: false
  volumes:
  - name: storageclass-pvc01-pod01
    persistentVolumeClaim:
      claimName: storageclass-pvc01
"storageclass-pvc-pod.yaml" 18L, 407C written
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc-pod.yaml
pod/storageclass-pvc01-pod01 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 174m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 174m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 174m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 38 174m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 33m
storageclass-pvc01-pod01 0/1 ContainerCreating 0 7s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 174m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 174m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 174m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 38 174m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 33m
storageclass-pvc01-pod01 1/1 Running 0 34s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 174m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 174m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 174m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 38 174m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 34m
storageclass-pvc01-pod01 1/1 Running 0 53s
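Since the pod landed on k8s2, that is where the kubelet maps the image with the RBD client (the nodes need ceph-common installed for this anyway). Run on k8s2, this should list /dev/rbd0:

rbd showmapped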
[root@k8s1 k8s-ceph]# kubectl describe pod storageclass-pvc01-pod01
Name: storageclass-pvc01-pod01
Namespace: default
Priority: 0
Node: k8s2/10.83.35.71
Start Time: Fri, 15 Nov 2019 14:05:23 +0800
Labels: test=rbd-dyn-pvc-pod
Annotations: cni.projectcalico.org/podIP: 192.168.109.94/32
kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"test":"rbd-dyn-pvc-pod"},"name":"storageclass-pvc01-pod01","namesp...
Status: Running
IP: 192.168.109.94
IPs:
IP: 192.168.109.94
Containers:
storageclass-pvc01-pod01:
Container ID: docker://035da499a01f20f17311051a96305112c9dd2fbe5a7350751a804f290dd023e2
Image: nginx
Image ID: docker-pullable://nginx@sha256:922c815aa4df050d4df476e92daed4231f466acc8ee90e0e774951b0fd7195a4
Port: <none>
Host Port: <none>
State: Running
Started: Fri, 15 Nov 2019 14:05:47 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/mnt/ceph-dyn-rbd-pvc/nginx from storageclass-pvc01-pod01 (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-c9rjq (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
storageclass-pvc01-pod01:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: storageclass-pvc01
ReadOnly: false
default-token-c9rjq:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-c9rjq
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled <unknown> default-scheduler Successfully assigned default/storageclass-pvc01-pod01 to k8s2
Normal SuccessfulAttachVolume 74s attachdetach-controller AttachVolume.Attach succeeded for volume "pvc-44d5b384-434d-4d55-8abe-61bbb51df072"
Normal Pulling 56s kubelet, k8s2 Pulling image "nginx"
Normal Pulled 50s kubelet, k8s2 Successfully pulled image "nginx"
Normal Created 50s kubelet, k8s2 Created container storageclass-pvc01-pod01
Normal Started 50s kubelet, k8s2 Started container storageclass-pvc01-pod01
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pods
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 175m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 175m
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h
nginx-86c57db685-w8pq9 1/1 Running 0 175m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 38 175m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 34m
storageclass-pvc01-pod01 1/1 Running 0 92s
[root@k8s1 k8s-ceph]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h 192.168.219.4 k8s3 <none> <none>
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h 192.168.219.63 k8s3 <none> <none>
httpd-app-c77bb8b47-6q549 1/1 Running 0 176m 192.168.219.7 k8s3 <none> <none>
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h 192.168.219.3 k8s3 <none> <none>
nginx-86c57db685-n8csl 1/1 Running 0 176m 192.168.219.8 k8s3 <none> <none>
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h 192.168.219.2 k8s3 <none> <none>
nginx-86c57db685-w8pq9 1/1 Running 0 176m 192.168.219.5 k8s3 <none> <none>
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 39 176m 192.168.219.11 k8s3 <none> <none>
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 35m 192.168.109.93 k8s2 <none> <none>
storageclass-pvc01-pod01 1/1 Running 0 2m32s 192.168.109.94 k8s2 <none> <none>
[root@k8s1 k8s-ceph]# docker exec -it storageclass-pvc01-pod01 /bin/bash
Error: No such container: storageclass-pvc01-pod01
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d20h 192.168.219.4 k8s3 <none> <none>
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h 192.168.219.63 k8s3 <none> <none>
httpd-app-c77bb8b47-6q549 1/1 Running 0 3h 192.168.219.7 k8s3 <none> <none>
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h 192.168.219.3 k8s3 <none> <none>
nginx-86c57db685-n8csl 1/1 Running 0 3h 192.168.219.8 k8s3 <none> <none>
nginx-86c57db685-tw9x5 1/1 Running 2 2d20h 192.168.219.2 k8s3 <none> <none>
nginx-86c57db685-w8pq9 1/1 Running 0 3h 192.168.219.5 k8s3 <none> <none>
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 39 3h 192.168.219.11 k8s3 <none> <none>
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 39m 192.168.109.93 k8s2 <none> <none>
storageclass-pvc01-pod01 1/1 Running 0 6m49s 192.168.109.94 k8s2 <none> <none>
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# docker exec -it storageclass-pvc01-pod01 /bin/bash
Error: No such container: storageclass-pvc01-pod01
docker exec fails for two reasons: this shell is on k8s1 while the pod runs on k8s2, and Docker names containers k8s_<container>_<pod>_<namespace>_..., not after the pod. kubectl exec resolves both:
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl exec -it storageclass-pvc01-pod01 /bin/bash
root@storageclass-pvc01-pod01:/# df -h
Filesystem Size Used Avail Use% Mounted on
overlay 49G 4.9G 44G 10% /
tmpfs 64M 0 64M 0% /dev
tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/mapper/VG1-root 49G 4.9G 44G 10% /etc/hosts
shm 64M 0 64M 0% /dev/shm
/dev/rbd0 976M 2.6M 958M 1% /mnt/ceph-dyn-rbd-pvc/nginx
tmpfs 1.9G 12K 1.9G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 1.9G 0 1.9G 0% /proc/acpi
tmpfs 1.9G 0 1.9G 0% /proc/scsi
tmpfs 1.9G 0 1.9G 0% /sys/firmware
root@storageclass-pvc01-pod01:/# cd /mnt/ceph-dyn-rbd-pvc/nginx
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx# ls
lost+found
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx# ls
lost+found
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx# dd if=/dev/zero of=test-rbd-dyn-1 bs=500M count=1
1+0 records in
1+0 records out
524288000 bytes (524 MB, 500 MiB) copied, 2.10667 s, 249 MB/s
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx#
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx#
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx#
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx#
root@storageclass-pvc01-pod01:/mnt/ceph-dyn-rbd-pvc/nginx# exit
exit
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
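The 500 MB file written inside the pod can also be seen from the Ceph side; rbd du reports provisioned versus actually used space per image (it can be slow on images without the fast-diff feature, as here):

rbd du -p kubernets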
[root@k8s1 k8s-ceph]# rbd ls
foobar
[root@k8s1 k8s-ceph]# ceph df
RAW STORAGE:
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 51 GiB 40 GiB 7.9 GiB 11 GiB 22.26
TOTAL 51 GiB 40 GiB 7.9 GiB 11 GiB 22.26
POOLS:
POOL ID STORED OBJECTS USED %USED MAX AVAIL
.rgw.root 1 1.2 KiB 4 768 KiB 0 12 GiB
default.rgw.control 2 0 B 8 0 B 0 12 GiB
default.rgw.meta 3 2.8 KiB 12 2.1 MiB 0 12 GiB
default.rgw.log 4 0 B 207 0 B 0 12 GiB
default.rgw.buckets.index 5 1.9 KiB 4 1.9 KiB 0 12 GiB
default.rgw.buckets.data 6 2.1 GiB 540 6.3 GiB 14.48 12 GiB
liping 7 0 B 0 0 B 0 12 GiB
kubernets 8 533 MiB 148 1.6 GiB 4.06 12 GiB
rbd 9 10 MiB 17 33 MiB 0.09 12 GiB
[root@k8s1 k8s-ceph]# rbd ls -p kubernets
kubernetes-dynamic-pvc-4b688ce9-076d-11ea-a8cc-aa7bd6ce6316
pv
[root@k8s1 k8s-ceph]# rbd info kubernets/kubernetes-dynamic-pvc-4b688ce9-076d-11ea-a8cc-aa7bd6ce6316
rbd image 'kubernetes-dynamic-pvc-4b688ce9-076d-11ea-a8cc-aa7bd6ce6316':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 8ded6b8b4567
block_name_prefix: rbd_data.8ded6b8b4567
format: 2
features: layering
op_features:
flags:
create_timestamp: Fri Nov 15 14:00:58 2019
access_timestamp: Fri Nov 15 14:00:58 2019
modify_timestamp: Fri Nov 15 14:00:58 2019
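To correlate every PV with its backing image in one shot, custom-columns works well (paths per the rbd PV spec):

kubectl get pv -o custom-columns=NAME:.metadata.name,POOL:.spec.rbd.pool,IMAGE:.spec.rbd.image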
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-pod.yaml storageclass.yaml
[root@k8s1 k8s-ceph]# cp storageclass-pvc.yaml storageclass-pvc-5G.yaml
The copy is edited into a separate 5 Gi claim:
[root@k8s1 k8s-ceph]# vim storageclass-pvc-5G.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc02-5G
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: storageclass01
"storageclass-pvc-5G.yaml" 12L, 230C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# cat storageclass-pvc-5G.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc02-5G
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: storageclass01
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc-5G.yaml
The PersistentVolumeClaim "storageclass-pvc02-5G" is invalid: metadata.name: Invalid value: "storageclass-pvc02-5G": a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')
Kubernetes object names must be lower-case DNS-1123 subdomains, so the capital "G" in storageclass-pvc02-5G is rejected; the name is changed to storageclass-pvc02-5g:
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# vim storageclass-pvc-5G.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc02-5g
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: storageclass01
"storageclass-pvc-5G.yaml" 12L, 230C written
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc-5G.yaml
persistentvolumeclaim/storageclass-pvc02-5g created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# cat storageclass-pvc-5G.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: storageclass-pvc02-5g
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: storageclass01
[root@k8s1 k8s-ceph]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc01 Bound pv01 1Gi RWO 21h
storageclass-pvc01 Bound pvc-44d5b384-434d-4d55-8abe-61bbb51df072 1Gi RWO storageclass01 20m
storageclass-pvc02-5g Bound pvc-0b3e7dc2-a3a3-44e3-9b82-60ec4e789b6e 5Gi RWO storageclass01 49s
[root@k8s1 k8s-ceph]# ls
ceph-rbd-pod-4.yaml ceph-sc-rbd-2.yaml ceph-secret.yaml pvc.yaml rbdpd_new.yaml secret01.yml storageclass-pvc-pod.yaml storageclass.yaml
ceph-rbd-pvc-3.yaml ceph-secret-rbd-1.yaml external-storage pv.yaml rbdpd.yaml storageclass-pvc-5G.yaml storageclass-pvc.yaml
[root@k8s1 k8s-ceph]# cp storageclass-pvc-pod.yaml storageclass-pvc-pod02.yaml
The copied pod manifest is edited for a second pod, pod02, with its own mount point:
[root@k8s1 k8s-ceph]# vim storageclass-pvc-pod02.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: rbd-dyn-pvc-pod02
  name: storageclass-pvc01-pod02
spec:
  containers:
  - name: storageclass-pvc01-pod02
    image: nginx
    volumeMounts:
    - name: storageclass-pvc01-pod02
      mountPath: /mnt/ceph-dyn-rbd-pvc02/nginx
      readOnly: false
  volumes:
  - name: storageclass-pvc01-pod02
    persistentVolumeClaim:
      claimName: storageclass-pvc01
"storageclass-pvc-pod02.yaml" 18L, 411C written
[root@k8s1 k8s-ceph]# cat storageclass-pvc-pod02.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: rbd-dyn-pvc-pod02
  name: storageclass-pvc01-pod02
spec:
  containers:
  - name: storageclass-pvc01-pod02
    image: nginx
    volumeMounts:
    - name: storageclass-pvc01-pod02
      mountPath: /mnt/ceph-dyn-rbd-pvc02/nginx
      readOnly: false
  volumes:
  - name: storageclass-pvc01-pod02
    persistentVolumeClaim:
      claimName: storageclass-pvc01
Note that claimName still points at storageclass-pvc01, the 1 GiB claim, rather than the new 5 Gi one — this matters shortly.
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl apply -f storageclass-pvc-pod02.yaml
pod/storageclass-pvc01-pod02 created
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d21h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 3h12m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 3h12m
nginx-86c57db685-tw9x5 1/1 Running 2 2d21h
nginx-86c57db685-w8pq9 1/1 Running 0 3h12m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 42 3h12m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 51m
storageclass-pvc01-pod01 1/1 Running 0 18m
storageclass-pvc01-pod02 0/1 ContainerCreating 0 6s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d21h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 3h13m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 3h13m
nginx-86c57db685-tw9x5 1/1 Running 2 2d21h
nginx-86c57db685-w8pq9 1/1 Running 0 3h13m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 42 3h13m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 52m
storageclass-pvc01-pod01 1/1 Running 0 19m
storageclass-pvc01-pod02 1/1 Running 0 18s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
bqjr01-5cc79db9bf-jqxjl 1/1 Running 2 2d21h
bqjr02-57659797f5-rzpt2 1/1 Running 2 2d20h
httpd-app-c77bb8b47-6q549 1/1 Running 0 3h13m
httpd-app-c77bb8b47-hfjwj 1/1 Running 2 2d20h
nginx-86c57db685-n8csl 1/1 Running 0 3h13m
nginx-86c57db685-tw9x5 1/1 Running 2 2d21h
nginx-86c57db685-w8pq9 1/1 Running 0 3h13m
prometheus-operator-99dccdc56-wwjt8 0/1 CrashLoopBackOff 42 3h13m
rbd-provisioner-75b85f85bd-rtx6m 1/1 Running 0 52m
storageclass-pvc01-pod01 1/1 Running 0 19m
storageclass-pvc01-pod02 1/1 Running 0 28s
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]#
[root@k8s1 k8s-ceph]# kubectl exec -it storageclass-pvc01-pod02 /bin/bash
root@storageclass-pvc01-pod02:/#
root@storageclass-pvc01-pod02:/# df -h
Filesystem Size Used Avail Use% Mounted on
overlay 49G 4.9G 44G 10% /
tmpfs 64M 0 64M 0% /dev
tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/mapper/VG1-root 49G 4.9G 44G 10% /etc/hosts
shm 64M 0 64M 0% /dev/shm
/dev/rbd0 976M 503M 458M 53% /mnt/ceph-dyn-rbd-pvc02/nginx
tmpfs 1.9G 12K 1.9G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 1.9G 0 1.9G 0% /proc/acpi
tmpfs 1.9G 0 1.9G 0% /proc/scsi
tmpfs 1.9G 0 1.9G 0% /sys/firmware
root@storageclass-pvc01-pod02:/#
root@storageclass-pvc01-pod02:/#
root@storageclass-pvc01-pod02:/# cd /mnt/ceph-dyn-rbd-pvc02/nginx
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx#
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx#
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx# dd if=/dev/zero of=test-rbd-dyn-2 bs=500M count=1
dd: error writing 'test-rbd-dyn-2': No space left on device
1+0 records in
0+0 records out
479604736 bytes (480 MB, 457 MiB) copied, 1.98192 s, 242 MB/s
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx#
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx#
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx#
root@storageclass-pvc01-pod02:/mnt/ceph-dyn-rbd-pvc02/nginx# exit
exit
command terminated with exit code 1
The failure is expected: pod02 mounts the same 1 GiB claim (storageclass-pvc01) that already holds pod01's 500 MB test file, so the second 500 MB write exhausts the image — /dev/rbd0 here is the 1 GiB volume, not the new 5 Gi one. Pointing claimName at storageclass-pvc02-5g would give pod02 the larger volume.
[root@k8s1 k8s-ceph]# ceph df
RAW STORAGE:
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 51 GiB 38 GiB 9.2 GiB 13 GiB 24.84
TOTAL 51 GiB 38 GiB 9.2 GiB 13 GiB 24.84
POOLS:
POOL ID STORED OBJECTS USED %USED MAX AVAIL
.rgw.root 1 1.2 KiB 4 768 KiB 0 12 GiB
default.rgw.control 2 0 B 8 0 B 0 12 GiB
default.rgw.meta 3 2.8 KiB 12 2.1 MiB 0 12 GiB
default.rgw.log 4 0 B 207 0 B 0 12 GiB
default.rgw.buckets.index 5 1.9 KiB 4 1.9 KiB 0 12 GiB
default.rgw.buckets.data 6 2.1 GiB 540 6.3 GiB 14.94 12 GiB
liping 7 0 B 0 0 B 0 12 GiB
kubernets 8 977 MiB 260 2.9 GiB 7.44 12 GiB
rbd 9 10 MiB 17 33 MiB 0.09 12 GiB
[root@k8s1 k8s-ceph]# rbd ls -p kubernets
kubernetes-dynamic-pvc-17b7cefa-0770-11ea-a8cc-aa7bd6ce6316
kubernetes-dynamic-pvc-4b688ce9-076d-11ea-a8cc-aa7bd6ce6316
pv
[root@k8s1 k8s-ceph]# rbd info kubernets/kubernetes-dynamic-pvc-17b7cefa-0770-11ea-a8cc-aa7bd6ce6316
rbd image 'kubernetes-dynamic-pvc-17b7cefa-0770-11ea-a8cc-aa7bd6ce6316':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 41a26b8b4567
block_name_prefix: rbd_data.41a26b8b4567
format: 2
features: layering
op_features:
flags:
create_timestamp: Fri Nov 15 14:21:00 2019
access_timestamp: Fri Nov 15 14:21:00 2019
modify_timestamp: Fri Nov 15 14:21:00 2019
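When the test objects are no longer needed, deleting the claims also removes the backing RBD images, since dynamically provisioned PVs default to reclaimPolicy Delete:

kubectl delete pod storageclass-pvc01-pod01 storageclass-pvc01-pod02
kubectl delete pvc storageclass-pvc01 storageclass-pvc02-5g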
With the StorageClass switched to the external ceph.com/rbd provisioner, dynamic provisioning works end to end: each PVC gets its own RBD image in the kubernets pool, sized as requested, and pods mount them through the generated PVs.