Kubernetes使用Ceph静态卷部署应用
标签:ceph、kubernetes
对于有状态服务,存储是一个至关重要的问题。k8s 提供了非常丰富的组件来支持存储,这里大致列一下:
前提条件:一套可用的 Kubernetes 集群;一套可用的 Ceph 集群(Ceph monitor 节点:lab1、lab2、lab3)。
# k8s
192.168.105.92 lab1 # master1
192.168.105.93 lab2 # master2
192.168.105.94 lab3 # master3
192.168.105.95 lab4 # node4
192.168.105.96 lab5 # node5
192.168.105.97 lab6 # node6
192.168.105.98 lab7 # node7
在每个 k8s node 中安装 ceph-common:`yum install -y ceph-common`
# Export the Ceph admin client key into a plain file so it can be wrapped in a Kubernetes secret.
ceph auth get-key client.admin > /tmp/secret
# NOTE(review): this namespace is created but never used below — the secret and all
# workloads land in "default" (see the later kubectl output). Confirm whether -n cephfs
# was intended here.
kubectl create namespace cephfs
# Create the secret holding the Ceph admin key (in the "default" namespace);
# the PV definitions below reference it by name as "ceph-admin-secret".
kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret
vim cephfs-pv.yaml
# Static CephFS PersistentVolume; the secretRef must name the secret created above.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-pv1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - 192.168.105.92:6789
      - 192.168.105.93:6789
      - 192.168.105.94:6789
    user: admin
    secretRef:
      name: ceph-admin-secret
    readOnly: false
  # NOTE(review): "Recycle" is deprecated and only implemented for NFS and hostPath;
  # for CephFS volumes use "Retain" (or "Delete" with a dynamic provisioner).
  persistentVolumeReclaimPolicy: Retain
vim cephfs-pvc.yaml
# PVC that binds to cephfs-pv1 (matching accessModes and a request <= PV capacity).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pv-claim1
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
vim cephfs-nginx.yaml
# nginx Deployment that mounts the CephFS-backed PVC as its web root.
# NOTE(review): extensions/v1beta1 Deployments are deprecated (removed in k8s 1.16);
# apps/v1 additionally requires an explicit spec.selector matching the pod labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-cephfs
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
          volumeMounts:
            - name: ceph-cephfs-volume
              mountPath: "/usr/share/nginx/html"
      volumes:
        - name: ceph-cephfs-volume
          persistentVolumeClaim:
            claimName: cephfs-pv-claim1
# Create the PV, then the PVC (which binds to it), then the Deployment that mounts it.
kubectl create -f cephfs-pv.yaml
kubectl create -f cephfs-pvc.yaml
kubectl create -f cephfs-nginx.yaml
验证结果:
[root@lab1 cephfs]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
cephfs-pv1 1Gi RWX Recycle Bound default/cephfs-pv-claim1 1h
[root@lab1 cephfs]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pv-claim1 Bound cephfs-pv1 1Gi RWX 1h
test-pvc Bound test-pv 1Gi RWO 32m
[root@lab1 cephfs]# kubectl get pod |grep nginx-cephfs
nginx-cephfs-7777495b9b-29vtw 1/1 Running 0 13m
[root@lab1 cephfs]# kubectl exec -it nginx-cephfs-7777495b9b-29vtw -- df -h|grep nginx
192.168.105.92:6789:/ 1.6T 4.1G 1.6T 1% /usr/share/nginx/html
# NOTE(review): the next three commands repeat the CephFS section verbatim; if the
# namespace and secret already exist, "kubectl create" fails with AlreadyExists.
# They can be skipped when both sections are run on the same cluster.
ceph auth get-key client.admin > /tmp/secret
kubectl create namespace cephfs
kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret
# Create an RBD pool named "kube" with 128 placement groups (pg_num and pgp_num).
ceph osd pool create kube 128 128
# Create a 10G image; only "layering" is enabled because the kernel rbd client
# used by kubelet does not support the newer image features.
rbd create kube/foo -s 10G --image-feature layering
vim rbd-pv.yaml
# Static RBD PersistentVolume backed by the image kube/foo created above.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rbd-pv1
spec:
  capacity:
    # NOTE(review): the mounted size is determined by the rbd image (created with
    # -s 10G), so declare the matching capacity here — the original said 1Gi.
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 192.168.105.92:6789
      - 192.168.105.93:6789
      - 192.168.105.94:6789
    pool: kube
    image: foo
    user: admin
    secretRef:
      # Fixed: the original said "ceph-secret", but the secret actually created
      # earlier is named "ceph-admin-secret".
      name: ceph-admin-secret
  # NOTE(review): "Recycle" is deprecated and only implemented for NFS and hostPath;
  # use "Retain" for RBD volumes.
  persistentVolumeReclaimPolicy: Retain
vim rbd-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: rbd-pv-claim1
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
vim rbd-nginx.yaml
# nginx Deployment that mounts the RBD-backed PVC as its web root.
# NOTE(review): extensions/v1beta1 Deployments are deprecated (removed in k8s 1.16);
# apps/v1 additionally requires an explicit spec.selector matching the pod labels.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rbd
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
          volumeMounts:
            - name: ceph-rbd-volume
              mountPath: "/usr/share/nginx/html"
      volumes:
        - name: ceph-rbd-volume
          persistentVolumeClaim:
            claimName: rbd-pv-claim1
# Create the RBD PV, its PVC, and the Deployment that mounts it.
kubectl create -f rbd-pv.yaml
kubectl create -f rbd-pvc.yaml
kubectl create -f rbd-nginx.yaml
验证结果:
[root@lab1 rbd]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
cephfs-pv1 1Gi RWX Recycle Bound default/cephfs-pv-claim1 2h
rbd-pv1 5Gi RWO Recycle Bound default/rbd-pv-claim1 8m
[root@lab1 rbd]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pv-claim1 Bound cephfs-pv1 1Gi RWX 2h
claim2 Pending rbd 2h
claim3 Pending rbd 2h
rbd-pv-claim1 Bound rbd-pv1 5Gi RWO 8m
[root@lab1 rbd]# kubectl exec -it nginx-rbd-6b555f58c9-7k2k9 -- df -h|grep nginx
/dev/rbd0 9.8G 37M 9.7G 1% /usr/share/nginx/html
进入容器使用 dd 测试:`dd if=/dev/zero of=/usr/share/nginx/html/test.data bs=1G count=8 &`,发现容器容易挂。而且经过验证,容器挂载的目录大小取决于 rbd image 的大小。
root@nginx-rbd-6b555f58c9-7k2k9:/usr/share/nginx/html# error: Internal error occurred: error executing command in container: Error response from daemon: Container 12f9c29c03082d27c7ed4327536626189d02be451029f7385765d3c2e1451062 is not running: Exited (0) Less than a second ago
参考资料: [1] https://kubernetes.io/docs/concepts/storage/volumes/ [2] https://kubernetes.io/docs/concepts/storage/persistent-volumes/ [3] https://zhangchenchen.github.io/2017/11/17/kubernetes-integrate-with-ceph/
评论