This repository contains a CSI driver for Gluster. The Container Storage Interface (CSI) is an industry standard for cluster-wide volume plugins; it enables storage providers (SPs) to develop a plugin once and have it work across a number of container orchestration (CO) systems.
This repository contains the source and a Dockerfile for building the GlusterFS CSI driver. The driver is built as a multi-stage container build, which requires a relatively recent version of Docker or Buildah.
Docker packages are available for CentOS, Fedora, and other distributions.
To build, ensure Docker is installed and run:
- Change into the repository directory
[root@localhost]# cd gluster-csi-driver
- Build the glusterfs-csi-driver container
[root@localhost]# ./build.sh
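If the build succeeds, the image should show up in the local image store (the exact repository name and tag depend on the defaults in build.sh):
[root@localhost]# docker images | grep gluster
With Buildah, a build along these lines should work as well (the tag here is only an example):
[root@localhost]# buildah bud -t glusterfs-csi-driver .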
Deploy the gluster virtual block CSI driver:
[root@localhost]# cd examples/kubernetes/gluster-virtblock/
[root@localhost]# kubectl create -f csi-deployment.yaml
service/csi-attacher-glustervirtblockplugin created
statefulset.apps/csi-attacher-glustervirtblockplugin created
daemonset.apps/csi-nodeplugin-glustervirtblockplugin created
service/csi-provisioner-glustervirtblockplugin created
statefulset.apps/csi-provisioner-glustervirtblockplugin created
serviceaccount/glustervirtblock-csi created
clusterrole.rbac.authorization.k8s.io/glustervirtblock-csi created
clusterrolebinding.rbac.authorization.k8s.io/glustervirtblock-csi-role created
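Before creating volumes, it is worth confirming that the driver pods all came up: one attacher pod and one provisioner pod (from the statefulsets), plus one nodeplugin pod per node (from the daemonset). For example:
[root@localhost]# kubectl get pods | grep glustervirtblock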
NOTE: You can skip the separate installation of the Kubernetes cluster, GD2 cluster, and CSI deployment if you use the GCS installation method directly, which brings up the whole deployment in one shot. Refer to the GCS deployment guide for more details.
[root@localhost]# cat storage-class.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glustervirtblock-csi
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: org.gluster.glustervirtblock
[root@localhost]# kubectl create -f storage-class.yaml
storageclass.storage.k8s.io/glustervirtblock-csi created
Verify the gluster virtual block storage class:
[root@localhost]# kubectl get storageclass
NAME PROVISIONER AGE
glusterfs-csi (default) org.gluster.glusterfs 28h
glustervirtblock-csi org.gluster.glustervirtblock 6s
local-storage kubernetes.io/no-provisioner 29h
[root@localhost]# cat pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterblock-csi-pv
spec:
  storageClassName: glustervirtblock-csi
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
[root@localhost]# kubectl create -f pvc.yaml
persistentvolumeclaim/glusterblock-csi-pv created
Validate the gluster virtual block claim creation
[root@localhost]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterblock-csi-pv Bound pvc-1048edfb-1f06-11e9-8b7a-525400491c42 100Mi RWO glustervirtblock-csi 8s
[root@localhost]# kubectl describe pvc
Name: glusterblock-csi-pv
Namespace: default
StorageClass: glustervirtblock-csi
Status: Bound
Volume: pvc-1048edfb-1f06-11e9-8b7a-525400491c42
Labels: <none>
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
storageClassName: glustervirtblock-csi
volume.beta.kubernetes.io/storage-provisioner: org.gluster.glustervirtblock
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 100Mi
Access Modes: RWO
VolumeMode: Filesystem
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ExternalProvisioning 2m34s persistentvolume-controller waiting for a volume to be created, either by external provisioner "org.gluster.glustervirtblock" or manually created by system administrator
Normal Provisioning 2m34s org.gluster.glustervirtblock_csi-provisioner-glustervirtblockplugin-0_494f6242-1ee8-11e9-b6a0-0a580ae9416b External provisioner is provisioning volume for claim "default/glusterblock-csi-pv"
Normal ProvisioningSucceeded 2m34s org.gluster.glustervirtblock_csi-provisioner-glustervirtblockplugin-0_494f6242-1ee8-11e9-b6a0-0a580ae9416b Successfully provisioned volume pvc-1048edfb-1f06-11e9-8b7a-525400491c42
Mounted By: <none>
Verify gluster virtual block PV details:
[root@localhost]# kubectl describe pv
Name: pvc-1048edfb-1f06-11e9-8b7a-525400491c42
Labels: <none>
Annotations: pv.kubernetes.io/provisioned-by: org.gluster.glustervirtblock
Finalizers: [kubernetes.io/pv-protection]
StorageClass: glustervirtblock-csi
Status: Bound
Claim: default/glusterblock-csi-pv
Reclaim Policy: Delete
Access Modes: RWO
VolumeMode: Filesystem
Capacity: 100Mi
Node Affinity: <none>
Message:
Source:
Type: CSI (a Container Storage Interface (CSI) volume source)
Driver: org.gluster.glustervirtblock
VolumeHandle: pvc-1048edfb-1f06-11e9-8b7a-525400491c42
ReadOnly: false
VolumeAttributes: glusterbkpservers=gluster-kube1-0.glusterd2.gcs:gluster-kube2-0.glusterd2.gcs
glusterserver=gluster-kube3-0.glusterd2.gcs
glustervol=block_hosting_volume_ddd7ced7-7766-4797-9214-01fa9587472a
storage.kubernetes.io/csiProvisionerIdentity=1548231861262-8081-org.gluster.glustervirtblock
Events: <none>
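The block volume is carved out of a block-hosting Gluster volume managed by GD2 (the glustervol attribute above). As a sketch of how to double-check this on the Gluster side, assuming the gluster pods run in the gcs namespace (as the .glusterd2.gcs service names suggest) and ship the GD2 CLI:
[root@localhost]# kubectl exec -it gluster-kube1-0 -n gcs -- glustercli volume list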
[root@localhost]# cat app.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: gluster-0
  labels:
    app: gluster
spec:
  containers:
    - name: gluster
      image: redis
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - mountPath: "/mnt/gluster"
          name: glusterblockcsivol
  volumes:
    - name: glusterblockcsivol
      persistentVolumeClaim:
        claimName: glusterblock-csi-pv
[root@localhost]# kubectl create -f app.yaml
pod/gluster-0 created
Validate the app (RWO PVC claim):
[root@localhost]# kubectl get pods
NAME READY STATUS RESTARTS AGE
gluster-0 1/1 Running 0 38s
Log into the app and validate the mount point:
[root@localhost]# kubectl exec -it gluster-0 /bin/bash
[root@gluster-0 ~]# mount | grep gluster
/mnt/blockhostvol/block_hosting_volume_ddd7ced7-7766-4797-9214-01fa9587472a/pvc-1048edfb-1f06-11e9-8b7a-525400491c42 on /mnt/gluster type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
[root@gluster-0 ~]#
Delete the pod and PVC:
[root@localhost]# kubectl delete pod gluster-0
pod "gluster-0" deleted
[root@localhost]# kubectl delete pvc glusterblock-csi-pv
persistentvolumeclaim "glusterblock-csi-pv" deleted
Deploy the GlusterFS CSI driver:
[root@localhost]# kubectl create -f csi-deployment.yaml
service/csi-attacher-glusterfsplugin created
statefulset.apps/csi-attacher-glusterfsplugin created
daemonset.apps/csi-nodeplugin-glusterfsplugin created
service/csi-provisioner-glusterfsplugin created
statefulset.apps/csi-provisioner-glusterfsplugin created
serviceaccount/glusterfs-csi created
clusterrole.rbac.authorization.k8s.io/glusterfs-csi created
clusterrolebinding.rbac.authorization.k8s.io/glusterfs-csi-role created
The following feature gate needs to be enabled in Kubernetes v1.13.1 for volume snapshot support:
--feature-gates=VolumeSnapshotDataSource=true
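How to set the flag depends on how the cluster was deployed. On a kubeadm-style cluster, a minimal sketch (assuming the default static pod manifest location) is to add the flag to the kube-apiserver command list and let the kubelet restart the static pod:
[root@localhost]# vi /etc/kubernetes/manifests/kube-apiserver.yaml
(add the following line under the kube-apiserver command:)
    - --feature-gates=VolumeSnapshotDataSource=true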
NOTE: You can skip the separate installation of the Kubernetes cluster, GD2 cluster, and CSI deployment if you use the GCS installation method directly, which brings up the whole deployment in one shot. Refer to the GCS deployment guide for more details.
[root@localhost]# cat storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-csi
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: org.gluster.glusterfs
[root@localhost]# kubectl create -f storage-class.yaml
storageclass.storage.k8s.io/glusterfs-csi created
Verify the glusterfs storage class (RWX):
[root@localhost]# kubectl get storageclass
NAME PROVISIONER AGE
glusterfs-csi (default) org.gluster.glusterfs 105s
[root@localhost]# kubectl describe storageclass/glusterfs-csi
Name: glusterfs-csi
IsDefaultClass: Yes
Annotations: storageclass.kubernetes.io/is-default-class=true
Provisioner: org.gluster.glusterfs
Parameters: <none>
AllowVolumeExpansion: <unset>
MountOptions: <none>
ReclaimPolicy: Delete
VolumeBindingMode: Immediate
Events: <none>
[root@localhost]# cat pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-csi-pv
spec:
  storageClassName: glusterfs-csi
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@localhost]# kubectl create -f pvc.yaml
persistentvolumeclaim/glusterfs-csi-pv created
Validate the RWX claim creation
[root@localhost]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-csi-pv Bound pvc-953d21f5a51311e8 5Gi RWX glusterfs-csi 3s
[root@localhost]# kubectl describe pvc
Name: glusterfs-csi-pv
Namespace: default
StorageClass: glusterfs-csi
Status: Bound
Volume: pvc-953d21f5a51311e8
Labels: <none>
Annotations: control-plane.alpha.kubernetes.io/leader={"holderIdentity":"874a6cc9-a511-11e8-bae2-0a580af40202","leaseDurationSeconds":15,"acquireTime":"2018-08-21T07:26:58Z","renewTime":"2018-08-21T07:27:00Z","lea...
pv.kubernetes.io/bind-completed=yes
pv.kubernetes.io/bound-by-controller=yes
storageClassName=glusterfs-csi
volume.kubernetes.io/storage-provisioner=org.gluster.glusterfs
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 5Gi
Access Modes: RWX
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ExternalProvisioning 30s (x2 over 30s) persistentvolume-controller waiting for a volume to be created, either by external provisioner "org.gluster.glusterfs" or manually created by system administrator
Normal Provisioning 30s org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_874a6cc9-a511-11e8-bae2-0a580af40202 External provisioner is provisioning volume for claim "default/glusterfs-csi-pv"
Normal ProvisioningSucceeded 29s org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_874a6cc9-a511-11e8-bae2-0a580af40202 Successfully provisioned volume pvc-953d21f5a51311e8
Verify PV details:
[root@localhost]# kubectl describe pv
Name: pvc-953d21f5a51311e8
Labels: <none>
Annotations: pv.kubernetes.io/provisioned-by=org.gluster.glusterfs
Finalizers: [kubernetes.io/pv-protection]
StorageClass: glusterfs-csi
Status: Bound
Claim: default/glusterfs-csi-pv
Reclaim Policy: Delete
Access Modes: RWX
Capacity: 5Gi
Node Affinity: <none>
Message:
Source:
Type: CSI (a Container Storage Interface (CSI) volume source)
Driver: org.gluster.glusterfs
VolumeHandle: pvc-953d21f5a51311e8
ReadOnly: false
Events: <none>
[root@localhost]# cat app.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: gluster
  labels:
    name: gluster
spec:
  containers:
    - name: gluster
      image: redis
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - mountPath: "/mnt/gluster"
          name: glustercsivol
  volumes:
    - name: glustercsivol
      persistentVolumeClaim:
        claimName: glusterfs-csi-pv
[root@localhost]# kubectl create -f app.yaml
Check the mount output on the node and validate:
[root@localhost]# mount |grep glusterfs
192.168.121.158:pvc-953d21f5a51311e8 on /var/lib/kubelet/pods/2a563343-a514-11e8-a324-525400a04cb4/volumes/kubernetes.io~csi/pvc-953d21f5a51311e8/mount type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@localhost]# kubectl delete pod gluster
pod "gluster" deleted
[root@localhost]# mount |grep glusterfs
[root@localhost]#
Kubernetes v1.12 introduced alpha support for volume snapshotting. This feature allows creating and deleting volume snapshots, and creating new volumes from a snapshot, natively through the Kubernetes API.
To verify that clone functionality works as intended, let's start by writing some data into an application pod that mounts the PVC:
[root@localhost]# kubectl exec -it redis /bin/bash
root@redis:/data# cd /mnt/gluster/
root@redis:/mnt/gluster# echo "glusterfs csi clone test" > clone_data
[root@localhost]# cat snapshot-class.yaml
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshotClass
metadata:
  name: glusterfs-csi-snap
snapshotter: org.gluster.glusterfs
[root@localhost]# kubectl create -f snapshot-class.yaml
volumesnapshotclass.snapshot.storage.k8s.io/glusterfs-csi-snap created
Verify the snapshot class:
[root@localhost]# kubectl get volumesnapshotclass
NAME AGE
glusterfs-csi-snap 1h
[root@localhost]# kubectl describe volumesnapshotclass/glusterfs-csi-snap
Name: glusterfs-csi-snap
Namespace:
Labels: <none>
Annotations: <none>
API Version: snapshot.storage.k8s.io/v1alpha1
Kind: VolumeSnapshotClass
Metadata:
Creation Timestamp: 2018-10-24T04:57:34Z
Generation: 1
Resource Version: 3215
Self Link: /apis/snapshot.storage.k8s.io/v1alpha1/volumesnapshotclasses/glusterfs-csi-snap
UID: 51de83df-d749-11e8-892a-525400d84c47
Snapshotter: org.gluster.glusterfs
Events: <none>
[root@localhost]# cat volume-snapshot.yaml
---
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshot
metadata:
  name: glusterfs-csi-ss
spec:
  snapshotClassName: glusterfs-csi-snap
  source:
    name: glusterfs-csi-pv
    kind: PersistentVolumeClaim
[root@localhost]# kubectl create -f volume-snapshot.yaml
volumesnapshot.snapshot.storage.k8s.io/glusterfs-csi-ss created
Verify the volume snapshot:
[root@localhost]# kubectl get volumesnapshot
NAME AGE
glusterfs-csi-ss 13s
[root@localhost]# kubectl describe volumesnapshot/glusterfs-csi-ss
Name: glusterfs-csi-ss
Namespace: default
Labels: <none>
Annotations: <none>
API Version: snapshot.storage.k8s.io/v1alpha1
Kind: VolumeSnapshot
Metadata:
Creation Timestamp: 2018-10-24T06:39:35Z
Generation: 1
Resource Version: 12567
Self Link: /apis/snapshot.storage.k8s.io/v1alpha1/namespaces/default/volumesnapshots/glusterfs-csi-ss
UID: 929722b7-d757-11e8-892a-525400d84c47
Spec:
Snapshot Class Name: glusterfs-csi-snap
Snapshot Content Name: snapcontent-929722b7-d757-11e8-892a-525400d84c47
Source:
Kind: PersistentVolumeClaim
Name: glusterfs-csi-pv
Status:
Creation Time: 1970-01-01T00:00:01Z
Ready: true
Restore Size: <nil>
Events: <none>
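Each bound VolumeSnapshot is backed by a cluster-scoped VolumeSnapshotContent object; its name is shown in the Snapshot Content Name field above, and it can be listed and described like any other resource:
[root@localhost]# kubectl get volumesnapshotcontent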
[root@localhost]# cat pvc-restore.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-pv-restore
spec:
  storageClassName: glusterfs-csi
  dataSource:
    name: glusterfs-csi-ss
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@localhost]# kubectl create -f pvc-restore.yaml
persistentvolumeclaim/glusterfs-pv-restore created
Verify the newly created claim:
[root@localhost]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-csi-pv Bound pvc-712278b0-d749-11e8-892a-525400d84c47 5Gi RWX glusterfs-csi 103m
glusterfs-pv-restore Bound pvc-dfcc36f0-d757-11e8-892a-525400d84c47 5Gi RWO glusterfs-csi 14s
[root@localhost]# kubectl describe pvc/glusterfs-pv-restore
Name: glusterfs-pv-restore
Namespace: default
StorageClass: glusterfs-csi
Status: Bound
Volume: pvc-dfcc36f0-d757-11e8-892a-525400d84c47
Labels: <none>
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.kubernetes.io/storage-provisioner: org.gluster.glusterfs
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 5Gi
Access Modes: RWO
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ExternalProvisioning 41s persistentvolume-controller waiting for a volume to be created, either by external provisioner "org.gluster.glusterfs" or manually created by system administrator
Normal Provisioning 41s org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_1e7821cb-d749-11e8-9935-0a580af40303 External provisioner is provisioning volume for claim "default/glusterfs-pv-restore"
Normal ProvisioningSucceeded 41s org.gluster.glusterfs_csi-provisioner-glusterfsplugin-0_1e7821cb-d749-11e8-9935-0a580af40303 Successfully provisioned volume pvc-dfcc36f0-d757-11e8-892a-525400d84c47
Mounted By: <none>
[root@localhost]# cat app-with-clone.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: redis-pvc-restore
  labels:
    name: redis-pvc-restore
spec:
  containers:
    - name: redis-pvc-restore
      image: redis:latest
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - mountPath: "/mnt/gluster"
          name: glusterfscsivol
  volumes:
    - name: glusterfscsivol
      persistentVolumeClaim:
        claimName: glusterfs-pv-restore
[root@localhost]# kubectl create -f app-with-clone.yaml
pod/redis-pvc-restore created
Verify the cloned data is present in the newly created application:
[root@localhost]# kubectl get po
NAME READY STATUS RESTARTS AGE
csi-attacher-glusterfsplugin-0 2/2 Running 0 112m
csi-nodeplugin-glusterfsplugin-dl7pp 2/2 Running 0 112m
csi-nodeplugin-glusterfsplugin-khrtd 2/2 Running 0 112m
csi-nodeplugin-glusterfsplugin-kqcsw 2/2 Running 0 112m
csi-provisioner-glusterfsplugin-0 3/3 Running 0 112m
glusterfs-55v7v 1/1 Running 0 128m
glusterfs-qbvgv 1/1 Running 0 128m
glusterfs-vclr4 1/1 Running 0 128m
redis 1/1 Running 0 109m
redis-pvc-restore 1/1 Running 0 26s
[root@localhost]# kubectl exec -it redis-pvc-restore /bin/bash
root@redis-pvc-restore:/data# cd /mnt/gluster/
root@redis-pvc-restore:/mnt/gluster# ls
clone_data
root@redis-pvc-restore:/mnt/gluster# cat clone_data
glusterfs csi clone test
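Once you are done with the clone demo, the pieces can be cleaned up in order:
[root@localhost]# kubectl delete pod redis-pvc-restore
[root@localhost]# kubectl delete pvc glusterfs-pv-restore
[root@localhost]# kubectl delete volumesnapshot glusterfs-csi-ss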
To try glusterfs-lite volumes, create a storage class with the brickType parameter set to "loop":
[root@localhost]# cat glusterfs-lite-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-lite-csi
provisioner: org.gluster.glusterfs
parameters:
  brickType: "loop"
[root@localhost]# kubectl create -f glusterfs-lite-storage-class.yaml
storageclass.storage.k8s.io/glusterfs-lite-csi created
Verify the glusterfs-lite storage class:
[root@localhost]# kubectl get storageclass
NAME PROVISIONER AGE
glusterfs-lite-csi org.gluster.glusterfs 105s
[root@localhost]# cat pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-lite-csi-pv
spec:
  storageClassName: glusterfs-lite-csi
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@localhost]# kubectl create -f pvc.yaml
persistentvolumeclaim/glusterfs-lite-csi-pv created
Validate the RWX claim creation
[root@localhost]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-lite-csi-pv Bound pvc-943d21f5a51312e7 5Gi RWX glusterfs-lite-csi 5s
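The brickType "loop" parameter asks the provisioner to back bricks with loopback files rather than dedicated block devices (that is what the name suggests; see the driver docs for specifics). If so, the loop devices should be visible inside the gluster pods; the pod name and namespace here are illustrative:
[root@localhost]# kubectl exec -it gluster-kube1-0 -n gcs -- losetup -a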
Follow the thin-arbiter guide to set up the thin-arbiter node, then create a storage class that points to it (as in the example below, arbiterPath takes the form <host>:<port>/<path>):
$ cat thin-arbiter-storageclass.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-csi-thin-arbiter
provisioner: org.gluster.glusterfs
parameters:
  arbiterType: "thin"
  arbiterPath: "192.168.10.90:24007/mnt/arbiter-path"
$ kubectl create -f thin-arbiter-storageclass.yaml
storageclass.storage.k8s.io/glusterfs-csi-thin-arbiter created
$ cat thin-arbiter-pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-csi-thin-pv
spec:
  storageClassName: glusterfs-csi-thin-arbiter
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
$ kubectl create -f thin-arbiter-pvc.yaml
persistentvolumeclaim/glusterfs-csi-thin-pv created
Verify the PVC is in Bound state:
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-csi-thin-pv Bound pvc-86b3b70b-1fa0-11e9-9232-525400ea010d 5Gi RWX glusterfs-csi-thin-arbiter 13m
$ cat thin-arbiter-pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: ta-redis
  labels:
    name: redis
spec:
  containers:
    - name: redis
      image: redis
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - mountPath: "/mnt/gluster"
          name: glusterfscsivol
  volumes:
    - name: glusterfscsivol
      persistentVolumeClaim:
        claimName: glusterfs-csi-thin-pv
$ kubectl create -f thin-arbiter-pod.yaml
pod/ta-redis created
Verify the app is in Running state:
$ kubectl get po
NAME READY STATUS RESTARTS AGE
ta-redis 1/1 Running 0 6m54s
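As with the earlier examples, you can log into the app and confirm the volume is mounted (expect a fuse.glusterfs mount on /mnt/gluster):
$ kubectl exec -it ta-redis /bin/bash
root@ta-redis:/data# mount | grep gluster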