1. Configure nfs-subdir-external-provisioner
# Pull the Helm chart
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
helm repo update
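# Optional: dump the chart's default values first to see everything that can be overridden
# (the output file name values-default.yaml is just an example)
helm show values nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version 4.0.18 > values-default.yaml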
helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version 4.0.18 --untar

# Edit values.yaml (only the relevant parts are shown)
...
nfs:
  server: 10.0.0.105                               # address of the NFS server
  path: /nfs                                       # export path on the server
  mountOptions:
  volumeName: nfs-subdir-external-provisioner-root

storageClass:
  create: true
  defaultClass: true                               # make this the default StorageClass; it is used whenever a workload does not name one
  name: nfs-data                                   # StorageClass name
  allowVolumeExpansion: true                       # allow volume expansion
  reclaimPolicy: Delete                            # delete the PV together with the PVC; acceptable in a test environment, do not do this in production
  archiveOnDelete: true                            # keep an archived copy of the data after the PVC is deleted
  accessModes: ReadWriteOnce                       # access mode
...
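# Optional sanity check: render the chart locally with these overrides before installing.
# This only prints the manifests, it does not touch the cluster.
helm template nfs-data . -f values.yaml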
2. Install the NFS service, on both the NFS server and every node that will mount it
# Create the data directory (on the NFS server)
mkdir -p /nfs
# Install the NFS packages: nfs-kernel-server on the NFS server, nfs-common on the client nodes
apt install nfs-kernel-server -y    # NFS server
apt install nfs-common -y           # client nodes
# Edit the export configuration on the NFS server
[root@ubt-server ~]# cat /etc/exports
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
/nfs 10.0.0.201/24(rw,sync,no_root_squash) # list every server that needs access
/nfs 10.0.0.202/24(rw,sync,no_root_squash)
/nfs 10.0.0.203/24(rw,sync,no_root_squash)
/nfs 10.0.0.11/24(rw,sync,no_root_squash)
/nfs 10.0.0.12/24(rw,sync,no_root_squash)
/nfs 10.0.0.13/24(rw,sync,no_root_squash)

# Start and enable the NFS server service on the NFS server
systemctl start nfs-kernel-server.service
systemctl enable nfs-kernel-server.service
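# If /etc/exports is edited while the service is already running, re-export so the new entries take effect (on the NFS server)
exportfs -ra    # re-read /etc/exports and apply all entries
exportfs -v     # show what is currently exported, to confirm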
# Try a test mount
# On the NFS server:
mkdir /nfs/aaa
touch /nfs/aaa/a.txt

# On the 10.0.0.101 server
mount.nfs 10.0.0.105:/nfs/aaa /mnt/
# If you can see a.txt, the mount works
umount /mnt/
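# If the test mount fails, a quick check is to ask the server what it actually exports;
# showmount ships with nfs-common and can be run from any client
showmount -e 10.0.0.105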
3. Test that provisioning works
# Install the chart
kubectl create ns nfs-data
helm install nfs-data -n nfs-data -f values.yaml .

root@master-01:/etc/kubernetes/charts/nfs-subdir-external-provisioner# kubectl get pods -n nfs-data
NAME READY STATUS RESTARTS AGE
nfs-data-nfs-subdir-external-provisioner-6797d886b8-tt87f 1/1 Running 0 14s
root@master-01:/etc/kubernetes/charts/nfs-subdir-external-provisioner# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-data (default)   cluster.local/nfs-data-nfs-subdir-external-provisioner   Delete   Immediate   true   3m17s

# Create a PVC to test it
root@master-01:~# cat pvc-test.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-test
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-data
root@master-01:~# kubectl apply -f pvc-test.yaml
persistentvolumeclaim/pvc-test created
root@master-01:~# kubectl create ns aaa
namespace/aaa created
root@master-01:~# kubectl apply -f pvc-test.yaml -n aaa
persistentvolumeclaim/pvc-test created

# Check that the PVC is bound; Pending means provisioning failed, so check the NFS server and the nfs-subdir-external-provisioner pod
root@master-01:~# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-test   Bound    pvc-69113802-5b46-400e-af91-fece7f538c63   1Gi        RWO            nfs-data       5s

# On the NFS server, check whether the corresponding directory was created; if it is there, provisioning works
[root@ubt-server nfs]# ls | grep aaa | grep test
aaa-pvc-test-pvc-7e83b61b-3c6d-489b-a9a4-cf91ab3c52b0

# Once this is in place, other Helm charts only need storageClassName: nfs-data in their values file and PVs will be created automatically
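# To confirm end to end that a workload can write through a provisioned volume, a throwaway Pod can
# mount the PVC created above. This is only a sketch: the Pod name and the busybox image are arbitrary
# choices for the check, not part of the chart.
apiVersion: v1
kind: Pod
metadata:
  name: pvc-test-pod                  # hypothetical name, only for this check
spec:
  containers:
    - name: writer
      image: busybox
      command: ["sh", "-c", "echo hello > /data/hello.txt && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: pvc-test           # the PVC created earlier
# After kubectl apply -f, hello.txt should appear under the matching pvc-... directory on the NFS server.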