Contents
- 1. Installing heketi
  - 1.1 Offline installation
  - 1.2 Online installation
  - 1.3 Managing gfs with heketi
  - 1.4 Creating a gluster volume with heketi
  - 1.5 Deleting a glusterfs volume with heketi
- 2. Creating a glusterfs volume from k8s
  - 2.1 Creating a pod that uses the PVC
- 3. Passwordless SSH login configuration for other hosts
1. Installing heketi
heketi project: https://github.com/heketi/heketi
Key points:
- Manages glusterfs volumes (create, expand) through a RESTful API, and can spread data across multiple glusterfs clusters
- Through heketi, glusterfs can be plugged into cloud platforms (Kubernetes, OpenStack, etc.)
- The heketi version does not have to match the glusterfs version (pick whichever stable releases you prefer)
- Prerequisites: three glusterfs nodes have been added to the resource pool (no volumes need to exist yet), and each node has at least one empty disk (or partition) with no filesystem initialized on it. Passwordless SSH login must be configured; if the SSH account heketi uses on the glusterfs nodes is not root, it must have sudo privileges (a sketch of the key setup follows).
Reference: https://www.jianshu.com/p/8f36e59ae4f8
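The passwordless-login prerequisite boils down to a key pair on the heketi host whose public half is installed on every glusterfs node. A minimal sketch, assuming the heketi account and /etc/heketi directory created during the installation below already exist (node IPs are the ones used in the examples; adjust to your environment):

ssh-keygen -t rsa -N '' -f /etc/heketi/heketi_key      # private key later referenced by heketi.json
for node in 192.168.56.7 192.168.56.71; do             # glusterfs nodes used in this article
    ssh-copy-id -i /etc/heketi/heketi_key.pub heketi@"$node"
done

The target-host side of this setup (creating the heketi account, authorized_keys, sudoers) is covered in section 3.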
1.1 Offline installation
Download the heketi binary package from https://github.com/heketi/heketi/releases, then write a systemd unit file.
[root@c73 heketi]# cat /usr/lib/systemd/system/heketi.service
[Unit]
Description=Heketi Server

[Service]
Type=simple
WorkingDirectory=/var/lib/heketi
User=heketi
ExecStart=/usr/bin/heketi --config=/etc/heketi/heketi.json
Restart=on-failure
StandardOutput=syslog
StandardError=syslog

[Install]
WantedBy=multi-user.target

[root@c73 heketi]# cat /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "18080",                            ====> service port
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,                           ====> enable authentication
  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin"                          ====> admin password
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "admin"                          ====> user password
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "      Kubernetes exec api."
    ],
    "executor": "ssh",                        ====> how heketi reaches the nodes
    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {                              ====> details for the ssh executor
      "keyfile": "/etc/heketi/heketi_key",    ====> private key of the remote login account
      "user": "heketi",
      "port": "22",
      "sudo": true,
      "fstab": "/etc/fstab"
    },
    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",
    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
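The unit file above assumes the heketi account and a few directories already exist. A minimal preparation sketch, assuming the binary from the release tarball is simply copied to /usr/bin/heketi:

useradd -r -d /var/lib/heketi -s /sbin/nologin heketi   # assumption: a system account is enough
mkdir -p /var/lib/heketi /etc/heketi
cp heketi /usr/bin/heketi                               # binary unpacked from the release archive
chown -R heketi:heketi /var/lib/heketi /etc/heketi      # heketi.db and the ssh key live here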
[root@c73 heketi]# systemctl start heketi
[root@c73 heketi]# systemctl status heketi
● heketi.service - Heketi Server
   Loaded: loaded (/usr/lib/systemd/system/heketi.service; disabled; vendor preset: disabled)
   Active: active (running) since Tue 2022-07-05 14:45:35 CST; 2h 11min ago
 Main PID: 6512 (heketi)
    Tasks: 8
   Memory: 7.2M
   CGroup: /system.slice/heketi.service
           └─6512 /usr/bin/heketi --config=/etc/heketi/heketi.json
Jul 05 16:55:37 c73 heketi[6512]:  Main PID: 698 (glusterd)
Jul 05 16:55:37 c73 heketi[6512]:    CGroup: /system.slice/glusterd.service
Jul 05 16:55:37 c73 heketi[6512]:            ├─ 698 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
Jul 05 16:55:37 c73 heketi[6512]:            ├─1355 /usr/sbin/glusterfsd -s 192.168.56.71 --volfile-id vol_872e4da1d2848c164….56.71-v
Jul 05 16:55:37 c73 heketi[6512]:            └─1378 /usr/sbin/glusterfs -s localhost --volfile-id gluster/glustershd -p /var/run/g…hd
Jul 05 16:55:37 c73 heketi[6512]: Jul 05 07:18:46 c71 systemd[1]: Starting GlusterFS, a clustered file-system server...
Jul 05 16:55:37 c73 heketi[6512]: Jul 05 07:18:47 c71 systemd[1]: Started GlusterFS, a clustered file-system server.
Jul 05 16:55:37 c73 heketi[6512]: Stderr []
Jul 05 16:55:37 c73 heketi[6512]: [heketi] INFO 2022/07/05 16:55:37 Periodic health check status: node 50c111a951...p=true
Jul 05 16:55:37 c73 heketi[6512]: [heketi] INFO 2022/07/05 16:55:37 Cleaned 0 nodes from health cache
Hint: Some lines were ellipsized, use -l to show in full.
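Note that the status output still shows the unit as "disabled"; if heketi should come back after a reboot, enable it as well:

systemctl enable heketi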
1.2 Online installation
# CentOS has no heketi repository by default; add the repo, then install
yum install -y centos-release-gluster
yum install -y heketi heketi-client
1.3 Managing gfs with heketi
topology.json reference: https://github.com/heketi/heketi/blob/master/client/cli/go/topology-sample.json
Troubleshooting:
1. A partition is unmounted but still contains data, so the topology import fails. Fix: "devices": [{"name":"/dev/sda4","destroydata":true}]
2. SSH login to the local host fails. Cause: key-based passwordless login was not configured for that user.
#1 First, verify the service started correctly
#2 Second, set up passwordless SSH to each glusterfs node, then use heketi-cli to add the nodes and create a cluster
#3 Third, create a glusterfs volume with heketi-cli
#4 Fourth, log in to a glusterfs node and verify the result
[root@c73 heketi]# systemctl start heketi
# Verify the service started correctly
[root@c73 heketi]# curl http://localhost:18080/hello
Hello from Heketi
[root@c73 heketi]# heketi-cli --server http://localhost:18080 --user admin --secret "admin" cluster list
Clusters:
# Set the default environment variable HEKETI_CLI_SERVER
[root@c73 heketi]# echo "export HEKETI_CLI_SERVER=http://192.168.56.73:18080" > /etc/profile.d/heketi.sh
[root@c73 heketi]# source /etc/profile.d/heketi.sh
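Along the same lines, heketi-cli can also read the credentials from the environment, which avoids repeating --user/--secret on every call (a sketch; the variable names are the ones heketi-cli documents, verify against your version):

echo "export HEKETI_CLI_USER=admin" >> /etc/profile.d/heketi.sh
echo "export HEKETI_CLI_KEY=admin"  >> /etc/profile.d/heketi.sh
source /etc/profile.d/heketi.sh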
# Configure passwordless SSH to each glusterfs node; the detailed steps are omitted here (generate a key pair locally, append id_rsa.pub to the authorized_keys file on every gfs node, then verify the passwordless login; the per-host setup is shown in section 3)
# /dev/sdb1 on the glusterfs nodes is already mounted and in use, so adding it fails
# /dev/sdb2 on the glusterfs nodes is not mounted and not initialized, so adding it succeeds
# topology-sample.json can start with just two glusterfs nodes; to scale out later, add another node and re-run the load (see the scale-out sketch at the end of this subsection)
[root@c73 heketi]# cat topology-sample.json
{
"clusters": [
{
"nodes": [
{
"node": {
"hostnames": {
"manage": [
"192.168.56.7"
],
"storage": [
"192.168.56.7"
]
},
"zone": 1
},
"devices": [
"/dev/sdb2"
]
},
{
"node": {
"hostnames": {
"manage": [
"192.168.56.71"
],
"storage": [
"192.168.56.71"
]
},
"zone": 1
},
"devices": [
"/dev/sdb2"
]
}
]
}
]
}
[root@c73 heketi]# heketi-cli topology load --json=topology-sample.json
Error: Unable to get topology information: Invalid JWT token: Token missing iss claim
# This happens because newer heketi versions require the username and password (the values configured in heketi.json) to be passed when operating on the gfs cluster
[root@c73 heketi]# heketi-cli --user admin --secret admin topology load --json topology-sample.json
Creating cluster ... ID: 61f067af620b7bd2a8cbc3f375b19c8c
Allowing file volumes on cluster.
Allowing block volumes on cluster.
Creating node 192.168.56.7 ... ID: 023eca1214a5d9e24af56d1fa4aaceb8
Adding device /dev/sdb1 ... Unable to add device: Setup of device /dev/sdb1 failed (already initialized or contains data?): Can't open /dev/sdb1 exclusively. Mounted filesystem? Can't open /dev/sdb1 exclusively. Mounted filesystem?
Creating node 192.168.56.71 ... ID: 50c111a95160da5a85d944f98c888a05
Adding device /dev/sdb1 ... Unable to add device: Setup of device /dev/sdb1 failed (already initialized or contains data?): Can't open /dev/sdb1 exclusively. Mounted filesystem? Can't open /dev/sdb1 exclusively. Mounted filesystem?
[root@c73 heketi]# heketi-cli --user admin --secret admin topology load --json topology-sample.json
Found node 192.168.56.7 on cluster 61f067af620b7bd2a8cbc3f375b19c8c
Adding device /dev/sdb2 ... OK
Found node 192.168.56.71 on cluster 61f067af620b7bd2a8cbc3f375b19c8c
Adding device /dev/sdb2 ... OK
# Next, create a 1 GB, 2-replica data volume (you can also pass --server http://localhost:18080 explicitly)
[root@c73 heketi]# heketi-cli --user admin --secret "admin" cluster list
Clusters:
Id:61f067af620b7bd2a8cbc3f375b19c8c [file][block]
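As noted above topology-sample.json, scaling out later can be done by adding the third node to the JSON and re-running topology load. Alternatively, heketi-cli can add the node and its device directly; a sketch (the 192.168.56.72 address and the node ID placeholder are assumptions, not values from this setup):

heketi-cli --user admin --secret admin node add \
    --cluster=61f067af620b7bd2a8cbc3f375b19c8c \
    --zone=1 \
    --management-host-name=192.168.56.72 \
    --storage-host-name=192.168.56.72
# node add prints the new node ID; use it to attach the empty device
heketi-cli --user admin --secret admin device add --name=/dev/sdb2 --node=<new-node-id>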
1.4 Creating a gluster volume with heketi
[root@c73 heketi]# heketi-cli --user admin --secret admin volume create --size=1 --replica=2 --clusters=61f067af620b7bd2a8cbc3f375b19c8c
Name: vol_872e4da1d2848c1645ab30c21fc05e3d
Size: 1
Volume Id: 872e4da1d2848c1645ab30c21fc05e3d
Cluster Id: 61f067af620b7bd2a8cbc3f375b19c8c
Mount: 192.168.56.7:vol_872e4da1d2848c1645ab30c21fc05e3d
Mount Options: backup-volfile-servers=192.168.56.71
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 2
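The Mount and Mount Options fields above are exactly what a glusterfs client needs; a sketch of mounting the new volume by hand (the mount point is arbitrary):

mkdir -p /mnt/vol1
mount -t glusterfs -o backup-volfile-servers=192.168.56.71 \
      192.168.56.7:vol_872e4da1d2848c1645ab30c21fc05e3d /mnt/vol1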
# Log in to the glusterfs nodes to verify --- (c72 was not added to topology-sample.json, c71 was) ---
[root@c72 ~]# gluster volume list
v2
vol_872e4da1d2848c1645ab30c21fc05e3d
[root@c72 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
└─sda1 8:1 0 40G 0 part /
sdb 8:16 0 20G 0 disk
├─sdb1 8:17 0 10G 0 part /export
└─sdb2 8:18 0 4G 0 part
[root@c71 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
└─sda1 8:1 0 40G 0 part /
sdb 8:16 0 20G 0 disk
├─sdb1 8:17 0 10G 0 part /export
└─sdb2 8:18 0 4G 0 part
├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9_tmeta 253:0 0 8M 0 lvm
│ └─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9-tpool 253:2 0 1G 0 lvm
│ ├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9 253:3 0 1G 1 lvm
│ └─vg_349d0d051c37d48c2be4b13f69b98b88-brick_f2345876c46be5d44d90ceec79668826 253:4 0 1G 0 lvm /var/lib/heket
└─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9_tdata 253:1 0 1G 0 lvm
└─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9-tpool 253:2 0 1G 0 lvm
├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9 253:3 0 1G 1 lvm
└─vg_349d0d051c37d48c2be4b13f69b98b88-brick_f2345876c46be5d44d90ceec79668826 253:4 0 1G 0 lvm /var/lib/heket
1.5 Deleting a glusterfs volume with heketi
[root@JXQ-10-240-149-160 heketi]# heketi-cli --user admin --secret "admin@123" volume delete vol_9261d7c9e6c74fc019b98c219060953a
Error: Invalid path or request
[root@JXQ-10-240-149-160 heketi]# heketi-cli --user admin --secret "admin" volume delete 9261d7c9e6c74fc019b98c219060953a
Volume 9261d7c9e6c74fc019b98c219060953a deleted
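Note the difference between the two attempts: volume delete expects the bare volume ID, while the first try passed the vol_-prefixed name (and a different secret). To look up the IDs first, a quick sketch:

heketi-cli --user admin --secret admin volume list    # each line shows the Id, Cluster and vol_ name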
2. Creating a glusterfs volume from k8s
[root@JXQ-11-243-32-213 ~]# ll heketi-gfs/
total 12
-rw-r--r-- 1 root root 509 Jul 7 15:11 deploy-nginx.yaml
-rw-r--r-- 1 root root 254 Jul 7 14:44 pvc.yaml
-rw-r--r-- 1 root root 734 Jul 7 14:39 storage.yaml
[root@JXQ-11-243-32-213 heketi-gfs]# cd heketi-gfs/
[root@JXQ-11-243-32-213 heketi-gfs]# kubectl create ns heketi-gfs
[root@JXQ-11-243-32-213 heketi-gfs]# cat storage.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: heketi-gfs
data:
  # base64 encoded password. E.g.: echo -n "mypassword" | base64
  key: YWRtaW4=
type: kubernetes.io/glusterfs
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: glusterfs
  namespace: heketi-gfs
provisioner: kubernetes.io/glusterfs
#reclaimPolicy: Delete    # the default
reclaimPolicy: Retain
allowVolumeExpansion: true
parameters:
  resturl: "http://11.243.32.215:18080"
  clusterid: "e5d93c15f7a98cfd60a9d5f9d5817048"
  restauthenabled: "true"
  restuser: "admin"
  #secretNamespace: "heketi-gfs"
  #secretName: "heketi-secret"
  restuserkey: "admin"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:2"
[root@JXQ-11-243-32-213 heketi-gfs]# kubectl apply -f storage.yaml
secret/heketi-secret created
storageclass.storage.k8s.io/glusterfs created
[root@JXQ-11-243-32-213 heketi-gfs]# kubectl get storageclass -n heketi-gfs
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
glusterfs kubernetes.io/glusterfs Retain Immediate true 4s
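storage.yaml above passes the heketi password in plain text through restuserkey and leaves the heketi-secret Secret unused. The kubernetes.io/glusterfs provisioner can also take the password from that Secret; a sketch of the alternative (the class name glusterfs-secret is made up for illustration):

kubectl apply -f - <<'EOF'
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: glusterfs-secret
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Retain
allowVolumeExpansion: true
parameters:
  resturl: "http://11.243.32.215:18080"
  clusterid: "e5d93c15f7a98cfd60a9d5f9d5817048"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "heketi-gfs"
  secretName: "heketi-secret"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:2"
EOF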
[root@JXQ-11-243-32-213 heketi-gfs]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-pvc1
  namespace: heketi-gfs
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
[root@JXQ-11-243-32-213 heketi-gfs]# kubectl apply -f pvc.yaml
persistentvolumeclaim/glusterfs-pvc1 created
[root@JXQ-11-243-32-213 heketi-gfs]# kubectl get pvc -n heketi-gfs
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-pvc1 Bound pvc-12bad281-aa51-45f9-b121-9b7bbe2b8d31 1Gi RWX glusterfs 4m29s
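To see which gluster volume actually backs the claim, read the path off the bound PV (a sketch; the PV name is the one shown above):

kubectl get pv pvc-12bad281-aa51-45f9-b121-9b7bbe2b8d31 -o jsonpath='{.spec.glusterfs.path}{"\n"}'
# prints the vol_<id> name, which also appears in `heketi-cli volume list` and `gluster volume list`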
2.1 Creating a pod that uses the PVC
[root@JXQ-11-243-32-216 heketi-gfs]# cat deploy-nginx.yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: heketi-gfs
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor01.io/library/nginx:1.12.0
        volumeMounts:
        - name: wwwroot
          mountPath: /usr/share/nginx/html
        ports:
        - containerPort: 80
      volumes:
      - name: wwwroot
        persistentVolumeClaim:
          claimName: glusterfs-pvc1
[root@JXQ-11-243-32-216 heketi-gfs]# kubectl apply -f deploy-nginx.yaml
deployment.apps/nginx-deployment created
[root@JXQ-11-243-32-216 heketi-gfs]# kubectl get all -n heketi-gfs
NAME READY STATUS RESTARTS AGE
pod/nginx-deployment-5cdd5b74fc-qvkg2 1/1 Running 0 9s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/glusterfs-dynamic-6280b0b7-fdc0-11ec-bdc3-246e96651388 ClusterIP 10.43.97.83 <none> 1/TCP 26m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-deployment 1/1 1 1 9s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-deployment-5cdd5b74fc 1 1 1 9s
######### Enter the container, create a file, then exit the container
[root@JXQ-11-243-32-216 heketi-gfs]# kubectl exec -it nginx-deployment-5cdd5b74fc-qvkg2 sh -n heketi-gfs
# bash
root@nginx-deployment-5cdd5b74fc-qvkg2:/# ls /usr/share/nginx/html/
root@nginx-deployment-5cdd5b74fc-qvkg2:/# echo 123 > /usr/share/nginx/html/index.html
root@nginx-deployment-5cdd5b74fc-qvkg2:/# curl localhost
bash: curl: command not found
root@nginx-deployment-5cdd5b74fc-qvkg2:/# exit
[root@JXQ-11-243-32-216 heketi-gfs]#
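curl is not installed in this image, so one way to check the page from outside the pod is kubectl port-forward (a sketch):

kubectl -n heketi-gfs port-forward deploy/nginx-deployment 8080:80 &
curl http://localhost:8080      # expect: 123
kill %1                         # stop the background port-forward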
######### After exiting the container, mount the glusterfs volume and check whether the data is there
[root@JXQ-11-243-32-216 heketi-gfs]# sudo mount -t glusterfs 10.240.149.158:/vol_bfd7d5e69f53514dec3c9c1ec15be137 /mnt-gfs/
[root@JXQ-11-243-32-216 heketi-gfs]# ll /mnt-gfs/
total 1
-rw-r--r-- 1 root 40000 4 Jul 7 15:13 index.html
[root@JXQ-11-243-32-216 heketi-gfs]# cat /mnt-gfs/index.html
123
3. Passwordless SSH login configuration for other hosts
useradd heketi
mkdir /home/heketi/.ssh
touch /home/heketi/.ssh/authorized_keys
chown -R heketi. /home/heketi
chmod 700 /home/heketi/.ssh
# Copy the public key of the source host to the target host
echo 'ssh-rsa xxxxxxx heketi@c73' >> /home/heketi/.ssh/authorized_keys
chmod 600 /home/heketi/.ssh/authorized_keys
echo 'heketi ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
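After the target host is prepared, a quick check from the heketi host confirms both the key login and the password-less sudo that heketi's ssh executor depends on (key path from heketi.json; the address is one of the example nodes):

ssh -i /etc/heketi/heketi_key heketi@192.168.56.71 sudo -n gluster --version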