heketi: simple installation, configuration, and usage


heketi provides a RESTful API for GlusterFS clusters, so the cluster can be managed and controlled through REST-style calls.

Prerequisites

1 Install and start glusterd on every node; see https://www.cnblogs.com/bfmq/p/9990467.html for reference
2 The disks to be used must be raw block devices: not formatted, not mounted, carrying no signatures (see the pre-flight sketch after this list)
3 At least two nodes are required; with fewer, volumes cannot be created even after the cluster itself is set up
4 If you are not integrating with Kubernetes (k8s), there is no need to install heketi at all; GlusterFS's own management tools are enough
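
A quick pre-flight check for items 1 and 2, using standard systemd and util-linux tools (adjust the device names to your own):

[root@glusterfs-bj-ali-bgp1 ~]# systemctl is-active glusterd        # item 1: must print "active" on every node
[root@glusterfs-bj-ali-bgp1 ~]# lsblk -f /dev/vdb /dev/vdc          # item 2: the FSTYPE and MOUNTPOINT columns must be empty
[root@glusterfs-bj-ali-bgp1 ~]# wipefs /dev/vdb                     # item 2: no output means no leftover filesystem/RAID signatures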

Install, edit the configuration file, and start the service

[root@glusterfs-bj-ali-bgp1 ~]# yum install heketi heketi-client -y
[root@glusterfs-bj-ali-bgp1 ~]# cat /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",                                                            # listening port

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,                                                        # whether JWT authentication is enabled

  "_jwt": "Private keys for access",                                        # configure these when JWT auth is enabled
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "My Secret"                                                    # admin secret; the admin user can call every API
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "My Secret"                                                    # regular-user secret; this user can only manage volume resources, not the cluster/node topology
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [                                                    # how commands are executed on the nodes
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",            # developer mode, for testing only
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",            # ssh is what a normal production deployment uses
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",                # used when the GlusterFS cluster runs inside Kubernetes as a DaemonSet
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",    # settings for the ssh executor
    "sshexec": {
      "keyfile": "/etc/heketi/id_rsa",                                        # private key of the ssh user; the heketi user needs read access to this file
      "user": "root",                                                        # ssh user; avoid root in production
      "port": "22",                                                            # ssh port
      "fstab": "/etc/fstab"                                                    # path to the system fstab
    },

    "_kubeexec_comment": "Kubernetes configuration",                        # settings for the kubernetes executor
    "kubeexec": {
      "host" :"https://kubernetes.host:8443",                                # Kubernetes API address and port
      "cert" : "/path/to/crt.file",                                            # Kubernetes certificate
      "insecure": false,                                                    # whether to skip TLS verification
      "user": "kubernetes username",                                        # Kubernetes user
      "password": "password for kubernetes user",                            # password of the Kubernetes user
      "namespace": "OpenShift project or Kubernetes namespace",                # namespace the project lives in
      "fstab": "Optional: Specify fstab file on node.  Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",                                        # heketi keeps a small database of its own; the default path is fine

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel" : "warning"                                                    # log level; messages end up in /var/log/messages
  }
}
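
The keyfile referenced in sshexec does not exist yet; the heketi package will not create it. A minimal sketch of preparing it, assuming passwordless root SSH as configured above (the key path and node IPs are taken from this document; everything else is stock OpenSSH):

[root@glusterfs-bj-ali-bgp1 ~]# ssh-keygen -t rsa -f /etc/heketi/id_rsa -N ''            # generate a passphrase-less key pair
[root@glusterfs-bj-ali-bgp1 ~]# ssh-copy-id -i /etc/heketi/id_rsa.pub root@172.17.1.1    # authorize it on every gluster node
[root@glusterfs-bj-ali-bgp1 ~]# ssh-copy-id -i /etc/heketi/id_rsa.pub root@172.17.1.2
[root@glusterfs-bj-ali-bgp1 ~]# chown heketi:heketi /etc/heketi/id_rsa                   # the heketi user needs read access to the key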
[root@glusterfs-bj-ali-bgp1 ~]# systemctl enable heketi  && systemctl start heketi && systemctl status heketi
[root@glusterfs-bj-ali-bgp1 heketi]# netstat -tpln
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      3045/sshd           
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      59232/glusterd      
tcp6       0      0 :::8080                 :::*                    LISTEN      60356/heketi             # port is open
[root@glusterfs-bj-ali-bgp1 heketi]# curl http://127.0.0.1:8080/hello                # connectivity test
Hello from Heketi
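
heketi-cli talks to http://localhost:8080 by default; if the server runs elsewhere, point the client at it with the standard HEKETI_CLI_SERVER environment variable (the address below is an example value):

[root@glusterfs-bj-ali-bgp1 ~]# export HEKETI_CLI_SERVER=http://172.17.1.1:8080     # every heketi-cli call below then targets this server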

Next, initialize a cluster quickly by loading a topology file that declares the nodes and their disk devices. Watch out: the file must be strictly valid JSON. The pretty-printed version below, copied from many online docs, contains trailing commas and will not parse; the single-line version after it, with those commas removed, is what actually loads.

[root@glusterfs-bj-ali-bgp1 heketi]# cat /etc/heketi/topology.json                    # this pretty-printed copy from online docs will NOT parse — note the trailing commas after "/dev/vdc" and "/dev/vde", which are invalid JSON
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "172.17.1.1"
                            ],
                            "storage": [
                                "172.17.1.1"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/vdb",
                        "/dev/vdc",
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "172.17.1.2"
                            ],
                            "storage": [
                                "172.17.1.2"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        "/dev/vdd",
                        "/dev/vde",
                    ]
                }
            ]
        }
    ]
}
[root@glusterfs-bj-ali-bgp1 heketi]# cat /etc/heketi/topology.json                # this is what the file should look like: the same content, minus the trailing commas (collapsed to one line here)
{"clusters": [{"nodes": [{"node": {"hostnames": {"manage": ["172.17.1.1"], "storage": ["172.17.1.1"]}, "zone": 1}, "devices": ["/dev/vdb", "/dev/vdc"]}, {"node": {"hostnames": {"manage": ["172.17.1.2"], "storage": ["172.17.1.2"]}, "zone": 1}, "devices": ["/dev/vdd", "/dev/vde"]}]}]}
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli topology load --json=/etc/heketi/topology.json        # with auth enabled: heketi-cli --user admin --secret <admin key from heketi.json> topology load --json=/etc/heketi/topology.json
Creating cluster ... ID: 5ff75a20c566d3ff520026a2bcfbd359
    Allowing file volumes on cluster.
    Allowing block volumes on cluster.
    Creating node 172.17.1.1 ... ID: a778da2dfeebcb1dfd6d3ddb50ee9658
        Adding device /dev/vdb ... OK
        Adding device /dev/vdc ... OK
    Creating node 172.17.1.2 ... ID: 3e13521fdc3ff7ce1dec30a5107e9d43
        Adding device /dev/vdd ... OK
        Adding device /dev/vde ... OK
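
Before creating volumes, it is worth confirming what heketi actually registered; the stock heketi-cli query subcommands cover this:

[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli cluster list        # should list the cluster ID printed above
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli node list           # node IDs and the cluster they belong to
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli topology info       # full tree of nodes, devices, and bricks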

Create a volume, then mount and use it

[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli volume create --size=6000 --replica=2            # create a 6000G volume with 2 replicas; this takes roughly 2-3 minutes. See heketi-cli volume create -h for more options — and for a test you certainly don't need 6000G
Name: vol_adf27fe83b028ab6d7b0fde93a749d20                                    # note this name down; it is what you mount by
Size: 6000
Volume Id: adf27fe83b028ab6d7b0fde93a749d20
Cluster Id: 5ff75a20c566d3ff520026a2bcfbd359
Mount: 172.17.1.1:vol_adf27fe83b028ab6d7b0fde93a749d20
Mount Options: backup-volfile-servers=172.17.1.1
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 2
Replica Count: 2
[root@glusterfs-bj-ali-bgp1 ~]# df -h                                        # /var/lib/heketi/mounts now exposes 12T of bricks in total (my two IPs point to the same machine, so all the bricks landed on one host)
Filesystem                                                                              Size  Used Avail Use% Mounted on
devtmpfs                                                                                 16G     0   16G   0% /dev
tmpfs                                                                                    16G  120K   16G   1% /dev/shm
tmpfs                                                                                    16G  680K   16G   1% /run
tmpfs                                                                                    16G     0   16G   0% /sys/fs/cgroup
/dev/vda1                                                                               118G  3.0G  111G   3% /
tmpfs                                                                                   3.2G     0  3.2G   0% /run/user/0
/dev/mapper/vg_fd0ee85ed75d2b5c9fa6f5085b930806-brick_898f2216c03bac4f3ba17f55a9640917  3.0T   35M  3.0T   1% /var/lib/heketi/mounts/vg_fd0ee85ed75d2b5c9fa6f5085b930806/brick_898f2216c03bac4f3ba17f55a9640917
/dev/mapper/vg_416dcbb83bb64cfad79bfaaf64649e98-brick_375140d6002ea63c8f86675469ef1ee8  3.0T   35M  3.0T   1% /var/lib/heketi/mounts/vg_416dcbb83bb64cfad79bfaaf64649e98/brick_375140d6002ea63c8f86675469ef1ee8
/dev/mapper/vg_541a2089248e4b33f465eb3b15a55170-brick_34aea0de9fbdcf36b7af09eed538ea00  3.0T   35M  3.0T   1% /var/lib/heketi/mounts/vg_541a2089248e4b33f465eb3b15a55170/brick_34aea0de9fbdcf36b7af09eed538ea00
/dev/mapper/vg_3d348787cb304b524fe3261c2a7ccb7d-brick_7c73c33b5a64b7af4159e21c12847a64  3.0T   35M  3.0T   1% /var/lib/heketi/mounts/vg_3d348787cb304b524fe3261c2a7ccb7d/brick_7c73c33b5a64b7af4159e21c12847a64
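
Because --replica=2 was requested and four devices were available, heketi built a distributed-replicate (2 x 2) volume, which is why the output shows Distribute Count 2 and the usable size is half the 12T of raw bricks. The brick layout can be cross-checked with the stock gluster CLI:

[root@glusterfs-bj-ali-bgp1 ~]# gluster volume info vol_adf27fe83b028ab6d7b0fde93a749d20    # should report Type: Distributed-Replicate, Number of Bricks: 2 x 2 = 4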

# switch to another machine (the client)
[root@devops-bj-ali-bgp1 ~]# yum install -y glusterfs-fuse                    # install the client
[root@devops-bj-ali-bgp1 ~]# mount -t glusterfs -o backup-volfile-servers=glusterfs-bj-ali-bgp2,log-level=WARNING glusterfs-bj-ali-bgp1:/vol_adf27fe83b028ab6d7b0fde93a749d20 /data/loki
[root@devops-bj-ali-bgp1 ~]# df -h
Filesystem                                                   Size  Used Avail Use% Mounted on
...
glusterfs-bj-ali-bgp1:/vol_adf27fe83b028ab6d7b0fde93a749d20  5.9T   61G  5.8T   2% /data/loki
[root@devops-bj-ali-bgp1 ~]# cd /data/
[root@devops-bj-ali-bgp1 data]# ll loki/
total 16
drwxr-xr-x 8 root root 4096 Mar 16 08:00 boltdb-shipper-active
drwxr-xr-x 3 root root 4096 Mar 12 15:32 boltdb-shipper-cache
drwxr-xr-x 5 root root 4096 Mar 16 09:53 boltdb-shipper-compactor
drwx------ 2 root root 4096 Mar 16 11:18 chunks
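
The mount above will not survive a reboot; a hedged sketch of the matching /etc/fstab entry on the client (the _netdev option, which delays the mount until the network is up, is the usual choice for network filesystems):

[root@devops-bj-ali-bgp1 ~]# cat /etc/fstab
...
glusterfs-bj-ali-bgp1:/vol_adf27fe83b028ab6d7b0fde93a749d20  /data/loki  glusterfs  defaults,_netdev,backup-volfile-servers=glusterfs-bj-ali-bgp2,log-level=WARNING  0 0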

# back on the original machine, the data is visible inside the bricks
[root@glusterfs-bj-ali-bgp1 ~]# ll /var/lib/heketi/mounts/*/*/*
/var/lib/heketi/mounts/vg_3d348787cb304b524fe3261c2a7ccb7d/brick_7c73c33b5a64b7af4159e21c12847a64/brick:
total 1308
drwxr-xr-x 8 root root    8192 Mar 16 08:00 boltdb-shipper-active
drwxr-xr-x 3 root root      25 Mar 12 15:32 boltdb-shipper-cache
drwxr-xr-x 5 root root      63 Mar 16 09:53 boltdb-shipper-compactor
drwx------ 2 root root 1179648 Mar 16 11:18 chunks

/var/lib/heketi/mounts/vg_416dcbb83bb64cfad79bfaaf64649e98/brick_375140d6002ea63c8f86675469ef1ee8/brick:
total 1324
drwxr-xr-x 8 root root    8192 Mar 16 08:00 boltdb-shipper-active
drwxr-xr-x 3 root root      25 Mar 12 15:32 boltdb-shipper-cache
drwxr-xr-x 5 root root      63 Mar 16 09:53 boltdb-shipper-compactor
drwx------ 2 root root 1196032 Mar 16 11:18 chunks

/var/lib/heketi/mounts/vg_541a2089248e4b33f465eb3b15a55170/brick_34aea0de9fbdcf36b7af09eed538ea00/brick:
total 1036
drwxr-xr-x 8 root root    117 Mar 16 08:00 boltdb-shipper-active
drwxr-xr-x 3 root root     33 Mar 12 15:32 boltdb-shipper-cache
drwxr-xr-x 5 root root     79 Mar 16 09:53 boltdb-shipper-compactor
drwx------ 2 root root 909312 Mar 16 11:18 chunks

/var/lib/heketi/mounts/vg_fd0ee85ed75d2b5c9fa6f5085b930806/brick_898f2216c03bac4f3ba17f55a9640917/brick:
total 860
drwxr-xr-x 8 root root    145 Mar 16 08:00 boltdb-shipper-active
drwxr-xr-x 3 root root     33 Mar 12 15:32 boltdb-shipper-cache
drwxr-xr-x 5 root root     79 Mar 16 09:53 boltdb-shipper-compactor
drwx------ 2 root root 745472 Mar 16 11:18 chunks
[root@glusterfs-bj-ali-bgp1 ~]# 
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli volume create --size=1000 --replica=2            # more volumes can be created later; here another 1000G two-replica one
Name: vol_8be30f4b5edc2b6dee325492e7400c96
Size: 1000
Volume Id: 8be30f4b5edc2b6dee325492e7400c96
Cluster Id: 5ff75a20c566d3ff520026a2bcfbd359
Mount: 172.17.32.102:vol_8be30f4b5edc2b6dee325492e7400c96
Mount Options: backup-volfile-servers=172.17.32.101
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 2
[root@glusterfs-bj-ali-bgp1 ~]# df -h                                                        # two new bricks appear, carved out of the existing VGs; mount the new volume from the client exactly as before
Filesystem                                                                              Size  Used Avail Use% Mounted on
devtmpfs                                                                                 16G     0   16G   0% /dev
tmpfs                                                                                    16G  120K   16G   1% /dev/shm
tmpfs                                                                                    16G  732K   16G   1% /run
tmpfs                                                                                    16G     0   16G   0% /sys/fs/cgroup
/dev/vda1                                                                               118G  3.0G  110G   3% /
tmpfs                                                                                   3.2G     0  3.2G   0% /run/user/0
/dev/mapper/vg_fd0ee85ed75d2b5c9fa6f5085b930806-brick_898f2216c03bac4f3ba17f55a9640917  3.0T  1.6G  3.0T   1% /var/lib/heketi/mounts/vg_fd0ee85ed75d2b5c9fa6f5085b930806/brick_898f2216c03bac4f3ba17f55a9640917
/dev/mapper/vg_416dcbb83bb64cfad79bfaaf64649e98-brick_375140d6002ea63c8f86675469ef1ee8  3.0T  363M  3.0T   1% /var/lib/heketi/mounts/vg_416dcbb83bb64cfad79bfaaf64649e98/brick_375140d6002ea63c8f86675469ef1ee8
/dev/mapper/vg_541a2089248e4b33f465eb3b15a55170-brick_34aea0de9fbdcf36b7af09eed538ea00  3.0T  1.7G  3.0T   1% /var/lib/heketi/mounts/vg_541a2089248e4b33f465eb3b15a55170/brick_34aea0de9fbdcf36b7af09eed538ea00
/dev/mapper/vg_3d348787cb304b524fe3261c2a7ccb7d-brick_7c73c33b5a64b7af4159e21c12847a64  3.0T  362M  3.0T   1% /var/lib/heketi/mounts/vg_3d348787cb304b524fe3261c2a7ccb7d/brick_7c73c33b5a64b7af4159e21c12847a64
/dev/mapper/vg_541a2089248e4b33f465eb3b15a55170-brick_d4af7bb821d0e29c9e140e067bdeff13 1000G   35M 1000G   1% /var/lib/heketi/mounts/vg_541a2089248e4b33f465eb3b15a55170/brick_d4af7bb821d0e29c9e140e067bdeff13
/dev/mapper/vg_416dcbb83bb64cfad79bfaaf64649e98-brick_79bc81de736ff888511b2bde46678b41 1000G   35M 1000G   1% /var/lib/heketi/mounts/vg_416dcbb83bb64cfad79bfaaf64649e98/brick_79bc81de736ff888511b2bde46678b41
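
For later housekeeping, heketi-cli covers the rest of the volume lifecycle; a brief sketch using the volume ID printed at creation time:

[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli volume list                                                                  # every volume with its ID
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli volume expand --volume=8be30f4b5edc2b6dee325492e7400c96 --expand-size=500    # grow the 1000G volume by 500G
[root@glusterfs-bj-ali-bgp1 ~]# heketi-cli volume delete 8be30f4b5edc2b6dee325492e7400c96                               # delete it and free its bricks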

 

