remove and mount gluster volumes
@@ -9,7 +9,7 @@ home ansible_ssh_host=192.168.4.11
 cubox-i ansible_ssh_host=192.168.4.12
 
 [gfs]
-home ansible_ssh_host=192.168.4.11
+;home ansible_ssh_host=192.168.4.11
 cubox-i ansible_ssh_host=192.168.4.12
 
 [kube]
@@ -9,50 +9,12 @@
 
   vars:
     # glusterfs_default_release: 6
-    glusterfs_ppa_version: 6
+    # glusterfs_ppa_version: 6
+    # glusterfs_daemon: glusterd
     cluster:
       - cubox-i
-      - home
-    mountpoints:
-      - /data/glusterfs/vmshares/brick1
-      - /data/glusterfs/gitea/brick1
-      - /data/glusterfs/jenkins/brick1
-      - /data/glusterfs/jenkins2/brick1
-      - /data/glusterfs/tmp/brick1
-      - /data/glusterfs/prometheus/brick1
-      - /data/glusterfs/nextcloud/brick1
-      - /data/glusterfs/mariadb/brick1
-      - /data/glusterfs/postgresql/brick1
-    volumes:
-      gitea:
-        - /data/glusterfs/gitea/brick1/brick
-      jenkins2:
-        - /data/glusterfs/jenkins2/brick1/brick
-      vmshares:
-        - /data/glusterfs/vmshares/brick1/brick
-      mariadb:
-        - /data/glusterfs/mariadb/brick1/brick
-      plex:
-        - /data/glusterfs/plex/brick1/brick
-      nextcloud:
-        - /data/glusterfs/nextcloud/brick1/brick
-      nextcloud2:
-        - /data/glusterfs/nextcloud2/brick1/brick
-    distributed_volumes:
-      tmp:
-        - /data/glusterfs/tmp/brick1/brick
-      prometheus:
-        - /data/glusterfs/prometheus/brick1
-      # elasticsearch:
-      #   - /data/glusterfs/elasticsearch/brick1/brick
-
-    removed_volumes:
-      postgresql:
-        - /data/glusterfs/postgresql/brick1/brick
-      # jenkins:
-      #   - /data/glusterfs/jenkins/brick1/brick
-      # tmp:
-      #   - /data/glusterfs/tmp/brick1/brick
+    move_brick:
+      - gitea
 
   pre_tasks:
     # - name: test vars
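Note: the new move_brick list is introduced here, but none of the hunks in this commit consume it yet. A minimal sketch of what a consumer might look like, assuming it wraps gluster's replace-brick command and that source and destination hosts are cubox-i and home (the task and its wiring are hypothetical, not part of this commit):

- name: move bricks listed in move_brick   # hypothetical task, not in this commit
  ansible.builtin.shell: >
    gluster volume replace-brick {{ item }}
    cubox-i:{{ mounts.paths[item] }}/{{ volumes.brick_folder }}
    home:{{ mounts.paths[item] }}/{{ volumes.brick_folder }}
    commit force
  with_items: "{{ move_brick }}"
  run_once: true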
@@ -7,103 +7,17 @@
 machines:
   cubox-i:
     fstab:
-      add:
+      add: []
       # nextcloud2:
       #   src: UUID=ced31b8e-8404-4bff-b428-6411c54451a4
       #   path: /data/glusterfs/nextcloud2/brick1
       #   fstype: xfs
       #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
 
-      remove:
-
-      # vmshare:
-      #   src: /dev/sda6
-      #   path: /data/glusterfs/vmshares/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # gitea:
-      #   src: /dev/sda7
-      #   path: /data/glusterfs/gitea/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # jenkins:
-      #   src: /dev/sda8
-      #   path: /data/glusterfs/jenkins/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # tmp:
-      #   src: /dev/sda9
-      #   path: /data/glusterfs/tmp/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # nextcloud:
-      #   src: /dev/sda10
-      #   path: /data/glusterfs/nextcloud/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # prometheus:
-      #   src: /dev/sda11
-      #   path: /data/glusterfs/prometheus/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # mariadb:
-      #   src: /dev/sda12
-      #   path: /data/glusterfs/mariadb/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # plex:
-      #   src: /dev/sda13
-      #   path: /data/glusterfs/plex/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
+      remove: []
 
   home:
     fstab:
-      add:
-      # vmshare:
-      #   src: UUID=1ee11dc5-0b55-47bd-a92c-e644175a8ba9
-      #   path: /data/glusterfs/vmshares/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # gitea:
-      #   src: UUID=1a10b5b3-a00b-463c-bb47-d7a1430357c2
-      #   path: /data/glusterfs/gitea/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # jenkins:
-      #   src: UUID=c1ae3332-8ccc-42c4-beb9-217e0dd58835
-      #   path: /data/glusterfs/jenkins/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # tmp:
-      #   src: UUID=cad3eb8f-bd07-499b-b1f0-19ec14ba0084
-      #   path: /data/glusterfs/tmp/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # nextcloud:
-      #   src: UUID=179763ba-27cc-4dbf-a0b2-01596727c541
-      #   path: /data/glusterfs/nextcloud/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # prometheus:
-      #   src: UUID=19ec83ce-cb19-465f-ba79-0426a659541d
-      #   path: /data/glusterfs/prometheus/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # mariadb:
-      #   src: UUID=fa3adff4-b456-4321-8528-eed843615191
-      #   path: /data/glusterfs/mariadb/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # plex:
-      #   src: UUID=2e87570b-4750-4aeb-926b-1d9b48ef89b3
-      #   path: /data/glusterfs/plex/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
-      # nextcloud2:
-      #   src: UUID=b0dc64be-80b1-49f6-ae0f-ae6219e734c9
-      #   path: /data/glusterfs/nextcloud2/brick1
-      #   fstype: xfs
-      #   options: defaults,noatime,nofail,x-systemd.device-timeout=10 0 0
+      add: []
 
-      remove:
+      remove: []
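The per-machine fstab.add / fstab.remove maps above are presumably consumed by a mount-management role elsewhere in the repo. A minimal sketch of such a consumer, assuming ansible.posix.mount and the entry layout shown in the commented examples (src, path, fstype, options); the task itself is not part of this commit:

- name: mount configured bricks   # hypothetical consumer, not in this commit
  ansible.posix.mount:
    src: "{{ item.value.src }}"
    path: "{{ item.value.path }}"
    fstype: "{{ item.value.fstype }}"
    state: mounted
  with_dict: "{{ machines[inventory_hostname].fstab.add }}"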
@@ -8,4 +8,5 @@
     state: latest
   with_items:
     - htop
+    - attr
   when: ansible_os_family == "Debian"
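The attr package added here is most likely for debugging: GlusterFS keeps its replication and volume metadata in extended attributes on each brick, and getfattr (shipped with attr) is the standard way to inspect them. A minimal sketch, not part of this commit, using one of the brick paths from this repo's vars:

- name: dump gluster xattrs on a brick   # debugging aid only, not in this commit
  ansible.builtin.shell: "getfattr -d -m . -e hex /data/glusterfs/jenkins/brick1/brick"
  register: brick_xattrs
  changed_when: false

- debug:
    var: brick_xattrs.stdout_lines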
@@ -3,15 +3,45 @@
 # device; this may be used with UUID= as a more robust way to name devices
 # that works even if disks are added and removed. See fstab(5).
 #
+cluster:
+  - cubox-i
+nodes:
+  - name: cubox-i
+    uuid: 8c6d97da-f712-41d5-829a-906de5113479
+  - name: home
+    uuid: c59792a8-72d6-463b-8309-e66c6bc96d35
+
+volumes:
+  brick_folder: brick
+  # bricks found at "{{mounts.paths[gitea]}}/{{volumes.brick_folder}}"
+  replicated:
+    # - name: gitea
+    - name: jenkins
+    # - name: vmshares
+    # - name: mariadb
+    # - name: plex
+    # - name: nextcloud
+    # - name: prometheus
+    # - name: tmp
+
+  distributed: []
+  # - name: tmp
+
+  removed:
+    - name: jenkins2
+    - name: nextcloud2
+    - name: postgres
 
 mounts:
   paths:
     gitea: /data/glusterfs/gitea/brick1
     jenkins: /data/glusterfs/jenkins/brick1
+    jenkins2: /data/glusterfs/jenkins2/brick1 # dead
     mariadb: /data/glusterfs/mariadb/brick1
     nextcloud: /data/glusterfs/nextcloud/brick1
-    nextcloud2: /data/glusterfs/nextcloud2/brick1
+    nextcloud2: /data/glusterfs/nextcloud2/brick1 # dead
     plex: /data/glusterfs/plex/brick1
+    postgres: /data/glusterfs/postgres/brick1 # dead
     prometheus: /data/glusterfs/prometheus/brick1
     tmp: /data/glusterfs/tmp/brick1
     vmshare: /data/glusterfs/vmshares/brick1
@@ -41,7 +71,7 @@ mounts:
     - name: plex
       status: mounted
       src: "UUID=57a3133c-6ca3-4974-9a0b-7efc8c0c533b"
-    - name: nextcloud2
+    - name: nextcloud2 # dead
       status: mounted
       src: "UUID=ced31b8e-8404-4bff-b428-6411c54451a4"
 
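The comment under volumes documents the convention: each volume's brick lives at mounts.paths[<name>]/<brick_folder>. A short sketch (not in this commit) that renders the composed path for every replicated volume; for - name: jenkins it yields /data/glusterfs/jenkins/brick1/brick, which is exactly the brick: expression used in manage_volumes.yml below:

- name: show composed brick paths   # illustration only, not in this commit
  ansible.builtin.debug:
    msg: "{{ mounts.paths[item.name] }}/{{ volumes.brick_folder }}"
  with_items: "{{ volumes.replicated }}"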
@@ -10,6 +10,10 @@
 
 - include_tasks: manage_volumes.yml
 
+## doesn't work
+#- name: "check the underlying config"
+#  include_tasks: recover_nodes.yaml
+
 #- include_tasks: prometheus_exporter.yml
 
 
@@ -15,57 +15,43 @@
     state: "{{ item.status }}"
   with_items: "{{mounts[inventory_hostname]}}"
 
-# create mount points
-#- name: create mount points for all bricks in all volumes
-#  file:
-#    path: "{{ item }}"
-#    state: directory
-#    mode: 0755
-#  with_items: "{{mountpoints}}"
+- name: create pool
+  gluster.gluster.gluster_peer:
+    state: present
+    nodes: "{{ cluster }}"
 
 # manage volumes
-#- name: remove old gluster volumes
-#  gluster_volume:
-#    state: absent
-#    name: "{{ item.key }}"
-#    rebalance: no
-#    replicas: false
-#    cluster: "{{cluster}}"
-#  with_dict: "{{removed_volumes}}"
-#  run_once: true
 
-#- name: create gluster volumes
-#  gluster_volume:
-#    state: present
-#    start_on_create: yes
-#    name: "{{ item.key }}"
-#    brick: '{{ item.value | join(",")}}'
-#    rebalance: no
+- include_tasks: remove_volume.yml
+  with_items: "{{volumes.removed}}"
+
+- name: debug
+  debug:
+    var: item.name
+  with_items: "{{volumes.replicated}}"
+- name: create gluster volumes
+  gluster_volume:
+    state: present
+    start_on_create: yes
+    name: "{{item.name}}"
+    brick: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}"
+    rebalance: no
 #    replicas: 2
-#    force: true
-#    cluster: "{{cluster}}"
-#  with_dict: "{{volumes}}"
-#  run_once: true
+    force: true
+    cluster: "{{cluster}}"
+  with_items: "{{volumes.replicated}}"
+  run_once: true
 
-#- name: create tmp gluster volumes
-#  gluster_volume:
-#    state: present
-#    name: "tmp"
-#    brick: '/data/glusterfs/tmp/brick1/brick'
-#    rebalance: no
-#    replicas: 1
-#    cluster: "{{cluster}}"
-#    run_once: true
+- name: create distributed gluster volumes
+  gluster_volume:
+    state: present
+    name: "{{ item.name }}"
+    brick: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}"
+    rebalance: no
+    replicas: false
 
-#- name: create distributed gluster volumes
-#  gluster_volume:
-#    state: present
-#    name: "{{ item.key }}"
-#    brick: '{{ item.value | join(",")}}'
-#    rebalance: no
-#    replicas: false
 #    force: true
-#    cluster: "{{cluster}}"
-#  with_dict: "{{distributed_volumes}}"
-#    run_once: true
+    cluster: "{{cluster}}"
+  with_items: "{{volumes.distributed}}"
+  run_once: true
 
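After this task file runs, the pool should contain the hosts in cluster and the jenkins volume should exist and be started. A minimal post-check sketch using the standard gluster CLI; it is an assumption, not part of this commit:

- name: verify pool and volumes   # sanity check, not in this commit
  ansible.builtin.shell: |
    gluster pool list
    gluster volume info jenkins
  register: gluster_state
  changed_when: false
  run_once: true

- debug:
    var: gluster_state.stdout_lines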
ansible-5/roles/glusterfs/tasks/recover_nodes.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
+---
+# ensure that the underlying config is correct
+
+- name: fetch details
+  block:
+
+    - name: get old node UUID
+      ansible.builtin.shell: |
+        gluster pool list | grep {{ new_node }} | awk '{print $1}'
+      register: old_node_uuid
+      changed_when: false
+
+    - debug:
+        var: "old_node_uuid.stdout"
+      when: old_node_uuid is defined
+
+  become: true
+  when: inventory_hostname != new_node.name
+
+- name: update new node
+  block:
+    - name: stop glusterd
+      ansible.builtin.service:
+        name: "{{ glusterfs_daemon }}"
+        state: stopped
+      changed_when: false
+
+    - name: update glusterd.info
+      ansible.builtin.lineinfile:
+        path: /var/lib/glusterd/glusterd.info
+        regexp: '^UUID='
+        line: 'UUID={{ new_node.uuid }}'
+
+    - name: start glusterd
+      ansible.builtin.service:
+        name: "{{ glusterfs_daemon }}"
+        state: started
+      changed_when: false
+
+    - name: add cluster nodes to new node
+      ansible.builtin.shell: "gluster peer probe {{ item }}"
+      register: peer_probe_result
+      with_items: cluster
+      changed_when:
+        - "'already in peer list' not in peer_probe_result.stdout"
+        - "'peer probe: success' in peer_probe_result.stdout"
+      failed_when: false
+
+    - name: reset systemctl restart count
+      ansible.builtin.shell: "systemctl reset-failed {{ glusterfs_daemon }}"
+      changed_when: false
+
+    - name: restart glusterd
+      ansible.builtin.service:
+        name: "{{ glusterfs_daemon }}"
+        state: restarted
+      changed_when: false
+
+
+    # - name: add volumes
+    #   ansible.builtin.shell: "gluster --mode=script volume sync {{ cluster[0] }} tmp"
+    #   register: sync_result
+    #   failed_when: "'volume sync: success' not in sync_result"
+
+  become: true
+  when: inventory_hostname == new_node.name
+
+
+- name: create tmp gluster volume
+  gluster_volume:
+    state: present
+    start_on_create: yes
+    name: "tmp"
+    brick: '/data/glusterfs/tmp/brick1'
+    rebalance: no
+    replicas: 2
+    force: true
+    cluster:
+      - home
+      - cubox-i
+  run_once: true
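recover_nodes.yaml expects a new_node variable carrying name and uuid, matching the entries in the nodes list added to group_vars above; its include in main.yml is still commented out with a "doesn't work" note. A hypothetical invocation, assuming the nodes variable is in scope:

- include_tasks: recover_nodes.yaml   # hypothetical wiring, not in this commit
  vars:
    new_node: "{{ nodes | selectattr('name', 'equalto', 'home') | first }}"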
ansible-5/roles/glusterfs/tasks/remove_volume.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+---
+# remove a volume
+- name: "check for existing {{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
+  stat:
+    path: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
+  register: gluster_volume_config
+
+- name: remove old gluster volumes
+  block:
+    - name: remove volume
+      gluster_volume:
+        state: absent
+        name: "{{item.name}}"
+        # rebalance: no
+        # replicas: false
+        force: true
+        cluster: "{{cluster}}"
+      run_once: true
+    - name: remove .glusterfs folder
+      ansible.builtin.shell: "rm -rf {{mounts.paths[item.name]}}/.glusterfs"
+  when: gluster_volume_config.stat.exists
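remove_volume.yml is invoked once per entry of volumes.removed via the include_tasks loop in manage_volumes.yml above. Note that the stat guard looks for .glusterfs inside the brick folder, while the cleanup removes .glusterfs at the mount root. A minimal follow-up check for use inside that loop, assuming the standard gluster CLI; not part of this commit:

- name: assert the removed volume is gone   # sanity check, not in this commit
  ansible.builtin.shell: "gluster volume list"
  register: remaining_volumes
  changed_when: false
  failed_when: "item.name in remaining_volumes.stdout_lines"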