remove and mount gluster volumes
@@ -3,15 +3,45 @@
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
cluster:
  - cubox-i
nodes:
  - name: cubox-i
    uuid: 8c6d97da-f712-41d5-829a-906de5113479
  - name: home
    uuid: c59792a8-72d6-463b-8309-e66c6bc96d35

volumes:
  brick_folder: brick
  # bricks found at "{{mounts.paths[gitea]}}/{{volumes.brick_folder}}"
  replicated:
    # - name: gitea
    - name: jenkins
    # - name: vmshares
    # - name: mariadb
    # - name: plex
    # - name: nextcloud
    # - name: prometheus
    # - name: tmp

  distributed: []
    # - name: tmp

  removed:
    - name: jenkins2
    - name: nextcloud2
    - name: postgres

mounts:
  paths:
    gitea: /data/glusterfs/gitea/brick1
    jenkins: /data/glusterfs/jenkins/brick1
    jenkins2: /data/glusterfs/jenkins2/brick1 # dead
    mariadb: /data/glusterfs/mariadb/brick1
    nextcloud: /data/glusterfs/nextcloud/brick1
-    nextcloud2: /data/glusterfs/nextcloud2/brick1
+    nextcloud2: /data/glusterfs/nextcloud2/brick1 # dead
    plex: /data/glusterfs/plex/brick1
    postgres: /data/glusterfs/postgres/brick1 # dead
    prometheus: /data/glusterfs/prometheus/brick1
    tmp: /data/glusterfs/tmp/brick1
    vmshare: /data/glusterfs/vmshares/brick1
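For orientation, every brick path in the tasks below is built as mounts.paths[<volume name>] joined with volumes.brick_folder; a worked expansion for one active and one removed volume, derived from the vars above purely as an illustration:

# illustration: how the vars expand (not part of the commit)
#   jenkins  (replicated): "{{mounts.paths[jenkins]}}/{{volumes.brick_folder}}"  -> /data/glusterfs/jenkins/brick1/brick
#   jenkins2 (removed):    "{{mounts.paths[jenkins2]}}/{{volumes.brick_folder}}" -> /data/glusterfs/jenkins2/brick1/brick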
@@ -41,7 +71,7 @@ mounts:
    - name: plex
      status: mounted
      src: "UUID=57a3133c-6ca3-4974-9a0b-7efc8c0c533b"
-    - name: nextcloud2
+    - name: nextcloud2 # dead
      status: mounted
      src: "UUID=ced31b8e-8404-4bff-b428-6411c54451a4"
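The per-host entries above (name, status, src) feed the mount task whose tail shows up at the top of the manage_volumes.yml hunk further down (state: "{{ item.status }}" looped over mounts[inventory_hostname]). A minimal sketch of such a task, assuming ansible.posix.mount, an ext4 filesystem and mounts.paths as the mount points; only status and src come from the vars:

# sketch only, not taken from the commit
- name: mount brick filesystems
  ansible.posix.mount:
    path: "{{ mounts.paths[item.name] }}"   # assumed mount point, e.g. /data/glusterfs/plex/brick1
    src: "{{ item.src }}"                   # e.g. "UUID=57a3133c-6ca3-4974-9a0b-7efc8c0c533b"
    fstype: ext4                            # assumption, not visible in the diff
    state: "{{ item.status }}"              # e.g. mounted
  with_items: "{{ mounts[inventory_hostname] }}"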
@@ -10,6 +10,10 @@

- include_tasks: manage_volumes.yml

## doesn't work
#- name: "check the underlying config"
#  include_tasks: recover_nodes.yaml

#- include_tasks: prometheus_exporter.yml
@@ -15,57 +15,43 @@
    state: "{{ item.status }}"
  with_items: "{{mounts[inventory_hostname]}}"

# create mount points
#- name: create mount points for all bricks in all volumes
#  file:
#    path: "{{ item }}"
#    state: directory
#    mode: 0755
#  with_items: "{{mountpoints}}"
- name: create pool
  gluster.gluster.gluster_peer:
    state: present
    nodes: "{{ cluster }}"

# manage volumes
#- name: remove old gluster volumes
#  gluster_volume:
#    state: absent
#    name: "{{ item.key }}"
#    rebalance: no
#    replicas: false
#    cluster: "{{cluster}}"
#  with_dict: "{{removed_volumes}}"
#  run_once: true

- include_tasks: remove_volume.yml
  with_items: "{{volumes.removed}}"

#- name: create gluster volumes
#  gluster_volume:
#    state: present
#    start_on_create: yes
#    name: "{{ item.key }}"
#    brick: '{{ item.value | join(",")}}'
#    rebalance: no
- name: debug
  debug:
    var: item.name
  with_items: "{{volumes.replicated}}"

- name: create gluster volumes
  gluster_volume:
    state: present
    start_on_create: yes
    name: "{{item.name}}"
    brick: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}"
    rebalance: no
    # replicas: 2
    # force: true
    # cluster: "{{cluster}}"
    # with_dict: "{{volumes}}"
    # run_once: true
    force: true
    cluster: "{{cluster}}"
  with_items: "{{volumes.replicated}}"
  run_once: true

#- name: create tmp gluster volumes
#  gluster_volume:
#    state: present
#    name: "tmp"
#    brick: '/data/glusterfs/tmp/brick1/brick'
#    rebalance: no
#    replicas: 1
#    cluster: "{{cluster}}"
#  run_once: true

#- name: create distributed gluster volumes
#  gluster_volume:
#    state: present
#    name: "{{ item.key }}"
#    brick: '{{ item.value | join(",")}}'
#    rebalance: no
#    replicas: false
- name: create distributed gluster volumes
  gluster_volume:
    state: present
    name: "{{ item.name }}"
    brick: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}"
    rebalance: no
    replicas: false
    # force: true
    # cluster: "{{cluster}}"
    # with_dict: "{{distributed_volumes}}"
    # run_once: true
    cluster: "{{cluster}}"
  with_items: "{{volumes.distributed}}"
  run_once: true
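As a rough guide, for item.name == jenkins the replicated-volume task above boils down to a gluster volume create with one brick per host in {{cluster}} followed by a start (replicas: 2 is still commented out, so no replica count is passed). The approximate CLI equivalent, shown only as an illustration:

# illustration: approximate CLI behind the task for item.name == jenkins
#   gluster volume create jenkins cubox-i:/data/glusterfs/jenkins/brick1/brick force   # one brick per cluster host
#   gluster volume start jenkins                                                       # start_on_create: yes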
ansible-5/roles/glusterfs/tasks/recover_nodes.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
---
# ensure that the underlying config is correct

- name: fetch details
  block:

    - name: get old node UUID
      ansible.builtin.shell: |
        gluster pool list | grep {{ new_node.name }} | awk '{print $1}'
      register: old_node_uuid
      changed_when: false

    - debug:
        var: "old_node_uuid.stdout"
      when: old_node_uuid is defined

  become: true
  when: inventory_hostname != new_node.name

- name: update new node
  block:
    - name: stop glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: stopped
      changed_when: false

    - name: update glusterd.info
      ansible.builtin.lineinfile:
        path: /var/lib/glusterd/glusterd.info
        regexp: '^UUID='
        line: 'UUID={{ new_node.uuid }}'

    - name: start glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: started
      changed_when: false

    - name: add cluster nodes to new node
      ansible.builtin.shell: "gluster peer probe {{ item }}"
      register: peer_probe_result
      with_items: "{{ cluster }}"
      changed_when:
        - "'already in peer list' not in peer_probe_result.stdout"
        - "'peer probe: success' in peer_probe_result.stdout"
      failed_when: false

    - name: reset systemctl restart count
      ansible.builtin.shell: "systemctl reset-failed {{ glusterfs_daemon }}"
      changed_when: false

    - name: restart glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: restarted
      changed_when: false

    # - name: add volumes
    #   ansible.builtin.shell: "gluster --mode=script volume sync {{ cluster[0] }} tmp"
    #   register: sync_result
    #   failed_when: "'volume sync: success' not in sync_result.stdout"

  become: true
  when: inventory_hostname == new_node.name


- name: create tmp gluster volume
  gluster_volume:
    state: present
    start_on_create: yes
    name: "tmp"
    brick: '/data/glusterfs/tmp/brick1'
    rebalance: no
    replicas: 2
    force: true
    cluster:
      - home
      - cubox-i
  run_once: true
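recover_nodes.yaml expects a new_node variable shaped like one entry of the nodes list in the vars (name plus the UUID the node should carry), and main.yml currently keeps the include commented out as not working. One possible wiring, a sketch only; the selectattr lookup is an assumption, not something in the commit:

# sketch: one way to pass new_node into recover_nodes.yaml
- name: "check the underlying config"
  include_tasks: recover_nodes.yaml
  vars:
    new_node: "{{ nodes | selectattr('name', 'equalto', inventory_hostname) | first }}"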
ansible-5/roles/glusterfs/tasks/remove_volume.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
# remove a volume
- name: "check for existing {{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
  stat:
    path: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
  register: gluster_volume_config

- name: remove old gluster volumes
  block:
    - name: remove volume
      gluster_volume:
        state: absent
        name: "{{item.name}}"
        # rebalance: no
        # replicas: false
        force: true
        cluster: "{{cluster}}"
      run_once: true
    - name: remove .glusterfs folder
      ansible.builtin.shell: "rm -rf {{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
  when: gluster_volume_config.stat.exists
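manage_volumes.yml includes this file once per entry of volumes.removed, so item is a single removed volume. Worked through the vars for jenkins2, purely as an illustration:

# illustration: resolved paths for item == { name: jenkins2 }
#   stat path and rm target: /data/glusterfs/jenkins2/brick1/brick/.glusterfs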