Fix installation of glusterd server

2022-08-29 21:13:04 -04:00
parent aedf86ef0a
commit 7ff5dc892f
13 changed files with 77 additions and 7 deletions

View File

@@ -0,0 +1,111 @@
---
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
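# e.g. (device path is illustrative):
#   $ blkid -s UUID -o value /dev/sda1
#   d1b005e1-294f-4588-b9a6-21b72f7e3e0f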
glusterd:
  version: 9.2-1
  unit: glusterd
  envfile: /etc/sysconfig/glusterd
cluster:
  - cubox-i
#nodes:
#  - name: cubox-i
#    uuid: 8c6d97da-f712-41d5-829a-906de5113479
#  - name: home
#    uuid: c59792a8-72d6-463b-8309-e66c6bc96d35
volumes:
  brick_folder: brick
  # bricks found at "{{ mounts.paths['gitea'] }}/{{ volumes.brick_folder }}"
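  # e.g. for gitea this expands to /data/glusterfs/gitea/brick1/brick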
  replicated:
    - name: gitea
    - name: jenkins
    - name: vmshares
#    - name: mariadb
#    - name: plex
#    - name: nextcloud2
#    - name: prometheus
#    - name: tmp
  distributed: []
#    - name: tmp
  removed:
    - name: jenkins2
    - name: nextcloud
    - name: postgres
mounts:
  paths:
    gitea: /data/glusterfs/gitea/brick1
    jenkins: /data/glusterfs/jenkins/brick1
    jenkins2: /data/glusterfs/jenkins2/brick1 # dead
    mariadb: /data/glusterfs/mariadb/brick1
    nextcloud: /data/glusterfs/nextcloud/brick1
    nextcloud2: /data/glusterfs/nextcloud2/brick1 # dead
    plex: /data/glusterfs/plex/brick1
    postgres: /data/glusterfs/postgres/brick1 # dead
    prometheus: /data/glusterfs/prometheus/brick1
    tmp: /data/glusterfs/tmp/brick1
    vmshares: /data/glusterfs/vmshares/brick1
  cubox-i:
    - name: vmshares
      status: mounted
      src: "UUID=d1b005e1-294f-4588-b9a6-21b72f7e3e0f"
    - name: gitea
      status: mounted
      src: "UUID=3f6678ec-8555-4c32-a399-9b4418465a66"
    - name: jenkins
      status: mounted
      src: "UUID=fb9f6c8d-2522-4f3b-831a-793964884967"
    - name: tmp
      status: mounted
      src: "UUID=cc7588cc-1d39-4f09-baac-234f8c081443"
    - name: nextcloud
      status: mounted
      src: "UUID=617b4eee-7c60-4506-b2c1-7768900482e9"
    - name: prometheus
      status: mounted
      src: "UUID=f2b9d354-f7f1-45d9-80a3-f616beeca5bc"
    - name: mariadb
      status: mounted
      src: "UUID=5641769e-9816-4955-9def-17bb625f88f0"
    - name: plex
      status: mounted
      src: "UUID=57a3133c-6ca3-4974-9a0b-7efc8c0c533b"
    - name: nextcloud2 # dead
      status: mounted
      src: "UUID=ced31b8e-8404-4bff-b428-6411c54451a4"
  home:
    - name: vmshares
      status: mounted
      src: "UUID=1ee11dc5-0b55-47bd-a92c-e644175a8ba9"
    - name: gitea
      status: mounted
      src: "UUID=1a10b5b3-a00b-463c-bb47-d7a1430357c2"
    - name: jenkins
      status: mounted
      src: "UUID=c1ae3332-8ccc-42c4-beb9-217e0dd58835"
    - name: tmp
      status: mounted
      src: "UUID=cad3eb8f-bd07-499b-b1f0-19ec14ba0084"
    - name: nextcloud
      status: mounted
      src: "UUID=179763ba-27cc-4dbf-a0b2-01596727c541"
    - name: prometheus
      status: mounted
      src: "UUID=19ec83ce-cb19-465f-ba79-0426a659541d"
    - name: mariadb
      status: mounted
      src: "UUID=fa3adff4-b456-4321-8528-eed843615191"
    - name: plex
      status: mounted
      src: "UUID=2e87570b-4750-4aeb-926b-1d9b48ef89b3"
    - name: nextcloud2
      status: mounted
      src: "UUID=b0dc64be-80b1-49f6-ae0f-ae6219e734c9"

View File

@@ -0,0 +1,9 @@
---
# handlers/main.yml
# define handlers here
- name: restart glusterd
  ansible.builtin.systemd:
    name: "{{ glusterd.unit }}"
    state: restarted
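# this handler runs only when a task reports 'changed' and notifies it, e.g.:
#   notify:
#     - restart glusterd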

View File

@@ -0,0 +1,7 @@
---
# create the cluster
- name: Create a trusted storage pool
  gluster.gluster.gluster_peer:
    state: present
    nodes: "{{ cluster }}"
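# to verify the pool manually, run on any member:
#   $ gluster pool list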

View File

@@ -0,0 +1,41 @@
---
# main tasks for installing glusterfs
- name: add the glusterfs repo
  ansible.builtin.lineinfile:
    path: /etc/apt/sources.list.d/gluster.list
    state: present
    create: yes
    owner: root
    group: root
    mode: '0644'
    line: deb [arch=arm64] https://download.gluster.org/pub/gluster/glusterfs/6/6.9/Debian/bullseye/arm64/apt bullseye main
- name: Add an Apt signing key, uses whichever key is at the URL
  ansible.builtin.apt_key:
    url: https://download.gluster.org/pub/gluster/glusterfs/6/rsa.pub
    state: present
# install packages
- name: Ensure glusterfs server is installed.
  ansible.builtin.apt:
    name: "{{ item }}"
    update_cache: yes
    cache_valid_time: 3600
    state: present
  with_items:
    - glusterfs-server=6.9
    - xfsprogs
    - xfsdump
- name: Start service gluster, if not started
  block:
#    - name: start on home
#      ansible.builtin.service:
#        name: glusterd
#        state: started
#      when: "ansible_lsb.major_release >= '20'"
    - name: start on cubox-i
      ansible.builtin.service:
        name: glusterfs-server
        state: started

View File

@@ -0,0 +1,39 @@
---
# install available glusterfs-server
# install packages
- name: Ensure glusterfs server is installed.
  ansible.builtin.apt:
    name: "{{ item }}"
    update_cache: yes
    cache_valid_time: 3600
    update_cache_retries: 10
    state: present
  with_items:
    - glusterfs-server={{ glusterd.version }}
    - xfsprogs
    - xfsdump
  notify:
    - restart glusterd
- name: create systemd environment file
  ansible.builtin.template:
    src: systemd/environment.j2
    dest: "{{ glusterd.envfile }}"
    owner: root
    group: root
    mode: '0644'
  notify:
    - restart glusterd
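# a sketch of the rendered result: assuming the environment file shown later
# in this commit is what systemd/environment.j2 produces, the file written to
# {{ glusterd.envfile }} (/etc/sysconfig/glusterd) contains:
#   LOG_LEVEL=INFO
#   GLUSTERD_OPTIONS=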
- name: enable systemd unit
  ansible.builtin.systemd:
    name: "{{ glusterd.unit }}"
    enabled: yes
  notify:
    - restart glusterd
- name: start glusterd
  ansible.builtin.systemd:
    name: "{{ glusterd.unit }}"
    state: started

View File

@@ -0,0 +1,64 @@
---
# main tasks for installing glusterfs
- name: add the glusterfs repo
  ansible.builtin.lineinfile:
    path: /etc/apt/sources.list.d/gluster.list
    state: present
    create: yes
    owner: root
    group: root
    mode: '0644'
    regexp: deb-src
    line: deb-src https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/bullseye/amd64/apt bullseye main
- name: make a build dir
  ansible.builtin.file:
    path: src/debian
    state: directory
    mode: '0755'
- name: Add an Apt signing key, uses whichever key is at the URL
  ansible.builtin.apt_key:
    url: https://download.gluster.org/pub/gluster/glusterfs/6/rsa.pub
    state: present
- name: Ensure build tools are installed.
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  with_items:
    - dpkg-dev
- name: Ensure build deps are installed.
  ansible.builtin.apt:
    name: "{{ item }}"
    state: build-dep
  with_items:
    - glusterfs-server
# install packages
#- name: Ensure glusterfs server is installed.
#  ansible.builtin.apt:
#    name: "{{ item }}"
#    update_cache: yes
#    cache_valid_time: 3600
#    state: present
#  with_items:
#    - glusterfs-server=6.9
#    - xfsprogs
#    - xfsdump
#
#- name: Start service gluster, if not started
#  block:
##    - name: start on home
##      ansible.builtin.service:
##        name: glusterd
##        state: started
##      when: "ansible_lsb.major_release >= '20'"
#
#    - name: start on cubox-i
#      ansible.builtin.service:
#        name: glusterfs-server
#        state: started

View File

@@ -0,0 +1,20 @@
---
#- name: install on ubuntu
#  ansible.builtin.include_role:
#    name: geerlingguy.glusterfs
#  when: ansible_architecture != 'armv7l'
- include_tasks: install.yaml
- include_tasks: create_cluster.yaml
#- include_tasks: manage_volumes.yml
## doesn't work
#- name: "check the underlying config"
#  include_tasks: recover_nodes.yaml
#- include_tasks: prometheus_exporter.yml
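#
# a minimal playbook wiring this role in might look like this (the role and
# group names here are assumptions, not part of this commit):
#
#   - hosts: gluster
#     become: true
#     roles:
#       - glusterfs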

View File

@@ -0,0 +1,57 @@
---
# glusterfs mounts
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# Mount drives in fstab
- name: Create fstab entries
  ansible.posix.mount:
    path: "{{ mounts.paths[item.name] }}"
    src: "{{ item.src }}"
    fstype: "xfs"
    # dump and passno are separate module parameters (both default to 0),
    # so they must not be appended to opts
    opts: "defaults,noatime,nofail,x-systemd.device-timeout=10"
    state: "{{ item.status }}"
  with_items: "{{ mounts[inventory_hostname] }}"
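# for example, the vmshares entry on cubox-i should render in /etc/fstab as:
#   UUID=d1b005e1-294f-4588-b9a6-21b72f7e3e0f /data/glusterfs/vmshares/brick1 xfs defaults,noatime,nofail,x-systemd.device-timeout=10 0 0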
- name: create pool
  gluster.gluster.gluster_peer:
    state: present
    nodes: "{{ cluster }}"
# manage volumes
- include_tasks: remove_volume.yml
  with_items: "{{ volumes.removed }}"
- name: debug
  debug:
    var: item.name
  with_items: "{{ volumes.replicated }}"
- name: create gluster volumes
  gluster.gluster.gluster_volume:
    state: present
    start_on_create: yes
    name: "{{ item.name }}"
    brick: "{{ mounts.paths[item.name] }}/{{ volumes.brick_folder }}"
    rebalance: no
#    replicas: 2
    force: true
    cluster: "{{ cluster }}"
  with_items: "{{ volumes.replicated }}"
  run_once: true
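# with the current vars (cluster = [cubox-i]) this is roughly equivalent to
# running once:
#   gluster volume create gitea cubox-i:/data/glusterfs/gitea/brick1/brick force
#   gluster volume start gitea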
- name: create distributed gluster volumes
  gluster.gluster.gluster_volume:
    state: present
    name: "{{ item.name }}"
    brick: "{{ mounts.paths[item.name] }}/{{ volumes.brick_folder }}"
    rebalance: no
    # a distributed volume has no replica count, so 'replicas' is omitted
#    force: true
    cluster: "{{ cluster }}"
  with_items: "{{ volumes.distributed }}"
  run_once: true

View File

@@ -0,0 +1,31 @@
---
# tasks to install prometheus gluster_exporter
# https://github.com/ofesseler/gluster_exporter
# install packages
- name: Update apt cache.
  ansible.builtin.apt:
    update_cache: yes
    cache_valid_time: 86400
- name: Install gluster_exporter dependencies.
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  with_items:
    - golang-go
- name: make golang workspace
  ansible.builtin.file:
    path: /opt/golang
    state: directory
    mode: '0775'
- name: install gluster_exporter
  ansible.builtin.shell: GOPATH=/opt/golang go get github.com/ofesseler/gluster_exporter
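# 'go get' (pre-modules Go) installs the built binary into $GOPATH/bin,
# i.e. /opt/golang/bin/gluster_exporter, which the next two tasks rely on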
- name: add gluster_exporter to startup
  ansible.builtin.lineinfile:
    dest: /etc/rc.local
    regexp: '/opt/golang/bin/gluster_exporter'
    line: 'nohup /opt/golang/bin/gluster_exporter --profile &'
    insertbefore: "^exit 0"
- name: start gluster_exporter
  ansible.builtin.shell: nohup /opt/golang/bin/gluster_exporter --profile &

View File

@@ -0,0 +1,81 @@
---
# ensure that the underlying config is correct
- name: fetch details
  block:
    - name: get old node UUID
      ansible.builtin.shell: |
        gluster pool list | grep {{ new_node.name }} | awk '{print $1}'
      register: old_node_uuid
      changed_when: false
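    # sample 'gluster pool list' output (the awk grabs column 1, the UUID):
    #   UUID                                  Hostname  State
    #   8c6d97da-f712-41d5-829a-906de5113479  cubox-i   Connected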
    - debug:
        var: "old_node_uuid.stdout"
      when: old_node_uuid is defined
  become: true
  when: inventory_hostname != new_node.name
- name: update new node
  block:
    - name: stop glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: stopped
      changed_when: false
    - name: update glusterd.info
      ansible.builtin.lineinfile:
        path: /var/lib/glusterd/glusterd.info
        regexp: '^UUID='
        line: 'UUID={{ new_node.uuid }}'
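    # /var/lib/glusterd/glusterd.info is the node's identity file; a typical
    # one looks like:
    #   UUID=c59792a8-72d6-463b-8309-e66c6bc96d35
    #   operating-version=90000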
    - name: start glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: started
      changed_when: false
    - name: add cluster nodes to new node
      ansible.builtin.shell: "gluster peer probe {{ item }}"
      register: peer_probe_result
      with_items: "{{ cluster }}"
      changed_when:
        - "'already in peer list' not in peer_probe_result.stdout"
        - "'peer probe: success' in peer_probe_result.stdout"
      failed_when: false
    - name: reset systemctl restart count
      ansible.builtin.shell: "systemctl reset-failed {{ glusterfs_daemon }}"
      changed_when: false
    - name: restart glusterd
      ansible.builtin.service:
        name: "{{ glusterfs_daemon }}"
        state: restarted
      changed_when: false
#    - name: add volumes
#      ansible.builtin.shell: "gluster --mode=script volume sync {{ cluster[0] }} tmp"
#      register: sync_result
#      failed_when: "'volume sync: success' not in sync_result.stdout"
  become: true
  when: inventory_hostname == new_node.name
- name: create tmp gluster volume
  gluster.gluster.gluster_volume:
    state: present
    start_on_create: yes
    name: "tmp"
    brick: '/data/glusterfs/tmp/brick1'
    rebalance: no
    replicas: 2
    force: true
    cluster:
      - home
      - cubox-i
  run_once: true

View File

@@ -0,0 +1,25 @@
---
# remove a volume
- name: "check for existing {{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
stat:
path: "{{mounts.paths[item.name]}}/{{volumes.brick_folder}}/.glusterfs"
register: gluster_volume_config
- name: "Old gluster volume {{item.name}} needs to be removed"
debug:
var: gluster_volume_config.stat.exists
- name: remove old gluster volumes
block:
- name: remove volume
gluster_volume:
state: absent
name: "{{item.name}}"
# rebalance: no
# replicas: false
force: true
cluster: "{{cluster}}"
run_once: true
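    # state: absent is roughly 'gluster volume stop' + 'gluster volume delete';
    # gluster leaves the brick contents behind, hence the cleanup task below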
    - name: remove .glusterfs folder
      ansible.builtin.shell: "rm -rf {{ mounts.paths[item.name] }}/{{ volumes.brick_folder }}/.glusterfs"
  when: gluster_volume_config.stat.exists

View File

@@ -0,0 +1,2 @@
LOG_LEVEL=INFO
GLUSTERD_OPTIONS=