mount partitions for glusterfs volumes

2021-12-26 09:33:23 -05:00
parent 697b00fa4e
commit 74a32b4a02
8 changed files with 263 additions and 44 deletions

View File

@@ -54,14 +54,18 @@
   # tmp:
   #   - /data/glusterfs/tmp/brick1/brick
+  pre_tasks:
+    # - name: test vars
+    #   debug:
+    #     var: "item.name"
+    #   with_items: "{{mounts[inventory_hostname]}}"
   roles:
+    # - geerlingguy.glusterfs
     - glusterfs
+    # - td-agent-bit
   post_tasks:
-    - name: check service is up
-      ansible.builtin.service:
-        name: "{{ glusterfs_daemon }}"
-        state: started
-        enabled: true
+    # - name: check service is up
+    #   ansible.builtin.service:
+    #     name: "{{ glusterfs_daemon }}"
+    #     state: started
+    #     enabled: true

View File

@@ -7,6 +7,8 @@
   vars:
+  pre_tasks:
   roles:
     - role: common
       become: true

View File

@@ -0,0 +1,109 @@
---
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
# The trailing fstab dump/pass fields ("0 0") are omitted from the options
# strings below; ansible.posix.mount supplies them via its dump/passno
# parameters (both default to 0).
#
machines:
  cubox-i:
    fstab:
      # the empty mappings ({}) keep update_fstab.yml's with_dict loop valid
      # while every entry below stays commented out
      add: {}
        # nextcloud2:
        #   src: UUID=ced31b8e-8404-4bff-b428-6411c54451a4
        #   path: /data/glusterfs/nextcloud2/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
      remove: {}
        # vmshare:
        #   src: /dev/sda6
        #   path: /data/glusterfs/vmshares/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # gitea:
        #   src: /dev/sda7
        #   path: /data/glusterfs/gitea/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # jenkins:
        #   src: /dev/sda8
        #   path: /data/glusterfs/jenkins/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # tmp:
        #   src: /dev/sda9
        #   path: /data/glusterfs/tmp/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # nextcloud:
        #   src: /dev/sda10
        #   path: /data/glusterfs/nextcloud/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # prometheus:
        #   src: /dev/sda11
        #   path: /data/glusterfs/prometheus/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # mariadb:
        #   src: /dev/sda12
        #   path: /data/glusterfs/mariadb/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # plex:
        #   src: /dev/sda13
        #   path: /data/glusterfs/plex/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
  home:
    fstab:
      add: {}
        # vmshare:
        #   src: UUID=1ee11dc5-0b55-47bd-a92c-e644175a8ba9
        #   path: /data/glusterfs/vmshares/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # gitea:
        #   src: UUID=1a10b5b3-a00b-463c-bb47-d7a1430357c2
        #   path: /data/glusterfs/gitea/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # jenkins:
        #   src: UUID=c1ae3332-8ccc-42c4-beb9-217e0dd58835
        #   path: /data/glusterfs/jenkins/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # tmp:
        #   src: UUID=cad3eb8f-bd07-499b-b1f0-19ec14ba0084
        #   path: /data/glusterfs/tmp/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # nextcloud:
        #   src: UUID=179763ba-27cc-4dbf-a0b2-01596727c541
        #   path: /data/glusterfs/nextcloud/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # prometheus:
        #   src: UUID=19ec83ce-cb19-465f-ba79-0426a659541d
        #   path: /data/glusterfs/prometheus/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # mariadb:
        #   src: UUID=fa3adff4-b456-4321-8528-eed843615191
        #   path: /data/glusterfs/mariadb/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # plex:
        #   src: UUID=2e87570b-4750-4aeb-926b-1d9b48ef89b3
        #   path: /data/glusterfs/plex/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
        # nextcloud2:
        #   src: UUID=b0dc64be-80b1-49f6-ae0f-ae6219e734c9
        #   path: /data/glusterfs/nextcloud2/brick1
        #   fstype: xfs
        #   options: defaults,noatime,nofail,x-systemd.device-timeout=10
      remove: {}
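
The UUIDs above can be collected without running blkid by hand on each host. A minimal ad-hoc sketch, not part of this commit (the device path /dev/sda6 and the brick_uuid variable are only illustrative):

- name: read the filesystem UUID for a brick device
  ansible.builtin.command: blkid -s UUID -o value /dev/sda6  # illustrative device
  register: brick_uuid
  changed_when: false

- name: print the UUID= string to paste into the machines dict
  ansible.builtin.debug:
    msg: "UUID={{ brick_uuid.stdout }}"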

View File

@@ -5,4 +5,8 @@
 # update packages to latest
 - include_tasks: apply_updates.yml
-- include_tasks: motd.yml
+- name: update login screen
+  include_tasks: motd.yml
+- name: update fstab
+  include_tasks: update_fstab.yml

View File

@@ -0,0 +1,11 @@
---
# set mount points in fstab
- name: Create fstab entries
  ansible.posix.mount:
    path: "{{ item.value.path }}"
    src: "{{ item.value.src }}"
    fstype: "{{ item.value.fstype }}"
    # opts expects a comma-separated option string only; the fstab dump/pass
    # fields are covered by the module's dump/passno defaults
    opts: "{{ item.value.options }}"
    state: mounted
  with_dict: "{{ machines[inventory_hostname].fstab.add }}"
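
For reference, this is the vars shape the with_dict loop expects, shown with the gitea entry from the host vars above (values are taken from this commit; uncommenting it here is purely for illustration):

machines:
  home:
    fstab:
      add:
        gitea:
          src: UUID=1a10b5b3-a00b-463c-bb47-d7a1430357c2
          path: /data/glusterfs/gitea/brick1
          fstype: xfs
          options: defaults,noatime,nofail,x-systemd.device-timeout=10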

View File

@@ -0,0 +1,75 @@
---
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
mounts:
  paths:
    gitea: /data/glusterfs/gitea/brick1
    jenkins: /data/glusterfs/jenkins/brick1
    mariadb: /data/glusterfs/mariadb/brick1
    nextcloud: /data/glusterfs/nextcloud/brick1
    nextcloud2: /data/glusterfs/nextcloud2/brick1
    plex: /data/glusterfs/plex/brick1
    prometheus: /data/glusterfs/prometheus/brick1
    tmp: /data/glusterfs/tmp/brick1
    vmshare: /data/glusterfs/vmshares/brick1
  cubox-i:
    - name: vmshare
      status: mounted
      src: "UUID=d1b005e1-294f-4588-b9a6-21b72f7e3e0f"
    - name: gitea
      status: mounted
      src: "UUID=3f6678ec-8555-4c32-a399-9b4418465a66"
    - name: jenkins
      status: mounted
      src: "UUID=fb9f6c8d-2522-4f3b-831a-793964884967"
    - name: tmp
      status: mounted
      src: "UUID=cc7588cc-1d39-4f09-baac-234f8c081443"
    - name: nextcloud
      status: mounted
      src: "UUID=617b4eee-7c60-4506-b2c1-7768900482e9"
    - name: prometheus
      status: mounted
      src: "UUID=f2b9d354-f7f1-45d9-80a3-f616beeca5bc"
    - name: mariadb
      status: mounted
      src: "UUID=5641769e-9816-4955-9def-17bb625f88f0"
    - name: plex
      status: mounted
      src: "UUID=57a3133c-6ca3-4974-9a0b-7efc8c0c533b"
    - name: nextcloud2
      status: mounted
      src: "UUID=ced31b8e-8404-4bff-b428-6411c54451a4"
  home:
    - name: vmshare
      status: mounted
      src: "UUID=1ee11dc5-0b55-47bd-a92c-e644175a8ba9"
    - name: gitea
      status: mounted
      src: "UUID=1a10b5b3-a00b-463c-bb47-d7a1430357c2"
    - name: jenkins
      status: mounted
      src: "UUID=c1ae3332-8ccc-42c4-beb9-217e0dd58835"
    - name: tmp
      status: mounted
      src: "UUID=cad3eb8f-bd07-499b-b1f0-19ec14ba0084"
    - name: nextcloud
      status: mounted
      src: "UUID=179763ba-27cc-4dbf-a0b2-01596727c541"
    - name: prometheus
      status: mounted
      src: "UUID=19ec83ce-cb19-465f-ba79-0426a659541d"
    - name: mariadb
      status: mounted
      src: "UUID=fa3adff4-b456-4321-8528-eed843615191"
    - name: plex
      status: mounted
      src: "UUID=2e87570b-4750-4aeb-926b-1d9b48ef89b3"
    - name: nextcloud2
      status: mounted
      src: "UUID=b0dc64be-80b1-49f6-ae0f-ae6219e734c9"

View File

@@ -14,16 +14,16 @@
       - xfsprogs
       - xfsdump
-- name: Start service k3s, if not started
+- name: Start service gluster, if not started
   block:
-    - name: start on home
-      ansible.builtin.service:
-        name: glusterd
-        state: started
-      when: "ansible_lsb.major_release >= '20'"
+    # - name: start on home
+    #   ansible.builtin.service:
+    #     name: glusterd
+    #     state: started
+    #   when: "ansible_lsb.major_release >= '20'"
     - name: start on cubox-i
       ansible.builtin.service:
         name: glusterfs-server
         state: started
-      when: "ansible_lsb.major_release < '20'"
+      when: ansible_architecture == 'armv7l'
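
Since the two branches differed only in the unit name, the commented-out task could also be folded into the surviving one by deriving the name from the same architecture fact. A sketch under that assumption, not part of this commit:

- name: start the gluster daemon under its per-host unit name
  ansible.builtin.service:
    name: "{{ 'glusterfs-server' if ansible_architecture == 'armv7l' else 'glusterd' }}"
    state: started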

View File

@@ -1,13 +1,27 @@
 ---
 # glusterfs mounts
+# Use 'blkid' to print the universally unique identifier for a
+# device; this may be used with UUID= as a more robust way to name devices
+# that works even if disks are added and removed. See fstab(5).
+#
+# Mount drives in fstab
+- name: Create fstab entries
+  ansible.posix.mount:
+    path: "{{ mounts.paths[item.name] }}"
+    src: "{{ item.src }}"
+    fstype: "xfs"
+    # the fstab dump/pass fields ("0 0") are covered by the module's
+    # dump/passno defaults, so opts carries only the option string
+    opts: "defaults,noatime,nofail,x-systemd.device-timeout=10"
+    state: "{{ item.status }}"
+  with_items: "{{ mounts[inventory_hostname] }}"
 # create mount points
-- name: create mount points for all bricks in all volumes
-  file:
-    path: "{{ item }}"
-    state: directory
-    mode: 0755
-  with_items: "{{mountpoints}}"
+#- name: create mount points for all bricks in all volumes
+#  file:
+#    path: "{{ item }}"
+#    state: directory
+#    mode: 0755
+#  with_items: "{{mountpoints}}"
 # manage volumes
 #- name: remove old gluster volumes
@@ -21,18 +35,18 @@
 #  run_once: true
-- name: create gluster volumes
-  gluster_volume:
-    state: present
-    start_on_create: yes
-    name: "{{ item.key }}"
-    brick: '{{ item.value | join(",") }}'
-    rebalance: no
-    replicas: 2
-    force: true
-    cluster: "{{cluster}}"
-  with_dict: "{{volumes}}"
-  run_once: true
+#- name: create gluster volumes
+#  gluster_volume:
+#    state: present
+#    start_on_create: yes
+#    name: "{{ item.key }}"
+#    brick: '{{ item.value | join(",") }}'
+#    rebalance: no
+#    replicas: 2
+#    force: true
+#    cluster: "{{cluster}}"
+#  with_dict: "{{volumes}}"
+#  run_once: true
 #- name: create tmp gluster volumes
 #  gluster_volume:
@@ -44,14 +58,14 @@
 #    cluster: "{{cluster}}"
 #    run_once: true
-- name: create distributed gluster volumes
-  gluster_volume:
-    state: present
-    name: "{{ item.key }}"
-    brick: '{{ item.value | join(",") }}'
-    rebalance: no
-    replicas: false
-    force: true
-    cluster: "{{cluster}}"
-  with_dict: "{{distributed_volumes}}"
-  run_once: true
+#- name: create distributed gluster volumes
+#  gluster_volume:
+#    state: present
+#    name: "{{ item.key }}"
+#    brick: '{{ item.value | join(",") }}'
+#    rebalance: no
+#    replicas: false
+#    force: true
+#    cluster: "{{cluster}}"
+#  with_dict: "{{distributed_volumes}}"
+#  run_once: true
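
Because the fstab task added above takes state from item.status, a brick can later be unmounted by editing the vars file rather than the task. A hypothetical edit to one existing entry:

- name: plex
  # 'unmounted' unmounts the brick but keeps its fstab line;
  # 'absent' would unmount it and remove the line as well
  status: unmounted
  src: "UUID=2e87570b-4750-4aeb-926b-1d9b48ef89b3"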