hacking at glusterfs

cubox-i role for specific configs
This commit is contained in:
2018-03-04 22:02:21 -05:00
parent d778f68993
commit e32fb90662
11 changed files with 176 additions and 14 deletions

20
cubox-i.xai-corp.net.yml Normal file
View File

@@ -0,0 +1,20 @@
---
# playbook to install tools on cubox-i.xai-corp.net
#
#
- hosts: cubox-i
remote_user: ansible
gather_facts: true
become: true
vars:
cleanup:
packages:
- git
roles:
- cubox-i.xai-corp.net
post_tasks:

View File

@@ -62,6 +62,7 @@
jenkins:
host: gluster:/jenkins
mount: /var/lib/jenkins
gluster_remove:
elasticsearch:
host: gluster:/elasticsearch
mount: /data/elasticsearch

View File

@@ -0,0 +1,55 @@
---
# docker-compose file for the metrics stack (Prometheus / Grafana / postgres exporter)
# DOCKER_HOST=dkhost:2376 docker stack deploy -c docker-compose.yml metrics
# DOCKER_HOST=dkhost:2376 docker stack ps metrics
version: '3.4'
services:
prometheus:
image: prom/prometheus
ports:
- 9090:9090
# volumes:
# - /opt/shared/nextcloud/data:/var/www/html/data
logging:
driver: fluentd
options:
fluentd-address: "logs.xai-corp.net:24224"
fluentd-async-connect: 'true'
tag: postgres
networks:
- ingress
- prod
graphana:
image: grafana/grafana
ports:
- 3001:3000
environment:
GF_SECURITY_ADMIN_PASSWORD: sakas
# volumes:
# - /opt/shared/nextcloud/data:/var/www/html/data
networks:
- ingress
postgres_exported:
image: wrouesnel/postgres_exporter
environment:
DATA_SOURCE_NAME: "postgresql://xaicorp_admin:snqioxni1sw@tasks.postgres:5432/?sslmode=disable"
networks:
- ingress
- prod
networks:
default:
external:
name: ingress
ingress:
external:
name: ingress
prod:
external:
name: prod

View File

@@ -4,31 +4,38 @@
- hosts: gfs
remote_user: ansible
gather_facts: yes
gather_facts: no
become: true
vars:
cluster:
- cubox-i.xai-corp.net
- cubox-i
- home
mountpoints:
- /data/glusterfs/vmshares/brick1
- /data/glusterfs/gitea/brick1
- /data/glusterfs/jenkins/brick1
- /data/glusterfs/jenkins2/brick1
- /data/glusterfs/tmp/brick1
- /data/glusterfs/elasticsearch/brick1
# - /data/glusterfs/elasticsearch/brick1
volumes:
vmshares:
- /data/glusterfs/vmshares/brick1/brick
gitea:
- /data/glusterfs/gitea/brick1/brick
jenkins:
- /data/glusterfs/jenkins/brick1/brick
jenkins2:
- /data/glusterfs/jenkins2/brick1/brick
vmshares:
- /data/glusterfs/vmshares/brick1/brick
distributed_volumes:
tmp:
- /data/glusterfs/tmp/brick1/brick
elasticsearch:
- /data/glusterfs/elasticsearch/brick1/brick
# elasticsearch:
# - /data/glusterfs/elasticsearch/brick1/brick
removed_volumes:
# jenkins:
# - /data/glusterfs/jenkins/brick1/brick
# tmp:
# - /data/glusterfs/tmp/brick1/brick
roles:
- glusterfs

View File

@@ -0,0 +1,33 @@
---
# defaults/main.yml
# Default fstab entries for the cubox-i host. Each entry renders as:
#   <host> <mount> <fstype> <attr> 0 0
# e.g. /dev/sda6 /data/glusterfs/vmshares/brick1 xfs defaults 0 0
#
# NOTE(review): the top-level key contains a hyphen, so it cannot be used
# as a bare Jinja identifier; consumers must go through vars['cubox-i'].
# Consider renaming to cubox_i.
cubox-i:
  fstab:
    add:
      # The original file repeated the keys 'vmshare' and 'gitea'; YAML
      # mappings silently keep only the last duplicate, which dropped the
      # jenkins/tmp/elasticsearch entries. Keys now match their mounts.
      vmshares:
        host: /dev/sda6
        mount: /data/glusterfs/vmshares/brick1
        fstype: xfs
        attr: defaults
      gitea:
        host: /dev/sda7
        mount: /data/glusterfs/gitea/brick1
        fstype: xfs
        attr: defaults
      jenkins:
        host: /dev/sda8
        mount: /data/glusterfs/jenkins/brick1
        fstype: xfs
        attr: defaults
      tmp:
        host: /dev/sda9
        mount: /data/glusterfs/tmp/brick1
        fstype: xfs
        attr: defaults
      elasticsearch:
        host: /dev/sda10
        mount: /data/glusterfs/elasticsearch/brick1
        fstype: xfs
        attr: defaults

View File

@@ -0,0 +1,22 @@
---
# main tasks for special cubox-i config
# Create the brick mount-point directories declared in cubox-i.fstab.add.
- name: create mount points
  file:
    path: "{{ item.value.mount }}"
    state: directory
    # quote octal modes so YAML does not load 0755 as the integer 493
    mode: "0755"
  # the variable name contains a hyphen, so it is not a valid bare Jinja
  # identifier (parses as 'cubox - i'); look it up through vars[] instead
  with_dict: "{{ vars['cubox-i'].fstab.add }}"

# Add (or update in place) one /etc/fstab line per declared mount.
- name: create glusterfs mount hosts file entries
  lineinfile:
    state: present
    dest: /etc/fstab
    insertafter: EOF
    # also matches a previously commented-out entry for the same device
    regexp: '^#?{{ item.value.host }}'
    line: '{{ item.value.host }} {{ item.value.mount }} {{ item.value.fstype }} {{ item.value.attr }} 0 0'
  with_dict: "{{ vars['cubox-i'].fstab.add }}"
- name: mute syslog

View File

@@ -15,3 +15,12 @@
regexp: '^#?{{ item.value.host }}'
line: '{{ item.value.host }} {{ item.value.mount }} glusterfs direct-io-mode=disable,defaults,_netdev 0 0'
with_dict: gluster
# Remove the /etc/fstab entries for mounts listed in gluster_remove.
- name: remove glusterfs mount hosts file entries
  lineinfile:
    state: absent
    dest: /etc/fstab
    # with state=absent every line matching regexp is deleted; the original
    # also set 'insertafter' and 'line', which are ignored in this mode and
    # have been dropped to avoid implying insert behavior
    regexp: '^#?{{ item.value.host }}'
  # template the loop source explicitly; bare variable names in with_dict
  # are deprecated in modern Ansible
  with_dict: "{{ gluster_remove }}"

View File

@@ -23,13 +23,26 @@
with_items: "{{mountpoints}}"
# manage volumes
#- name: remove old gluster volumes
# gluster_volume:
# state: absent
# name: "{{ item.key }}"
# rebalance: no
# replicas: false
# cluster: "{{cluster}}"
# with_dict: "{{removed_volumes}}"
# run_once: true
- name: create gluster volumes
gluster_volume:
state: present
start_on_create: yes
name: "{{ item.key }}"
brick: '{{ item.value | join(",")}}'
rebalance: no
replicas: 2
force: true
cluster: "{{cluster}}"
with_dict: "{{volumes}}"
run_once: true
@@ -51,6 +64,8 @@
brick: '{{ item.value | join(",")}}'
rebalance: no
replicas: false
force: true
cluster: "{{cluster}}"
with_dict: "{{distributed_volumes}}"
run_once: true

View File

@@ -14,7 +14,7 @@
- figlet
- update-motd
- lm-sensors
when: ansible_architecture == 'armv7l'
when: ansible_architecture != 'armv7l'
- name: remove help text
@@ -24,7 +24,7 @@
with_items:
- /etc/update-motd.d/10-help-text
- /etc/update-motd.d/51-cloudguest
when: ansible_architecture == 'armv7l'
when: ansible_architecture != 'armv7l'
- name: add new info
@@ -35,4 +35,4 @@
with_items:
- { src: hostname.sh, dest: 10-hostname }
- { src: systats.sh, dest: 11-sysstats}
when: ansible_architecture == 'armv7l'
when: ansible_architecture != 'armv7l'

View File

@@ -38,7 +38,7 @@ ns02 IN CNAME cubox-i
; gluster servers
gluster IN A 192.168.2.11
gluster IN A 192.168.2.12
;gluster IN A 192.168.2.12
; docker swarm nodes
dkhost IN A 192.168.2.11

View File

@@ -2,4 +2,4 @@
# run ansible playbook to update name servers
ansible-playbook -vv ${PLAYBOOK}.yml -i inventory.conf -u ansible
ansible-playbook -vvv ${PLAYBOOK}.yml -i inventory.conf -u ansible