refactor ansible code into ansible-2.0 folder, and create space for ansible-2.10

2021-12-21 10:03:39 -05:00
parent e156b183ed
commit 22ef544758
159 changed files with 59 additions and 36 deletions

6
ansible-2.0/ansible.cfg Normal file

@@ -0,0 +1,6 @@
# local ansible config file
[defaults]
roles_path = ./roles:./roles/vendor

[privilege_escalation]
become_ask_pass = True


12
ansible-2.0/cli/ansible Executable file

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
echo -e "\033[36mStarting\033[39m: Ansible"
ANSIBLE_VERSION=${ANSIBLE_VERSION:-2.7}
# shellcheck disable=SC2068
docker run --rm -i\
-v ~/.ssh:/home/user/.ssh \
-u 1000 \
-v ${PWD}:/opt/project \
-w /opt/project \
dkregistry.xai-corp.net:5000/xaicorp/ansible:${ANSIBLE_VERSION} -i inventory.conf -u ansible $@
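A usage sketch for this wrapper, assuming the registry image's entrypoint is the ansible binary so that everything after the script name is passed straight through as ansible arguments (hypothetical invocations, not part of the commit):
# ad-hoc ping of every host defined in inventory.conf
./cli/ansible all -m ping
# pin a different image tag for a single run
ANSIBLE_VERSION=2.9 ./cli/ansible managed -m setup -a 'filter=ansible_distribution*'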

13
ansible-2.0/cli/ansible-dev Executable file

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
echo -e "\033[36mStarting\033[39m: Ansible"
# shellcheck disable=SC2068
docker run --rm \
-it \
-v $(pwd):/opt/project \
-v ~/.ssh:/user/.ssh \
-u $(id -u):$(id -g) \
--workdir /opt/project \
--entrypoint /bin/bash \
dkregistry.xai-corp.net:5000/xaicorp/ansible:2.0 $@


@@ -0,0 +1,3 @@
ARGS - The arguments you wish to provide to this command
TODO: Fill out the help information for this command.


@@ -0,0 +1 @@
ARGS...


@@ -0,0 +1,2 @@
playbook the playbook to run, including extension


@@ -0,0 +1 @@
playbook


@@ -0,0 +1,20 @@
---
# playbook to install tools on home.xai-corp.net
#
#
- hosts: cubox-i
remote_user: ansible
gather_facts: true
become: true
vars:
cleanup:
packages:
- git
roles:
- cubox-i.xai-corp.net
post_tasks:


@@ -0,0 +1,107 @@
---
# playbook for home02
- hosts: dkhost
remote_user: ansible
gather_facts: yes
become: true
vars:
# datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
# datadog_config:
# log_level: WARNING
# apm_enabled: false
# datadog_checks:
# system:
# init_config: []
# instances: []
# disk:
# init_config:
# instances:
# - use_mount: yes
# excluded_filesystems:
# - sysfs
# - cgroup
# - tracefs
# - debugfs
# - proc
# - securityfs
# - tmpfs
# excluded_mountpoint_re: /[media/richard|run/user].*
docker:
init_config:
instances:
- url: "unix://var/run/docker.sock"
new_tag_names: true
dockerhost:
users:
- dd-agent
- richard
- ansible
nginx_remove_default_vhost: true
nginx_vhosts_filename: "xai-corp.conf"
nginx_vhosts:
- listen: "80 default_server"
server_name: "xai-corp.net"
root: "/var/www/xai-corp.net"
index: "index.html index.htm"
access_log: "/var/log/nginx/xaicorp.access.log"
error_log: "/var/log/nginx/xaicorp.error.log"
gluster:
vmshare:
host: gluster:/vmshares
mount: /opt/shared
gitea:
host: gluster:/gitea
mount: /var/lib/gitea
jenkins:
host: gluster:/jenkins2
mount: /var/lib/jenkins
prometheus:
host: gluster:/prometheus
mount: /opt/prometheus
nextcloud:
host: gluster:/nextcloud
mount: /opt/nextcloud
mariadb:
host: gluster:/mariadb
mount: /opt/mariadb
postgresql:
host: gluster:/postgresql
mount: /opt/postgresql
gluster_remove:
elasticsearch:
host: gluster:/elasticsearch
mount: /data/elasticsearch
certbot:
uninstall: true
domains:
- xai-corp.net
- www.xai-corp.net
- dkregistry.xai-corp.net
- sql.xai-corp.net
- fs.xai-corp.net
- dkhost.xai-corp.net
- git.xai-corp.net
- dkui.xai-corp.net
- jenkins.xai-corp.net
- logs.xai-corp.net
- tripbuilder.xai-corp.net
- xaibox.xai-corp.net
- office.xai-corp.net
roles:
- dockerhost
- rsyslog
# - geerlingguy.nginx
#- certbot
# - { role: Datadog.datadog, when: ansible_architecture != 'armv7l' } #does not support armhf architecture. should switch to fluentd or logstash
post_tasks:


@@ -0,0 +1,27 @@
---
# playbook for home02
- hosts: dkhost
remote_user: ansible
gather_facts: yes
become: true
vars:
- swarm:
managers:
- dkhost01
- dkhost02
- home
workers: []
removed:
- dkhost03
- dkhost04
- dkhost05
history: 1
roles:
- docker_swarm_management
post_tasks:

61
ansible-2.0/glusterfs.yml Normal file

@@ -0,0 +1,61 @@
---
# playbook for gluster file servers
- hosts: gfs
remote_user: ansible
gather_facts: no
become: true
vars:
cluster:
- cubox-i
- home
mountpoints:
- /data/glusterfs/vmshares/brick1
- /data/glusterfs/gitea/brick1
- /data/glusterfs/jenkins/brick1
- /data/glusterfs/jenkins2/brick1
- /data/glusterfs/tmp/brick1
- /data/glusterfs/prometheus/brick1
- /data/glusterfs/nextcloud/brick1
- /data/glusterfs/mariadb/brick1
- /data/glusterfs/postgresql/brick1
volumes:
gitea:
- /data/glusterfs/gitea/brick1/brick
jenkins2:
- /data/glusterfs/jenkins2/brick1/brick
vmshares:
- /data/glusterfs/vmshares/brick1/brick
mariadb:
- /data/glusterfs/mariadb/brick1/brick
plex:
- /data/glusterfs/plex/brick1/brick
nextcloud:
- /data/glusterfs/nextcloud/brick1/brick
nextcloud2:
- /data/glusterfs/nextcloud2/brick1/brick
distributed_volumes:
tmp:
- /data/glusterfs/tmp/brick1/brick
prometheus:
- /data/glusterfs/prometheus/brick1
# elasticsearch:
# - /data/glusterfs/elasticsearch/brick1/brick
removed_volumes:
postgresql:
- /data/glusterfs/postgresql/brick1/brick
# jenkins:
# - /data/glusterfs/jenkins/brick1/brick
# tmp:
# - /data/glusterfs/tmp/brick1/brick
roles:
- glusterfs
# - td-agent-bit
post_tasks:
- name: check service is up
service: name=glusterfs-server state=started


@@ -0,0 +1,48 @@
---
# playbook to install tools on home.xai-corp.net
#
#
- hosts: home
remote_user: ansible
gather_facts: true
become: true
vars:
cleanup:
packages:
- git
cron:
- { name: "zoneedit", file: "zoneedit" }
vm:
bridge_network: enp3s0
# datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
# datadog_checks:
# system:
# init_config: []
# instances: []
# disk:
# init_config:
# instances:
# - use_mount: yes
# excluded_filesystems:
# - sysfs
# - cgroup
# - tracefs
# - debugfs
# - proc
# - securityfs
# excluded_mountpoint_re: /[media/richard|run/user].*
roles:
# - Datadog.datadog
# - dockerhost
# - dynamic-ip
# - bootstrap_vms
- home.xai-corp.net
- cleanup
post_tasks:


@@ -0,0 +1,43 @@
---
# playbook for home02
- hosts: home02
remote_user: ansible
gather_facts: yes
become: true
vars:
cleanup:
packages:
- git
cron: []
# datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
# datadog_checks:
# system:
# init_config: []
# instances: []
# disk:
# init_config:
# instances:
# - use_mount: yes
# excluded_filesystems:
# - sysfs
# - cgroup
# - tracefs
# - debugfs
# - proc
# - securityfs
# excluded_mountpoint_re: /[media/richard|run/user].*
roles:
# - Datadog.datadog
# - ns.xai-corp.net
# - td-agent-bit
- dynamic-ip
- cleanup
post_tasks:
# - name: check service is up
# service: name={{ bind.service }} state=started


@@ -0,0 +1,32 @@
localhost ansible_connection=local
[managed]
home ansible_ssh_host=192.168.4.11
home02 ansible_ssh_host=192.168.4.22
#dkhost01 ansible_ssh_host=192.168.4.41
#dkhost02 ansible_ssh_host=192.168.4.52
#dkhost03 ansible_ssh_host=192.168.4.53
#dkhost04 ansible_ssh_host=192.168.4.54
#dkhost05 ansible_ssh_host=192.168.4.55
cubox-i ansible_ssh_host=192.168.4.12
[dkhost]
dkhost01 ansible_ssh_host=192.168.4.41
dkhost02 ansible_ssh_host=192.168.4.52
#dkhost03 ansible_ssh_host=192.168.4.53
dkhost04 ansible_ssh_host=192.168.4.54
#dkhost05 ansible_ssh_host=192.168.4.55
home ansible_ssh_host=192.168.4.11
#localhost ansible_connection=local
[ns]
home02 ansible_ssh_host=192.168.4.22
cubox-i ansible_ssh_host=192.168.4.12
[gfs]
home ansible_ssh_host=192.168.4.11
cubox-i ansible_ssh_host=192.168.4.12
[desktop]
richard-desktop
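For orientation, a sketch of targeting the groups defined above with ad-hoc commands; the inventory path assumes the commands run from the ansible-2.0 directory (illustrative only, not part of the commit):
ansible -i inventory.conf managed --list-hosts
ansible -i inventory.conf gfs -m ping -u ansible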


@@ -0,0 +1,16 @@
---
# playbook for logs.xai-corp.net
# configure hosts for running graylog
- hosts: dkhost
remote_user: ansible
gather_facts: yes
become: true
vars:
roles:
- docker_graylog


@@ -0,0 +1,18 @@
# playbook for all managed hosts
# ansible-playbook managed_updates.yml -v --ask-become-pass -u richard --ask-pass
- hosts: managed
remote_user: ansible
gather_facts: yes
become: True
vars:
roles:
tasks:
- name: run apt autoremove
command: apt -y autoremove


@@ -0,0 +1,98 @@
# playbook for all managed hosts
# ansible-playbook managed_setup.yml -v --ask-become-pass -u richard --ask-pass
- hosts: managed
# remote_user: ansible
gather_facts: yes
become: true
vars:
- users:
- name: "ansible"
state: present
shell: /bin/bash
createhome: yes
generate_ssh_key: yes
password: "$6$7z7PfYwduXom0o73$DEiy3K15URNNjmKkOQIwx8/mFKArUNYkFn8D/4q6t/eP9hf1X9jnG4YuSjI7q1Dnp1HwukZUxZY7cF2JK5DO/."
ssh_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAmJSdmj03d4fnZUuRByevPDNiReEk1fRL+7F9WPCo5zn+r5Oj84HXxd4P03DNXeGSBYmUAnsTqYEGdkjkpSrKfMm9bv8amL7hUC+Mzb+wOmXmyX1cw/SearYCBQRCz1s5p7I9+PO7XWaC0VJ99LUm1Bp4JM149U5X0Y3M2j2XV+0= RSA-1024"
uid: "1001"
groups:
- sudo
- name: "richard"
state: present
shell: /bin/bash
createhome: yes
generate_ssh_key: yes
password: "$6$yNKLUxX0$lxy/jaJI7cKCq5j.KondUalu9r96gUeRR//5qciZ/RX9z9PGSpbU9j7OsxaOzqV5uLeQ9ouIe8quo/2YqKE46/"
ssh_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAmJSdmj03d4fnZUuRByevPDNiReEk1fRL+7F9WPCo5zn+r5Oj84HXxd4P03DNXeGSBYmUAnsTqYEGdkjkpSrKfMm9bv8amL7hUC+Mzb+wOmXmyX1cw/SearYCBQRCz1s5p7I9+PO7XWaC0VJ99LUm1Bp4JM149U5X0Y3M2j2XV+0= RSA-1024
uid: "1000"
groups:
- sudo
- users_groups:
- datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
- datadog_checks:
system:
init_config: []
instances: []
disk:
init_config:
instances:
- use_mount: yes
excluded_filesystems:
- sysfs
- cgroup
- tracefs
- debugfs
- proc
- securityfs
- tmpfs
excluded_mountpoint_re: (/media/richard|/run/user).*
- rsyslog:
user: root
group: root
service: rsyslog
configs:
- 48-ship2papertrail
# prepare python for ansible
pre_tasks:
- raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
- setup: # aka gather_facts
- name: Creates .ssh directory
file: path=~/.ssh state=directory mode=600
- name: remove ubuntu user if it exists
command: userdel -rf ubuntu
args:
removes: /home/ubuntu/.bashrc
- debug: var=ansible_os_family
run_once: true
- debug: var=ansible_architecture
run_once: true
roles:
- novuso.users
- user-richard
- rsyslog
- motd
- vbox-guest
- { role: Datadog.datadog, when: ansible_architecture != 'armv7l' } #does not support armhf architecture. should switch to fluentd or logstash
- { role: ddagent_source, when: ansible_architecture == 'armv7l' }
tasks:
- name: add ansible to sudoers
lineinfile:
dest: /etc/sudoers
state: present
regexp: '^ansible ALL='
line: 'ansible ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'


@@ -0,0 +1,19 @@
# playbook for all managed hosts
# ansible-playbook managed_updates.yml -v --ask-become-pass -u richard --ask-pass
- hosts: managed
remote_user: ansible
gather_facts: yes
become: True
vars:
roles:
- _install_updates
- user-richard
- motd
# - { role: Datadog.datadog, when: ansible_architecture != 'armv7l' } #does not support armhf architecture. should switch to fluentd or logstash
tasks:


@@ -0,0 +1,39 @@
---
# playbook for home02
- hosts: ns
remote_user: ansible
gather_facts: yes
become: true
vars:
# datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
# datadog_checks:
# system:
# init_config: []
# instances: []
# disk:
# init_config:
# instances:
# - use_mount: yes
# excluded_filesystems:
# - sysfs
# - cgroup
# - tracefs
# - debugfs
# - proc
# - securityfs
# excluded_mountpoint_re: /[media/richard|run/user].*
roles:
# - _install_updates
# - Datadog.datadog
- ns.xai-corp.net
- dynamic-ip
# - td-agent-bit
post_tasks:
- name: check service is up
service: name={{ bind.service }} state=started


@@ -0,0 +1,22 @@
---
# import required roles
- src: jmcvetta.debian-upgrade-reboot
path: roles/vendor/
- src: novuso.users
path: roles/vendor/
- src: Datadog.datadog
path: roles/vendor/
- src: resmo.bind
path: roles/vendor/
- src: bennojoy.ntp
path: roles/vendor/
- src: geerlingguy.nginx
path: roles/vendor/
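These role imports would normally be pulled down with ansible-galaxy before running the playbooks; a sketch, assuming the file above is saved as requirements.yml (its real filename is not visible in this diff):
# install the vendored roles into roles/vendor/ (filename is an assumption)
ansible-galaxy install -r requirements.yml -p roles/vendor/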


@@ -0,0 +1,9 @@
---
# update packages to latest
- name: run apt updates
apt:
upgrade: dist
update_cache: yes
cache_valid_time: 3600
when: ansible_os_family == "Debian"


@@ -0,0 +1,18 @@
---
# wait random time to start this to offset reboots of individual machines
- pause: seconds={{ 100 | random(1,10) }}
# Send the reboot command
- shell: shutdown -r now
# This pause is mandatory, otherwise the existing control connection gets reused!
- pause: seconds=30
# Now we will run a local 'ansible -m ping' on this host until it returns.
# This works with the existing ansible hosts inventory and so any custom ansible_ssh_hosts definitions are being used
- local_action: shell ansible -u {{ ansible_user_id }} -m ping {{ inventory_hostname }}
register: result
until: result.rc == 0
retries: 30
delay: 10


@@ -0,0 +1,4 @@
---
- name: test connection
ping:


@@ -0,0 +1,4 @@
---
# defaults/main.yml
# define default variable values here


@@ -0,0 +1,9 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped


@@ -0,0 +1,8 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies: []


@@ -0,0 +1,4 @@
---
# tasks/main.yml
# define tasks here


@@ -0,0 +1,10 @@
---
# vars/_extravars.yml
# define extra variable values here
# this file should be loaded via an include_vars statement in the task.
# often this is used for managing differences in os.
# Variable setup.
#- name: Include OS-Specific variables
# include_vars: "{{ ansible_os_family }}.yml"


@@ -0,0 +1,4 @@
---
#default variables for bootstrap_vms
vagrant_folder: "/opt/home.xai-corp.net/vm_config"


@@ -0,0 +1,9 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies:
- { role: vagrant }


@@ -0,0 +1,32 @@
---
# bootstrap vms on host machine
# install required system packages
- name: install system packages
apt:
state: present
name: "{{item}}"
update_cache: yes
with_items: []
# copy across vagrant file
- name: create target folder
file:
state: directory
dest: "{{ vagrant_folder }}"
- name: copy vagrant file
template:
src: Vagrantfile.j2
dest: "{{ vagrant_folder }}/Vagrantfile"
# update and run vagrant boxes
- name: update vagrant box
command: vagrant box update
args:
chdir: "{{vagrant_folder}}"
- name: vagrant up
command: vagrant up
args:
chdir: "{{vagrant_folder}}"


@@ -0,0 +1,77 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
BASE_BOX = "ubuntu/xenial64"
BRIDGE_NET = "{{vm.bridge_network}}"
$script = <<SCRIPT
echo I am provisioning...
sudo adduser --home /home/ansible --shell /bin/bash --uid 1001 --disabled-password ansible
sudo mkdir -p /home/ansible/.ssh
sudo cat /etc/sudoers
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAmJSdmj03d4fnZUuRByevPDNiReEk1fRL+7F9WPCo5zn+r5Oj84HXxd4P03DNXeGSBYmUAnsTqYEGdkjkpSrKfMm9bv8amL7hUC+Mzb+wOmXmyX1cw/SearYCBQRCz1s5p7I9+PO7XWaC0VJ99LUm1Bp4JM149U5X0Y3M2j2XV+0= RSA-1024" | sudo tee -a /home/ansible/.ssh/authorized_keys
sudo chmod 600 /home/ansible/.ssh/authorized_keys
sudo chmod 700 /home/ansible/.ssh
sudo chown -R ansible /home/ansible
sudo echo "ansible ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
sudo apt -y update && sudo apt install -y python-minimal
date > /etc/vagrant_provisioned_at
SCRIPT
Vagrant.configure("2") do |config|
config.vm.define "dkhost01" do |dkhost01|
dkhost01.vm.hostname = "dkhost01"
dkhost01.vm.box = BASE_BOX
dkhost01.vm.box_check_update = true
dkhost01.vm.network "public_network", bridge: BRIDGE_NET
dkhost01.vm.synced_folder '.', '/vagrant', disabled: true
dkhost01.vm.provision "shell", inline: $script
dkhost01.vm.provider "virtualbox" do |vb|
vb.name = "dkhost01"
vb.gui = false
vb.memory = "2048"
vb.cpus = 2
#vb.customize ["modifyvm", :id, "--autostart-enabled", "on"]
#vb.customize ["modifyvm", :id, "--autostart-delay", "30"]
vb.customize ["modifyvm", :id, "--macaddress2", "0800271a4d51"]
end
end
config.vm.define "dkhost02" do |dkhost|
dkhost.vm.hostname = "dkhost02"
dkhost.vm.box = BASE_BOX
dkhost.vm.box_check_update = true
dkhost.vm.network "public_network", bridge: BRIDGE_NET
dkhost.vm.synced_folder '.', '/vagrant', disabled: true
dkhost.vm.provision "shell", inline: $script
dkhost.vm.provider "virtualbox" do |vb|
vb.name = "dkhost01"
vb.gui = false
vb.memory = "2048"
vb.cpus = 2
#vb.customize ["modifyvm", :id, "--autostart-enabled", "on"]
#vb.customize ["modifyvm", :id, "--autostart-delay", "30"]
vb.customize ["modifyvm", :id, "--macaddress2", "0800273D10E4"]
end
end
end


@@ -0,0 +1,33 @@
---
# main task for installing Let's Encrypt's certbot tool
# https://certbot.eff.org/#ubuntuxenial-other
- name: install certbot on ubuntu 16.04
apt:
state: latest
package: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
with_items:
- "letsencrypt"
when: ansible_os_family == "Debian"
- name: create webroot /var/www/xai-corp.net
file:
state: directory
path: /var/www/xai-corp.net
#- name: create first certificates
# command: "letsencrypt certonly --webroot -w /var/www/xai-corp.net -d {{ item }}"
# args:
# creates: /etc/letsencrypt/live/{{ item }}/cert.pem
# with_items: "{{certbot.domains}}"
- name: cron job for renewing certs
cron:
name: renew let's encrypt certificates
state: absent
user: root
day: "*/2"
job: "letsencrypt renew "


@@ -0,0 +1,10 @@
---
# main install certbot
# deprecated. Use container instead
- include: install.yml
when: certbot.uninstall != true
- include: uninstall.yml
when: certbot.uninstall == true


@@ -0,0 +1,31 @@
---
# uninstall certbot
- name: stop nginx before removing it
service:
name: nginx
state: stopped
- name: uninstall certbot on ubuntu 16.04
apt:
state: absent
package: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
with_items:
- "letsencrypt"
- "nginx"
when: ansible_os_family == "Debian"
- name: remove webroot /var/www/xai-corp.net
file:
state: absent
path: /var/www/xai-corp.net
- name: remove cron job for renewing certs
cron:
name: renew let's encrypt certificates
state: absent
user: root
day: "*/2"
job: "letsencrypt renew "


@@ -0,0 +1,6 @@
---
# defaults for cleanup
cleanup:
cron: []
packages: []


@@ -0,0 +1,18 @@
---
# cleanup/tasks/main.yml
# removes packages and cron jobs
- name: remove packages
apt:
state: absent
name: "{{item}}"
update_cache: yes
with_items: "{{ cleanup.packages }}"
- name: remove cronjob
cron:
state: absent
name: "{{ item.name }}"
cron_file: "{{ item.file }}"
with_items: "{{ cleanup.cron }}"


@@ -0,0 +1,33 @@
---
# defaults/main.yml
# define default variable values here
#/dev/sda6 /data/glusterfs/vmshares/brick1 xfs defaults 0 0
cubox-i:
fstab:
add:
vmshare:
host: /dev/sda6
mount: /data/glusterfs/vmshares/brick1
fstype: xfs
attr: defaults
gitea:
host: /dev/sda7
mount: /data/glusterfs/gitea/brick1
fstype: xfs
attr: defaults
jenkins:
host: /dev/sda8
mount: /data/glusterfs/jenkins/brick1
fstype: xfs
attr: defaults
tmp:
host: /dev/sda9
mount: /data/glusterfs/tmp/brick1
fstype: xfs
attr: defaults
elasticsearch:
host: /dev/sda10
mount: /data/glusterfs/elasticsearch/brick1
fstype: xfs
attr: defaults


@@ -0,0 +1,22 @@
---
# main tasks for special cubox-i config
- name: create mount points
file:
path: "{{ item.value.mount }}"
state: directory
mode: 0755
with_dict: "{{ vars['cubox-i'].fstab.add }}"
- name: create glusterfs mount hosts file entries
lineinfile:
state: present
dest: /etc/fstab
insertafter: EOF
regexp: '^#?{{ item.value.host }}'
line: '{{ item.value.host }} {{ item.value.mount }} {{ item.value.fstype }} {{ item.value.attr }} 0 0'
with_dict: "{{ vars['cubox-i'].fstab.add }}"
- name: mute syslog


@@ -0,0 +1,4 @@
[program:dd-agent]
command=/root/.datadog-agent/bin/agent start -d
stderr_logfile = /var/log/supervisord/ddagent-stderr.log
stdout_logfile = /var/log/supervisord/ddagent-stdout.log


@@ -0,0 +1,12 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped
- name: restart supervisor
service: name=supervisor state=restarted


@@ -0,0 +1,25 @@
---
# install dd-agent from source
- name: install packages
apt:
state: installed
package: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
with_items:
- python-psutil
- shell: DD_API_KEY=ca0faf176c4aedd4f547ed7cf85615eb DD_SKIP_INTEGRATIONS=1 DD_START_AGENT=0 sh -c "$(curl -L https://raw.githubusercontent.com/DataDog/dd-agent/master/packaging/datadog-agent/source/setup_agent.sh)"
args:
creates: "/root/.datadog-agent/bin/agent"
- name: add agent line to rc.local
lineinfile:
dest: /etc/rc.local
regexp: '^nohup sh /root/.datadog-agent/bin/agent &'
line: 'nohup sh /root/.datadog-agent/bin/agent &'
insertbefore: "^exit 0"
- name: start agent
shell: /root/.datadog-agent/bin/agent start


@@ -0,0 +1,4 @@
---
# defaults/main.yml
# define default variable values here


@@ -0,0 +1,9 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped


@@ -0,0 +1,8 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies: []


@@ -0,0 +1,17 @@
---
# tasks/main.yml
# define tasks here
- name: install composer
shell: curl -sS https://getcomposer.org/installer | php && mv composer.phar /usr/local/bin/composer
- name: install nodejs and npm
apt: pkg={{ item }} state=installed
with_items:
- nodejs
- nodejs-legacy
- npm
- name: install grunt globally
npm: name="grunt-cli" global=yes


@@ -0,0 +1,10 @@
---
# vars/_extravars.yml
# define extra variable values here
# this file should be loaded via an include_vars statement in the task.
# often this is used for managing differences in os.
# Variable setup.
#- name: Include OS-Specific variables
# include_vars: "{{ ansible_os_family }}.yml"


@@ -0,0 +1,7 @@
---
# roles/django/tasks/main.yml
- name: install packages
portage: state=present package={{ item }}
with_items:
- "dev-python/django"


@@ -0,0 +1,30 @@
version: '3.9'
services:
mongo:
restart: always
image: "mongo:3"
volumes:
- /opt/shared/graylog/data/mongo:/data/db
elasticsearch:
restart: always
image: "elasticsearch:2"
command: "elasticsearch -Des.cluster.name='graylog'"
volumes:
- /opt/shared/graylog/data/elasticsearch:/usr/share/elasticsearch/data
graylog:
restart: always
image: graylog2/server:2.1.0-3
volumes:
- /opt/shared/graylog/data/journal:/usr/share/graylog/data/journal
- /opt/shared/graylog/config:/usr/share/graylog/data/config
environment:
GRAYLOG_PASSWORD_SECRET: gsahu1dj901hdaiuafg3g1q
GRAYLOG_ROOT_PASSWORD_SHA2: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
GRAYLOG_WEB_ENDPOINT_URI: http://192.168.4.41:9000/api/
depends_on:
- mongo
- elasticsearch
ports:
- "9000:9000"
- "12201/udp:12201/udp"
- "1514/udp:1514/udp"


@@ -0,0 +1,442 @@
# If you are running more than one instances of Graylog server you have to select one of these
# instances as master. The master will perform some periodical tasks that non-masters won't perform.
is_master = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting Graylog server from init scripts or similar.
node_id_file = /usr/share/graylog/data/config/node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -N 1 -s 96
password_secret = replacethiswithyourownsecret!
# The default root user is named 'admin'
#root_username = admin
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend)
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
# The email address of the root user.
# Default is empty
#root_email = ""
# The time zone setting of the root user.
# The configured time zone must be parseable by http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html#forID-java.lang.String-
# Default is UTC
#root_timezone = UTC
# Set plugin directory here (relative or absolute)
plugin_dir = /usr/share/graylog/plugin
# REST API listen URI. Must be reachable by other Graylog server nodes if you run a cluster.
# When using Graylog Collectors, this URI will be used to receive heartbeat messages and must be accessible for all collectors.
rest_listen_uri = http://0.0.0.0:9000/api/
# REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
# is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
# If set, this will be promoted in the cluster discovery APIs, so other nodes may try to connect on
# this address and it is used to generate URLs addressing entities in the REST API. (see rest_listen_uri)
# You will need to define this, if your Graylog server is running behind a HTTP proxy that is rewriting
# the scheme, host name or URI.
#rest_transport_uri = http://192.168.1.1:9000/api/
# Enable CORS headers for REST API. This is necessary for JS-clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
# This is enabled by default. Uncomment the next line to disable it.
#rest_enable_cors = false
# Enable GZIP support for REST API. This compresses API responses and therefore helps to reduce
# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
#rest_enable_gzip = true
# Enable HTTPS support for the REST API. This secures the communication with the REST API with
# TLS to prevent request forgery and eavesdropping. This is disabled by default. Uncomment the
# next line to enable it.
#rest_enable_tls = true
# The X.509 certificate file to use for securing the REST API.
#rest_tls_cert_file = /path/to/graylog.crt
# The private key to use for securing the REST API.
#rest_tls_key_file = /path/to/graylog.key
# The password to unlock the private key used for securing the REST API.
#rest_tls_key_password = secret
# The maximum size of the HTTP request headers in bytes.
#rest_max_header_size = 8192
# The maximal length of the initial HTTP/1.1 line in bytes.
#rest_max_initial_line_length = 4096
# The size of the thread pool used exclusively for serving the REST API.
#rest_thread_pool_size = 16
# Enable the embedded Graylog web interface.
# Default: true
#web_enable = false
# Web interface listen URI
web_listen_uri = http://0.0.0.0:9000/
# Enable CORS headers for the web interface. This is necessary for JS-clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
web_enable_cors = true
# Enable/disable GZIP support for the web interface. This compresses HTTP responses and therefore helps to reduce
# overall round trip times. This is enabled by default. Uncomment the next line to disable it.
#web_enable_gzip = false
# Enable HTTPS support for the web interface. This secures the communication of the web browser with the web interface
# using TLS to prevent request forgery and eavesdropping.
# This is disabled by default. Uncomment the next line to enable it and see the other related configuration settings.
#web_enable_tls = true
# The X.509 certificate file to use for securing the web interface.
#web_tls_cert_file = /path/to/graylog-web.crt
# The private key to use for securing the web interface.
#web_tls_key_file = /path/to/graylog-web.key
# The password to unlock the private key used for securing the web interface.
#web_tls_key_password = secret
# The maximum size of the HTTP request headers in bytes.
#web_max_header_size = 8192
# The maximal length of the initial HTTP/1.1 line in bytes.
#web_max_initial_line_length = 4096
# The size of the thread pool used exclusively for serving the web interface.
#web_thread_pool_size = 16
# Embedded Elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
# elasticsearch_config_file = /usr/share/graylog/data/config/elasticsearch.yml
# Graylog will use multiple indices to store documents in. You can configure the strategy it uses to determine
# when to rotate the currently active write index.
# It supports multiple rotation strategies:
# - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure
# - "size" per index, use elasticsearch_max_size_per_index below to configure
# valid values are "count", "size" and "time", default is "count"
rotation_strategy = count
# (Approximate) maximum number of documents in an Elasticsearch index before a new index
# is being created, also see no_retention and elasticsearch_max_number_of_indices.
# Configure this if you used 'rotation_strategy = count' above.
elasticsearch_max_docs_per_index = 20000000
# (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 1GB.
# Configure this if you used 'rotation_strategy = size' above.
#elasticsearch_max_size_per_index = 1073741824
# (Approximate) maximum time before a new Elasticsearch index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 1 day.
# Configure this if you used 'rotation_strategy = time' above.
# Please note that this rotation period does not look at the time specified in the received messages, but is
# using the real clock value to decide when to rotate the index!
# Specify the time using a duration and a suffix indicating which unit you want:
# 1w = 1 week
# 1d = 1 day
# 12h = 12 hours
# Permitted suffixes are: d for day, h for hour, m for minute, s for second.
#elasticsearch_max_time_per_index = 1d
# Disable checking the version of Elasticsearch for being compatible with this Graylog release.
# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss!
#elasticsearch_disable_version_check = true
# Disable message retention on this node, i. e. disable Elasticsearch index rotation.
#no_retention = false
# How many indices do you want to keep?
elasticsearch_max_number_of_indices = 20
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
# - delete # Deletes the index completely (Default)
# - close # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = delete
# How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 4
elasticsearch_replicas = 0
# Prefix for all Elasticsearch indices and index aliases managed by Graylog.
elasticsearch_index_prefix = graylog
# Name of the Elasticsearch index template used by Graylog to apply the mandatory index mapping.
# # Default: graylog-internal
#elasticsearch_template_name = graylog-internal
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: https://www.graylog.org/documentation/general/queries/
allow_leading_wildcard_searches = true
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your Elasticsearch cluster has enough memory.
allow_highlighting = true
# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
# all these
# this must be the same as for your Elasticsearch cluster
elasticsearch_cluster_name = graylog
# The prefix being used to generate the Elasticsearch node name which makes it easier to identify the specific Graylog
# server running the embedded Elasticsearch instance. The node name will be constructed by concatenating this prefix
# and the Graylog node ID (see node_id_file), for example "graylog-17052010-1234-5678-abcd-1337cafebabe".
# Default: graylog-
#elasticsearch_node_name_prefix = graylog-
# we don't want the Graylog server to store any data, or be master node
#elasticsearch_node_master = false
#elasticsearch_node_data = false
# use a different port if you run multiple Elasticsearch nodes on one machine
elasticsearch_transport_tcp_port = 9350
# we don't need to run the embedded HTTP server here
elasticsearch_http_enabled = false
elasticsearch_discovery_zen_ping_multicast_enabled = false
elasticsearch_discovery_zen_ping_unicast_hosts = elasticsearch:9300
# Change the following setting if you are running into problems with timeouts during Elasticsearch cluster discovery.
# The setting is specified in milliseconds, the default is 5000ms (5 seconds).
#elasticsearch_cluster_discovery_timeout = 5000
# the following settings allow to change the bind addresses for the Elasticsearch client in Graylog
# these settings are empty by default, letting Elasticsearch choose automatically,
# override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
# refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html
# for special values here
elasticsearch_network_host = 0.0.0.0
#elasticsearch_network_bind_host =
#elasticsearch_network_publish_host =
# The total amount of time discovery will look for other Elasticsearch nodes in the cluster
# before giving up and declaring the current node master.
#elasticsearch_discovery_initial_state_timeout = 3s
# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# Elasticsearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard
# Global request timeout for Elasticsearch requests (e. g. during search, index creation, or index time-range
# calculations) based on a best-effort to restrict the runtime of Elasticsearch operations.
# Default: 1m
#elasticsearch_request_timeout = 1m
# Time interval for index range information cleanups. This setting defines how often stale index range information
# is being purged from the database.
# Default: 1h
#index_ranges_cleanup_interval = 1h
# Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
# module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
# that every outputbuffer processor manages its own batch and performs its own batch write calls.
# ("outputbuffer_processors" variable)
output_batch_size = 500
# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
# batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
# for this time period is less than output_batch_size * outputbuffer_processors.
output_flush_interval = 1
# As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and
# over again. To prevent this, the following configuration options define after how many faults an output will
# not be tried again for an also configurable amount of seconds.
output_fault_count_threshold = 5
output_fault_penalty_seconds = 30
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 3
#outputbuffer_processor_keep_alive_time = 5000
#outputbuffer_processor_threads_core_pool_size = 3
#outputbuffer_processor_threads_max_pool_size = 30
# UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
#udp_recvbuffer_sizes = 1048576
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
# - yielding
# Compromise between performance and CPU usage.
# - sleeping
# Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
# - blocking
# High throughput, low latency, higher CPU usage.
# - busy_spinning
# Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 65536
inputbuffer_ring_size = 65536
inputbuffer_processors = 2
inputbuffer_wait_strategy = blocking
# Enable the disk based message journal.
message_journal_enabled = true
# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and
# must not contain any other files than the ones created by Graylog itself.
message_journal_dir = /usr/share/graylog/data/journal
# The journal holds messages until they can be written to Elasticsearch,
# for a maximum of 12 hours or 5 GB, whichever happens first.
# During normal operation the journal will be smaller.
#message_journal_max_age = 12h
#message_journal_max_size = 5gb
#message_journal_flush_age = 1m
#message_journal_flush_interval = 1000000
#message_journal_segment_age = 1h
#message_journal_segment_size = 100mb
# Number of threads used exclusively for dispatching internal events. Default is 2.
#async_eventbus_processors = 2
# How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status checking load balancers in front.
lb_recognition_period_seconds = 3
# Every message is matched against the configured streams and it can happen that a stream contains rules which
# take an unusual amount of time to run, for example if its using regular expressions that perform excessive backtracking.
# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
# streams, Graylog limits the execution time for each stream.
# The default values are noted below, the timeout is in milliseconds.
# If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times
# that stream is disabled and a notification is shown in the web interface.
#stream_processing_timeout = 2000
#stream_processing_max_faults = 3
# Length of the interval in seconds in which the alert conditions for all streams should be checked
# and alarms are being sent.
#alert_check_interval = 60
# Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple
# outputs. The next setting defines the timeout for a single output module, including the default output module where all
# messages end up.
#
# Time in milliseconds to wait for all message outputs to finish writing a single message.
#output_module_timeout = 10000
# Time in milliseconds after which a detected stale master node is being rechecked on startup.
#stale_master_timeout = 2000
# Time in milliseconds which Graylog is waiting for all threads to stop on shutdown.
#shutdown_timeout = 30000
# MongoDB connection string
# See http://docs.mongodb.org/manual/reference/connection-string/ for details
mongodb_uri = mongodb://mongo/graylog
# Authenticate against the MongoDB server
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog
# Use a replica set instead of a single host
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog
# Increase this value according to the maximum connections your MongoDB server can handle from a single client
# if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5,
# then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
# Drools Rule File (Use to rewrite incoming log messages)
# See: https://www.graylog.org/documentation/general/rewriting/
#rules_file = /etc/graylog/server/rules.drl
# Email transport
#transport_email_enabled = false
#transport_email_hostname = mail.example.com
#transport_email_port = 587
#transport_email_use_auth = true
#transport_email_use_tls = true
#transport_email_use_ssl = true
#transport_email_auth_username = you@example.com
#transport_email_auth_password = secret
#transport_email_subject_prefix = [graylog]
#transport_email_from_email = graylog@example.com
# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users.
#transport_email_web_interface_url = https://graylog.example.com
# The default connect timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 5s
#http_connect_timeout = 5s
# The default read timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_read_timeout = 10s
# The default write timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_write_timeout = 10s
# HTTP proxy for outgoing HTTP connections
#http_proxy_uri =
# Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is to optimize
# cycled indices.
#disable_index_optimization = true
# Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is 1.
#index_optimization_max_num_segments = 1
# The threshold of the garbage collection runs. If GC runs take longer than this threshold, a system notification
# will be generated to warn the administrator about possible problems with the system. Default is 1 second.
#gc_warning_threshold = 1s
# Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
#ldap_connection_timeout = 2000
# Enable collection of Graylog-related metrics into MongoDB
# WARNING: This will add *a lot* of data into your MongoDB database on a regular interval (1 second)!
# DEPRECATED: This setting and the respective feature will be removed in a future version of Graylog.
#enable_metrics_collection = false
# Disable the use of SIGAR for collecting system stats
#disable_sigar = false
# The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second)
#dashboard_widget_default_cache_time = 10s
# Automatically load content packs in "content_packs_dir" on the first start of Graylog.
content_packs_loader_enabled = true
# The directory which contains content packs which should be loaded on the first start of Graylog.
content_packs_dir = /usr/share/graylog/data/contentpacks
# A comma-separated list of content packs (files in "content_packs_dir") which should be applied on
# the first start of Graylog.
content_packs_auto_load = grok-patterns.json


@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration packages="org.graylog2.log4j">
<Appenders>
<Console name="STDOUT" target="SYSTEM_OUT">
<PatternLayout pattern="%d %-5p: %c - %m%n"/>
</Console>
<!-- Internal Graylog log appender. Please do not disable. This makes internal log messages available via REST calls. -->
<Memory name="graylog-internal-logs" bufferSize="500"/>
</Appenders>
<Loggers>
<!-- Application Loggers -->
<Logger name="org.graylog2" level="info"/>
<Logger name="com.github.joschi.jadconfig" level="warn"/>
<!-- this emits a harmless warning for ActiveDirectory every time which we can't work around :( -->
<Logger name="org.apache.directory.api.ldap.model.message.BindRequestImpl" level="error"/>
<!-- Prevent DEBUG message about Lucene Expressions not found. -->
<Logger name="org.elasticsearch.script" level="warn"/>
<!-- Disable messages from the version check -->
<Logger name="org.graylog2.periodical.VersionCheckThread" level="off"/>
<!-- Suppress crazy byte array dump of Drools -->
<Logger name="org.drools.compiler.kie.builder.impl.KieRepositoryImpl" level="warn"/>
<!-- Silence chatty natty -->
<Logger name="com.joestelmach.natty.Parser" level="warn"/>
<!-- Silence Kafka log chatter -->
<Logger name="kafka.log.Log" level="warn"/>
<Logger name="kafka.log.OffsetIndex" level="warn"/>
<Root level="warn">
<AppenderRef ref="STDOUT"/>
<AppenderRef ref="graylog-internal-logs"/>
</Root>
</Loggers>
</Configuration>


@@ -0,0 +1,35 @@
---
# main tasks for running graylog on a docker host
# configure host for graylog
# create folders for certs, data,
- name: create data folders (/opt/dkregistry)
run_once: true
file:
path: "{{ item }}"
state: directory
owner: root
group: docker
mode: 0777
with_items:
- /opt/shared/graylog/config
- /opt/shared/graylog/data/config
- /opt/shared/graylog/data/elasticsearch
- /opt/shared/graylog/data/journal
- /opt/shared/graylog/data/mongo
- name: install default config files
run_once: true
copy:
src: "{{ item }}"
dest: "/opt/shared/graylog/config/{{ item }}"
with_items:
- graylog.conf
- log4j2.xml
# setup graylog docker service
#- name: run docker up
# shell: "DOCKER_HOST=dkhost03:2376 docker stack deploy -c docker-compose.yml graylog"
# run_once: true
# args:
# chdir: roles/docker_graylog/files


@@ -0,0 +1,6 @@
---
# private credentials used in docker_registry
docker_registry.users:
- { "richard" : "richard" }
- { "testuser" : "testpassword" }


@@ -0,0 +1,20 @@
registry:
restart: always
image: registry:2
ports:
- 5000:5000
environment:
REGISTRY_HTTP_SECRET: aabuioqlwlcpp2
# REGISTRY_HTTP_TLS_CERTIFICATE: /certs/cert.pem
# REGISTRY_HTTP_TLS_KEY: /certs/privkey.pem
REGISTRY_HTTP_LETSENCRYPT_CACHEFILE: /var/run/letsencrypt.cache
REGISTRY_HTTP_LETSENCRYPT_EMAIL: r_morgan@sympatico.ca
# REGISTRY_HTTP_HOST: https://192.168.4.41:5000
# REGISTRY_HTTP_ADDR: 192.168.4.41:5000
REGISTRY_AUTH: htpasswd
REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
volumes:
- /opt/shared/dkregistry/data:/var/lib/registry
- /etc/letsencrypt/live/dkregistry.xai-corp.net:/certs
- /opt/shared/dkregistry/auth:/auth


@@ -0,0 +1,40 @@
---
# Main task for creating a docker registry
- name: clean up old config
command: "{{ item }}"
with_items:
- "rm -rf /opt/dkrepository"
- "rm -rf /opt/shared/dkrepository/auth"
# create folders for certs, data,
- name: create data folders (/opt/dkregistry)
file:
path: "{{ item }}"
state: directory
owner: root
group: docker
mode: 0770
with_items:
- /opt/shared/dkregistry/data
- /opt/shared/dkregistry/auth
# make auth files using docker container
- name: create auth file
shell: echo '' > /opt/shared/dkregistry/auth/htpasswd
- name: add user to auth file
shell: "docker run --entrypoint htpasswd registry:2 -Bbn {{ item.name }} {{ item.pass }} >> /opt/shared/dkregistry/auth/htpasswd"
with_items:
- { "name" : "richard", "pass" : "richard" }
- { "name" : "testuser", "pass" : "testpassword" }
- name: copy composer file
copy:
src: docker-compose.yml
dest: /opt/dkregistry/docker-compose.yml
- name: run docker up
shell: "docker-compose down && docker-compose create && docker-compose start"
args:
chdir: /opt/dkregistry
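A quick smoke test of the registry these tasks bring up could look like the following, reusing the hostname and throwaway test credentials that appear in the files above (a sketch, not part of the role):
docker login dkregistry.xai-corp.net:5000 -u testuser -p testpassword
docker pull alpine:3 && docker tag alpine:3 dkregistry.xai-corp.net:5000/alpine:3
docker push dkregistry.xai-corp.net:5000/alpine:3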


@@ -0,0 +1,65 @@
---
# tasks for managing docker a swarm
# init swarm
- name: run init on a host
command: docker swarm init
args:
creates: /var/lib/docker/swarm/state.json
when: "ansible_nodename in swarm.managers"
run_once: true
# get tokens
- name: get manager_token
command: docker swarm join-token manager -q
register: manager_token
when: "ansible_nodename in swarm.managers"
run_once: true
changed_when: False
- name: get worker_token
command: docker swarm join-token worker -q
register: worker_token
when: "ansible_nodename in swarm.managers"
run_once: true
changed_when: False
#- debug: var=manager_token
# add hosts to swarm
- name: adding manager to swarm
command: docker swarm join --token={{manager_token.stdout}} {{swarm.managers[0]}}
args:
creates: /var/lib/docker/swarm/state.json
when: "ansible_nodename in swarm.managers"
- name: adding worker to swarm
command: docker swarm join --token={{worker_token.stdout}} {{swarm.managers[0]}}
args:
creates: /var/lib/docker/swarm/state.json
when: "ansible_nodename in swarm.workers"
# remove hosts from swarm
- name: remove hosts from swarm if found in remove list
command: docker swarm leave
args:
removes: /var/lib/docker/swarm/state.json
when: "ansible_nodename in swarm.removed"
# swarm config
- name: get history limit
command: docker info --format '{% raw %}{{json .Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit}}{% endraw %}'
register: history_limit
run_once: true
changed_when: False
- debug: var=history_limit.stdout
run_once: True
- name: set history limit
command: docker swarm update --task-history-limit {{swarm.history}}
when:
- "ansible_nodename in swarm.managers"
- 'swarm.history != history_limit.stdout|int'
run_once: true
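After these tasks run, the swarm state can be verified on any manager node with standard docker commands (a sketch, outside the role):
docker node ls                                        # list managers/workers and their availability
docker info --format '{{ .Swarm.ControlAvailable }}'  # prints true on manager nodes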


@@ -0,0 +1,9 @@
---
# default vars
dockerhost:
users:
- richard
- ansible
# compose_version: 1.13.0
compose_version: 1.18.0


@@ -0,0 +1,10 @@
{
"insecure-registries": [
"dkhost.xai-corp.net:5000"
],
"dns": [
"192.168.4.12",
"192.168.4.22",
"8.8.8.8"
]
}


@@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2376


@@ -0,0 +1,7 @@
---
# handlers for docker services
- name: restart docker
service:
name: docker
state: restarted


@@ -0,0 +1,26 @@
---
- name: create mount points
file:
path: "{{ item.value.mount }}"
state: directory
mode: 0755
with_dict: gluster
- name: create glusterfs mount hosts file entries
lineinfile:
state: present
dest: /etc/fstab
insertafter: EOF
regexp: '^#?{{ item.value.host }}'
line: '{{ item.value.host }} {{ item.value.mount }} glusterfs direct-io-mode=disable,defaults,_netdev 0 0'
with_dict: gluster
- name: remove glusterfs mount hosts file entries
lineinfile:
state: absent
dest: /etc/fstab
insertafter: EOF
regexp: '^#?{{ item.value.host }}'
line: '{{ item.value.host }} {{ item.value.mount }} glusterfs direct-io-mode=disable,defaults,_netdev 0 0'
with_dict: gluster_remove


@@ -0,0 +1,31 @@
---
# install glusterfs volume driver plugin
- name: install golang packages
apt:
state: installed
package: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
with_items:
- "golang-go"
- name: make golang workspace
file:
path: /opt/golang
state: directory
mode: 0775
- name: fetch go package
shell: GOPATH=/opt/golang go get github.com/amarkwalder/docker-volume-glusterfs
- name: add plugin to startup
lineinfile:
dest: /etc/rc.local
regexp: 'docker-volume-glusterfs -servers'
line: '/opt/golang/bin/docker-volume-glusterfs -servers cubox-i:home &'
insertbefore: "^exit 0"
#sudo docker-volume-glusterfs -servers gfs-1:gfs-2:gfs-3


@@ -0,0 +1,89 @@
---
# main tasks to install docker
- name: install packages
apt:
state: installed
package: "{{ item }}"
update_cache: yes
with_items:
- "htop"
- "wget"
- "apt-transport-https"
- "ca-certificates"
- "software-properties-common"
- "curl"
- "glusterfs-client"
- xfsprogs
- attr
- virtualbox-guest-additions-iso
- name: remove deprecated packages
apt:
state: absent
package: "{{ item }}"
with_items:
- "docker"
- "docker-engine"
- "docker.io"
- name: remove obsolete apt repo
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu ubuntu-xenial Release
state: absent
- name: install repo keys
shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- name: install apt repo
apt_repository:
repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable
state: present
- name: install prerequisites
shell: apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual
- name: create docker group
group: state=present name=docker gid=999 system=yes
- name: add users to docker group
user: name={{ item }} groups=docker append=yes
with_items: "{{ dockerhost.users }}"
- name: install via apt
apt:
state: latest
update_cache: true
package: "{{ item }}"
with_items:
- docker-ce
# - virtualbox-guest-dkms
- name: copy docker config file
copy:
src: daemon.json
dest: /etc/docker/daemon.json
notify:
- restart docker
## install docker-compose
- name: install docker-compose from git repo
shell: |
curl -L https://github.com/docker/compose/releases/download/{{compose_version}}/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
## expose the docker daemon on tcp
- name: expose docker daemon
file:
state: directory
path: /etc/systemd/system/docker.service.d
- name: expose docker daemon
copy:
src: docker.service.host.conf
dest: /etc/systemd/system/docker.service.d/host.conf
notify:
- restart docker


@@ -0,0 +1,99 @@
---
# main tasks to install docker
- name: install packages
apt:
state: installed
package: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
with_items:
- "htop"
- "wget"
- "apt-transport-https"
- "ca-certificates"
- "glusterfs-client"
- xfsprogs
- attr
- virtualbox-guest-additions-iso
#- name: install repo keys
# apt_key:
# keyserver: hkp://ha.pool.sks-keyservers.net:80
# id: 58118E89F3A912897C070ADBF76221572C52609D
- name: install apt repo
apt_repository:
repo: deb https://apt.dockerproject.org/repo ubuntu-xenial main
state: present
- name: import repo key
apt_key:
id: 58118E89F3A912897C070ADBF76221572C52609D
keyserver: "hkp://ha.pool.sks-keyservers.net:80"
state: present
- name: install prerequisites
shell: apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual
- name: create docker group
group: state=present name=docker gid=999 system=yes
- name: add users to docker group
user: name={{ item }} groups=docker append=yes
with_items: "{{ dockerhost.users }}"
- name: install via apt
apt:
state: latest
update_cache: true
package: "{{ item }}"
with_items:
- docker-engine
# - virtualbox-guest-dkms
- name: copy docker config file
copy:
src: daemon.json
dest: /etc/docker/daemon.json
notify:
- restart docker
#- name: create mount points for shares
# file:
# state: absent
# mode: 0774
# group: docker
# dest: /opt/shared
#
#- name: create mount points for shares
# file:
# state: link
# mode: 0774
# group: docker
# src: /media/sf_dkhost
# dest: /opt/shared
## install docker-compose
- name: install docker-compose from git repo
shell: |
curl -L https://github.com/docker/compose/releases/download/1.13.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
## expose the docker daemon on tcp
- name: expose docker daemon
file:
state: directory
path: /etc/systemd/system/docker.service.d
- name: expose docker daemon
copy:
src: docker.service.host.conf
dest: /etc/systemd/system/docker.service.d/host.conf
notify:
- restart docker


@@ -0,0 +1,27 @@
---
# main docker tasks
- include: "install-xenial-ce.yml"
when: ansible_distribution_release == "xenial"
become: true
- include: "fstab.yml"
- name: symlinks
file:
state: link
src: "{{ item.src }}"
path: "{{ item.path }}"
force: yes
with_items:
- { src: "/opt/shared/letsencrypt", path: "/etc/letsencrypt" }
- name: setup owncloud cron job
cron:
name: owncloud
state: absent
user: www-data
day: "*/15"
job: "curl -k https://xaibox.xai-corp.net/cron.php"
- include: glusterfs-volume-driver.yml


@@ -0,0 +1,10 @@
---
# create cronjob to set dynamic ip
- name: create zone edit cronjob
cron:
name: zoneedit
minute: "*/30"
user: root
job: 'IP=`curl -s http://api.ipify.org` && wget -O - --http-user=rmorgan15 --http-passwd=D422B334D3768ACD "https://dynamic.zoneedit.com/auth/dynamic.html?host=test.xai-corp.net&dnsto=$IP" &>/dev/null'
cron_file: zoneedit
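The ZoneEdit password sits in the job string in clear text; a sketch that pulls it from a vaulted variable instead (zoneedit_password is an assumed name, defined in an ansible-vault encrypted vars file) keeps the secret out of the playbook, although it still ends up in the rendered cron file:

# sketch only; zoneedit_password is an assumed vaulted variable
- name: create zone edit cronjob
  cron:
    name: zoneedit
    minute: "*/30"
    user: root
    job: 'IP=`curl -s http://api.ipify.org` && wget -O - --http-user=rmorgan15 --http-passwd={{ zoneedit_password }} "https://dynamic.zoneedit.com/auth/dynamic.html?host=test.xai-corp.net&dnsto=$IP" &>/dev/null'
    cron_file: zoneedit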

View File

@@ -0,0 +1,20 @@
---
# main tasks for installing glusterfs
# install packages
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=86400
- name: Ensure glusterfs server is installed.
apt: "name={{ item }} state=installed"
with_items:
- glusterfs-server
- xfsprogs
- xfsdump
- include: manage_volumes.yml
- include: prometheus_exporter.yml

View File

@@ -0,0 +1,57 @@
---
# glusterfs mounts
# create mount points
- name: create mount points for all bricks in all volumes
file:
path: "{{ item }}"
state: directory
mode: 0755
with_items: "{{mountpoints}}"
# manage volumes
#- name: remove old gluster volumes
# gluster_volume:
# state: absent
# name: "{{ item.key }}"
# rebalance: no
# replicas: false
# cluster: "{{cluster}}"
# with_dict: "{{removed_volumes}}"
# run_once: true
- name: create gluster volumes
gluster_volume:
state: present
start_on_create: yes
name: "{{ item.key }}"
brick: '{{ item.value | join(",")}}'
rebalance: no
replicas: 2
force: true
cluster: "{{cluster}}"
with_dict: "{{volumes}}"
run_once: true
#- name: create tmp gluster volumes
# gluster_volume:
# state: present
# name: "tmp"
# brick: '/data/glusterfs/tmp/brick1/brick'
# rebalance: no
# replicas: 1
# cluster: "{{cluster}}"
# run_once: true
- name: create distributed gluster volumes
gluster_volume:
state: present
name: "{{ item.key }}"
brick: '{{ item.value | join(",")}}'
rebalance: no
replicas: false
force: true
cluster: "{{cluster}}"
with_dict: "{{distributed_volumes}}"
run_once: true
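The cluster, mountpoints, volumes, and distributed_volumes variables consumed above live elsewhere in the role; the with_dict loops expect roughly this shape (an illustrative assumption -- the host names and brick paths below are placeholders, not the repository's real values):

# assumed variable shape only
cluster:
  - gluster01
  - gluster02
mountpoints:
  - /data/glusterfs/gitea/brick1
volumes:
  gitea:
    - /data/glusterfs/gitea/brick1/brick
distributed_volumes:
  scratch:
    - /data/glusterfs/scratch/brick1/brick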

View File

@@ -0,0 +1,31 @@
---
# tasks to install prometheus gluster_exporter
# https://github.com/ofesseler/gluster_exporter
# install packages
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=86400
- name: Install gluster_exporter dependencies.
apt: "name={{ item }} state=installed"
with_items:
- golang-go
- name: make golang workspace
file:
path: /opt/golang
state: directory
mode: 0775
- name: install gluster_exporter
shell: GOPATH=/opt/golang go get github.com/ofesseler/gluster_exporter
- name: add gluster_exporter to startup
lineinfile:
dest: /etc/rc.local
regexp: '/opt/golang/bin/gluster_exporter'
line: 'nohup /opt/golang/bin/gluster_exporter --profile &'
insertbefore: "^exit 0"
- name: start gluster_exporter
shell: nohup /opt/golang/bin/gluster_exporter --profile &
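Launching the exporter from /etc/rc.local works, but nothing restarts it if the process dies; a systemd unit sketch for the same binary (an assumption, not something this role installs) would be:

# illustrative unit only; not installed by this role
[Unit]
Description=Prometheus GlusterFS exporter
After=network.target

[Service]
ExecStart=/opt/golang/bin/gluster_exporter --profile
Restart=on-failure

[Install]
WantedBy=multi-user.target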

View File

@@ -0,0 +1,11 @@
---
# defaults/main.yml
# define default variable values here
websites:
available:
- home.xai-corp.net
- sql.xai-corp.net
enabled:
- home.xai-corp.net
- sql.xai-corp.net

View File

@@ -0,0 +1,15 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped
- name: restart apache2
service: name=apache2 state=restarted
- name: reload apache2
service: name=apache2 state=reloaded

View File

@@ -0,0 +1,10 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies:
- _ping
- user-richard

View File

@@ -0,0 +1,17 @@
---
# configure crons on home.xai-corp.net
- name: create vm reset crons
cron:
name: vm_reset_{{item}}
state: present
special_time: hourly
user: root
job: 'ping -c1 {{item}} || vboxmanage controlvm {{item}} reset &>/dev/null'
cron_file: vm_reset
with_items:
- dkhost01
- dkhost02
- dkhost04

View File

@@ -0,0 +1,5 @@
---
# tasks/main.yml
# define tasks here
- include: cron.yml

View File

@@ -0,0 +1,39 @@
---
#
# setup the vhosts for apache based websites
# - name: fetch existing vhosts
# fetch: src=/etc/apache2/vhost-available/{{ item }} dest=backups/
# with_items:
# - 00_default_ssl_vhost.conf
# - 00_default_vhost.conf
# - 01_home_vhost.conf
# - 02_test_vhost.conf
# - 04_sql_vhost.conf
# - 05_mail_vhost.conf
# - default_vhost.include
#
# - fetch: src=/etc/apache2/httpd.conf dest=backups/
- name: create vhost dirs
file: state=directory path=/etc/apache2/{{ item }} owner=root group=root mode=0750
with_items:
- sites-available
- sites-enabled
- name: main apache conf
template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root mode=640
notify:
- reload apache2
- name: setup available vhosts
template: src={{ item }}.j2 dest=/etc/apache2/sites-available/{{ item }}.conf owner=root group=root mode=0640
with_items: "{{ websites.available }}"
notify:
- reload apache2
- name: enable sites
file: state=link src=/etc/apache2/sites-available/{{ item }}.conf dest=/etc/apache2/sites-enabled/{{ item }}.conf
with_items: "{{ websites.enabled }}"
notify:
- reload apache2
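Reloading Apache with a broken vhost takes every site down; a small validation task run before the reload handler fires is one option (a sketch, assuming apache2ctl is available on the target):

# sketch only -- not part of this role
- name: check apache config
  command: apache2ctl configtest
  changed_when: false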

View File

@@ -0,0 +1,28 @@
<IfModule ssl_module>
<VirtualHost *:80>
ServerName home.xai-corp.net
Redirect / https://home.xai-corp.net/
</VirtualHost>
<VirtualHost *:443>
ServerName home.xai-corp.net
ServerAdmin admin@xai-corp.net
DocumentRoot "/var/www/home.xai-corp.net/public"
SSLEngine on
SSLProtocol ALL -SSLv2 -SSLv3
SSLCipherSuite ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-S$
SSLHonorCipherOrder On
SSLCertificateFile /etc/ssl/apache2/home.xai-corp.net.crt
SSLCertificateKeyFile /etc/ssl/apache2/home.xai-corp.net.key
<Directory "/var/www/home.xai-corp.net/public">
Options Indexes FollowSymLinks
AllowOverride All
Order deny,allow
Allow from 192.168.4.
</Directory>
</VirtualHost>
</IfModule>

View File

@@ -0,0 +1,181 @@
# This is a modification of the default Apache 2.2 configuration file
# for Gentoo Linux.
#
# Support:
# http://www.gentoo.org/main/en/lists.xml [mailing lists]
# http://forums.gentoo.org/ [web forums]
# irc://irc.freenode.net#gentoo-apache [irc chat]
#
# Bug Reports:
# http://bugs.gentoo.org [gentoo related bugs]
# http://httpd.apache.org/bug_report.html [apache httpd related bugs]
#
#
# This is the main Apache HTTP server configuration file. It contains the
# configuration directives that give the server its instructions.
# See <URL:http://httpd.apache.org/docs/2.2> for detailed information.
# In particular, see
# <URL:http://httpd.apache.org/docs/2.2/mod/directives.html>
# for a discussion of each configuration directive.
#
# Do NOT simply read the instructions in here without understanding
# what they do. They're here only as hints or reminders. If you are unsure
# consult the online docs. You have been warned.
#
# Configuration and logfile names: If the filenames you specify for many
# of the server's control files begin with "/" (or "drive:/" for Win32), the
# server will use that explicit path. If the filenames do *not* begin
# with "/", the value of ServerRoot is prepended -- so "var/log/apache2/foo_log"
# with ServerRoot set to "/usr" will be interpreted by the
# server as "/usr/var/log/apache2/foo.log".
# ServerRoot: The top of the directory tree under which the server's
# configuration, error, and log files are kept.
#
# Do not add a slash at the end of the directory path. If you point
# ServerRoot at a non-local disk, be sure to point the LockFile directive
# at a local disk. If you wish to share the same ServerRoot for multiple
# httpd daemons, you will need to change at least LockFile and PidFile.
ServerRoot "/usr/lib/apache2"
# Dynamic Shared Object (DSO) Support
#
# To be able to use the functionality of a module which was built as a DSO you
# have to place corresponding `LoadModule' lines at this location so the
# directives contained in it are actually available _before_ they are used.
# Statically compiled modules (those listed by `httpd -l') do not need
# to be loaded here.
#
# Example:
# LoadModule foo_module modules/mod_foo.so
#
# GENTOO: Automatically defined based on APACHE2_MODULES USE_EXPAND variable.
# Do not change manually, it will be overwritten on upgrade.
#
# The following modules are considered as the default configuration.
# If you wish to disable one of them, you may have to alter other
# configuration directives.
#
# Change these at your own risk!
LoadModule actions_module modules/mod_actions.so
LoadModule alias_module modules/mod_alias.so
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule authn_alias_module modules/mod_authn_alias.so
LoadModule authn_anon_module modules/mod_authn_anon.so
LoadModule authn_dbm_module modules/mod_authn_dbm.so
LoadModule authn_default_module modules/mod_authn_default.so
LoadModule authn_file_module modules/mod_authn_file.so
LoadModule authz_dbm_module modules/mod_authz_dbm.so
LoadModule authz_default_module modules/mod_authz_default.so
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule authz_owner_module modules/mod_authz_owner.so
LoadModule authz_user_module modules/mod_authz_user.so
LoadModule autoindex_module modules/mod_autoindex.so
<IfDefine CACHE>
LoadModule cache_module modules/mod_cache.so
</IfDefine>
LoadModule cgi_module modules/mod_cgi.so
LoadModule cgid_module modules/mod_cgid.so
<IfDefine DAV>
LoadModule dav_module modules/mod_dav.so
</IfDefine>
<IfDefine DAV>
LoadModule dav_fs_module modules/mod_dav_fs.so
</IfDefine>
<IfDefine DAV>
LoadModule dav_lock_module modules/mod_dav_lock.so
</IfDefine>
LoadModule deflate_module modules/mod_deflate.so
LoadModule dir_module modules/mod_dir.so
<IfDefine CACHE>
LoadModule disk_cache_module modules/mod_disk_cache.so
</IfDefine>
LoadModule env_module modules/mod_env.so
LoadModule expires_module modules/mod_expires.so
LoadModule ext_filter_module modules/mod_ext_filter.so
<IfDefine CACHE>
LoadModule file_cache_module modules/mod_file_cache.so
</IfDefine>
LoadModule filter_module modules/mod_filter.so
LoadModule headers_module modules/mod_headers.so
LoadModule include_module modules/mod_include.so
<IfDefine INFO>
LoadModule info_module modules/mod_info.so
</IfDefine>
LoadModule log_config_module modules/mod_log_config.so
LoadModule logio_module modules/mod_logio.so
<IfDefine CACHE>
LoadModule mem_cache_module modules/mod_mem_cache.so
</IfDefine>
LoadModule mime_module modules/mod_mime.so
LoadModule mime_magic_module modules/mod_mime_magic.so
LoadModule negotiation_module modules/mod_negotiation.so
<IfDefine PROXY>
LoadModule proxy_module modules/mod_proxy.so
</IfDefine>
<IfDefine PROXY>
LoadModule proxy_ajp_module modules/mod_proxy_ajp.so
</IfDefine>
<IfDefine PROXY>
LoadModule proxy_connect_module modules/mod_proxy_connect.so
</IfDefine>
<IfDefine PROXY>
LoadModule proxy_ftp_module modules/mod_proxy_ftp.so
</IfDefine>
<IfDefine PROXY>
LoadModule proxy_http_module modules/mod_proxy_http.so
</IfDefine>
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule speling_module modules/mod_speling.so
<IfDefine SSL>
LoadModule ssl_module modules/mod_ssl.so
</IfDefine>
<IfDefine STATUS>
LoadModule status_module modules/mod_status.so
</IfDefine>
LoadModule unique_id_module modules/mod_unique_id.so
<IfDefine USERDIR>
LoadModule userdir_module modules/mod_userdir.so
</IfDefine>
LoadModule usertrack_module modules/mod_usertrack.so
LoadModule vhost_alias_module modules/mod_vhost_alias.so
# If you wish httpd to run as a different user or group, you must run
# httpd as root initially and it will switch.
#
# User/Group: The name (or #number) of the user/group to run httpd as.
# It is usually good practice to create a dedicated user and group for
# running httpd, as with most system services.
User apache
Group apache
# Supplemental configuration
#
# Most of the configuration files in the /etc/apache2/modules.d/ directory can
# be turned on using APACHE2_OPTS in /etc/conf.d/apache2 to add extra features
# or to modify the default configuration of the server.
#
# To know which flag to add to APACHE2_OPTS, look at the first line of the
# the file, which will usually be an <IfDefine OPTION> where OPTION is the
# flag to use.
Include /etc/apache2/modules.d/*.conf
# Virtual-host support
#
# Gentoo has made using virtual-hosts easy. In /etc/apache2/vhosts.d/ we
# include a default vhost (enabled by adding -D DEFAULT_VHOST to
# APACHE2_OPTS in /etc/conf.d/apache2).
#Include /etc/apache2/vhosts.d/*.conf
Include /etc/apache2/sites-enabled/*.conf
ServerName localhost
Listen 80
NameVirtualHost *:80
Listen 443
NameVirtualHost *:443
# vim: ts=4 filetype=apache

View File

@@ -0,0 +1,29 @@
<IfModule ssl_module>
<VirtualHost *:80>
ServerName sql.xai-corp.net
Redirect / https://sql.xai-corp.net/
</VirtualHost>
<VirtualHost *:443>
ServerName sql.xai-corp.net
ServerAdmin admin@xai-corp.net
DocumentRoot "/var/www/sql.xai-corp.net/htdocs"
SSLEngine on
SSLProtocol ALL -SSLv2 -SSLv3
SSLCipherSuite ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-S$
SSLHonorCipherOrder On
SSLCertificateFile /etc/ssl/apache2/sql.xai-corp.net.crt
SSLCertificateKeyFile /etc/ssl/apache2/sql.xai-corp.net.key
<Directory "/var/www/sql.xai-corp.net/htdocs">
Options Indexes FollowSymLinks
AllowOverride All
Order deny,allow
Allow from 192.168.4.
</Directory>
</VirtualHost>
</IfModule>

View File

@@ -0,0 +1,10 @@
---
# vars/_extravars.yml
# define extra variable values here
# this file should be loaded via an include_vars statement in the task.
# often this is used for managing differences between operating systems.
# Variable setup.
#- name: Include OS-Specific variables
# include_vars: "{{ ansible_os_family }}.yml"

View File

@@ -0,0 +1,6 @@
---
# defaults/main.yml
# define default variable values here
java_packages:
- openjdk-7-jdk

View File

@@ -0,0 +1,9 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped

View File

@@ -0,0 +1,8 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies: []

View File

@@ -0,0 +1,12 @@
---
# tasks/main.yml
# define tasks here
# install packages
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=86400
- name: Ensure Java is installed.
apt: "name={{ item }} state=installed"
  with_items: "{{ java_packages }}"

View File

@@ -0,0 +1,10 @@
---
# vars/_extravars.yml
# define extra variable values here
# this file should be loaded via an include_vars statement in the task.
# often this is used for managing differences between operating systems.
# Variable setup.
#- name: Include OS-Specific variables
# include_vars: "{{ ansible_os_family }}.yml"

View File

@@ -0,0 +1,35 @@
---
# defaults/main.yml
# define default variable values here
jenkins_repo_url: deb http://pkg.jenkins-ci.org/debian binary/
jenkins_repo_key_url: http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key
jenkins_connection_delay: 5
jenkins_connection_retries: 60
jenkins_hostname: localhost
jenkins_port: 8080
jenkins_jar_location: /opt/jenkins-cli.jar
jenkins_home: /var/lib/jenkins
jenkins_plugins:
- git
- scm-sync-configuration
- ssh
- ansible
- bitbucket-pullrequest-builder
- windows-slaves
- ssh-slaves
- ant
- cloudbees-folder
- role-strategy
- phing
- build-pipeline-plugin
- delivery-pipeline-plugin
- deployment-sphere
- ownership
jenkins_git_user: jenkins
jenkins_git_email: jenkins@xai-corp.net
http_standard_port: 80

View File

@@ -0,0 +1,12 @@
---
# handlers/main.yml
# define handlers here
#- name: restart <service>
# service: name=<service> state=restarted
#- name: stop <service>
# service: name=<service> state=stopped
- name: restart jenkins
service: name=jenkins state=restarted

View File

@@ -0,0 +1,7 @@
---
# meta/main.yml
# define dependencies here
dependencies:
- {role: java }
- {role: nginx }

View File

@@ -0,0 +1,96 @@
---
# tasks/main.yml
# define tasks here
# install packages
- name: Ensure dependencies are installed.
apt: pkg=curl state=installed
- name: Add Jenkins apt repository key.
apt_key:
url: "{{ jenkins_repo_key_url }}"
state: present
- name: Add Jenkins apt repository.
apt_repository:
repo: "{{ jenkins_repo_url }}"
state: present
update_cache: yes
- name: Ensure Jenkins is installed.
apt: pkg=jenkins state=installed
# start jenkins
- name: Ensure Jenkins is started and runs on startup.
service: name=jenkins state=started enabled=yes
- name: Wait for Jenkins to start up.
shell: curl --head --silent http://{{ jenkins_hostname }}:8080/cli/
register: result
until: result.stdout.find("200 OK") != -1
retries: "{{ jenkins_connection_retries }}"
delay: "{{ jenkins_connection_delay }}"
changed_when: false
# install cli
- name: Get the jenkins-cli jarfile from the Jenkins server.
get_url:
url: "http://{{ jenkins_hostname }}:8080/jnlpJars/jenkins-cli.jar"
dest: "{{ jenkins_jar_location }}"
register: jarfile_get
until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg"
retries: 5
delay: 10
# - name: Copy ssh key for authentication
# copy: src={ HOME }/.ssh/jenkins.pub dest="/var/lib/jenkins/.ssh/"
# configure jenkins
# - name: Update jenkins config
# lineinfile: dest=/var/lib/jenkins/config.xml regexp={{ item.reg }} line={{ item.line }}
# with_items:
# - {"reg": "<useSecurity>.*</useSecurity>", "line": "<useSecurity>false</useSecurity>"}
# setup .gitconfig
- name: Setup .gitconfig for jenkins user
template: dest="{{ jenkins_home }}/.gitconfig" src="gitconfig.j2" owner="jenkins" group="jenkins"
# update and install plugins
- name: Create Jenkins updates folder.
file:
path: /var/lib/jenkins/updates
owner: jenkins
group: jenkins
mode: 0755
state: directory
- name: Update Jenkins plugin data.
shell: >
curl -L https://updates.jenkins-ci.org/update-center.json | sed '1d;$d' > /var/lib/jenkins/updates/default.json
creates=/var/lib/jenkins/updates/default.json
- name: Permissions for default.json updates info.
file:
path: /var/lib/jenkins/updates/default.json
owner: jenkins
group: jenkins
mode: 0755
- name: Install Jenkins plugins.
command: >
java -jar {{ jenkins_jar_location }} -s http://{{ jenkins_hostname }}:8080/ install-plugin {{ item }}
creates=/var/lib/jenkins/plugins/{{ item }}.jpi
  with_items: "{{ jenkins_plugins }}"
notify: restart jenkins
- name: setup folder for jenkins slave on localhost
file: state=directory path=/var/lib/jenkins-slave owner=jenkins group=jenkins mode=0777
# install nginx as proxy
- name: Copy the nginx configuration file
template: src=nginx.jenkins.conf.j2 dest=/etc/nginx/sites-available/jenkins.conf
- name: Enable Jenkins Nginx configuration
file: src=/etc/nginx/sites-available/jenkins.conf dest=/etc/nginx/sites-enabled/jenkins.conf state=link
notify:
- reload nginx
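The same CLI jar used for install-plugin can confirm what actually got installed; a quick check (a sketch reusing the jenkins_jar_location and jenkins_hostname values above) is:

# sketch only -- not part of this role
- name: list installed jenkins plugins
  command: java -jar {{ jenkins_jar_location }} -s http://{{ jenkins_hostname }}:8080/ list-plugins
  changed_when: false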

View File

@@ -0,0 +1,3 @@
[user]
email = {{ jenkins_git_email }}
name = {{ jenkins_git_user }}

View File

@@ -0,0 +1,56 @@
server {
listen {{ http_standard_port }};
server_name {{ jenkins_hostname }};
access_log /var/log/nginx/jenkins_access.log;
error_log /var/log/nginx/jenkins_error.log;
location /userContent {
# Have nginx handle all the static requests to the userContent folder files
# NOTE: This is the $JENKINS_HOME dir
root /var/lib/jenkins/;
if (!-f $request_filename){
# This file does not exist, might be a directory or a /**view** url
rewrite (.*) /$1 last;
break;
}
sendfile on;
}
location @jenkins {
sendfile off;
proxy_pass http://127.0.0.1:{{ jenkins_port }};
proxy_redirect default;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_max_temp_file_size 0;
# This is the maximum upload size
client_max_body_size 10m;
client_body_buffer_size 128k;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 4k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
}
location / {
# This is the jenkins web root directory (mentioned in the /etc/default/jenkins file)
root /var/run/jenkins/war/;
# Optional configuration to detect and redirect iPhones
if ($http_user_agent ~* '(iPhone|iPod)') {
rewrite ^/$ /view/iphone/ redirect;
}
try_files $uri @jenkins;
}
}

View File

@@ -0,0 +1,10 @@
---
# vars/_extravars.yml
# define extra variable values here
# this file should be loaded via an include_vars statement in the task.
# often this is used for managing differences between operating systems.
# Variable setup.
#- name: Include OS-Specific variables
# include_vars: "{{ ansible_os_family }}.yml"

View File

@@ -0,0 +1,10 @@
---
#default values
vagrant_installer_url: https://releases.hashicorp.com/vagrant/1.8.6/vagrant_1.8.6_x86_64.deb
vagrant_installer_path: /tmp/vagrant_installer.deb
kubernetes_kubectl_url: https://storage.googleapis.com/kubernetes-release/release/v1.4.3/bin/linux/amd64/kubectl
kubernetes_kubectl_path: /usr/local/bin/kubectl
kubernates_vagrant_config_path: /opt/home.xai-corp.net/coreos-kubernetes/multi-node/vagrant

View File

@@ -0,0 +1,9 @@
---
# meta/main.yml
# define dependencies here
# dependencies:
# - { role: geerlingguy.java }
dependencies:
- { role: vagrant }

View File

@@ -0,0 +1,54 @@
---
# main tasks for kubernetes role
# install required system packages
- name: install system packages
apt:
state: present
name: "{{item}}"
update_cache: yes
with_items:
- git
- virtualbox
# install kubectl
- stat: path="{{kubernetes_kubectl_path}}"
register: kubectl_exe
- name: download kubectl
get_url:
url: "{{ kubernetes_kubectl_url }}"
dest: "{{kubernetes_kubectl_path}}"
mode: 0755
when: kubectl_exe.stat.exists == False
# get vagrant file from git
- name: fetch vagrantfile
git:
repo: https://github.com/coreos/coreos-kubernetes.git
dest: /opt/home.xai-corp.net/coreos-kubernetes
- stat: path="{{kubernates_vagrant_config_path}}/config.rb"
register: k8s_config
- name: create k8s config
  command: cp {{kubernates_vagrant_config_path}}/config.rb.sample {{kubernates_vagrant_config_path}}/config.rb
  when: k8s_config.stat.exists == False
# update vagrant box
- name: update vagrant box
command: vagrant box update
args:
chdir: "{{kubernates_vagrant_config_path}}"
- name: vagrant up
command: vagrant up
args:
chdir: "{{kubernates_vagrant_config_path}}"
- name: configure kubectl
  # run through a shell from the vagrant directory so ${PWD}/ssl resolves to the generated certificates
  shell: "{{ item }}"
  args:
    chdir: "{{kubernates_vagrant_config_path}}"
  with_items:
    - kubectl config set-cluster vagrant-multi-cluster --server=https://172.17.4.99:443 --certificate-authority=${PWD}/ssl/ca.pem
    - kubectl config set-credentials vagrant-multi-admin --certificate-authority=${PWD}/ssl/ca.pem --client-key=${PWD}/ssl/admin-key.pem --client-certificate=${PWD}/ssl/admin.pem
    - kubectl config set-context vagrant-multi --cluster=vagrant-multi-cluster --user=vagrant-multi-admin
    - kubectl config use-context vagrant-multi

View File

@@ -0,0 +1,163 @@
#!/bin/bash
#
# 30-sysinfo - generate the system information
# Copyright (c) 2015-2017 Igor Pecovnik
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
THIS_SCRIPT="sysinfo"
MOTD_DISABLE=""
[[ -f /etc/default/armbian-motd ]] && . /etc/default/armbian-motd
for f in $MOTD_DISABLE; do
[[ $f == $THIS_SCRIPT ]] && exit 0
done
# define which hard drive you want to monitor
storage=/dev/sda1
# don't edit below here
function display() {
# $1=name $2=value $3=red_limit $4=minimal_show_limit $5=unit $6=after $7=acs/desc{
# battery red color is opposite, lower number
if [[ "$1" == "Battery" ]]; then local great="<"; else local great=">"; fi
if [[ -n "$2" && "$2" > "0" && (( "${2%.*}" -ge "$4" )) ]]; then
printf "%-14s%s" "$1:"
if awk "BEGIN{exit ! ($2 $great $3)}"; then echo -ne "\e[0;91m $2"; else echo -ne "\e[0;92m $2"; fi
printf "%-1s%s\x1B[0m" "$5"
printf "%-11s%s\t" "$6"
return 1
fi
} # display
function getboardtemp() {
if [ -f /etc/armbianmonitor/datasources/soctemp ]; then
read raw_temp </etc/armbianmonitor/datasources/soctemp
if [ ${raw_temp} -le 200 ]; then
# Allwinner legacy kernels output degree C
board_temp=${raw_temp}
else
# Marvell gets special treatment for whatever reasons
grep -qi Marvell /proc/cpuinfo && \
board_temp=$(( $(awk '{printf("%d",$1/1000)}' <<<${raw_temp}) - 20 )) || \
board_temp=$(awk '{printf("%d",$1/1000)}' <<<${raw_temp})
fi
elif [ -f /etc/armbianmonitor/datasources/pmictemp ]; then
# fallback to PMIC temperature
board_temp=$(awk '{printf("%d",$1/1000)}' </etc/armbianmonitor/datasources/pmictemp)
fi
} # getboardtemp
function batteryinfo() {
# Battery info for Allwinner
mainline_dir="/sys/power/axp_pmu"
legacy_dir="/sys/class/power_supply"
if [[ -e "$mainline_dir" ]]; then
read status_battery_connected < $mainline_dir/battery/connected
if [[ "$status_battery_connected" == "1" ]]; then
read status_battery_charging < $mainline_dir/charger/charging
read status_ac_connect < $mainline_dir/ac/connected
read battery_percent< $mainline_dir/battery/capacity
# display charging / percentage
if [[ "$status_ac_connect" == "1" && "$battery_percent" -lt "100" ]]; then
status_battery_text=" charging"
elif [[ "$status_ac_connect" == "1" && "$battery_percent" -eq "100" ]]; then
status_battery_text=" charged"
else
status_battery_text=" discharging"
fi
fi
elif [[ -e "$legacy_dir/battery" ]]; then
if [[ (("$(cat $legacy_dir/battery/voltage_now)" -gt "5" )) ]]; then
status_battery_text=" "$(awk '{print tolower($0)}' < $legacy_dir/battery/status)
read battery_percent <$legacy_dir/battery/capacity
fi
fi
} # batteryinfo
function ambienttemp() {
# read ambient temperature from USB device if available
amb_temp=$(temper -c 2>/dev/null)
case ${amb_temp} in
*"find the USB device"*)
echo ""
;;
*)
amb_temp=$(awk '{print $NF}' <<<$amb_temp | sed 's/C//g')
echo -n "scale=1;${amb_temp}/1" | grep -oE "\-?[[:digit:]]+.[[:digit:]]"
esac
} # ambienttemp
function get_ip_addresses() {
# return up to 2 IPv4 address(es) comma separated
hostname -I | tr " " "\n" | \
grep -E "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" | \
tail -n2 | sed ':a;N;$!ba;s/\n/,/g'
} # get_ip_addresses
function storage_info() {
# storage info
RootInfo=$(df -h /)
root_usage=$(awk '/\// {print $(NF-1)}' <<<${RootInfo} | sed 's/%//g')
root_total=$(awk '/\// {print $(NF-4)}' <<<${RootInfo})
StorageInfo=$(df -h $storage 2>/dev/null | grep $storage)
if [ -n "${StorageInfo}" ]; then
storage_usage=$(awk '/\// {print $(NF-1)}' <<<${StorageInfo} | sed 's/%//g')
storage_total=$(awk '/\// {print $(NF-4)}' <<<${StorageInfo})
[[ "$storage" == */sd* ]] && hdd_temp=$(hddtemp -u C -nq $storage)
fi
} # storage_info
# query various systems and send some stuff to the background for overall faster execution.
# Works only with ambienttemp and batteryinfo since A20 is slow enough :)
amb_temp=$(ambienttemp &)
ip_address=$(get_ip_addresses &)
batteryinfo
storage_info
getboardtemp
critical_load=$(( 1 + $(grep -c processor /proc/cpuinfo) / 2 ))
# get uptime, logged in users and load in one take
UptimeString=$(uptime | tr -d ',')
time=$(awk -F" " '{print $3" "$4}' <<<"${UptimeString}")
load="$(awk -F"average: " '{print $2}'<<<"${UptimeString}")"
users="$(awk -F" user" '{print $1}'<<<"${UptimeString}")"
case ${time} in
1:*) # 1-2 hours
time=$(awk -F" " '{print $3" hour"}' <<<"${UptimeString}")
;;
*:*) # 2-24 hours
time=$(awk -F" " '{print $3" hours"}' <<<"${UptimeString}")
;;
esac
# memory and swap
mem_info=$(LANG=en_US.UTF-8 free -w 2>/dev/null | grep "^Mem" || LANG=en_US.UTF-8 free | grep "^Mem")
memory_usage=$(awk '{printf("%.0f",(($2-($4+$6+$7))/$2) * 100)}' <<<${mem_info})
memory_total=$(awk '{printf("%d",$2/1024)}' <<<${mem_info})
swap_info=$(LANG=en_US.UTF-8 free -m | grep "^Swap")
swap_usage=$( (awk '/Swap/ { printf("%3.0f", $3/$2*100) }' <<<${swap_info} 2>/dev/null || echo 0) | tr -c -d '[:digit:]')
swap_total=$(awk '{print $(2)}' <<<${swap_info})
# display info
display "System load" "${load%% *}" "${critical_load}" "0" "" "${load#* }"
printf "Up time: \x1B[92m%s\x1B[0m\t\t" "$time"
display "Local users" "${users##* }" "3" "2" ""
echo "" # fixed newline
display "Memory usage" "$memory_usage" "70" "0" " %" " of ${memory_total}MB"
display "Swap usage" "$swap_usage" "10" "0" " %" " of $swap_total""Mb"
printf "IP: "
printf "\x1B[92m%s\x1B[0m" "$ip_address"
echo "" # fixed newline
a=0;b=0;c=0
display "CPU temp" "$board_temp" "45" "0" "°C" "" ; a=$?
display "HDD temp" "$hdd_temp" "45" "0" "°C" "" ; b=$?
display "Ambient temp" "$amb_temp" "40" "0" "°C" "" ; c=$?
(( ($a+$b+$c) >0 )) && echo "" # new line only if some value is displayed
display "Usage of /" "$root_usage" "90" "1" "%" " of $root_total"
display "storage/" "$storage_usage" "90" "1" "%" " of $storage_total"
display "Battery" "$battery_percent" "20" "1" "%" "$status_battery_text"
echo ""
echo ""

View File

@@ -0,0 +1,2 @@
#!/bin/bash
figlet $(hostname)

View File

@@ -0,0 +1,167 @@
#!/bin/bash
#
# 10-sysinfo - generate the system information
# Copyright (c) 2013 Nick Charlton
#
# Authors: Nick Charlton <hello@nickcharlton.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# define which hard drive you want to monitor
storage=/dev/sda1
function display() {
# $1=name $2=value $3=red_limit $4=minimal_show_limit $5=unit $6=after $7=acs/desc{
# battery red color is opposite, lower number
if [[ "$1" == "Battery" ]]; then local great="<"; else local great=">"; fi
if [[ -n "$2" && "$2" > "0" && (( "${2%.*}" -ge "$4" )) ]]; then
printf "%-14s%s" "$1:"
if awk "BEGIN{exit ! ($2 $great $3)}"; then echo -ne "\e[0;91m $2"; else echo -ne "\e[0;92m $2"; fi
printf "%-1s%s\x1B[0m" "$5"
printf "%-11s%s\t" "$6"
return 1
fi
} # display
#function getboardtemp() {
# if [ -f /etc/armbianmonitor/datasources/soctemp ]; then
# read raw_temp </etc/armbianmonitor/datasources/soctemp
# if [ ${raw_temp} -le 200 ]; then
# # Allwinner legacy kernels output degree C
# board_temp=${raw_temp}
# else
# # Marvell gets special treatment for whatever reasons
# grep -qi Marvell /proc/cpuinfo && \
# board_temp=$(( $(awk '{printf("%d",$1/1000)}' <<<${raw_temp}) - 20 )) || \
# board_temp=$(awk '{printf("%d",$1/1000)}' <<<${raw_temp})
# fi
# elif [ -f /etc/armbianmonitor/datasources/pmictemp ]; then
# # fallback to PMIC temperature
# board_temp=$(awk '{printf("%d",$1/1000)}' </etc/armbianmonitor/datasources/pmictemp)
# fi
#} # getboardtemp
#function batteryinfo() {
# # Battery info for Allwinner
# mainline_dir="/sys/power/axp_pmu"
# legacy_dir="/sys/class/power_supply"
# if [[ -e "$mainline_dir" ]]; then
# read status_battery_connected < $mainline_dir/battery/connected
# if [[ "$status_battery_connected" == "1" ]]; then
# read status_battery_charging < $mainline_dir/charger/charging
# read status_ac_connect < $mainline_dir/ac/connected
# read battery_percent< $mainline_dir/battery/capacity
# # display charging / percentage
# if [[ "$status_ac_connect" == "1" && "$battery_percent" -lt "100" ]]; then
# status_battery_text=" charging"
# elif [[ "$status_ac_connect" == "1" && "$battery_percent" -eq "100" ]]; then
# status_battery_text=" charged"
# else
# status_battery_text=" discharging"
# fi
# fi
# elif [[ -e "$legacy_dir/battery" ]]; then
# if [[ (("$(cat $legacy_dir/battery/voltage_now)" -gt "5" )) ]]; then
# status_battery_text=" "$(awk '{print tolower($0)}' < $legacy_dir/battery/status)
# read battery_percent <$legacy_dir/battery/capacity
# fi
# fi
#} # batteryinfo
#
#function ambienttemp() {
# # read ambient temperature from USB device if available
# amb_temp=$(temper -c 2>/dev/null)
# case ${amb_temp} in
# *"find the USB device"*)
# echo ""
# ;;
# *)
# amb_temp=$(awk '{print $NF}' <<<$amb_temp | sed 's/C//g')
# echo -n "scale=1;${amb_temp}/1" | grep -oE "\-?[[:digit:]]+.[[:digit:]]"
# esac
#} # ambienttemp
function get_ip_addresses() {
# return up to 2 IPv4 address(es) comma separated
hostname -I | tr " " "\n" | \
grep "192.168." | \
tail -n2 | sed ':a;N;$!ba;s/\n/,/g'
} # get_ip_addresses
ip_address=$(get_ip_addresses &)
## storage
#function storage_info() {
# # storage info
# RootInfo=$(df -h /)
# root_usage=$(awk '/\// {print $(NF-1)}' <<<${RootInfo} | sed 's/%//g')
# root_total=$(awk '/\// {print $(NF-4)}' <<<${RootInfo})
# StorageInfo=$(df -h $storage 2>/dev/null | grep $storage)
# if [ -n "${StorageInfo}" ]; then
# storage_usage=$(awk '/\// {print $(NF-1)}' <<<${StorageInfo} | sed 's/%//g')
# storage_total=$(awk '/\// {print $(NF-4)}' <<<${StorageInfo})
# [[ "$storage" == */sd* ]] && hdd_temp=$(hddtemp -u C -nq $storage)
# fi
#} # storage_info
#storage_info
root_usage=`df -h / | awk '/\// {print $(NF-1)}'`
## System info
date=`date`
UptimeString=$(uptime | tr -d ',')
time=$(awk -F" " '{print $3" "$4}' <<<"${UptimeString}")
load="$(awk -F"average: " '{print $2}'<<<"${UptimeString}")"
users="$(awk -F" user" '{print $1}'<<<"${UptimeString}")"
critical_load=$(( 1 + $(grep -c processor /proc/cpuinfo) / 2 ))
processes=`ps aux | wc -l`
## memory and swap
mem_info=$(LANG=en_US.UTF-8 free -w 2>/dev/null | grep "^Mem" || LANG=en_US.UTF-8 free | grep "^Mem")
memory_usage=$(awk '{printf("%.0f",(($2-($4+$6+$7))/$2) * 100)}' <<<${mem_info})
memory_total=$(awk '{printf("%d",$2/1024)}' <<<${mem_info})
swap_info=$(LANG=en_US.UTF-8 free -m | grep "^Swap")
swap_usage=$( (awk '/Swap/ { printf("%3.0f", $3/$2*100) }' <<<${swap_info} 2>/dev/null || echo 0) | tr -c -d '[:digit:]')
swap_total=$(awk '{print $(2)}' <<<${swap_info})
swap_usage=`free -m | awk '/Swap:/ { printf("%3.1f%%", $3/$2*100) }'`
#batteryinfo
#getboardtemp
# DISPLAY
echo "System information as of: $date"
echo
display "System load" "${load%% *}" "${critical_load}" "0" "" "${load#* }"
printf "Up time: \x1B[92m%s\x1B[0m\t\t" "$time"
echo "" # fixed newline
display "Memory usage" "$memory_usage" "70" "0" " %" " of ${memory_total}MB"
display "Swap usage" "$swap_usage" "10" "0" " %" " of ${swap_total}Mb"
echo ""
display "Usage of /" "$root_usage" "90" "1" "%" " of $root_total"
printf "IP: "
printf "\x1B[92m%s\x1B[0m" "$ip_address"
echo ""
display "Local users" "${users##* }" "3" "0" ""
display "Processes" "${processes##* }" "150" "100" ""
echo ""
#a=0;b=0;c=0
#display "CPU temp" "$board_temp" "45" "0" "°C" "" ; a=$?
#display "HDD temp" "$hdd_temp" "45" "0" "°C" "" ; b=$?
#display "Ambient temp" "$amb_temp" "40" "0" "°C" "" ; c=$?
#(( ($a+$b+$c) >0 )) && echo "" # new line only if some value is displayed

View File

@@ -0,0 +1,38 @@
---
# main tasks for setting up motd dynamic shell header
- debug: var=ansible_nodename
- name: Install the motd helper packages
apt:
name: "{{ item }}"
update_cache: yes
cache_valid_time: 3600
state: latest
with_items:
- lsb-release
- figlet
- update-motd
- lm-sensors
when: ansible_architecture != 'armv7l'
- name: remove help text
file:
state: absent
path: "{{ item }}"
with_items:
- /etc/update-motd.d/10-help-text
- /etc/update-motd.d/51-cloudguest
when: ansible_architecture != 'armv7l'
- name: add new info
copy:
src: "{{ item.src }}"
dest: /etc/update-motd.d/{{ item.dest }}
    mode: 0755
with_items:
- { src: hostname.sh, dest: 10-hostname }
- { src: systats.sh, dest: 11-sysstats}
when: ansible_architecture != 'armv7l'

View File

@@ -0,0 +1,4 @@
---
# defaults/main.yml
# define default variable values here

Some files were not shown because too many files have changed in this diff.