Work on xai-corp VM servers

This commit is contained in:
2017-05-07 22:12:33 -04:00
parent 6b106e437f
commit b0c52090a8
32 changed files with 936 additions and 83 deletions

.gitignore vendored
View File

@@ -8,3 +8,5 @@
# backup folder used to fetch files from targets
backups/
.idea/
roles/vendor/

View File

@@ -2,7 +2,7 @@
# playbook for home02
- hosts: dkhost01
- hosts: dkhost02
  remote_user: ansible
  gather_facts: yes
  become: true
@@ -48,10 +48,11 @@
  roles:
    # - _install_updates
    # - Datadog.datadog
    - Datadog.datadog
    - dockerhost
    # - geerlingguy.nginx
    # - certbot
    - docker_registry
    - geerlingguy.nginx
    - certbot
    # - docker_registry
    # - docker_graylog
  post_tasks:

View File

@@ -0,0 +1,3 @@
FROM kyma/docker-nginx
COPY src/ /var/www
CMD 'nginx'
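
A quick local smoke test for this Dockerfile — the xai-www tag is just an illustrative name, and it assumes the kyma/docker-nginx base serves /var/www over port 80 with nginx in the foreground:

    docker build -t xai-www .
    docker run -d -p 8081:80 xai-www
    curl -s http://localhost:8081/ | head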

View File

@@ -0,0 +1,10 @@
version: '2'
services:
  app:
    restart: always
    image: "gitea/gitea:latest"
    volumes:
      - /var/lib/gitea:/data
    ports:
      - "10022:22"
      - "10080:3000"

View File

@@ -0,0 +1,15 @@
---
# DOCKER_HOST=192.168.2.41:2376 docker-compose up -d
version: '2'
services:
  app:
    restart: always
    privileged: true
    image: "jenkins:alpine"
    volumes:
      - /opt/shared/jenkins/home:/var/jenkins_home
    ports:
      - "8080:8080"
      - "5001:5000"

View File

@@ -0,0 +1,18 @@
---
# https://hub.docker.com/r/cptactionhank/atlassian-jira/
# docker run --detach --publish 8080:8080 cptactionhank/atlassian-jira:latest
# DOCKER_HOST=dkhost02:2376 docker-compose up -d
version: '2'
services:
  app:
    restart: always
    privileged: true
    image: "cptactionhank/atlassian-jira:latest"
    # volumes:
    #   - /var/atlassian/jira
    #   - /opt/atlassian/jira/logs
    ports:
      # - "80:80"
      - "8088:8080"

View File

@@ -0,0 +1,14 @@
---
# DOCKER_HOST=192.168.2.41:2376 docker-compose up -d
version: '2'
services:
  app:
    restart: always
    # privileged: true
    image: "orangescrum/official"
    ports:
      - "8085:80"
      # - "80:80"
    entrypoint:
      - start.sh

View File

@@ -0,0 +1,12 @@
version: '2'
services:
  postgres:
    container_name: postgres-9.6
    restart: always
    image: "postgres:9.6-alpine"
    volumes:
      - /opt/shared/postgres/data:/data
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_PASSWORD=alphapass1
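
A connectivity check from a workstation, assuming a psql client is available and the password above is current:

    PGPASSWORD=alphapass1 psql -h 192.168.2.41 -p 5432 -U postgres -c 'SELECT version();'

Note that the official postgres image keeps its data under /var/lib/postgresql/data by default, so the /data mount above only persists the database if PGDATA is pointed at it.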

View File

@@ -0,0 +1,4 @@
FROM nginx:alpine
COPY ./nginx.conf /etc/nginx/nginx.conf
COPY ./host.conf /etc/nginx/conf.d/host.conf

View File

@@ -0,0 +1,7 @@
---
version: '2'
services:
  app:
    restart: always
    volumes:
      - /opt/shared/fileserver:/www/data:ro

View File

@@ -0,0 +1,15 @@
---
version: '2'
services:
  app:
    restart: always
    image: "sslproxy:latest"
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - /etc/letsencrypt:/etc/letsencrypt:ro
    ports:
      # - "80:80"
      - "443:443"

View File

@@ -0,0 +1,93 @@
# fs.xai-corp.net
server {
    listen 443 ssl;
    server_name fs.xai-corp.net;
    ssl_certificate /etc/letsencrypt/live/fs.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/fs.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_pass http://192.168.2.41:8081;
    }
}

# git.xai-corp.net
server {
    listen 443 ssl;
    server_name git.xai-corp.net docker.dev;
    ssl_certificate /etc/letsencrypt/live/git.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_pass http://192.168.2.41:10080;
    }
}

# jenkins.xai-corp.net
server {
    listen 443 ssl;
    server_name jenkins.xai-corp.net docker.dev;
    ssl_certificate /etc/letsencrypt/live/jenkins.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/jenkins.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_pass http://192.168.2.41:8080;
    }
}

# dkui.xai-corp.net
server {
    listen 443 ssl;
    server_name dkui.xai-corp.net docker.dev;
    ssl_certificate /etc/letsencrypt/live/dkui.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/dkui.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_set_header Connection $http_connection;
        proxy_pass http://192.168.2.41:9000;
    }
}

# sql.xai-corp.net
server {
    listen 443 ssl;
    server_name sql.xai-corp.net docker.dev;
    ssl_certificate /etc/letsencrypt/live/sql.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/sql.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_pass http://192.168.2.41:9000;
    }
}

# www.xai-corp.net
server {
    listen 443 ssl;
    server_name www.xai-corp.net xai-corp.net docker.dev;
    ssl_certificate /etc/letsencrypt/live/www.xai-corp.net/cert.pem;
    ssl_certificate_key /etc/letsencrypt/live/www.xai-corp.net/privkey.pem;
    # Strict-Transport-Security: max-age=15768000
    add_header Strict-Transport-Security "max-age=600; includeSubDomains" always;
    location / {
        proxy_pass http://192.168.2.41:9000;
    }
}
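
Each vhost above terminates TLS and proxies to a published container port on 192.168.2.41. One way to spot-check a vhost before DNS is updated, assuming the proxy itself also listens on that address:

    curl -skI --resolve git.xai-corp.net:443:192.168.2.41 https://git.xai-corp.net/ | head -n 5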

View File

@@ -0,0 +1,32 @@
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}

View File

@@ -0,0 +1 @@
something else.

View File

@@ -0,0 +1,15 @@
---
# DOCKER_HOST=192.168.2.41:2376 docker-compose up -d
version: '2'
services:
  app:
    restart: always
    privileged: true
    image: "portainer/portainer"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      # - "80:80"
      - "9000:9000"

View File

@@ -27,6 +27,8 @@
  roles:
    - Datadog.datadog
    # - Datadog.datadog
    # - dockerhost
    - dynamic-ip
  post_tasks:

View File

@@ -1,8 +1,8 @@
# playbook for home02
# playbook for all managed hosts
- hosts: managed
  # remote_user: anansibsible
  # remote_user: ansible
  gather_facts: no
  become: true
@@ -26,7 +26,9 @@
    - setup: # aka gather_facts
  roles:
    - _install_updates
    - novuso.users
    - Datadog.datadog
  tasks:
    - name: add ansible to sudoers

ns.xai-corp.net.yml Normal file
View File

@@ -0,0 +1,38 @@
---
# playbook for home02
- hosts: ns
  remote_user: ansible
  gather_facts: yes
  become: true
  vars:
    datadog_api_key: ca0faf176c4aedd4f547ed7cf85615eb
    datadog_checks:
      system:
        init_config: []
        instances: []
      disk:
        init_config:
        instances:
          - use_mount: yes
            excluded_filesystems:
              - sysfs
              - cgroup
              - tracefs
              - debugfs
              - proc
              - securityfs
            excluded_mountpoint_re: /(media/richard|run/user).*
  roles:
    - _install_updates
    - Datadog.datadog
    - ns.xai-corp.net
    # - td-agent-bit
  post_tasks:
    # - name: check service is up
    #   service: name={{ bind.service }} state=started

View File

@@ -22,6 +22,12 @@
    - www.xai-corp.net
    - dkregistry.xai-corp.net
    - sql.xai-corp.net
    - fs.xai-corp.net
    - dkhost.xai-corp.net
    - git.xai-corp.net
    - dkui.xai-corp.net
    - jenkins.xai-corp.net
    - logs.xai-corp.net
- name: cron job for renewing certs
  cron:

View File

@@ -0,0 +1,30 @@
version: '2'
services:
  mongo:
    restart: always
    image: "mongo:3"
    volumes:
      - /opt/shared/graylog/data/mongo:/data/db
  elasticsearch:
    restart: always
    image: "elasticsearch:2"
    command: "elasticsearch -Des.cluster.name='graylog'"
    volumes:
      - /opt/shared/graylog/data/elasticsearch:/usr/share/elasticsearch/data
  graylog:
    restart: always
    image: graylog2/server:2.1.0-3
    volumes:
      - /opt/shared/graylog/data/journal:/usr/share/graylog/data/journal
      - /opt/shared/graylog/config:/usr/share/graylog/data/config
    environment:
      GRAYLOG_PASSWORD_SECRET: gsahu1dj901hdaiuafg3g1q
      GRAYLOG_ROOT_PASSWORD_SHA2: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
      GRAYLOG_WEB_ENDPOINT_URI: http://192.168.2.41:9000/api/
    depends_on:
      - mongo
      - elasticsearch
    ports:
      - "9000:9000"
- "12201/udp:12201/udp"
- "1514/udp:1514/udp"

View File

@@ -0,0 +1,442 @@
# If you are running more than one instance of Graylog server you have to select one of these
# instances as master. The master will perform some periodical tasks that non-masters won't perform.
is_master = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting Graylog server from init scripts or similar.
node_id_file = /usr/share/graylog/data/config/node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -N 1 -s 96
password_secret = replacethiswithyourownsecret!
# The default root user is named 'admin'
#root_username = admin
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend)
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
# The email address of the root user.
# Default is empty
#root_email = ""
# The time zone setting of the root user.
# The configured time zone must be parseable by http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html#forID-java.lang.String-
# Default is UTC
#root_timezone = UTC
# Set plugin directory here (relative or absolute)
plugin_dir = /usr/share/graylog/plugin
# REST API listen URI. Must be reachable by other Graylog server nodes if you run a cluster.
# When using Graylog Collectors, this URI will be used to receive heartbeat messages and must be accessible for all collectors.
rest_listen_uri = http://0.0.0.0:9000/api/
# REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
# is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
# If set, this will be promoted in the cluster discovery APIs, so other nodes may try to connect on
# this address and it is used to generate URLs addressing entities in the REST API. (see rest_listen_uri)
# You will need to define this, if your Graylog server is running behind a HTTP proxy that is rewriting
# the scheme, host name or URI.
#rest_transport_uri = http://192.168.1.1:9000/api/
# Enable CORS headers for REST API. This is necessary for JS-clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
# This is enabled by default. Uncomment the next line to disable it.
#rest_enable_cors = false
# Enable GZIP support for REST API. This compresses API responses and therefore helps to reduce
# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
#rest_enable_gzip = true
# Enable HTTPS support for the REST API. This secures the communication with the REST API with
# TLS to prevent request forgery and eavesdropping. This is disabled by default. Uncomment the
# next line to enable it.
#rest_enable_tls = true
# The X.509 certificate file to use for securing the REST API.
#rest_tls_cert_file = /path/to/graylog.crt
# The private key to use for securing the REST API.
#rest_tls_key_file = /path/to/graylog.key
# The password to unlock the private key used for securing the REST API.
#rest_tls_key_password = secret
# The maximum size of the HTTP request headers in bytes.
#rest_max_header_size = 8192
# The maximal length of the initial HTTP/1.1 line in bytes.
#rest_max_initial_line_length = 4096
# The size of the thread pool used exclusively for serving the REST API.
#rest_thread_pool_size = 16
# Enable the embedded Graylog web interface.
# Default: true
#web_enable = false
# Web interface listen URI
web_listen_uri = http://0.0.0.0:9000/
# Enable CORS headers for the web interface. This is necessary for JS-clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
web_enable_cors = true
# Enable/disable GZIP support for the web interface. This compresses HTTP responses and therefore helps to reduce
# overall round trip times. This is enabled by default. Uncomment the next line to disable it.
#web_enable_gzip = false
# Enable HTTPS support for the web interface. This secures the communication of the web browser with the web interface
# using TLS to prevent request forgery and eavesdropping.
# This is disabled by default. Uncomment the next line to enable it and see the other related configuration settings.
#web_enable_tls = true
# The X.509 certificate file to use for securing the web interface.
#web_tls_cert_file = /path/to/graylog-web.crt
# The private key to use for securing the web interface.
#web_tls_key_file = /path/to/graylog-web.key
# The password to unlock the private key used for securing the web interface.
#web_tls_key_password = secret
# The maximum size of the HTTP request headers in bytes.
#web_max_header_size = 8192
# The maximal length of the initial HTTP/1.1 line in bytes.
#web_max_initial_line_length = 4096
# The size of the thread pool used exclusively for serving the web interface.
#web_thread_pool_size = 16
# Embedded Elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
# elasticsearch_config_file = /usr/share/graylog/data/config/elasticsearch.yml
# Graylog will use multiple indices to store documents in. You can configure the strategy it uses to determine
# when to rotate the currently active write index.
# It supports multiple rotation strategies:
# - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure
# - "size" per index, use elasticsearch_max_size_per_index below to configure
# valid values are "count", "size" and "time", default is "count"
rotation_strategy = count
# (Approximate) maximum number of documents in an Elasticsearch index before a new index
# is being created, also see no_retention and elasticsearch_max_number_of_indices.
# Configure this if you used 'rotation_strategy = count' above.
elasticsearch_max_docs_per_index = 20000000
# (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 1GB.
# Configure this if you used 'rotation_strategy = size' above.
#elasticsearch_max_size_per_index = 1073741824
# (Approximate) maximum time before a new Elasticsearch index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 1 day.
# Configure this if you used 'rotation_strategy = time' above.
# Please note that this rotation period does not look at the time specified in the received messages, but is
# using the real clock value to decide when to rotate the index!
# Specify the time using a duration and a suffix indicating which unit you want:
# 1w = 1 week
# 1d = 1 day
# 12h = 12 hours
# Permitted suffixes are: d for day, h for hour, m for minute, s for second.
#elasticsearch_max_time_per_index = 1d
# Disable checking the version of Elasticsearch for being compatible with this Graylog release.
# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss!
#elasticsearch_disable_version_check = true
# Disable message retention on this node, i. e. disable Elasticsearch index rotation.
#no_retention = false
# How many indices do you want to keep?
elasticsearch_max_number_of_indices = 20
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
# - delete # Deletes the index completely (Default)
# - close # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = delete
# How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 4
elasticsearch_replicas = 0
# Prefix for all Elasticsearch indices and index aliases managed by Graylog.
elasticsearch_index_prefix = graylog
# Name of the Elasticsearch index template used by Graylog to apply the mandatory index mapping.
# # Default: graylog-internal
#elasticsearch_template_name = graylog-internal
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: https://www.graylog.org/documentation/general/queries/
allow_leading_wildcard_searches = true
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your Elasticsearch cluster has enough memory.
allow_highlighting = true
# settings to be passed to elasticsearch's client (overriding those in the provided elasticsearch_config_file)
# all these
# this must be the same as for your Elasticsearch cluster
elasticsearch_cluster_name = graylog
# The prefix being used to generate the Elasticsearch node name which makes it easier to identify the specific Graylog
# server running the embedded Elasticsearch instance. The node name will be constructed by concatenating this prefix
# and the Graylog node ID (see node_id_file), for example "graylog-17052010-1234-5678-abcd-1337cafebabe".
# Default: graylog-
#elasticsearch_node_name_prefix = graylog-
# we don't want the Graylog server to store any data, or be master node
#elasticsearch_node_master = false
#elasticsearch_node_data = false
# use a different port if you run multiple Elasticsearch nodes on one machine
elasticsearch_transport_tcp_port = 9350
# we don't need to run the embedded HTTP server here
elasticsearch_http_enabled = false
elasticsearch_discovery_zen_ping_multicast_enabled = false
elasticsearch_discovery_zen_ping_unicast_hosts = elasticsearch:9300
# Change the following setting if you are running into problems with timeouts during Elasticsearch cluster discovery.
# The setting is specified in milliseconds, the default is 5000ms (5 seconds).
#elasticsearch_cluster_discovery_timeout = 5000
# the following settings allow to change the bind addresses for the Elasticsearch client in Graylog
# these settings are empty by default, letting Elasticsearch choose automatically,
# override them here or in the 'elasticsearch_config_file' if you need to bind to a special address
# refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html
# for special values here
elasticsearch_network_host = 0.0.0.0
#elasticsearch_network_bind_host =
#elasticsearch_network_publish_host =
# The total amount of time discovery will look for other Elasticsearch nodes in the cluster
# before giving up and declaring the current node master.
#elasticsearch_discovery_initial_state_timeout = 3s
# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# Elasticsearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard
# Global request timeout for Elasticsearch requests (e. g. during search, index creation, or index time-range
# calculations) based on a best-effort to restrict the runtime of Elasticsearch operations.
# Default: 1m
#elasticsearch_request_timeout = 1m
# Time interval for index range information cleanups. This setting defines how often stale index range information
# is being purged from the database.
# Default: 1h
#index_ranges_cleanup_interval = 1h
# Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
# module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
# that every outputbuffer processor manages its own batch and performs its own batch write calls.
# ("outputbuffer_processors" variable)
output_batch_size = 500
# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
# batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
# for this time period is less than output_batch_size * outputbuffer_processors.
output_flush_interval = 1
# As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and
# over again. To prevent this, the following configuration options define after how many faults an output will
# not be tried again for an also configurable amount of seconds.
output_fault_count_threshold = 5
output_fault_penalty_seconds = 30
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 3
#outputbuffer_processor_keep_alive_time = 5000
#outputbuffer_processor_threads_core_pool_size = 3
#outputbuffer_processor_threads_max_pool_size = 30
# UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
#udp_recvbuffer_sizes = 1048576
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
# - yielding
# Compromise between performance and CPU usage.
# - sleeping
# Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
# - blocking
# High throughput, low latency, higher CPU usage.
# - busy_spinning
# Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 65536
inputbuffer_ring_size = 65536
inputbuffer_processors = 2
inputbuffer_wait_strategy = blocking
# Enable the disk based message journal.
message_journal_enabled = true
# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and
# must not contain any other files than the ones created by Graylog itself.
message_journal_dir = /usr/share/graylog/data/journal
# The journal holds messages before they can be written to Elasticsearch.
# For a maximum of 12 hours or 5 GB whichever happens first.
# During normal operation the journal will be smaller.
#message_journal_max_age = 12h
#message_journal_max_size = 5gb
#message_journal_flush_age = 1m
#message_journal_flush_interval = 1000000
#message_journal_segment_age = 1h
#message_journal_segment_size = 100mb
# Number of threads used exclusively for dispatching internal events. Default is 2.
#async_eventbus_processors = 2
# How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status checking load balancers in front.
lb_recognition_period_seconds = 3
# Every message is matched against the configured streams and it can happen that a stream contains rules which
# take an unusual amount of time to run, for example if it's using regular expressions that perform excessive backtracking.
# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
# streams, Graylog limits the execution time for each stream.
# The default values are noted below, the timeout is in milliseconds.
# If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times
# that stream is disabled and a notification is shown in the web interface.
#stream_processing_timeout = 2000
#stream_processing_max_faults = 3
# Length of the interval in seconds in which the alert conditions for all streams should be checked
# and alarms are being sent.
#alert_check_interval = 60
# Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple
# outputs. The next setting defines the timeout for a single output module, including the default output module where all
# messages end up.
#
# Time in milliseconds to wait for all message outputs to finish writing a single message.
#output_module_timeout = 10000
# Time in milliseconds after which a detected stale master node is being rechecked on startup.
#stale_master_timeout = 2000
# Time in milliseconds which Graylog is waiting for all threads to stop on shutdown.
#shutdown_timeout = 30000
# MongoDB connection string
# See http://docs.mongodb.org/manual/reference/connection-string/ for details
mongodb_uri = mongodb://mongo/graylog
# Authenticate against the MongoDB server
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog
# Use a replica set instead of a single host
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog
# Increase this value according to the maximum connections your MongoDB server can handle from a single client
# if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5,
# then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
# Drools Rule File (Use to rewrite incoming log messages)
# See: https://www.graylog.org/documentation/general/rewriting/
#rules_file = /etc/graylog/server/rules.drl
# Email transport
#transport_email_enabled = false
#transport_email_hostname = mail.example.com
#transport_email_port = 587
#transport_email_use_auth = true
#transport_email_use_tls = true
#transport_email_use_ssl = true
#transport_email_auth_username = you@example.com
#transport_email_auth_password = secret
#transport_email_subject_prefix = [graylog]
#transport_email_from_email = graylog@example.com
# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users.
#transport_email_web_interface_url = https://graylog.example.com
# The default connect timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 5s
#http_connect_timeout = 5s
# The default read timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_read_timeout = 10s
# The default write timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_write_timeout = 10s
# HTTP proxy for outgoing HTTP connections
#http_proxy_uri =
# Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is to optimize
# cycled indices.
#disable_index_optimization = true
# Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is 1.
#index_optimization_max_num_segments = 1
# The threshold of the garbage collection runs. If GC runs take longer than this threshold, a system notification
# will be generated to warn the administrator about possible problems with the system. Default is 1 second.
#gc_warning_threshold = 1s
# Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
#ldap_connection_timeout = 2000
# Enable collection of Graylog-related metrics into MongoDB
# WARNING: This will add *a lot* of data into your MongoDB database on a regular interval (1 second)!
# DEPRECATED: This setting and the respective feature will be removed in a future version of Graylog.
#enable_metrics_collection = false
# Disable the use of SIGAR for collecting system stats
#disable_sigar = false
# The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second)
#dashboard_widget_default_cache_time = 10s
# Automatically load content packs in "content_packs_dir" on the first start of Graylog.
content_packs_loader_enabled = true
# The directory which contains content packs which should be loaded on the first start of Graylog.
content_packs_dir = /usr/share/graylog/data/contentpacks
# A comma-separated list of content packs (files in "content_packs_dir") which should be applied on
# the first start of Graylog.
content_packs_auto_load = grok-patterns.json

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration packages="org.graylog2.log4j">
    <Appenders>
        <Console name="STDOUT" target="SYSTEM_OUT">
            <PatternLayout pattern="%d %-5p: %c - %m%n"/>
        </Console>
        <!-- Internal Graylog log appender. Please do not disable. This makes internal log messages available via REST calls. -->
        <Memory name="graylog-internal-logs" bufferSize="500"/>
    </Appenders>
    <Loggers>
        <!-- Application Loggers -->
        <Logger name="org.graylog2" level="info"/>
        <Logger name="com.github.joschi.jadconfig" level="warn"/>
        <!-- this emits a harmless warning for ActiveDirectory every time which we can't work around :( -->
        <Logger name="org.apache.directory.api.ldap.model.message.BindRequestImpl" level="error"/>
        <!-- Prevent DEBUG message about Lucene Expressions not found. -->
        <Logger name="org.elasticsearch.script" level="warn"/>
        <!-- Disable messages from the version check -->
        <Logger name="org.graylog2.periodical.VersionCheckThread" level="off"/>
        <!-- Suppress crazy byte array dump of Drools -->
        <Logger name="org.drools.compiler.kie.builder.impl.KieRepositoryImpl" level="warn"/>
        <!-- Silence chatty natty -->
        <Logger name="com.joestelmach.natty.Parser" level="warn"/>
        <!-- Silence Kafka log chatter -->
        <Logger name="kafka.log.Log" level="warn"/>
        <Logger name="kafka.log.OffsetIndex" level="warn"/>
        <Root level="warn">
            <AppenderRef ref="STDOUT"/>
            <AppenderRef ref="graylog-internal-logs"/>
        </Root>
    </Loggers>
</Configuration>

View File

@@ -0,0 +1,35 @@
---
# main tasks for running graylog on a docker host
# create folders for certs, data,
- name: create data folders (/opt/shared/graylog)
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: docker
    mode: 0777
  with_items:
    - /opt/shared/graylog/config
    - /opt/shared/graylog/data/config
    - /opt/shared/graylog/data/elasticsearch
    - /opt/shared/graylog/data/journal
    - /opt/shared/graylog/data/mongo

- name: install default config files
  copy:
    src: "{{ item }}"
    dest: "/opt/shared/graylog/config/{{ item }}"
  with_items:
    - graylog.conf
    - log4j2.xml

- name: copy docker-compose file
  copy:
    src: docker-compose.yml
    dest: /opt/shared/graylog/docker-compose.yml

- name: run docker up
  shell: "docker-compose down && docker-compose up -d"
  args:
    chdir: /opt/shared/graylog
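
Once the play has run, the stack's state can be checked from the workstation; this assumes the graylog host exposes its daemon on 2376 like the other docker hosts in this commit, and uses the web endpoint URI from the compose file above:

    DOCKER_HOST=192.168.2.41:2376 docker ps
    curl -s http://192.168.2.41:9000/api/system/lbstatus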

View File

@@ -4,16 +4,17 @@ registry:
  ports:
    - 5000:5000
  environment:
    REGISTRY_HTTP_SECRET: aabuioqlwlcpp2
    # REGISTRY_HTTP_TLS_CERTIFICATE: /certs/cert.pem
    # REGISTRY_HTTP_TLS_KEY: /certs/privkey.pem
    # REGISTRY_HTTP_LETSENCRYPT_CACHEFILE:
    REGISTRY_HTTP_LETSENCRYPT_CACHEFILE: /var/run/letsencrypt.cache
    REGISTRY_HTTP_LETSENCRYPT_EMAIL: r_morgan@sympatico.ca
    REGISTRY_HTTP_HOST: https://192.168.2.41:5000
    # REGISTRY_HTTP_HOST: https://192.168.2.41:5000
    # REGISTRY_HTTP_ADDR: 192.168.2.41:5000
    # REGISTRY_AUTH: htpasswd
    # REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    # REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
    REGISTRY_AUTH: htpasswd
    REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
  volumes:
    - /opt/dkregistry/data:/var/lib/registry
    - /opt/shared/dkregistry/data:/var/lib/registry
    - /etc/letsencrypt/live/dkregistry.xai-corp.net:/certs
    - /opt/dkregistry/auth:/auth
    - /opt/shared/dkregistry/auth:/auth

View File

@@ -2,7 +2,10 @@
# Main task for creating a docker registry
- name: clean up old config
  command: "rm -rf /opt/dkrepository"
  command: "{{ item }}"
  with_items:
    - "rm -rf /opt/dkrepository"
    - "rm -rf /opt/shared/dkrepository/auth"
# create folders for certs, data,
- name: create data folders (/opt/dkregistry)
@@ -13,15 +16,15 @@
    group: docker
    mode: 0770
  with_items:
    - /opt/dkregistry/data
    - /opt/dkregistry/auth
    - /opt/shared/dkregistry/data
    - /opt/shared/dkregistry/auth
# make auth files using docker container
- name: create auth file
  shell: echo '' > /opt/dkregistry/auth/htpasswd
  shell: echo '' > /opt/shared/dkregistry/auth/htpasswd
- name: add user to auth file
  shell: "docker run --entrypoint htpasswd registry:2 -Bbn {{ item.name }} {{ item.pass }} >> /opt/dkregistry/auth/htpasswd"
  shell: "docker run --entrypoint htpasswd registry:2 -Bbn {{ item.name }} {{ item.pass }} >> /opt/shared/dkregistry/auth/htpasswd"
  with_items:
    - { "name" : "richard", "pass" : "richard" }
    - { "name" : "testuser", "pass" : "testpassword" }
@@ -32,6 +35,6 @@
    dest: /opt/dkregistry/docker-compose.yml
- name: run docker up
  shell: "docker-compose up -d"
  shell: "docker-compose down && docker-compose create && docker-compose start"
  args:
    chdir: /opt/dkregistry
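
A sketch of how a client exercises the htpasswd-protected registry after this play runs, using the testuser credentials seeded above:

    docker login -u testuser -p testpassword 192.168.2.41:5000
    docker tag alpine:latest 192.168.2.41:5000/alpine:test
    docker push 192.168.2.41:5000/alpine:test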

View File

@@ -1,6 +1,5 @@
{
  "insecure-registries": [
    "dkregistry.xai-corp.net:5000",
    "192.168.2.41:5000"
  ],
  "dns": [

View File

@@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2376
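
Applying the drop-in requires a daemon-reload plus a docker restart (the role notifies the restart handler below); a manual check that the TCP socket answers might look like:

    sudo systemctl daemon-reload
    sudo systemctl restart docker
    docker -H tcp://192.168.2.41:2376 info

Note that 2376 is conventionally the TLS-secured port; serving plain TCP on it without --tlsverify leaves the daemon open to anyone who can reach the host.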

View File

@@ -0,0 +1,7 @@
---
# handlers for docker services
- name: restart docker
  service:
    name: docker
    state: restarted

View File

@@ -12,29 +12,19 @@
- "apt-transport-https"
- "ca-certificates"
#- name: run docker install script
# command: "wget -qO- https://get.docker.com/ | sh"
# args:
# creates: /usr/bin/docker
#- name: install repo keys
# apt_key:
# keyserver: hkp://ha.pool.sks-keyservers.net:80
# id: 58118E89F3A912897C070ADBF76221572C52609D
#- stat:
# path: /usr/bin/docker
# register: docker
#
#- debug: var=docker
- name: install apt repo
apt_repository:
repo: deb https://apt.dockerproject.org/repo ubuntu-xenial main
state: present
- name: install prerequisits
shell: apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
#- name: download install script
# get_url:
# url: https://get.docker.com/
# dest: /tmp/docker_install.sh
# mode: 500
# when: docker.stat.exists == false
#
#- name: run install script
# script: /tmp/docker_install.sh
# args:
# creates: /usr/bin/docker
# when: docker.stat.exists == false
- name: create docker group
group: state=present name=docker gid=999 system=yes
@@ -46,41 +36,52 @@
- name: install via apt
  apt:
    state: latest
    update_cache: true
    package: "{{ item }}"
  with_items:
    - docker-engine
    - docker-compose
    - virtualbox-guest-dkms
    # - virtualbox-guest-dkms
#
#- name: copy docker config file
#  copy:
#    src: daemon.json
#    dest: /etc/docker/daemon.json
#
#- name: create mount points for shares
#  file:
#    state: absent
#    mode: 0774
#    group: docker
#    dest: /opt/shared
#
#- name: create mount points for shares
#  file:
#    state: link
#    mode: 0774
#    group: docker
#    src: /media/sf_dkhost
#    dest: /opt/shared
- name: copy docker config file
  copy:
    src: daemon.json
    dest: /etc/docker/daemon.json
- name: create mount points for shares
## install docker-compose
- name: install docker-compose from git repo
  shell: |
    curl -L https://github.com/docker/compose/releases/download/1.9.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
    chmod +x /usr/local/bin/docker-compose
## expose the docker daemon on tcp
- name: expose docker daemon
  file:
    state: directory
    dest: /mnt/dkhost
    path: /etc/systemd/system/docker.service.d
# vbox shares are mounted in home folders of users with vboxsf group
#- name: add root to vboxsf group
#  user: name={{ item }} groups=vboxsf append=yes
#  with_items: "{{ dockerhost.users }}"
- name: expose docker daemon
  copy:
    src: docker.service.host.conf
    dest: /etc/systemd/system/docker.service.d/host.conf
  notify:
    - restart docker
- name: setup host shares in fstab
  lineinfile:
    state: present
    dest: /etc/fstab
    insertafter: EOF
    regexp: '^#?dkhost '
    line: 'dkhost /mnt/dkhost vboxsf noauto 0 0'
- name: mount host shares in rc.local
  lineinfile:
    state: present
    dest: /etc/rc.local
    insertbefore: 'exit 0'
    regexp: '^#?mount dkhost'
    line: 'mount dkhost'
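
With noauto in fstab, the share is only attached by the rc.local hook or by hand; a manual verification on the VM, assuming the VirtualBox shared folder is actually named dkhost:

    sudo mount dkhost
    mount | grep dkhost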

View File

@@ -0,0 +1,10 @@
---
# create cronjob to set dynamic ip
- name: create zone edit cronjob
  cron:
    name: zoneedit
    minute: "*/30"
    user: root
    job: 'IP=`curl -s http://api.ipify.org` && wget -O - --http-user=rmorgan15 --http-passwd=D422B334D3768ACD "https://dynamic.zoneedit.com/auth/dynamic.html?host=test.xai-corp.net&dnsto=$IP" &>/dev/null'
    cron_file: zoneedit
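
cron_file: zoneedit places the entry in /etc/cron.d/zoneedit, so the result can be inspected, and the IP-lookup half of the job tried by hand, with:

    cat /etc/cron.d/zoneedit
    IP=$(curl -s http://api.ipify.org) && echo "would publish $IP for test.xai-corp.net"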

View File

@@ -11,18 +11,7 @@ xai-corp.net. IN MX 0 mail.xai-corp.net.
xai-corp.net. IN TXT "v=spf1 ip4:192.168.2.11/32 mx ptr mx:mail.xai-corp.net ~all"
ns IN A 192.168.2.22
mail IN A 192.168.2.11
getafix IN CNAME ns
test IN CNAME ns
home IN CNAME ns
tunedb IN CNAME ns
sql IN CNAME ns
jenkins IN CNAME ns
cik IN CNAME ns
phonecat IN CNAME ns
blog IN CNAME ns
www IN A 208.94.116.179
IN A 208.94.116.21
IN A 208.94.117.26
gateway IN A 192.168.2.1
wireless IN A 192.168.2.3
printer IN A 192.168.2.13
@@ -32,6 +21,16 @@ tv IN A 192.168.2.16
xaicorp1 IN A 192.168.2.103
garden IN A 192.168.2.20
fs IN A 192.168.2.41
git IN A 192.168.2.41
jenkins IN A 192.168.2.41
home IN A 192.168.2.11
home02 IN A 192.168.2.22
dkhost01 IN A 192.168.2.41
dkregistry IN A 192.168.2.41
dkui IN A 192.168.2.41
sql IN A 192.168.2.41
logs IN A 192.168.2.42
dkhost02 IN A 192.168.2.43