Fixing bind9 config

cubox-m still doesn't respond to outside requests
This commit is contained in:
2022-08-25 08:03:45 -04:00
parent 47dfd8aa22
commit 20d18ec966
26 changed files with 350 additions and 350 deletions

View File

@@ -3,7 +3,6 @@
- hosts: gfs - hosts: gfs
remote_user: ansible
gather_facts: no gather_facts: no
become: true become: true

View File

@@ -6,13 +6,14 @@ cubox-i ansible_ssh_host=192.168.4.12
cubox-m ansible_ssh_host=192.168.4.15 cubox-m ansible_ssh_host=192.168.4.15
[ns] [ns]
home ansible_ssh_host=192.168.4.11 ;home ansible_ssh_host=192.168.4.11
cubox-i ansible_ssh_host=192.168.4.12 cubox-i ansible_ssh_host=192.168.4.12
cubox-m ansible_ssh_host=192.168.4.15 cubox-m ansible_ssh_host=192.168.4.15
[gfs] [gfs]
;home ansible_ssh_host=192.168.4.11 ;home ansible_ssh_host=192.168.4.11
cubox-i ansible_ssh_host=192.168.4.12 cubox-i ansible_ssh_host=192.168.4.12
cubox-m ansible_ssh_host=192.168.4.15
[kube] [kube]
home ansible_ssh_host=192.168.4.11 home ansible_ssh_host=192.168.4.11

View File

@@ -15,4 +15,5 @@
- acl - acl
- bind9-dnsutils - bind9-dnsutils
- iputils-ping - iputils-ping
- gpg
when: ansible_os_family == "Debian" when: ansible_os_family == "Debian"

View File

@@ -1,5 +1,6 @@
127.0.0.1 localhost 127.0.0.1 localhost
127.0.1.1 {{inventory_hostname}} 127.0.0.1 {{inventory_hostname}}
127.0.0.1 {{inventory_hostname}}.xai-corp.net
# The following lines are desirable for IPv6 capable hosts # The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback ::1 localhost ip6-localhost ip6-loopback

View File

@@ -1,16 +1,29 @@
--- ---
# main tasks for installing glusterfs # main tasks for installing glusterfs
- name: add the glusterfs repo
ansible.builtin.lineinfile:
path: /etc/apt/sources.list.d/gluster.list
state: present
create: yes
owner: root
group: root
mode: 644
line: deb [arch=arm64] https://download.gluster.org/pub/gluster/glusterfs/6/6.9/Debian/bullseye/arm64/apt bullseye main
- name: Add an Apt signing key, uses whichever key is at the URL
ansible.builtin.apt_key:
url: https://download.gluster.org/pub/gluster/glusterfs/6/rsa.pub
state: present
# install packages # install packages
- name: Ensure glusterfs server is installed. - name: Ensure glusterfs server is installed.
apt: ansible.builtin.apt:
name: "{{ item }}" name: "{{ item }}"
update_cache: yes update_cache: yes
cache_valid_time: 3600 cache_valid_time: 3600
state: latest state: present
with_items: with_items:
- glusterfs-server - glusterfs-server=6.9
- xfsprogs - xfsprogs
- xfsdump - xfsdump
@@ -26,4 +39,3 @@
ansible.builtin.service: ansible.builtin.service:
name: glusterfs-server name: glusterfs-server
state: started state: started
when: ansible_architecture == 'armv7l'

View File

@@ -0,0 +1,64 @@
---
# main tasks for installing glusterfs
- name: add the glusterfs repo
ansible.builtin.lineinfile:
path: /etc/apt/sources.list.d/gluster.list
state: present
create: yes
owner: root
group: root
mode: 644
regexp: deb-src
line: deb-src https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/bullseye/amd64/apt bullseye main
- name: make a build dir
ansible.builtin.file:
path: src/debian
state: directory
mode: '0755'
- name: Add an Apt signing key, uses whichever key is at the URL
ansible.builtin.apt_key:
url: https://download.gluster.org/pub/gluster/glusterfs/6/rsa.pub
state: present
- name: Ensure build tools are installed.
ansible.builtin.apt:
name: "{{ item }}"
state: present
with_items:
- dpkg-dev
- name: Ensure build deps are installed.
ansible.builtin.apt:
name: "{{ item }}"
state: build-dep
with_items:
- glusterfs-server
# install packages
#- name: Ensure glusterfs server is installed.
# ansible.builtin.apt:
# name: "{{ item }}"
# update_cache: yes
# cache_valid_time: 3600
# state: present
# with_items:
# - glusterfs-server=6.9
# - xfsprogs
# - xfsdump
#
#- name: Start service gluster, if not started
# block:
## - name: start on home
## ansible.builtin.service:
## name: glusterd
## state: started
## when: "ansible_lsb.major_release >= '20'"
#
# - name: start on cubox-i
# ansible.builtin.service:
# name: glusterfs-server
# state: started

View File

@@ -1,14 +1,13 @@
--- ---
- name: install on ubuntu #- name: install on ubuntu
ansible.builtin.include_role: # ansible.builtin.include_role:
name: geerlingguy.glusterfs # name: geerlingguy.glusterfs
when: ansible_architecture != 'armv7l' # when: ansible_architecture != 'armv7l'
- include_tasks: install.yaml - include_tasks: install_src.yaml
when: ansible_architecture == 'armv7l'
- include_tasks: manage_volumes.yml #- include_tasks: manage_volumes.yml
## doesn't work ## doesn't work
#- name: "check the underlying config" #- name: "check the underlying config"

View File

@@ -5,10 +5,14 @@
bind: bind:
user: root user: root
group: bind group: bind
service: bind9 service: named
zonefiles: zonefiles:
- xai-corp.net.internal - xai-corp.net.internal
- localhost.zone
- xai-corp.net.external - xai-corp.net.external
- xai-corp.net.reverse - xai-corp.net.reverse
namedfiles:
- named.conf.local
- named.conf.options
- named.conf.default-zones
cleanup:
- /etc/bind/db.localhost.zone

View File

@@ -0,0 +1,77 @@
---
# install and configure bind9/named
- name: install bind package
apt:
name: "{{ item }}"
# update_cache: yes
# cache_valid_time: 86400
state: latest
with_items:
- bind9
- name: set correct permissions for logging
file:
state=directory
path=/var/log/named/
recurse=yes
owner={{ bind.user }}
group={{ bind.group }}
mode=0777
- name: set correct permissions on cache
file:
state: directory
path: /var/cache/bind/
owner: "{{ bind.user }}"
group: "{{ bind.group }}"
recurse: yes
- name: copy zone files to /etc/bind/
template:
src: "{{ item }}.j2"
dest: /etc/bind/db.{{ item }}
owner: "{{ bind.user }}"
group: "{{ bind.group }}"
mode: 0644
with_items: "{{ bind.zonefiles }}"
notify:
- restart bind
- name: test zone files
command: named-checkzone {{ item }}
changed_when: false
with_items:
- xai-corp.net /etc/bind/db.xai-corp.net.internal
- 4.168.192.IN-ADDR.ARPA. /etc/bind/db.xai-corp.net.reverse
- name: copy named.confs to /etc/bind/
template: src={{ item }}.j2 dest=/etc/bind/{{ item }} owner={{ bind.user }} group={{ bind.group }} mode=0640
with_items: "{{ bind.namedfiles }}"
notify:
- restart bind
- name: test config files
command: named-checkconf /etc/bind/{{ item }}
changed_when: false
with_items: "{{ bind.namedfiles }}"
- name: set named startup options
ansible.builtin.copy:
content: OPTIONS="-d1"
dest: /etc/default/named
- name: set permissions on rndc.key
ansible.builtin.file:
mode: 644
path: /etc/bind/rndc.key
- name: remove old named sysconfig options
file:
state: absent
path: "{{item}}"
with_items: "{{ bind.cleanup }}"
- name: Force all notified handlers to run
ansible.builtin.meta: flush_handlers

View File

@@ -1,48 +1,9 @@
--- ---
# tasks/main.yml # tasks/main.yml
# define tasks here
- name: install bind package - include_tasks: config_named.yaml
apt:
name: "{{ item }}"
update_cache: yes
cache_valid_time: 86400
state: latest
with_items:
- bind9
- name: set correct permissions for logging # tests
file: - include_tasks: test_named.yaml
state=directory
path=/var/log/named/
owner={{ bind.user }}
group={{ bind.group }}
mode=0777
notify:
- restart bind
- name: copy zone files to /etc/bind/
template:
src: "{{ item }}.j2"
dest: /etc/bind/db.{{ item }}
owner: "{{ bind.user }}"
group: "{{ bind.group }}"
mode: 0644
with_items: "{{ bind.zonefiles }}"
notify:
- restart bind
- name: test zone files
command: named-checkzone xai-corp.net /etc/bind/db.xai-corp.net.internal
changed_when: false
- name: copy named.confs to /etc/bind/
template: src={{ item }}.j2 dest=/etc/bind/{{ item }} owner={{ bind.user }} group={{ bind.group }} mode=0640
with_items:
- named.conf.local
- named.conf.options
- named.conf.default-zones
notify:
- restart bind
- include_tasks: dynamic_ip.yml - include_tasks: dynamic_ip.yml

View File

@@ -0,0 +1,21 @@
---
# run tests against nameservers
- name: test local access
command: dig @{{item}} google.ca +tcp
with_items:
- "{{inventory_hostname}}"
- "{{inventory_hostname}}.xai-corp.net"
- "localhost"
- "127.0.0.1"
changed_when: false
- name: test dig from {{ansible_play_hosts_all}} names
command: dig @{{item}} google.ca +tcp
with_items: "{{ansible_play_hosts_all}}"
changed_when: false
- name: test dig from {{ansible_play_hosts_all}}.xai-corp.net
command: dig @{{item}}.xai-corp.net google.ca +tcp
with_items: "{{ansible_play_hosts_all}}"
changed_when: false

View File

@@ -1,9 +1,13 @@
// prime the server with knowledge of the root servers /*
* using views for internal zone requires a view for defaults
*/
view "defaults" { view "defaults" {
// zone "." { // prime the server with knowledge of the root servers
// type hint; zone "." {
// file "/etc/bind/db.root"; type hint;
// }; file "/usr/share/dns/root.hints";
};
// be authoritative for the localhost forward and reverse zones, and for // be authoritative for the localhost forward and reverse zones, and for
// broadcast zones as per RFC 1912 // broadcast zones as per RFC 1912

View File

@@ -1,205 +0,0 @@
/*
* Refer to the named.conf(5) and named(8) man pages, and the documentation
* in /usr/share/doc/bind-9 for more details.
* Online versions of the documentation can be found here:
* http://www.isc.org/software/bind/documentation
*
* If you are going to set up an authoritative server, make sure you
* understand the hairy details of how DNS works. Even with simple mistakes,
* you can break connectivity for affected parties, or cause huge amounts of
* useless Internet traffic.
*/
acl "xfer" {
/* Deny transfers by default except for the listed hosts.
* If we have other name servers, place them here.
*/
none;
};
/*
* You might put in here some ips which are allowed to use the cache or
* recursive queries
*/
acl "trusted" {
127.0.0.0/8;
192.168.4.0/24;
::1/128;
};
options {
directory "/var/bind";
pid-file "/var/run/named/named.pid";
/* https://www.isc.org/solutions/dlv >=bind-9.7.x only */
//bindkeys-file "/etc/bind/bind.keys";
/*listen-on-v6 { ::1; };*/
listen-on { 127.0.0.1; 192.168.4.12; };
allow-query {
/*
* Accept queries from our "trusted" ACL. We will
* allow anyone to query our master zones below.
* This prevents us from becoming a free DNS server
* to the masses.
*/
trusted;
};
allow-query-cache {
/* Use the cache for the "trusted" ACL. */
trusted;
};
// allow-recursion {
// /* Only trusted addresses are allowed to use recursion. */
// trusted;
// };
allow-transfer {
/* Zone transfers are denied by default. */
none;
};
allow-update {
/* Don't allow updates, e.g. via nsupdate. */
none;
};
/*
* If you've got a DNS server around at your upstream provider, enter its
* IP address here, and enable the line below. This will make you benefit
* from its cache, thus reduce overall DNS traffic in the Internet.
*
* Uncomment the following lines to turn on DNS forwarding, and change
* and/or update the forwarding ip address(es):
*/
forward first;
forwarders {
// 207.164.234.129; // Your ISP NS
// 207.164.234.193; // Your ISP NS
8.8.8.8; // Google Open DNS
8.8.4.4; // Google Open DNS
4.2.2.1; // Level3 Public DNS
4.2.2.2; // Level3 Public DNS
};
//dnssec-enable yes;
//dnssec-validation yes;
/*
* As of bind 9.8.0:
* "If the root key provided has expired,
* named will log the expiration and validation will not work."
*/
//dnssec-validation auto;
/* if you have problems and are behind a firewall: */
//query-source address * port 53;
};
logging {
channel default_log {
file "/var/log/named/named.log" versions 3 size 5M;
severity notice;
print-time yes;
print-severity yes;
print-category yes;
};
category default { default_log; };
category general { default_log; };
};
include "/etc/bind/rndc.key";
controls {
inet 127.0.0.1 port 953 allow { 127.0.0.1/24; ::1/128; } keys { "rndc-key"; };
};
view "internal" {
match-clients { 192.168.4.12; localhost; 192.168.4.0/24; };
recursion yes;
// zone "." in {
// type hint;
// file "/var/bind/named.cache";
// };
zone "localhost" IN {
type master;
file "pri/localhost.zone";
notify no;
};
zone "127.in-addr.arpa" IN {
type master;
file "pri/localhost.zone";
notify no;
};
zone "xai-corp.net." IN {
type master;
file "pri/xai-corp.net.internal";
allow-transfer { none; };
};
zone "4.168.192.in-addr.arpa." IN {
type master;
file "pri/xai-corp.net.reverse";
allow-update { none; };
};
};
view "external" {
match-clients { none; };
recursion no;
// zone "xai-corp.net" {
// type master;
// file "pri/xai-corp.net.external";
// allow-query { none; };
// allow-transfer { 127.0.0.1; };
// };
};
/*
* Briefly, a zone which has been declared delegation-only will be effectively
* limited to containing NS RRs for subdomains, but no actual data beyond its
* own apex (for example, its SOA RR and apex NS RRset). This can be used to
* filter out "wildcard" or "synthesized" data from NAT boxes or from
* authoritative name servers whose undelegated (in-zone) data is of no
* interest.
* See http://www.isc.org/software/bind/delegation-only for more info
*/
//zone "COM" { type delegation-only; };
//zone "NET" { type delegation-only; };
//zone "YOUR-DOMAIN.TLD" {
// type master;
// file "/var/bind/pri/YOUR-DOMAIN.TLD.zone";
// allow-query { any; };
// allow-transfer { xfer; };
//};
//zone "YOUR-SLAVE.TLD" {
// type slave;
// file "/var/bind/sec/YOUR-SLAVE.TLD.zone";
// masters { <MASTER>; };
/* Anybody is allowed to query but transfer should be controlled by the master. */
// allow-query { any; };
// allow-transfer { none; };
/* The master should be the only one who notifies the slaves, shouldn't it? */
// allow-notify { <MASTER>; };
// notify no;
//};

View File

@@ -3,29 +3,15 @@
# - local zones and views # - local zones and views
view "internal" { view "internal" {
match-clients { trusted; }; match-clients {
any;
};
recursion yes; recursion yes;
// zone "." in {
// type hint;
// file "/etc/bind/named.cache";
// };
zone "localhost" IN {
type master;
file "/etc/bind/db.127";
notify no;
};
zone "127.in-addr.arpa" IN {
type master;
file "/etc/bind/db.127";
notify no;
};
zone "xai-corp.net." IN { zone "xai-corp.net." IN {
type master; type master;
file "/etc/bind/db.xai-corp.net.internal"; file "/etc/bind/db.xai-corp.net.internal";
allow-query { any; };
allow-transfer { none; }; allow-transfer { none; };
}; };
@@ -37,9 +23,9 @@ view "internal" {
}; };
view "external" { //view "external" {
match-clients { none; }; // match-clients { none; };
recursion no; // recursion no;
// zone "xai-corp.net" { // zone "xai-corp.net" {
@@ -48,7 +34,7 @@ view "external" {
// allow-query { none; }; // allow-query { none; };
// allow-transfer { 127.0.0.1; }; // allow-transfer { 127.0.0.1; };
// }; // };
}; //};
/* /*
* Briefly, a zone which has been declared delegation-only will be effectively * Briefly, a zone which has been declared delegation-only will be effectively

View File

@@ -22,14 +22,17 @@ acl "xfer" {
* recursive queries * recursive queries
*/ */
acl "trusted" { acl "trusted" {
127.0.0.0/8; 192.168.4.11;
192.168.4.12;
192.168.4.15;
192.168.4.0/24; 192.168.4.0/24;
::1/128; localhost;
localnets;
}; };
statistics-channels { //statistics-channels {
inet * port 8053 allow { trusted; }; // inet * port 8053 allow { any; };
}; //};
options { options {
directory "/var/cache/bind"; directory "/var/cache/bind";
@@ -57,29 +60,35 @@ options {
// If BIND logs error messages about the root key being expired, // If BIND logs error messages about the root key being expired,
// you will need to update your keys. See https://www.isc.org/bind-keys // you will need to update your keys. See https://www.isc.org/bind-keys
//======================================================================== //========================================================================
dnssec-validation auto; //dnssec-validation auto; //auto?
dnssec-validation no; //auto?
auth-nxdomain no; # conform to RFC1035 //auth-nxdomain no; # conform to RFC1035
listen-on-v6 { any; }; listen-on-v6 { any; };
//allow-recursion { trusted; };
//listen-on { any; };
//query-source address *;
//allow-query { trusted; };
}; };
logging { //logging {
channel default_log { // channel default_log {
file "/var/log/named/named.log" versions 3 size 5M; // file "/var/log/named/named.log" versions 3 size 5M;
severity notice; // severity notice;
print-time yes; // print-time yes;
print-severity yes; // print-severity yes;
print-category yes; // print-category yes;
}; // };
//
category default { default_log; }; // category default { default_log; };
category general { default_log; }; // category general { default_log; };
}; //};
include "/etc/bind/rndc.key"; //include "etc/bind/rndc.key";
controls { //controls {
inet 127.0.0.1 port 953 allow { 127.0.0.1; ::1; } keys { "rndc-key"; }; // inet 127.0.0.1 port 953 allow { 127.0.0.1; ::1; } keys { "rndc-key"; };
}; //};

View File

@@ -1,16 +1,17 @@
$TTL 1D $TTL 1D
@ IN SOA xai-corp.net. root.xai-corp.net. ( @ IN SOA xai-corp.net. root.xai-corp.net. (
20150920; serial 202208241; serial
3h ; refresh 3h ; refresh
1h ; retry 1h ; retry
1w ; expiry 1w ; expiry
1d ) ; minimum 1d ) ; minimum
xai-corp.net. IN NS ns.xai-corp.net. xai-corp.net. IN NS 192.168.4.12.
xai-corp.net. IN NS 192.168.4.15.
xai-corp.net. IN MX 0 mail.xai-corp.net. xai-corp.net. IN MX 0 mail.xai-corp.net.
xai-corp.net. IN TXT "v=spf1 ip4:192.168.4.11/32 mx ptr mx:mail.xai-corp.net ~all" xai-corp.net. IN TXT "v=spf1 ip4:192.168.4.11/32 mx ptr mx:mail.xai-corp.net ~all"
xai-corp.net. IN A 192.168.4.11 xai-corp.net. IN A 192.168.4.11
;mail IN A 192.168.4.12 mail IN A 192.168.4.12
gateway IN A 192.168.4.4 gateway IN A 192.168.4.4
wireless IN A 192.168.4.3 wireless IN A 192.168.4.3
@@ -72,7 +73,6 @@ sql IN CNAME dkhost
xaibox IN CNAME dkhost xaibox IN CNAME dkhost
;office IN CNAME dkhost ;office IN CNAME dkhost
www IN CNAME dkhost www IN CNAME dkhost
mail IN CNAME dkhost
tunedb IN CNAME dkhost tunedb IN CNAME dkhost
abcapi IN CNAME dkhost abcapi IN CNAME dkhost
;prometheus IN CNAME dkhost ;prometheus IN CNAME dkhost

View File

@@ -1,7 +1,7 @@
$TTL 24h $TTL 24h
$ORIGIN 4.168.192.IN-ADDR.ARPA. $ORIGIN 4.168.192.IN-ADDR.ARPA.
@ IN SOA ns.xai-corp.net. root.xai-corp.net. ( @ IN SOA ns.xai-corp.net. root.xai-corp.net. (
20150920; serial 202208241; serial
3h ; refresh 3h ; refresh
1h ; retry 1h ; retry
1w ; expiry 1w ; expiry

View File

@@ -3,6 +3,7 @@
fstab: fstab:
gluster: gluster:
#state can be present, mounted, absent, unmounted, remounted
# - name: jenkins # - name: jenkins
# path: "/var/lib/jenkins" # path: "/var/lib/jenkins"
# state: mounted # state: mounted
@@ -11,7 +12,10 @@ fstab:
state: present state: present
- name: vmshares - name: vmshares
path: "/opt/shared" path: "/opt/shared"
state: present state: mounted
# - name: mariadb
# path: "/opt/mariadb"
# state: mounted
helm: helm:
repos: repos:

View File

@@ -0,0 +1,15 @@
---
#https://itnext.io/kubernetes-storage-part-2-glusterfs-complete-tutorial-77542c12a602
apiVersion: v1
kind: Endpoints
metadata:
name: glusterfs-cluster
labels:
storage.k8s.io/name: glusterfs
subsets:
- addresses:
- ip: 192.168.4.12
hostname: home
ports:
- port: 1

View File

@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: glusterfs-mariadb-claim
spec:
storageClassName: "" # Empty string must be explicitly set otherwise default StorageClass will be set
volumeName: glusterfs-mariadb
accessModes:
- ReadWriteOnce
- ReadOnlyMany
- ReadWriteMany
resources:
requests:
storage: 8Gi

View File

@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: glusterfs-mariadb
labels:
storage.k8s.io/name: glusterfs
spec:
accessModes:
- ReadWriteOnce
- ReadOnlyMany
- ReadWriteMany
capacity:
storage: 10Gi
storageClassName: ""
persistentVolumeReclaimPolicy: Recycle
volumeMode: Filesystem
glusterfs:
endpoints: glusterfs-cluster
path: mariadb
readOnly: no

View File

@@ -26,3 +26,6 @@ spec:
- name: hello-world-volume - name: hello-world-volume
configMap: configMap:
name: hello-world name: hello-world
- name: gluster-mariadb-volume
hostPath:
path: "/opt/mariadb"

View File

@@ -1,5 +1,7 @@
--- ---
#TODO: create mount points?
- name: Create glusterfs fstab entries - name: Create glusterfs fstab entries
ansible.posix.mount: ansible.posix.mount:
path: "{{item.path}}" path: "{{item.path}}"

View File

@@ -2,15 +2,17 @@
# provisioning services in k3s cluster # provisioning services in k3s cluster
# mount gluster # mount gluster
#- include_tasks: gluster.fstab.yml - include_tasks: gluster.fstab.yml
# add helm repositories # add helm repositories
#- include_tasks: add_repos.yml - include_tasks: add_repos.yml
- include_tasks: cert_manager.yml - include_tasks: cert_manager.yml
- include_tasks: hello-world.yaml - include_tasks: hello-world.yaml
#- include_tasks: mariadb.yaml
# https://artifacthub.io/packages/helm/twuni/docker-registry # https://artifacthub.io/packages/helm/twuni/docker-registry
#- name: Deploy latest version of docker-registry in dev-tools namespace #- name: Deploy latest version of docker-registry in dev-tools namespace
# local_action: # local_action:

View File

@@ -0,0 +1,4 @@
---
#https://github.com/bitnami/charts/tree/master/bitnami/mariadb

View File

@@ -3,7 +3,7 @@
- debug: var=rsyslog - debug: var=rsyslog
- name: remove packages - name: add packages
apt: apt:
state: present state: present
name: "{{item}}" name: "{{item}}"