Initial cluster configuration
commit 14f6348bf4
8 changed files with 233 additions and 0 deletions

ansible.cfg (new file)
@@ -0,0 +1,18 @@
[defaults]
inventory = inventory/hosts.yml
remote_user = root
host_key_checking = False
retry_files_enabled = False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp/ansible_facts
fact_caching_timeout = 86400
stdout_callback = yaml
forks = 10

[privilege_escalation]
become = False

[ssh_connection]
pipelining = True
ssh_args = -o ControlMaster=auto -o ControlPersist=60s

inventory/group_vars/all.yml (new file)
@@ -0,0 +1,12 @@
---
ansible_python_interpreter: /usr/bin/python3
timezone: America/New_York
nas_ip: 192.168.1.251
nas_media_share: /volume1/Media
nas_downloads_share: /volume1/Downloads
nas_docker_share: /volume1/docker
ntfy_server: https://ntfy.3ddbrewery.com
ntfy_autoheal_topic: autoheal-proxmox
ntfy_watchtower_topic: watchtower-proxmox
docker_compose_version: "3.8"
docker_data_dir: /home/docker/appdata

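The ntfy_* variables are defined here but not consumed by any playbook in this commit. As a sketch only (this task is not part of the commit), a play could publish a test notification with the builtin uri module, relying on ntfy's convention of accepting a plain POST to {server}/{topic}:

    - name: Send a test ntfy notification (illustrative only)
      uri:
        # ntfy_server and ntfy_watchtower_topic come from group_vars above
        url: "{{ ntfy_server }}/{{ ntfy_watchtower_topic }}"
        method: POST
        body: "Test message from {{ inventory_hostname }}"
        status_code: 200
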
inventory/hosts.yml (new file)
@@ -0,0 +1,65 @@
all:
  children:
    proxmox_nodes:
      hosts:
        pve2:
          ansible_host: 192.168.1.3
          ansible_user: root
        pve-dell:
          ansible_host: 192.168.1.4
          ansible_user: root
    vms:
      hosts:
        replicant:
          ansible_host: 192.168.1.80
          ansible_user: maddox
          ansible_become: yes
          docker_appdata: /home/maddox/docker/appdata
        databases:
          ansible_host: 192.168.1.81
          ansible_user: root
        immich:
          ansible_host: 192.168.1.82
          ansible_user: root
    lxcs:
      hosts:
        media-transcode:
          ansible_host: 192.168.1.120
          ansible_user: root
        network-services:
          ansible_host: 192.168.1.121
          ansible_user: root
        download-stack:
          ansible_host: 192.168.1.122
          ansible_user: root
        docker666:
          ansible_host: 192.168.1.123
          ansible_user: root
          docker_appdata: /root/docker/appdata
        tailscale-home:
          ansible_host: 192.168.1.124
          ansible_user: root
    infrastructure:
      hosts:
        dns-lxc:
          ansible_host: 192.168.1.125
          ansible_user: root
    legacy:
      hosts:
        nas:
          ansible_host: 192.168.1.251
          ansible_user: maddox
          ansible_port: 44822
        alien:
          ansible_host: 192.168.1.252
          ansible_user: maddox
    docker_hosts:
      children:
        vms:
        lxcs:
    all_managed:
      children:
        proxmox_nodes:
        vms:
        lxcs:
        infrastructure:

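The docker_hosts and all_managed groups only aggregate existing groups, so a single play can target the whole fleet. A minimal connectivity smoke test against the composite group (a sketch, not part of this commit):

    ---
    - name: Verify connectivity to every managed host
      hosts: all_managed
      gather_facts: no
      tasks:
        - name: Confirm SSH login and the Python interpreter both work
          ping:
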
playbooks/check-status.yml (new file)
@@ -0,0 +1,26 @@
---
- name: Check cluster status
  hosts: all_managed
  become: yes
  gather_facts: yes

  tasks:
    - name: Get disk usage
      shell: df -h / | tail -1 | awk '{print $5}'
      register: disk_usage
      changed_when: false

    - name: Get memory usage
      shell: free -m | awk '/^Mem:/ {printf "%.0f%%", $3/$2 * 100}'
      register: memory_usage
      changed_when: false

    - name: Get container count
      shell: docker ps -q 2>/dev/null | wc -l
      register: container_count
      changed_when: false
      failed_when: false

    - name: Display status
      debug:
        msg: "{{ inventory_hostname }}: Disk={{ disk_usage.stdout }} Mem={{ memory_usage.stdout }} Containers={{ container_count.stdout | default('N/A') }}"

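Because ansible.cfg already pins the inventory path, this runs with no extra flags: ansible-playbook playbooks/check-status.yml. The combination of failed_when: false and the default('N/A') filter lets hosts without Docker pass through the container-count task instead of failing the play.
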
playbooks/deploy-utils.yml (new file)
@@ -0,0 +1,28 @@
---
- name: Deploy standardized utils stack
  hosts: docker_hosts
  become: yes
  gather_facts: yes
  vars:
    utils_path: /home/docker/appdata/utils
    host_ip: "{{ ansible_default_ipv4.address }}"
    host_name: "{{ inventory_hostname }}"

  tasks:
    - name: Create utils directory
      file:
        path: "{{ utils_path }}"
        state: directory
        mode: '0755'

    - name: Create .env file
      copy:
        dest: "{{ utils_path }}/.env"
        content: |
          HOST_IP={{ host_ip }}
          HOST_NAME={{ host_name }}
        mode: '0600'

    - name: Display completion
      debug:
        msg: "Utils directory created at {{ utils_path }} for {{ inventory_hostname }}"

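This play only lays down the directory and the .env file; the compose file itself is expected to arrive separately (restart-utils.yml below looks for it). Purely as a sketch of what such a utils stack might contain, given the autoheal/watchtower topics defined in group_vars, a hypothetical docker-compose.yml could look like the following. The image choices, environment settings, and socket mounts are all assumptions; only the HOST_IP/HOST_NAME values written by the play above are guaranteed to exist:

    # hypothetical utils/docker-compose.yml; docker compose reads the
    # generated .env in this directory for ${...} interpolation
    services:
      watchtower:
        image: containrrr/watchtower
        restart: unless-stopped
        environment:
          # HOST_NAME comes from the .env written by deploy-utils.yml
          - WATCHTOWER_NOTIFICATIONS_HOSTNAME=${HOST_NAME}
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
      autoheal:
        image: willfarrell/autoheal
        restart: unless-stopped
        environment:
          - AUTOHEAL_CONTAINER_LABEL=all
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
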
playbooks/docker-prune.yml (new file)
@@ -0,0 +1,19 @@
---
- name: Docker cleanup on all hosts
  hosts: docker_hosts
  become: yes
  gather_facts: no

  tasks:
    - name: Prune with docker CLI
      shell: |
        docker image prune -f
        docker network prune -f
        docker builder prune -f
      register: prune_result
      changed_when: "'Total reclaimed space' in prune_result.stdout"

    - name: Show prune output
      debug:
        var: prune_result.stdout_lines
      when: prune_result.stdout_lines | length > 0

playbooks/restart-utils.yml (new file)
@@ -0,0 +1,27 @@
---
- name: Restart utils stack on Docker hosts
  hosts: docker_hosts
  become: yes
  gather_facts: no

  tasks:
    - name: Set utils path
      set_fact:
        utils_path: "{{ docker_appdata | default('/home/docker/appdata') }}/utils"

    - name: Check if utils compose file exists
      stat:
        path: "{{ utils_path }}/docker-compose.yml"
      register: compose_file

    - name: Restart utils stack
      shell: docker compose pull && docker compose up -d --force-recreate
      args:
        chdir: "{{ utils_path }}"
      when: compose_file.stat.exists
      register: restart_result

    - name: Skip if no utils stack
      debug:
        msg: "{{ inventory_hostname }}: No utils stack found at {{ utils_path }}"
      when: not compose_file.stat.exists

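One path-resolution detail worth noting: this play honors the per-host docker_appdata override (set for replicant and docker666 in the inventory), while deploy-utils.yml hard-codes utils_path to /home/docker/appdata/utils, so on those two hosts the deploy and restart plays currently resolve to different directories.
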
playbooks/update-all.yml (new file)
@@ -0,0 +1,38 @@
---
- name: Update all Docker hosts
  hosts: docker_hosts
  become: yes
  gather_facts: yes
  vars:
    reboot: false

  tasks:
    - name: Update apt cache
      apt:
        update_cache: yes
        cache_valid_time: 3600

    - name: Upgrade all packages
      apt:
        upgrade: dist
        autoremove: yes
        autoclean: yes
      register: upgrade_result

    - name: Check if reboot is required
      stat:
        path: /var/run/reboot-required
      register: reboot_required

    - name: Notify if reboot needed
      debug:
        msg: "{{ inventory_hostname }} requires a reboot"
      when: reboot_required.stat.exists

    - name: Reboot if required and allowed
      reboot:
        msg: "Ansible triggered reboot after updates"
        reboot_timeout: 300
      when:
        - reboot_required.stat.exists
        - reboot | bool

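Reboots are opt-in: the reboot var defaults to false, so a normal run only reports that a reboot is pending. Overriding it at invocation time, e.g. ansible-playbook playbooks/update-all.yml -e reboot=true, lets the final task reboot hosts that have /var/run/reboot-required.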