Add hetzner host im (192.168.12.3) to inventory
Parent: e8d2cd68a0
Commit: 17450c4b65
9 changed files with 427 additions and 0 deletions

CLAUDE.md (new file, +93 lines)

# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Overview

This is an Infrastructure as Code repository for a Docker-based homelab. It uses Ansible to manage Docker Compose deployments across multiple VMs and LXC containers on Proxmox.

## Common Commands

```bash
# Run from the repository root: ~/clustered-fucks

# Check the status of all managed hosts (disk, memory, containers)
ansible-playbook playbooks/check-status.yml

# Deploy compose files to hosts (generic deployment)
ansible-playbook playbooks/deploy-compose.yml                          # All hosts, all stacks
ansible-playbook playbooks/deploy-compose.yml --limit databases        # Single host
ansible-playbook playbooks/deploy-compose.yml -e stack_filter=mealie   # Specific stack
ansible-playbook playbooks/deploy-compose.yml -e restart_stacks=true   # Deploy and restart

# Deploy a specific service
ansible-playbook playbooks/deploy-<service>.yml

# Run ad-hoc commands on hosts
ansible docker_hosts -m ping                                           # Test connectivity
ansible docker_hosts -m shell -a "docker ps -q | wc -l"                # Container counts
ansible databases -m shell -a "docker logs mealie 2>&1 | tail -30"     # View logs

# Maintenance
ansible-playbook playbooks/update-all.yml       # apt upgrade all Docker hosts
ansible-playbook playbooks/docker-prune.yml     # Clean up unused Docker resources
ansible-playbook playbooks/restart-utils.yml    # Restart autoheal/watchtower/portainer
```

## Architecture

### Host Organization

Services are organized by deployment target in `compose-files/<hostname>/<service>/`:

| Host | IP Suffix | Type | Primary Services |
|------|-----------|------|------------------|
| replicant | .80 | VM | Arr stack, Emby, Navidrome, Homepage |
| databases | .81 | VM | PostgreSQL, Forgejo, Mealie |
| immich | .82 | VM | Immich photo management |
| network-services | .121 | LXC | Unifi, Docker proxy |
| download-stack | .122 | LXC | NZBGet, ruTorrent, slskd |
| docker666 | .123 | LXC | Gluetun, misc services |

### Ansible Groups

- `docker_hosts` - All VMs + LXCs (primary deployment targets)
- `all_managed` - Everything, including the Proxmox nodes
- `proxmox_nodes` - Hypervisors only
- `legacy` - Systems being migrated (nas, alien)

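The group nesting lives in `inventory/hosts.yml`; the hunk later in this commit shows the actual change, but a condensed sketch of the layout (host lists elided, exact indentation may differ from the real file) looks like:

```yaml
all:
  children:
    docker_hosts:          # primary deployment targets
      children:
        vms:
        lxcs:
        hetzner:
    all_managed:           # everything, including Proxmox nodes
      children:
        proxmox_nodes:
        vms:
        lxcs:
        infrastructure:
        hetzner:
    legacy:                # systems being migrated
      hosts:
        nas:
        alien:
```
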
### Deployment Pattern

1. Compose files are stored in `compose-files/<host>/<service>/docker-compose.yml`
2. Secrets live in `.env` files (not in git; copied via playbooks)
3. Service-specific playbooks live in `playbooks/deploy-<service>.yml`
4. Default appdata path on hosts: `/home/docker/appdata/<service>/`
5. Shared Docker networks: `proxy`, `media`, `download`, `database`

### Service Dependencies

- Immich and Mealie depend on PostgreSQL on the databases VM
- All Docker hosts run the utils stack (autoheal, watchtower, portainer-agent)
- Media services share NFS mounts from the Synology NAS at 192.168.1.251 (see the fragment below)

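As an illustration of the shared-mount pattern, a media compose file could declare the NAS export as an NFS volume. This is a hypothetical fragment, not taken from this repo: the volume name, export path `/volume1/media`, and mount options are placeholders; only the NAS IP comes from this document.

```yaml
# Hypothetical fragment: mounting a Synology NAS share as an NFS volume.
volumes:
  media:
    driver: local
    driver_opts:
      type: nfs
      o: "addr=192.168.1.251,nfsvers=4"   # NAS IP from this repo; options are illustrative
      device: ":/volume1/media"           # placeholder export path
```
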
## Creating New Deployments

```bash
# 1. Create the compose directory
mkdir -p compose-files/<target_host>/<service>

# 2. Add docker-compose.yml and .env (secrets)

# 3. Create a playbook (use an existing deploy-*.yml as a template)
#    Key pattern: copy compose + .env, ensure the network exists, docker compose up -d
#    (a template sketch follows this block)

# 4. Deploy
ansible-playbook playbooks/deploy-<service>.yml
```

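For reference, a minimal playbook following this pattern might look like the sketch below. `myservice` and the target host are placeholders, and the `deploy-*.yml` files in this commit are the authoritative templates; the network-creation task shown here uses `community.docker.docker_network`, which those files do not include:

```yaml
---
# Hypothetical template: deploy-myservice.yml (names and paths are placeholders)
- name: Deploy myservice to replicant
  hosts: replicant
  vars:
    service_name: myservice
    service_dir: "{{ docker_appdata }}/{{ service_name }}"
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        mode: '0755'

    - name: Copy compose file and secrets
      ansible.builtin.copy:
        src: "{{ compose_src }}/{{ item.src }}"
        dest: "{{ service_dir }}/{{ item.src }}"
        mode: "{{ item.mode }}"
      loop:
        - { src: docker-compose.yml, mode: '0644' }
        - { src: .env, mode: '0600' }

    - name: Ensure the shared proxy network exists
      community.docker.docker_network:
        name: proxy

    - name: Start the stack (docker compose up -d)
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
```
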
## Key Files

- `ansible.cfg` - Ansible config (inventory path, SSH settings, fact caching)
- `inventory/hosts.yml` - All managed hosts with IPs and connection settings
- `inventory/group_vars/all.yml` - Global variables (timezone, NAS paths, ntfy topics)
- `docs/control-server-guide.md` - Detailed operations guide

add-im.sh (new executable file, +128 lines)

```bash
#!/bin/bash
# Add host 'im' to Ansible management
# Target: 192.168.12.3 (different subnet)
# Run this on the control server (CT 127)

set -e

HOST_ALIAS="im"
HOST_IP="192.168.12.3"
HOST_USER="maddox"
HOST_PORT="22"

echo "=== Adding Host '$HOST_ALIAS' to Ansible Management ==="
echo "IP: $HOST_IP"
echo "User: $HOST_USER"
echo ""

# ============================================
# 1. Add to ~/.ssh/config
# ============================================

if grep -q "^Host $HOST_ALIAS$" ~/.ssh/config 2>/dev/null; then
    echo "⚠️  Host '$HOST_ALIAS' already exists in ~/.ssh/config - skipping"
else
    cat >> ~/.ssh/config << EOF

Host $HOST_ALIAS
    HostName $HOST_IP
    User $HOST_USER
    Port $HOST_PORT
EOF
    echo "✅ Added '$HOST_ALIAS' to ~/.ssh/config"
fi

# ============================================
# 2. Add to tmux-hosts.conf (if it exists)
# ============================================

if [ -f ~/.ssh/tmux-hosts.conf ]; then
    if grep -q "^$HOST_ALIAS$" ~/.ssh/tmux-hosts.conf 2>/dev/null; then
        echo "⚠️  Host '$HOST_ALIAS' already in tmux-hosts.conf - skipping"
    else
        echo "$HOST_ALIAS" >> ~/.ssh/tmux-hosts.conf
        echo "✅ Added '$HOST_ALIAS' to tmux-hosts.conf"
    fi
else
    echo "ℹ️  No tmux-hosts.conf found - skipping"
fi

# ============================================
# 3. Print the inventory snippet
# ============================================

INVENTORY_FILE=~/clustered-fucks/inventory/hosts.yml

echo ""
echo "============================================"
echo "=== Ansible Inventory Update Required ==="
echo "============================================"
echo ""
echo "Add the following to $INVENTORY_FILE"
echo ""
echo "Under the 'docker_hosts > hosts:' section:"
echo "----------------------------------------"
cat << 'EOF'
    im:
      ansible_host: 192.168.12.3
      ansible_user: maddox
      docker_appdata: /volume1/docker
EOF
echo "----------------------------------------"
echo ""
echo "Make sure 'im' is also included in these groups:"
echo "  - all_managed (children section)"
echo ""

# ============================================
# 4. Test SSH connectivity
# ============================================

echo "============================================"
echo "=== Testing SSH Connectivity ==="
echo "============================================"
echo ""
echo "Attempting: ssh -o ConnectTimeout=5 -o BatchMode=yes $HOST_ALIAS 'echo OK'"
echo ""

if ssh -o ConnectTimeout=5 -o BatchMode=yes $HOST_ALIAS 'echo OK' 2>/dev/null; then
    echo "✅ SSH connection successful (key already authorized)"
else
    echo "⚠️  SSH key not authorized or host unreachable"
    echo ""
    echo "To copy your SSH key, run:"
    echo "  ssh-copy-id $HOST_ALIAS"
    echo ""
    echo "If the host is unreachable, verify:"
    echo "  - The host is powered on"
    echo "  - IP $HOST_IP is correct"
    echo "  - Routing exists to the 192.168.12.x subnet"
    echo "  - The SSH service is running on the target"
fi

echo ""
echo "============================================"
echo "=== Next Steps ==="
echo "============================================"
echo ""
echo "1. COPY SSH KEY (if needed):"
echo "   ssh-copy-id $HOST_ALIAS"
echo ""
echo "2. EDIT ANSIBLE INVENTORY:"
echo "   vim ~/clustered-fucks/inventory/hosts.yml"
echo "   # Add the YAML snippet shown above"
echo ""
echo "3. TEST ANSIBLE CONNECTION:"
echo "   cd ~/clustered-fucks"
echo "   ansible $HOST_ALIAS -m ping"
echo ""
echo "4. VERIFY GROUP MEMBERSHIP:"
echo "   ansible-inventory --graph"
echo ""
echo "5. COMMIT TO GIT:"
echo "   cd ~/clustered-fucks"
echo "   git add -A && git commit -m 'Add host im (192.168.12.3) to inventory' && git push"
echo ""
echo "============================================"
echo "Done! Host '$HOST_ALIAS' added to SSH config."
echo "Remember to manually update the Ansible inventory."
echo "============================================"
```

compose-files/databases/silverbullet/docker-compose.yml (new file, +31 lines)

```yaml
services:
  silverbullet:
    image: ghcr.io/silverbulletmd/silverbullet
    container_name: silverbullet
    restart: unless-stopped
    env_file:
      - .env
    volumes:
      - ./space:/space
      - ./.ssh:/home/silverbullet/.ssh:ro
    ports:
      - "53510:3000"
    networks:
      - proxy
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:3000/.ping || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

networks:
  proxy:
    external: true
```

compose-files/replicant/cyberchef/docker-compose.yml (new file, +21 lines)

```yaml
services:
  cyberchef:
    image: mpepping/cyberchef:latest
    container_name: cyberchef
    ports:
      - "7318:8000"
    restart: unless-stopped
    networks:
      - proxy
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.5'
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true
```

compose-files/replicant/web-check/docker-compose.yml (new file, +21 lines)

```yaml
services:
  web-check:
    image: lissy93/web-check:latest
    container_name: web-check
    ports:
      - "6160:3000"
    restart: unless-stopped
    networks:
      - proxy
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true
```

inventory/hosts.yml (+8 lines)
@@ -53,13 +53,21 @@ all:

```diff
         alien:
           ansible_host: 192.168.1.252
           ansible_user: maddox
+    hetzner:
+      hosts:
+        im:
+          ansible_host: 192.168.12.3
+          ansible_user: maddox
+          docker_appdata: /volume1/docker
     docker_hosts:
       children:
         vms:
         lxcs:
+        hetzner:
     all_managed:
       children:
         proxmox_nodes:
         vms:
         lxcs:
         infrastructure:
+        hetzner:
```

playbooks/deploy-cyberchef.yml (new file, +32 lines)

```yaml
---
- name: Deploy CyberChef to replicant
  hosts: replicant
  vars:
    service_name: cyberchef
    service_dir: "{{ docker_appdata }}/{{ service_name }}"
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        mode: '0644'

    - name: Pull latest image
      community.docker.docker_image:
        name: mpepping/cyberchef:latest
        source: pull
        force_source: true

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: missing
```

playbooks/deploy-silverbullet.yml (new file, +61 lines)

```yaml
---
- name: Deploy SilverBullet to databases
  hosts: databases
  become: true
  vars:
    appdata_path: /home/docker/appdata/silverbullet
    compose_src: ~/clustered-fucks/compose-files/databases/silverbullet

  tasks:
    - name: Create appdata directory
      ansible.builtin.file:
        path: "{{ appdata_path }}"
        state: directory
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ appdata_path }}/docker-compose.yml"
        mode: '0644'

    - name: Copy .env file
      ansible.builtin.copy:
        src: "{{ compose_src }}/.env"
        dest: "{{ appdata_path }}/.env"
        mode: '0600'

    - name: Pull latest image
      community.docker.docker_compose_v2:
        project_src: "{{ appdata_path }}"
        pull: always
        state: present

    - name: Start SilverBullet
      community.docker.docker_compose_v2:
        project_src: "{{ appdata_path }}"
        state: present

    - name: Wait for container to be healthy
      ansible.builtin.shell: |
        for i in {1..30}; do
          status=$(docker inspect --format='{% raw %}{{.State.Health.Status}}{% endraw %}' silverbullet 2>/dev/null || echo "not_found")
          if [ "$status" = "healthy" ]; then
            echo "Container is healthy"
            exit 0
          fi
          sleep 2
        done
        echo "Timeout waiting for healthy status"
        exit 1
      args:
        executable: /bin/bash   # the {1..30} brace expansion needs bash, not sh
      register: health_check
      changed_when: false

    - name: Show container status
      ansible.builtin.shell: docker ps --filter name=silverbullet --format "table {% raw %}{{.Names}}\t{{.Status}}\t{{.Ports}}{% endraw %}"
      register: container_status
      changed_when: false

    - name: Display status
      ansible.builtin.debug:
        var: container_status.stdout_lines
```
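
If the shell loop is undesirable, the same wait could be expressed with `community.docker.docker_container_info` plus `until`/`retries`; a minimal drop-in sketch, untested against this stack (`sb_info` is a name introduced for this sketch):

```yaml
    - name: Wait for container to be healthy (module-based alternative)
      community.docker.docker_container_info:
        name: silverbullet
      register: sb_info   # introduced for this sketch
      until: sb_info.exists and (sb_info.container.State.Health.Status | default('')) == 'healthy'
      retries: 30
      delay: 2
```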

playbooks/deploy-web-check.yml (new file, +32 lines)

```yaml
---
- name: Deploy Web-Check to replicant
  hosts: replicant
  vars:
    service_name: web-check
    service_dir: "{{ docker_appdata }}/{{ service_name }}"
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        mode: '0644'

    - name: Pull latest image
      community.docker.docker_image:
        name: lissy93/web-check:latest
        source: pull
        force_source: true

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: missing
```