Phase 2 Batch 2: tinymediamanager, autoscan, channeltube, dispatcharr → replicant

Maddox 2026-01-26 23:34:49 +00:00
parent 45aee7ecce
commit 77b153a054
13 changed files with 607 additions and 0 deletions

View file

@ -0,0 +1,35 @@
services:
  autoscan:
    image: cloudb0x/autoscan:latest
    container_name: autoscan
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Indiana/Indianapolis
    volumes:
      - ./config:/config
      - /volume1/Media:/media:ro
      - /volume1/Downloads:/downloads:ro
    ports:
      - "3030:3030"
    restart: unless-stopped
    networks:
      - proxy
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '1.0'
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3030/health"]
      interval: 30s
      timeout: 10s
      start_period: 40s
      retries: 3
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true
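
This compose file declares the proxy network as external and polls a /health endpoint, so both need to exist for the stack to come up healthy. A minimal pre-flight and verification sketch, using only the names and ports defined in this file (the network-create step is a one-time action and may already have been done on replicant):

docker network create proxy || true
docker compose up -d
curl -f http://localhost:3030/health          # same endpoint the healthcheck polls
docker inspect --format '{{.State.Health.Status}}' autoscan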

View file

@ -0,0 +1,29 @@
services:
  channeltube:
    image: thewicklowwolf/channeltube:latest
    container_name: channeltube
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Indiana/Indianapolis
    volumes:
      - ./config:/channeltube/config
      - /volume1/Media/Youtube/movies:/channeltube/downloads
      - /volume1/Media/Youtube/audio:/channeltube/audio_downloads
    ports:
      - "5444:5000"
    restart: unless-stopped
    networks:
      - proxy
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '2.0'
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true

View file

@ -0,0 +1,42 @@
services:
  dispatcharr:
    image: ghcr.io/dispatcharr/dispatcharr:latest
    container_name: dispatcharr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Indiana/Indianapolis
      - PORT=9191
      # Route traffic through gluetun's HTTP proxy on download-stack
      - HTTP_PROXY=http://192.168.1.122:38888
      - HTTPS_PROXY=http://192.168.1.122:38888
      - http_proxy=http://192.168.1.122:38888
      - https_proxy=http://192.168.1.122:38888
      # GPU settings (for future pve-alien, or CPU fallback)
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
    volumes:
      - ./data:/data
    ports:
      - "9191:9191"
    restart: unless-stopped
    networks:
      - proxy
    # Intel QuickSync access (pve-z620 has limited decode capability)
    devices:
      - /dev/dri:/dev/dri
    group_add:
      - video
      - render
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '2.0'
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true
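
Because the container is pointed at gluetun's HTTP proxy on the download-stack host, a quick check that the proxy answers (and that the QuickSync device nodes exist) can save a confusing first start. A hedged sketch from the replicant shell; ifconfig.me is just an example echo-IP service:

curl -sx http://192.168.1.122:38888 https://ifconfig.me   # should return the VPN egress IP
ls -l /dev/dri                                            # render/card nodes passed through above
docker compose up -d && docker logs -f dispatcharr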

View file

@ -0,0 +1,56 @@
# Matrix Ansible Controller
# Portable container for managing matrix-docker-ansible-deploy playbook

FROM python:3.12-alpine

LABEL maintainer="maddox"
LABEL description="Portable Ansible controller for matrix-docker-ansible-deploy"

# Install system dependencies
RUN apk add --no-cache \
    git \
    openssh-client \
    bash \
    curl \
    rsync \
    gcc \
    musl-dev \
    libffi-dev \
    openssl-dev \
    python3-dev \
    just \
    nano \
    vim \
    tmux \
    jq

# Install Ansible and required Python packages
# (version spec is quoted so the shell does not treat ">=" as a redirection)
RUN pip install --no-cache-dir \
    "ansible>=2.17.0" \
    passlib \
    dnspython \
    netaddr \
    jmespath \
    docker \
    requests

# Create working directories
RUN mkdir -p /playbook /inventory /ssh

# Set up SSH directory with proper permissions
RUN mkdir -p /root/.ssh && chmod 700 /root/.ssh

# Copy entrypoint script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Set working directory to playbook
WORKDIR /playbook

# Default environment
ENV ANSIBLE_HOST_KEY_CHECKING=False
ENV ANSIBLE_FORCE_COLOR=True
ENV TERM=xterm-256color

ENTRYPOINT ["/entrypoint.sh"]
CMD ["/bin/bash"]

View file

@ -0,0 +1,49 @@
# Matrix Ansible Controller
# Portable container for managing matrix-docker-ansible-deploy playbook
#
# Usage:
#   docker compose up -d
#   docker exec -it matrix-ansible-controller bash
#   just install-all

services:
  controller:
    build: .
    image: matrix-ansible-controller:latest
    container_name: matrix-ansible-controller
    hostname: matrix-controller

    # Keep container running for interactive use
    stdin_open: true
    tty: true

    volumes:
      # SSH keys (read-only) - for connecting to matrix server
      - /root/.ssh:/ssh:ro
      # Persistent playbook directory (survives container rebuilds)
      - ./data/playbook:/playbook
      # Your inventory configuration (vars.yml, hosts, etc.)
      - ./data/inventory:/inventory
      # Persist ansible cache/facts
      - ./data/ansible-cache:/root/.ansible

    environment:
      - ANSIBLE_HOST_KEY_CHECKING=False
      - ANSIBLE_FORCE_COLOR=True
      - UPDATE_ROLES=false

    network_mode: bridge

    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '2.0'

    labels:
      - "com.centurylinklabs.watchtower.enable=false"

    restart: unless-stopped

View file

@ -0,0 +1,84 @@
#!/bin/bash
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE} Matrix Ansible Controller${NC}"
echo -e "${BLUE}========================================${NC}"

# --- SSH Key Setup ---
if [ -d "/ssh" ] && [ "$(ls -A /ssh 2>/dev/null)" ]; then
    echo -e "${GREEN}[SSH]${NC} Setting up SSH keys from /ssh mount..."
    cp -r /ssh/* /root/.ssh/ 2>/dev/null || true
    chmod 700 /root/.ssh
    chmod 600 /root/.ssh/* 2>/dev/null || true
    chmod 644 /root/.ssh/*.pub 2>/dev/null || true
    chmod 644 /root/.ssh/known_hosts 2>/dev/null || true
    chmod 644 /root/.ssh/config 2>/dev/null || true
    echo -e "${GREEN}[SSH]${NC} Keys configured"
else
    echo -e "${YELLOW}[SSH]${NC} No SSH keys mounted at /ssh"
    echo -e "${YELLOW}[SSH]${NC} Mount with: -v ~/.ssh:/ssh:ro"
fi

# --- Playbook Setup ---
if [ ! -f "/playbook/setup.yml" ]; then
    echo -e "${GREEN}[PLAYBOOK]${NC} Cloning matrix-docker-ansible-deploy..."
    git clone https://github.com/spantaleev/matrix-docker-ansible-deploy.git /tmp/playbook
    mv /tmp/playbook/* /playbook/
    mv /tmp/playbook/.* /playbook/ 2>/dev/null || true
    rm -rf /tmp/playbook
    echo -e "${GREEN}[PLAYBOOK]${NC} Playbook cloned successfully"
else
    echo -e "${GREEN}[PLAYBOOK]${NC} Playbook already present"
fi

# --- Inventory Setup ---
if [ -d "/inventory" ] && [ "$(ls -A /inventory 2>/dev/null)" ]; then
    echo -e "${GREEN}[INVENTORY]${NC} Linking inventory from /inventory mount..."
    rm -rf /playbook/inventory 2>/dev/null || true
    ln -sf /inventory /playbook/inventory
    echo -e "${GREEN}[INVENTORY]${NC} Inventory linked: /playbook/inventory -> /inventory"
else
    echo -e "${YELLOW}[INVENTORY]${NC} No inventory mounted at /inventory"
    echo -e "${YELLOW}[INVENTORY]${NC} Mount with: -v /path/to/inventory:/inventory"
    mkdir -p /playbook/inventory
fi

# --- Install/Update Ansible Roles ---
if [ -f "/playbook/requirements.yml" ]; then
    if [ ! -d "/playbook/roles/galaxy" ] || [ "${UPDATE_ROLES:-false}" = "true" ]; then
        echo -e "${GREEN}[ROLES]${NC} Installing Ansible Galaxy roles..."
        cd /playbook
        rm -rf roles/galaxy
        ansible-galaxy install -r requirements.yml -p roles/galaxy/ --force
        echo -e "${GREEN}[ROLES]${NC} Roles installed successfully"
    else
        echo -e "${GREEN}[ROLES]${NC} Roles already installed (set UPDATE_ROLES=true to refresh)"
    fi
fi

# --- Display Status ---
echo ""
echo -e "${BLUE}----------------------------------------${NC}"
echo -e "${GREEN}Status:${NC}"
echo -e " Ansible: $(ansible --version | head -1)"
echo -e " Playbook: /playbook"
echo -e " Inventory: /playbook/inventory"
echo ""
echo -e "${BLUE}Quick Commands:${NC}"
echo -e " just install-all # Full installation"
echo -e " just setup-all # Setup all components"
echo -e " just roles # Update roles"
echo -e " just update # git pull + update roles"
echo ""
echo -e "${BLUE}----------------------------------------${NC}"
echo ""

exec "$@"

View file

@ -0,0 +1,33 @@
services:
  tinymediamanager:
    image: romancin/tinymediamanager:latest-v4
    container_name: tinymediamanager
    environment:
      - USER_ID=1000
      - GROUP_ID=1000
      - TZ=America/Indiana/Indianapolis
      - DISPLAY_WIDTH=1920
      - DISPLAY_HEIGHT=1080
      - KEEP_APP_RUNNING=1
      - CLEAN_TMP_DIR=1
    volumes:
      - ./config:/config
      - /volume1/Media:/media
    ports:
      - "45800:5800"  # Web UI
      - "45900:5900"  # VNC
    restart: unless-stopped
    networks:
      - proxy
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '2.0'
    labels:
      - "autoheal=true"
      - "com.centurylinklabs.watchtower.enable=true"

networks:
  proxy:
    external: true

View file

@ -0,0 +1,40 @@
---
- name: Deploy Autoscan to replicant
  hosts: replicant
  vars:
    service_name: autoscan
    service_dir: /home/maddox/docker/appdata/{{ service_name }}
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Create config subdirectory
      ansible.builtin.file:
        path: "{{ service_dir }}/config"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        owner: maddox
        group: maddox
        mode: '0644'

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: always
      become: yes
      become_user: maddox
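
Each per-service playbook can also be run on its own instead of through the combined deploy-phase2-batch2.yml later in this commit; a sketch (the inventory path is an assumption about the repo layout):

ansible-playbook -i inventory/hosts playbooks/deploy-autoscan.yml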

View file

@ -0,0 +1,40 @@
---
- name: Deploy ChannelTube to replicant
  hosts: replicant
  vars:
    service_name: channeltube
    service_dir: /home/maddox/docker/appdata/{{ service_name }}
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Create config subdirectory
      ansible.builtin.file:
        path: "{{ service_dir }}/config"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        owner: maddox
        group: maddox
        mode: '0644'

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: always
      become: yes
      become_user: maddox

View file

@ -0,0 +1,40 @@
---
- name: Deploy Dispatcharr to replicant
  hosts: replicant
  vars:
    service_name: dispatcharr
    service_dir: /home/maddox/docker/appdata/{{ service_name }}
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Create data subdirectory
      ansible.builtin.file:
        path: "{{ service_dir }}/data"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        owner: maddox
        group: maddox
        mode: '0644'

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: always
      become: yes
      become_user: maddox

View file

@ -0,0 +1,104 @@
---
# Deploy Matrix Ansible Controller
#
# Usage:
#   ansible-playbook playbooks/deploy-matrix-ansible-controller.yml
#
# After deployment:
#   ssh replicant
#   docker exec -it matrix-ansible-controller bash
#   # Copy your vars.yml to data/inventory/host_vars/matrix.yourdomain.com/

- name: Deploy Matrix Ansible Controller
  hosts: replicant
  vars:
    service_name: matrix-ansible-controller
    service_dir: /home/maddox/docker/appdata/{{ service_name }}
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory structure
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: '0755'
      loop:
        - "{{ service_dir }}"
        - "{{ service_dir }}/data"
        - "{{ service_dir }}/data/playbook"
        - "{{ service_dir }}/data/inventory"
        - "{{ service_dir }}/data/inventory/host_vars"
        - "{{ service_dir }}/data/ansible-cache"

    - name: Copy Dockerfile
      ansible.builtin.copy:
        src: "{{ compose_src }}/Dockerfile"
        dest: "{{ service_dir }}/Dockerfile"
        mode: '0644'

    - name: Copy entrypoint script
      ansible.builtin.copy:
        src: "{{ compose_src }}/entrypoint.sh"
        dest: "{{ service_dir }}/entrypoint.sh"
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        mode: '0644'

    - name: Build Docker image
      community.docker.docker_image:
        name: matrix-ansible-controller
        tag: latest
        source: build
        build:
          path: "{{ service_dir }}"
          pull: yes
        state: present
        force_source: yes

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: always

    - name: Display next steps
      ansible.builtin.debug:
        msg: |
          ✅ Matrix Ansible Controller deployed!

          === NEXT STEPS ===

          1. Copy your Matrix inventory to the container:
             ssh replicant
             cd /home/maddox/docker/appdata/matrix-ansible-controller/data/inventory

             # Create the structure:
             mkdir -p host_vars/matrix.yourdomain.com

             # Copy your vars.yml (from wherever it currently lives):
             # Option A: From another machine via scp
             # Option B: Create/paste manually

             # Also create/copy the hosts file:
             cat > hosts << 'HOSTS'
             [matrix_servers]
             matrix.yourdomain.com ansible_host=YOUR_MATRIX_IP ansible_ssh_user=root
             HOSTS

          2. Enter the container and test:
             docker exec -it matrix-ansible-controller bash
             ansible -i inventory/hosts all -m ping

          3. Run Matrix updates:
             just update       # Update playbook + roles
             just install-all  # Deploy changes

          ===================================

View file

@ -0,0 +1,15 @@
---
# Combined playbook for Phase 2 Batch 2 migrations
# Run with: ansible-playbook playbooks/deploy-phase2-batch2.yml

- name: Deploy TinyMediaManager
  import_playbook: deploy-tinymediamanager.yml

- name: Deploy Autoscan
  import_playbook: deploy-autoscan.yml

- name: Deploy ChannelTube
  import_playbook: deploy-channeltube.yml

- name: Deploy Dispatcharr
  import_playbook: deploy-dispatcharr.yml

View file

@ -0,0 +1,40 @@
---
- name: Deploy TinyMediaManager to replicant
  hosts: replicant
  vars:
    service_name: tinymediamanager
    service_dir: /home/maddox/docker/appdata/{{ service_name }}
    compose_src: "{{ playbook_dir }}/../compose-files/replicant/{{ service_name }}"

  tasks:
    - name: Create service directory
      ansible.builtin.file:
        path: "{{ service_dir }}"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Create config subdirectory
      ansible.builtin.file:
        path: "{{ service_dir }}/config"
        state: directory
        owner: maddox
        group: maddox
        mode: '0755'

    - name: Copy docker-compose.yml
      ansible.builtin.copy:
        src: "{{ compose_src }}/docker-compose.yml"
        dest: "{{ service_dir }}/docker-compose.yml"
        owner: maddox
        group: maddox
        mode: '0644'

    - name: Deploy container
      community.docker.docker_compose_v2:
        project_src: "{{ service_dir }}"
        state: present
        pull: always
      become: yes
      become_user: maddox