Migrate Portainer stacks to Komodo

- Add 11 new stack compose files from syn01 and syn02
- syn01 stacks: postgres01, paperless-ngx, minio, beszel-hub, gitea, adguard
- syn02 stacks: radarr, lidarr, sabnzbd, sonarr, arr-cleanup
- Update resources.toml with all new stack definitions
- Remove embedded file_contents from prowlarr stack
- Use environment variables for sensitive data (passwords, API keys)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-14 19:01:33 -08:00
parent fbcb5a5983
commit 4c3963a9c3
12 changed files with 738 additions and 16 deletions

View File

@@ -100,22 +100,146 @@ git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/prowlarr.compose.yml"]
file_contents = """
services:
prowlarr:
container_name: prowlarr
image: ghcr.io/hotio/prowlarr
ports:
- "9696:9696"
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- TZ=Etc/UTC
volumes:
- /volume2/data:/data:rw
- /volume2/docker/prowlarr:/config:rw
"""
##
[[stack]]
name = "postgres01"
description = "PostgreSQL database with PgBouncer and pgAdmin"
tags = ["database", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/postgres01.compose.yml"]
##
[[stack]]
name = "paperless-ngx"
description = "Paperless-ngx document management system"
tags = ["documents", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/paperless-ngx.compose.yml"]
##
[[stack]]
name = "minio"
description = "MinIO object storage server"
tags = ["storage", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/minio.compose.yml"]
##
[[stack]]
name = "beszel-hub"
description = "Beszel monitoring hub and agent for syn01"
tags = ["monitoring", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/beszel-hub.compose.yml"]
##
[[stack]]
name = "gitea"
description = "Gitea self-hosted Git service"
tags = ["git", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/gitea.compose.yml"]
##
[[stack]]
name = "adguard"
description = "AdGuard Home DNS ad blocker"
tags = ["dns", "syn01"]
[stack.config]
server = "syn01"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/adguard.compose.yml"]
##
[[stack]]
name = "lidarr"
description = "Lidarr music collection manager"
tags = ["media", "arr", "syn02"]
[stack.config]
server = "syn02"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/lidarr.compose.yml"]
##
[[stack]]
name = "sabnzbd"
description = "SABnzbd usenet downloader"
tags = ["download", "syn02"]
[stack.config]
server = "syn02"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/sabnzbd.compose.yml"]
##
[[stack]]
name = "sonarr"
description = "Sonarr TV show collection manager"
tags = ["media", "arr", "syn02"]
[stack.config]
server = "syn02"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/sonarr.compose.yml"]
##
[[stack]]
name = "arr-cleanup"
description = "Automated cleanup for Radarr and Sonarr unmonitored items"
tags = ["automation", "arr", "syn02"]
[stack.config]
server = "syn02"
git_provider = "192.168.1.51:3052"
git_https = false
git_account = "komodo"
repo = "stray/komodo"
file_paths = ["stacks/arr-cleanup.compose.yml"]
##

View File

@@ -0,0 +1,15 @@
services:
adguard:
image: adguard/adguardhome
container_name: AdGuard
mem_limit: 2g
cpu_shares: 768
security_opt:
- no-new-privileges:true
network_mode: host
volumes:
- /volume1/docker/adguard/config:/opt/adguardhome/conf:rw
- /volume1/docker/adguard/data:/opt/adguardhome/work:rw
environment:
TZ: America/Los_Angeles
restart: on-failure:5

View File

@@ -0,0 +1,111 @@
version: '3.8'
services:
arr-cleanup:
image: alpine:latest
container_name: arr-cleanup
restart: unless-stopped
environment:
- TZ=America/Los_Angeles
- RADARR_URL=${RADARR_URL:-http://192.168.1.52:7878}
- RADARR_API_KEY=${RADARR_API_KEY}
- SONARR_URL=${SONARR_URL:-http://192.168.1.52:8989}
- SONARR_API_KEY=${SONARR_API_KEY}
volumes:
- cleanup-scripts:/scripts
- cleanup-logs:/logs
entrypoint: /bin/sh
command:
- -c
- |
# Install dependencies
apk add --no-cache curl jq dcron
# Create Radarr cleanup script
cat > /scripts/cleanup-radarr.sh <<'EOF'
#!/bin/sh
echo "=== Radarr Cleanup (Unmonitored + No Files) ==="
echo "Fetching movie list..."
MOVIES=$$(curl -s "$${RADARR_URL}/api/v3/movie?apikey=$${RADARR_API_KEY}")
TO_DELETE=$$(echo "$$MOVIES" | jq -c '[.[] | select(.monitored == false and .hasFile == false)]')
COUNT=$$(echo "$$TO_DELETE" | jq 'length')
if [ "$$COUNT" -eq 0 ]; then
echo "No unmonitored movies found."
exit 0
fi
echo "Found $$COUNT unmonitored movies with no files:"
echo "$$TO_DELETE" | jq -r '.[] | " - \(.title) (\(.year))"'
echo "Deleting from Radarr..."
echo "$$TO_DELETE" | jq -c '.[]' | while read movie; do
ID=$$(echo "$$movie" | jq -r '.id')
TITLE=$$(echo "$$movie" | jq -r '.title')
curl -s -X DELETE "$${RADARR_URL}/api/v3/movie/$$ID?apikey=$${RADARR_API_KEY}&deleteFiles=false" > /dev/null
echo " ✓ Deleted: $$TITLE"
done
echo "=== Complete ==="
EOF
# Create Sonarr cleanup script
cat > /scripts/cleanup-sonarr.sh <<'EOF'
#!/bin/sh
echo "=== Sonarr Cleanup (Unmonitored + No Files) ==="
echo "Fetching series list..."
SERIES=$$(curl -s "$${SONARR_URL}/api/v3/series?apikey=$${SONARR_API_KEY}")
TO_DELETE=$$(echo "$$SERIES" | jq -c '[.[] | select(.monitored == false and .statistics.episodeFileCount == 0)]')
COUNT=$$(echo "$$TO_DELETE" | jq 'length')
if [ "$$COUNT" -eq 0 ]; then
echo "No unmonitored series found."
exit 0
fi
echo "Found $$COUNT unmonitored series with no files:"
echo "$$TO_DELETE" | jq -r '.[] | " - \(.title) (\(.year))"'
echo "Deleting from Sonarr..."
echo "$$TO_DELETE" | jq -c '.[]' | while read show; do
ID=$$(echo "$$show" | jq -r '.id')
TITLE=$$(echo "$$show" | jq -r '.title')
curl -s -X DELETE "$${SONARR_URL}/api/v3/series/$$ID?apikey=$${SONARR_API_KEY}&deleteFiles=false" > /dev/null
echo " ✓ Deleted: $$TITLE"
done
echo "=== Complete ==="
EOF
# Create master script
cat > /scripts/cleanup-all.sh <<'EOF'
#!/bin/sh
echo "========================================"
echo "ARR Services Auto-Cleanup"
echo "$(date)"
echo "========================================"
/scripts/cleanup-radarr.sh
echo ""
/scripts/cleanup-sonarr.sh
echo "========================================"
EOF
# Make executable
chmod +x /scripts/*.sh
# Setup cron (daily at 3 AM)
echo "0 3 * * * /scripts/cleanup-all.sh >> /logs/cleanup.log 2>&1" > /etc/crontabs/root
# Log startup
echo "========================================" | tee /logs/startup.log
echo "ARR Cleanup Container Ready" | tee -a /logs/startup.log
echo "$(date)" | tee -a /logs/startup.log
echo "Schedule: Daily at 3:00 AM" | tee -a /logs/startup.log
echo "Manual run: docker exec arr-cleanup /scripts/cleanup-all.sh" | tee -a /logs/startup.log
echo "View logs: docker exec arr-cleanup cat /logs/cleanup.log" | tee -a /logs/startup.log
echo "========================================" | tee -a /logs/startup.log
# Start cron
crond -f -l 2
networks:
- arr-network
networks:
arr-network:
driver: bridge
volumes:
cleanup-scripts:
cleanup-logs:

View File

@@ -0,0 +1,21 @@
services:
beszel:
container_name: Beszel
image: henrygd/beszel:latest
volumes:
- /volume1/docker/beszel:/beszel_data:rw
ports:
- 8095:8090
restart: on-failure:5
beszel-agent:
image: henrygd/beszel-agent
container_name: Beszel-Agent
network_mode: host
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
LISTEN: 45876
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHcTZEemBy0qPGuNEkV15ZX6T+j87KVHYZNMY/GJK+QD'
FILESYSTEM: /dev/sda1
restart: on-failure:5

52
stacks/gitea.compose.yml Normal file
View File

@@ -0,0 +1,52 @@
services:
db:
image: postgres:17
container_name: Gitea-DB
hostname: gitea-db
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "pg_isready", "-q", "-d", "gitea", "-U", "giteauser"]
timeout: 45s
interval: 10s
retries: 10
volumes:
- /volume1/docker/gitea/db:/var/lib/postgresql/data:rw
environment:
- POSTGRES_DB=gitea
- POSTGRES_USER=giteauser
- POSTGRES_PASSWORD=${GITEA_DB_PASSWORD}
restart: on-failure:5
gitea:
image: gitea/gitea:latest
container_name: Gitea
hostname: gitea
depends_on:
db:
condition: service_healthy
security_opt:
- no-new-privileges:true
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1
interval: 10s
timeout: 5s
retries: 5
ports:
- 3052:3000
- 2222:22
volumes:
- /volume1/docker/gitea/data:/data
- /volume1/docker/gitea/data:/data/git/repositories:rw
- /etc/TZ:/etc/TZ:ro
- /etc/localtime:/etc/localtime:ro
environment:
- USER_UID=1026
- USER_GID=100
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=gitea-db:5432
- GITEA__database__NAME=gitea
- GITEA__database__USER=giteauser
- GITEA__database__PASSWD=${GITEA_DB_PASSWORD}
- ROOT_URL=https://gitea.straymoog.xyz
restart: on-failure:5

18
stacks/lidarr.compose.yml Normal file
View File

@@ -0,0 +1,18 @@
services:
lidarr:
image: ghcr.io/linuxserver/lidarr:latest
container_name: LIDARR
healthcheck:
test: curl -f http://localhost:8686/ || exit 1
restart: on-failure:5
security_opt:
- no-new-privileges:true
volumes:
- /volume2/data:/data:rw
- /volume2/docker/lidarr:/config:rw
environment:
TZ: America/Los_Angeles
PGID: 100
PUID: 1026
ports:
- 8686:8686

12
stacks/minio.compose.yml Normal file
View File

@@ -0,0 +1,12 @@
services:
minio:
image: minio/minio:latest
ports:
- "9100:9000"
- "9101:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-stray}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
volumes:
- /volume1/docker/minio/data:/data
command: server /data --console-address ":9001"

View File

@@ -0,0 +1,148 @@
version: "3.9"
services:
redis:
image: redis:7
command:
- /bin/sh
- -c
- redis-server --requirepass redispass
container_name: PaperlessNGX-REDIS
hostname: paper-redis
security_opt:
- no-new-privileges:true
read_only: true
user: 1026:100
healthcheck:
test: ["CMD-SHELL", "redis-cli ping || exit 1"]
interval: 30s
timeout: 10s
retries: 3
volumes:
- /volume1/docker/paperlessngx/redis:/data:rw
environment:
TZ: America/Los_Angeles
restart: on-failure:5
networks:
- paperless-ngx_default
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
gotenberg:
image: gotenberg/gotenberg:latest
container_name: PaperlessNGX-GOTENBERG
hostname: gotenberg
security_opt:
- no-new-privileges:true
user: 1026:100
command:
- "gotenberg"
- "--chromium-disable-javascript=true"
- "--chromium-allow-list=file:///tmp/.*"
restart: on-failure:5
networks:
- paperless-ngx_default
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 1G
tika:
image: ghcr.io/paperless-ngx/tika:latest
container_name: PaperlessNGX-TIKA
hostname: tika
security_opt:
- no-new-privileges:true
user: 1026:100
restart: on-failure:5
environment:
JAVA_OPTS: "-Xmx2g -Xms1g"
networks:
- paperless-ngx_default
deploy:
resources:
limits:
memory: 3G
reservations:
memory: 1.5G
paperless:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
container_name: PaperlessNGX
hostname: paperless-ngx
security_opt:
- no-new-privileges:true
healthcheck:
test: ["CMD", "curl", "-fs", "-S", "--max-time", "2", "http://localhost:8000"]
interval: 30s
timeout: 10s
retries: 5
ports:
- 8777:8000
volumes:
- /volume1/docker/paperlessngx/data:/usr/src/paperless/data:rw
- /volume1/docker/paperlessngx/media:/usr/src/paperless/media:rw
- /volume1/docker/paperlessngx/export:/usr/src/paperless/export:rw
- /volume1/docker/paperlessngx/consume:/usr/src/paperless/consume:rw
- /volume1/docker/paperlessngx/trash:/usr/src/paperless/trash:rw
environment:
PAPERLESS_REDIS: redis://:redispass@paper-redis:6379
PAPERLESS_DBENGINE: postgresql
PAPERLESS_DBHOST: PgBouncer
PAPERLESS_DBPORT: 6432
PAPERLESS_DBNAME: paperless
PAPERLESS_DBUSER: paperless
PAPERLESS_DBPASS: ${PAPERLESS_DBPASS}
PAPERLESS_EMPTY_TRASH_DIR: ../trash
PAPERLESS_FILENAME_FORMAT: '{created_year}/{correspondent}/{document_type}/{title}'
PAPERLESS_OCR_ROTATE_PAGES_THRESHOLD: 6
PAPERLESS_OCR_PAGES: 0 # Process all pages (current default)
PAPERLESS_OCR_MODE: skip # Skip OCR on already-OCRed docs (current default)
PAPERLESS_TASK_WORKERS: 2
PAPERLESS_THREADS_PER_WORKER: 2 # For OCR parallelization
PAPERLESS_WEBSERVER_WORKERS: 1 # Keep at 1 for 4-core system
USERMAP_UID: 1026
USERMAP_GID: 100
PAPERLESS_TIME_ZONE: America/Los_Angeles
PAPERLESS_ADMIN_USER: stray
PAPERLESS_ADMIN_PASSWORD: ${PAPERLESS_ADMIN_PASSWORD}
PAPERLESS_URL: https://paperlessngx.straymoog.xyz
PAPERLESS_CSRF_TRUSTED_ORIGINS: https://paperlessngx.straymoog.xyz
PAPERLESS_OCR_LANGUAGE: eng
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
PAPERLESS_OCR_USER_ARGS: '{"invalidate_digital_signatures": true}'
PAPERLESS_DB_READ_CACHE_ENABLED: true
PAPERLESS_READ_CACHE_TTL: 3600 # 1 hour cache
PAPERLESS_DB_POOLSIZE: 4
restart: on-failure:5
depends_on:
redis:
condition: service_healthy
tika:
condition: service_started
gotenberg:
condition: service_started
networks:
- paperless-ngx_default
- paperless_shared
deploy:
resources:
limits:
memory: 5G
reservations:
memory: 2.5G
networks:
paperless-ngx_default:
driver: bridge
paperless_shared:
external: true
name: paperless_shared

View File

@@ -0,0 +1,166 @@
version: '3.8'
services:
db:
image: postgres:16
container_name: PostgreSQL
environment:
- POSTGRES_DB=postgres
- POSTGRES_USER=root
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_INITDB_ARGS=--encoding=UTF8 --lc-collate=C --lc-ctype=C
command:
- "postgres"
- "-c"
- "shared_buffers=2GB"
- "-c"
- "effective_cache_size=6GB"
- "-c"
- "maintenance_work_mem=512MB"
- "-c"
- "checkpoint_completion_target=0.9"
- "-c"
- "wal_buffers=16MB"
- "-c"
- "default_statistics_target=100"
- "-c"
- "random_page_cost=1.1"
- "-c"
- "effective_io_concurrency=200"
- "-c"
- "work_mem=32MB"
- "-c"
- "min_wal_size=1GB"
- "-c"
- "max_wal_size=4GB"
- "-c"
- "max_worker_processes=8"
- "-c"
- "max_parallel_workers_per_gather=4"
- "-c"
- "max_parallel_workers=8"
- "-c"
- "max_parallel_maintenance_workers=4"
- "-c"
- "max_connections=100"
- "-c"
- "log_min_duration_statement=1000"
- "-c"
- "log_line_prefix=%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h "
- "-c"
- "log_checkpoints=on"
- "-c"
- "log_connections=on"
- "-c"
- "log_disconnections=on"
- "-c"
- "log_lock_waits=on"
- "-c"
- "log_temp_files=0"
- "-c"
- "autovacuum_max_workers=3"
- "-c"
- "autovacuum_naptime=10s"
- "-c"
- "shared_preload_libraries=pg_stat_statements"
volumes:
- /volume1/docker/postgresql:/var/lib/postgresql/data
networks:
- postgres01_default
- paperless_shared
deploy:
resources:
limits:
memory: 8G
reservations:
memory: 4G
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -q -d postgres -U root"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
pgbouncer:
image: pgbouncer/pgbouncer:latest
container_name: PgBouncer
environment:
- DATABASES_HOST=PostgreSQL
- DATABASES_PORT=5432
- DATABASES_USER=root
- DATABASES_PASSWORD=${POSTGRES_PASSWORD}
- DATABASES_DBNAME=paperless
- POOL_MODE=transaction
- MAX_CLIENT_CONN=100
- DEFAULT_POOL_SIZE=25
- MIN_POOL_SIZE=10
- RESERVE_POOL_SIZE=5
- RESERVE_POOL_TIMEOUT=3
- SERVER_IDLE_TIMEOUT=600
- LOG_CONNECTIONS=1
- LOG_DISCONNECTIONS=1
networks:
- paperless_shared
depends_on:
db:
condition: service_healthy
deploy:
resources:
limits:
memory: 256M
reservations:
memory: 128M
restart: unless-stopped
pgadmin:
image: dpage/pgadmin4:latest
container_name: pgAdmin
environment:
- PGADMIN_DEFAULT_EMAIL=${PGADMIN_EMAIL:-admin@admin.com}
- PGADMIN_DEFAULT_PASSWORD=${PGADMIN_PASSWORD:-admin}
- PGADMIN_CONFIG_SERVER_MODE=True
- PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED=True
volumes:
- /volume1/docker/postgresadmin:/var/lib/pgadmin
ports:
- "2660:80"
networks:
- postgres01_default
depends_on:
db:
condition: service_healthy
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
postgres01_default:
driver: bridge
paperless_shared:
external: true
name: paperless_shared

18
stacks/radarr.compose.yml Normal file
View File

@@ -0,0 +1,18 @@
services:
radarr:
image: ghcr.io/linuxserver/radarr:latest
container_name: RADARR
healthcheck:
test: curl -f http://localhost:7878/ || exit 1
restart: on-failure:5
security_opt:
- no-new-privileges:true
volumes:
- /volume2/data:/data:rw
- /volume2/docker/radarr:/config:rw
environment:
TZ: America/Los_Angeles
PGID: 100
PUID: 1026
ports:
- 7878:7878

View File

@@ -0,0 +1,19 @@
services:
sabnzbd:
image: ghcr.io/linuxserver/sabnzbd:latest
container_name: SABNZBD
healthcheck:
test: curl -f http://localhost:8080/ || exit 1
restart: on-failure:5
security_opt:
- no-new-privileges:true
volumes:
- /volume2/docker/sabnzbd/config:/config:rw
- /volume2/data:/data:rw
environment:
TZ: America/Los_Angeles
PGID: 100
PUID: 1026
ports:
- 8775:8080
- 9093:9090

18
stacks/sonarr.compose.yml Normal file
View File

@@ -0,0 +1,18 @@
services:
sonarr:
image: ghcr.io/linuxserver/sonarr:latest
container_name: SONARR
healthcheck:
test: curl -f http://localhost:8989/ || exit 1
restart: on-failure:5
security_opt:
- no-new-privileges:true
volumes:
- /volume2/data:/data:rw
- /volume2/docker/sonarr:/config:rw
environment:
TZ: America/Los_Angeles
PGID: 100
PUID: 1026
ports:
- 8989:8989