27 Commits

Author SHA1 Message Date
qdev 8193ec801a adding gitea and document caddyfile 2026-05-11 12:12:28 -04:00
qdev 78709ad392 reorg files and add some services 2026-05-09 19:35:08 -04:00
qdev 8c991a4926 Add metube, refactor docker-compose setup + wine 2026-05-09 15:03:46 -04:00
qdev f13c393406 work in progress :/ 2026-05-03 20:53:13 -04:00
qdev e32c5ec76f get rid of old compose IG 2026-05-03 15:19:49 -04:00
qdev 241bd493f3 add navidrome 2026-05-03 15:19:18 -04:00
qdev 967c9cd62b quick test 2026-05-03 15:10:47 -04:00
qdev 6dddda44f6 I'm not having any fun 2026-04-29 12:28:28 -04:00
qdev f6d2efb6d8 cool 2026-04-29 00:01:29 -04:00
qdev ce2ba6da6c cleanup and config work 🤷 2026-04-29 00:00:28 -04:00
qdev 9772d15baa rm autobrr 2026-04-28 23:11:44 -04:00
qdev 32302fae5e add autobrr 2026-04-28 22:44:34 -04:00
qdev f58c878045 fix pubkey setting? 2026-04-27 21:47:48 -04:00
qdev dad6a6901f ig this is how it format 2026-04-27 15:48:11 -04:00
qdev 3deff372b7 re-add dietpi 2026-04-27 15:43:33 -04:00
qdev d9def86b06 stringify output 2026-04-26 22:56:46 -04:00
qdev 565630cfe4 config json thing 2026-04-26 22:53:01 -04:00
qdev eda66b7e69 messing with more just riffin at this point 2026-04-26 21:48:08 -04:00
qdev 0fecb99e8e workin on config stuff and interface things 2026-04-26 20:05:36 -04:00
qdev b89bfa5315 small rms 2026-04-25 18:31:22 -04:00
qdev 85fee78990 workin on my own service/backup thing 2026-04-25 18:25:55 -04:00
qdev 3b5db89dc0 add bun again 2026-04-25 17:34:43 -04:00
qdev 6364eea9c2 fix incorrect transmission download volume path 2026-04-25 14:37:14 -04:00
qdev 3ae70f02d6 add new media drive 2026-04-25 14:09:08 -04:00
qdev b7f549a067 I think fix mounts, untested :3 2026-04-24 19:26:45 -04:00
qdev 759811ab68 home manager? 2026-04-23 10:12:47 -04:00
qdev 0122f2955c - update mounts
- disable navidrome
2026-04-17 15:20:47 -04:00
28 changed files with 805 additions and 300 deletions
+2 -1
View File
@@ -2,6 +2,7 @@
"recommendations": [
"paisleysoftworks.renpywarp",
"luquedaniel.languague-renpy",
"jnoortheen.nix-ide"
"jnoortheen.nix-ide",
"matthewpi.caddyfile-support"
]
}
+63
View File
@@ -0,0 +1,63 @@
jf.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:8096
}
nd.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:4533
}
paperless.soupclown.com {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:8000
}
immich.soupclown.com {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:2283
}
seerr.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:5055
}
soupclown.com {
header /.well-known/matrix/* {
Access-Control-Allow-Origin *
Content-Type application/json
Cache-Control public,max-age=806400
}
# Matrix client-server well-known
handle /.well-known/matrix/client {
respond `{
"m.homeserver": {
"base_url": "https://matrix.soupclown.com"
},
"org.matrix.msc3575.proxy": {
"url": "https://matrix.soupclown.com"
}
}` 200
}
# Matrix server-server well-known
handle /.well-known/matrix/server {
respond `{
"m.server": "matrix.soupclown.com:443"
}` 200
}
# Matrix Support contact information (MSC1929)
handle /.well-known/matrix/support {
respond `{
"contacts": [
{
"matrix_id": "@qmoran:soupclown.com",
"email_address": "",
"role": "m.role.admin"
}
]
}` 200
}
}
matrix.soupclown.com, soupclown.com:8448 {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:6167
}
+34
View File
@@ -0,0 +1,34 @@
# dependencies (bun install)
node_modules
# output
out
dist
*.tgz
# code coverage
coverage
*.lcov
# logs
logs
*.log
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# caches
.eslintcache
.cache
*.tsbuildinfo
# IntelliJ based IDEs
.idea
# Finder (MacOS) folder config
.DS_Store
+34
View File
@@ -0,0 +1,34 @@
{
"lockfileVersion": 1,
"configVersion": 1,
"workspaces": {
"": {
"name": "soupclown",
"dependencies": {
"commander": "^14.0.3",
"zod": "^4.3.6",
},
"devDependencies": {
"@types/bun": "latest",
},
"peerDependencies": {
"typescript": "^5",
},
},
},
"packages": {
"@types/bun": ["@types/bun@1.3.13", "", { "dependencies": { "bun-types": "1.3.13" } }, "sha512-9fqXWk5YIHGGnUau9TEi+qdlTYDAnOj+xLCmSTwXfAIqXr2x4tytJb43E9uCvt09zJURKXwAtkoH4nLQfzeTXw=="],
"@types/node": ["@types/node@25.6.0", "", { "dependencies": { "undici-types": "~7.19.0" } }, "sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ=="],
"bun-types": ["bun-types@1.3.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-QXKeHLlOLqQX9LgYaHJfzdBaV21T63HhFJnvuRCcjZiaUDpbs5ED1MgxbMra71CsryN/1dAoXuJJJwIv/2drVA=="],
"commander": ["commander@14.0.3", "", {}, "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw=="],
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
"undici-types": ["undici-types@7.19.2", "", {}, "sha512-qYVnV5OEm2AW8cJMCpdV20CDyaN3g0AjDlOGf1OW4iaDEx8MwdtChUp4zu4H0VP3nDRF/8RKWH+IPp9uW0YGZg=="],
"zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="],
}
}
+3
View File
@@ -0,0 +1,3 @@
import { RUN_CLI } from "./src/interface/cli";
// Entry point: dispatch to the soupclown CLI. Fire-and-forget — RUN_CLI
// owns argument parsing and command routing.
RUN_CLI()
+16
View File
@@ -0,0 +1,16 @@
{
"name": "soupclown",
"module": "index.ts",
"type": "module",
"private": true,
"devDependencies": {
"@types/bun": "latest"
},
"peerDependencies": {
"typescript": "^5"
},
"dependencies": {
"commander": "^14.0.3",
"zod": "^4.3.6"
}
}
+30
View File
@@ -0,0 +1,30 @@
import { Command } from "commander";
import { HOST } from "./host";
/**
 * Entry point for the `soupclown` CLI: builds the commander program and
 * dispatches the requested `host <action>` command.
 *
 * Now async: the `host` action handler is async, and commander's `parse()`
 * does not await async handlers — their rejections would surface as
 * unhandled promise rejections. `parseAsync()` is the documented way to
 * run async actions to completion.
 */
export async function RUN_CLI(){
  const c = new Command();
  c
    .name('soupclown')
    .description('Soupclown system')
  c
    .command('host <action>')
    .action(async (action, opts) => {
      console.log(opts)
      switch(action){
        case 'init':
          console.log('init action!')
          console.log(`HOSTNAME: ${await HOST.getHostname()}`)
          await HOST.initSystem()
          break;
        default:
          console.log('fool!')
          break;
      }
    })
  await c.parseAsync();
}
+75
View File
@@ -0,0 +1,75 @@
import z from "zod";
import { HOST } from "./host";
const DEFAULT_CONFIG_PATH = "../../state.soupclown.json";
const hostConfigSchema = z.object({
configurationPath: z.string(),
services: z.array(z.object({
name: z.string(),
desiredState: z.enum(['up', 'down']),
})),
})
export type HOST_CONFIG_SCHEMA_T = z.infer<typeof hostConfigSchema>;
const CONFIG_SCHEMA = z.object({
v: z.literal('v1'),
data: z.map(z.string(), hostConfigSchema)
});
export type CONFIG_SCHEMA_T = z.infer<typeof CONFIG_SCHEMA>
export class SC_CONFIG_C {
constructor(
protected runningConfig = {
v: 'v1',
data: new Map([
]),
} as CONFIG_SCHEMA_T,
){}
public static async loadConfigFile(path = DEFAULT_CONFIG_PATH){
const configFile = Bun.file(path);
let fileJsonData: any = null;
try{
fileJsonData = await configFile.json();
}catch(e){
throw "FAILED_SCHEMA_JSON_PARSE"
}
const configParseJsonResult = CONFIG_SCHEMA.safeParse(fileJsonData);
if(!configParseJsonResult.success){
throw "FAILED_CONFIG_SCHEMA_PARSE"
}
return configParseJsonResult.data;
}
public static async init(path = DEFAULT_CONFIG_PATH){
const newConfig = new SC_CONFIG_C();
await newConfig._loadConfigFile(path);
await newConfig._writeConfigFile(path);
return newConfig;
}
private async _loadConfigFile(path = DEFAULT_CONFIG_PATH){
try{
this.runningConfig = await SC_CONFIG_C.loadConfigFile(path);
const hostconfig = this.runningConfig.data.get(await HOST.getHostname());
if(hostconfig){
this.configurationPath = hostconfig.configurationPath;
this.services = hostconfig.services;
}
}catch{
console.error('Failed to load config, assuming you know what you\'re doing');
}
}
private async _writeConfigFile(path = DEFAULT_CONFIG_PATH){
await Bun.write(path, JSON.stringify(this.runningConfig, null, 2));
}
}
export const SC_CONFIG = await SC_CONFIG_C.init();
+39
View File
@@ -0,0 +1,39 @@
import { $ } from "bun";
import z, { parse } from "zod";
const CLI_ContainerInfoSchema = z.object({
ID: z.string(),
Image: z.string(),
CreatedAt: z.string(),
State: z.string(),
Names: z.string(),
})
type CLI_ContainerInfo = z.infer<typeof CLI_ContainerInfoSchema>;
const SC_ContainerInfo = z.object({
id: z.string(),
name: z.string(),
image: z.string(),
state: z.unknown(),
created: z.date(),
})
type SC_ContainerInfoT = z.infer<typeof SC_ContainerInfo>;
// List all containers (`docker ps -a`), parsing docker's JSON-lines output
// into validated CLI_ContainerInfo records.
async function PS (){
  const psOutput = $`docker ps -a --format json`;
  const containers: CLI_ContainerInfo[] = [];
  for await (const rawLine of psOutput.lines()){
    // docker emits one JSON object per line; skip blank/degenerate lines
    // shorter than the minimal object "{}".
    if(rawLine.length < 2){
      continue;
    }
    containers.push(CLI_ContainerInfoSchema.parse(JSON.parse(rawLine)));
  }
  return containers;
}
export const SC_DOCKER = {
PS,
}
+16
View File
@@ -0,0 +1,16 @@
import { $ } from "bun";
import { SC_CONFIG } from "./config";
// Return this machine's hostname as reported by the `hostname` binary,
// with surrounding whitespace stripped.
async function getHostname(){
  const raw = await $`hostname`.quiet().text();
  return raw.trim();
}

// Prepare the host for soupclown services. Currently a no-op; creating
// SC_CONFIG.configurationPath is still pending.
async function initSystem(){
  // await $`mkdir -p ${SC_CONFIG.configurationPath}`
}

// Public host-management surface used by the CLI.
export const HOST = {
  getHostname,
  initSystem,
}
+29
View File
@@ -0,0 +1,29 @@
{
"compilerOptions": {
// Environment setup & latest features
"lib": ["ESNext"],
"target": "ESNext",
"module": "Preserve",
"moduleDetection": "force",
"jsx": "react-jsx",
"allowJs": true,
// Bundler mode
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"noEmit": true,
// Best practices
"strict": true,
"skipLibCheck": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"noImplicitOverride": true,
// Some stricter flags (disabled by default)
"noUnusedLocals": false,
"noUnusedParameters": false,
"noPropertyAccessFromIndexSignature": false
}
}
+106
View File
@@ -0,0 +1,106 @@
AUTO_SETUP_GLOBAL_PASSWORD=someGenericPassword
AUTO_SETUP_NET_HOSTNAME=pt1
AUTO_SETUP_HEADLESS=1
AUTO_SETUP_AUTOMATED=1
AUTO_SETUP_INSTALL_SOFTWARE_ID=137 152
AUTO_SETUP_SSH_PUBKEY=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAkhSg+CLjIYSZ+lTNkChYAP7uxpPrl1TvVPwCfYgSoa
AUTO_SETUP_SSH_PUBKEY=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINhzgTeNWuHeznJJlRXRysKA3msMnUvALMkADOIJyGwb
AUTO_SETUP_LOCALE=C.UTF-8
AUTO_SETUP_KEYBOARD_LAYOUT=us
AUTO_SETUP_TIMEZONE=America/New_York
AUTO_SETUP_NET_ETHERNET_ENABLED=1
AUTO_SETUP_NET_WIFI_ENABLED=1
AUTO_SETUP_NET_WIFI_COUNTRY_CODE=US
AUTO_SETUP_NET_USESTATIC=0
AUTO_SETUP_NET_STATIC_IP=192.168.0.100
AUTO_SETUP_NET_STATIC_MASK=255.255.255.0
AUTO_SETUP_NET_STATIC_GATEWAY=192.168.0.1
AUTO_SETUP_NET_STATIC_DNS=9.9.9.9 149.112.112.112
AUTO_SETUP_DHCP_TO_STATIC=0
AUTO_SETUP_NET_ETH_FORCE_SPEED=0
AUTO_SETUP_BOOT_WAIT_FOR_NETWORK=1
AUTO_SETUP_SWAPFILE_SIZE=1
AUTO_SETUP_SWAPFILE_LOCATION=/var/swap
CONFIG_SERIAL_CONSOLE_ENABLE=0
AUTO_UNMASK_LOGIND=0
AUTO_SETUP_CUSTOM_SCRIPT_EXEC=0
AUTO_SETUP_BACKUP_RESTORE=0
AUTO_SETUP_SSH_SERVER_INDEX=-2
AUTO_SETUP_DESKTOP=none
AUTO_SETUP_LOGGING_INDEX=-3
AUTO_SETUP_RAMLOG_MAXSIZE=50
AUTO_SETUP_WEB_SERVER_INDEX=0
AUTO_SETUP_BROWSER_INDEX=0
AUTO_SETUP_AUTOSTART_TARGET_INDEX=0
AUTO_SETUP_AUTOSTART_LOGIN_USER=root
SURVEY_OPTED_IN=0
CONFIG_CPU_GOVERNOR=schedutil
CONFIG_CPU_ONDEMAND_SAMPLE_RATE=25000
CONFIG_CPU_ONDEMAND_SAMPLE_DOWNFACTOR=40
CONFIG_CPU_USAGE_THROTTLE_UP=50
CONFIG_CPU_TEMP_PATH=auto
CONFIG_CPU_MAX_FREQ=Disabled
CONFIG_CPU_MIN_FREQ=Disabled
CONFIG_CPU_DISABLE_TURBO=0
CONFIG_GPU_DRIVER=none
# System-wide proxy settings
# - Do not modify, you must use dietpi-config > "Network Options: Adapters" to apply
CONFIG_PROXY_ADDRESS=MyProxyServer.com
CONFIG_PROXY_PORT=8080
CONFIG_PROXY_USERNAME=
CONFIG_PROXY_PASSWORD=
CONFIG_G_CHECK_URL_TIMEOUT=10
CONFIG_G_CHECK_URL_ATTEMPTS=2
CONFIG_CHECK_CONNECTION_IP=9.9.9.9
CONFIG_CHECK_CONNECTION_IPV6=2620:fe::fe
CONFIG_CHECK_DNS_DOMAIN=dietpi.com
CONFIG_CHECK_DIETPI_UPDATES=1
CONFIG_CHECK_APT_UPDATES=2
CONFIG_NTP_MODE=4
CONFIG_SOUNDCARD=none
CONFIG_LCDPANEL=none
CONFIG_ENABLE_IPV6=0
CONFIG_APT_RASPBIAN_MIRROR=http://raspbian.raspberrypi.com/raspbian
CONFIG_APT_DEBIAN_MIRROR=https://deb.debian.org/debian
CONFIG_NTP_MIRROR=default
SOFTWARE_DISABLE_SSH_PASSWORD_LOGINS=1
SOFTWARE_WIREGUARD_MODE=Server
SOFTWARE_WIREGUARD_PORT=51820
SOFTWARE_VNCSERVER_WIDTH=1280
SOFTWARE_VNCSERVER_HEIGHT=720
SOFTWARE_VNCSERVER_DEPTH=16
SOFTWARE_VNCSERVER_DISPLAY_INDEX=1
SOFTWARE_VNCSERVER_SHARE_DESKTOP=0
SOFTWARE_NEXTCLOUD_USERNAME=admin
SOFTWARE_NEXTCLOUD_DATADIR=/mnt/dietpi_userdata/nextcloud_data
SOFTWARE_SYNAPSE_USERNAME=dietpi
SOFTWARE_WIFI_HOTSPOT_SSID=DietPi-HotSpot
SOFTWARE_WIFI_HOTSPOT_KEY=dietpihotspot
SOFTWARE_WIFI_HOTSPOT_CHANNEL=3
SOFTWARE_WIFI_HOTSPOT_WIFI4=0
SOFTWARE_WIFI_HOTSPOT_WIFI5=0
SOFTWARE_WIFI_HOTSPOT_WIFI6=0
SOFTWARE_WIFI_HOTSPOT_5G=0
SOFTWARE_WIFI_HOTSPOT_5G_CHANNEL=36
SOFTWARE_XORG_DPI=96
SOFTWARE_CHROMIUM_RES_X=1280
SOFTWARE_CHROMIUM_RES_Y=720
SOFTWARE_CHROMIUM_AUTOSTART_URL=https://dietpi.com/
SOFTWARE_HOMEASSISTANT_APT_DEPS=
SOFTWARE_HOMEASSISTANT_PIP_DEPS=
SOFTWARE_K3S_EXEC=
SOFTWARE_DIETPI_DASHBOARD_VERSION=Stable
SOFTWARE_DIETPI_DASHBOARD_BACKEND=0
SOFTWARE_ROONSERVER_EARLYACCESS=0
DEV_GITBRANCH=master
DEV_GITOWNER=MichaIng
# UrBackup Server
# - Backup path, optional, defaults to "/mnt/dietpi_userdata/urbackup", effective on fresh UrBackup Server installs only
SOFTWARE_URBACKUP_BACKUPPATH=/mnt/dietpi_userdata/urbackup
+1
View File
@@ -0,0 +1 @@
/etc/.soupclown.env
+1
View File
@@ -0,0 +1 @@
!.env
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
"navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml down
done
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
"navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml pull
done
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
"navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml up -d
done
+1
View File
@@ -0,0 +1 @@
{}
+315
View File
@@ -0,0 +1,315 @@
services:
### START DOWNLOAD
vpn:
container_name: vpn
profiles:
- download
image: qmcgaw/gluetun:v3.40
cap_add:
- NET_ADMIN # Quin does not like these
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- ${PATH_ETC}/gluetun:/gluetun
environment:
- VPN_SERVICE_PROVIDER=private internet access
- SERVER_REGIONS=Netherlands
- OPENVPN_USER=${PIA_USERNAME}
- OPENVPN_PASSWORD=${PIA_PASSWORD}
- UPDATER_PERIOD=24h
ports:
- 9091:9091 # Transmission
- 51413:51413 # Transmission
- 51413:51413/udp # Transmission
- 9696:9696 # Prowlarr
- 7878:7878 # Radarr
- 8989:8989 # Sonarr
- 8686:8686 # Lidarr
- 3333:3333 # bitmagnet API and WebUI port
- 3334:3334/tcp # bitmagnet BitTorrent ports
- 3334:3334/udp # bitmagnet BitTorrent ports
# - 5055:5055 # seerr
# - 8112:8112 # Deluge
# - 6881:6881 # Deluge
# - 6881:6881/udp # Deluge
# - 58846:58846 # idk Deluge
- 7474:7474 # Autobrr
restart: unless-stopped
transmission:
container_name: transmission
profiles:
- download
image: lscr.io/linuxserver/transmission:latest
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${PATH_ETC}/transmission:/config
- /storage/transmission/downloads:/downloads
restart: unless-stopped
prowlarr:
container_name: prowlarr
profiles:
- download
image: lscr.io/linuxserver/prowlarr:latest
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${PATH_ETC}/prowlarr:/config
depends_on:
vpn:
condition: service_started
restart: true
required: true
restart: unless-stopped
radarr:
container_name: radarr
profiles:
- download
image: lscr.io/linuxserver/radarr:latest
network_mode: "service:vpn"
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${PATH_ETC}/radarr:/config
- /storage/media:/media # Access to media library
- /storage/transmission/downloads:/downloads # Access to transmission downloads
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
sonarr:
container_name: sonarr
profiles:
- download
image: lscr.io/linuxserver/sonarr:latest
network_mode: "service:vpn"
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${PATH_ETC}/sonarr:/config
- /storage/media:/media # Access to media library
- /storage/transmission/downloads:/downloads # Access to transmission downloads
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
lidarr:
container_name: lidarr
profiles:
- download
image: lscr.io/linuxserver/lidarr:latest
network_mode: "service:vpn"
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${PATH_ETC}/lidarr:/config
- /storage/media:/media # Access to media library
- /storage/transmission/downloads:/downloads # Access to transmission downloads
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
bitmagnet:
container_name: bitmagnet
profiles:
- download
image: ghcr.io/bitmagnet-io/bitmagnet:latest
restart: unless-stopped
network_mode: "service:vpn"
environment:
- POSTGRES_PASSWORD=${BITMAGENT_DB_PASS}
- TMDB_API_KEY=${TMDB_API_KEY}
volumes:
- ${PATH_ETC}/bitmagnet:/root/.config/bitmagnet
command:
- worker
- run
- --keys=http_server
- --keys=queue_server
# disable the next line to run without DHT crawler
- --keys=dht_crawler
depends_on:
postgres:
condition: service_healthy
postgres:
container_name: bitmagnet-postgres
profiles:
- download
image: postgres:16-alpine
network_mode: "service:vpn"
volumes:
- ${PATH_ETC}/bitmagnet-postgres:/var/lib/postgresql/data
restart: unless-stopped
environment:
- POSTGRES_PASSWORD=${BITMAGENT_DB_PASS}
- POSTGRES_DB=bitmagnet
- PGUSER=postgres
shm_size: 1g
healthcheck:
test:
- CMD-SHELL
- pg_isready
start_period: 20s
interval: 10s
seerr:
container_name: seerr
profiles:
- download
image: ghcr.io/seerr-team/seerr:latest
init: true
ports:
- 5055:5055
environment:
- TZ=${TZ}
- LOG_LEVEL=debug
volumes:
- ${PATH_ETC}/seer:/app/config
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
start_period: 20s
timeout: 3s
interval: 15s
retries: 3
restart: unless-stopped
lidatube:
container_name: lidatube
profiles:
- download
image: thewicklowwolf/lidatube:latest
volumes:
- ${PATH_ETC}/lidatube:/lidatube/config
- /storage/lidatube:/lidatube/downloads
- /etc/localtime:/etc/localtime:ro
environment:
- attempt_lidarr_import=True
ports:
- 5000:5000
restart: unless-stopped
aurral:
container_name: aurral
profiles:
- download
image: ghcr.io/lklynet/aurral:latest
ports:
- 3001:3001
environment:
- DOWNLOAD_FOLDER=${DL_FOLDER:-./data/downloads}
volumes:
- ${PATH_ETC}/aurral:/app/backend/data
- /storage/aurral:/app/downloads
restart: unless-stopped
### END DOWNLOAD
### START APP
navidrome:
profiles:
- app
container_name: navidrome
image: deluan/navidrome:latest
user: 1000:1000
ports:
- "4533:4533"
volumes:
- ${PATH_ETC}/navidrome:/data
- /storage/media/Music:/music
restart: unless-stopped
metube:
profiles:
- app
image: ghcr.io/alexta69/metube
container_name: metube
ports:
- "8081:8081"
volumes:
- ./bin/metube-options.json:/config/metube-options.json # Use config from repo :)
- /storage/metube/downloads:/downloads
environment:
- YTDL_OPTIONS_FILE=/config/metube-options.json
restart: unless-stopped
gitea:
profiles:
- app
container_name: gitea
image: docker.gitea.com/gitea:1.26.1
ports:
- 3000:3000
- 222:22
volumes:
- ${PATH_ETC}/gitea-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=postgres
- GITEA__database__NAME=gitea
- GITEA__database__USER=${GITEA_DB_USERNAME}
- GITEA__database__PASSWD=${GITEA_DB_PASSWORD}
- GITEA__database__HOST=giteadb:5432
restart: unless-stopped
depends_on:
- giteadb
giteadb:
profiles:
- app
container_name: gitea-postgres
image: docker.io/library/postgres:14
environment:
- POSTGRES_USER=${GITEA_DB_USERNAME}
- POSTGRES_PASSWORD=${GITEA_DB_PASSWORD}
- POSTGRES_DB=gitea
volumes:
- ${PATH_ETC}/gitea-postgres:/var/lib/postgresql/data
healthcheck:
test:
- CMD-SHELL
- pg_isready
start_period: 20s
interval: 10s
restart: unless-stopped
### END APP
-167
View File
@@ -1,167 +0,0 @@
services:
vpn:
container_name: vpn
image: qmcgaw/gluetun:v3.40
cap_add:
- NET_ADMIN # Quin does not like these
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- ${DOWNLOAD_ETC_PATH}/gluetun:/gluetun
environment:
- VPN_SERVICE_PROVIDER=private internet access
- SERVER_REGIONS=Netherlands
- OPENVPN_USER=${PIA_USERNAME}
- OPENVPN_PASSWORD=${PIA_PASSWORD}
- UPDATER_PERIOD=24h
ports:
- 9091:9091 # Transmission
- 51413:51413 # Transmission
- 51413:51413/udp # Transmission
- 9696:9696 # Prowlarr
- 7878:7878 # Radarr
- 8989:8989 # Sonarr
- 3333:3333 # bitmagnet API and WebUI port
- 3334:3334/tcp # bitmagnet BitTorrent ports
- 3334:3334/udp # bitmagnet BitTorrent ports
# - 5055:5055 # seerr
# - 8112:8112 # Deluge
# - 6881:6881 # Deluge
# - 6881:6881/udp # Deluge
# - 58846:58846 # idk Deluge
restart: unless-stopped
transmission:
image: lscr.io/linuxserver/transmission:latest
container_name: transmission
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${DOWNLOAD_ETC_PATH}/transmission:/config
- ${DOWNLOAD_IN_PROGRESS_PATH}:/downloads
restart: unless-stopped
prowlarr:
container_name: prowlarr
image: lscr.io/linuxserver/prowlarr:latest
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${DOWNLOAD_ETC_PATH}/prowlarr:/config
depends_on:
vpn:
condition: service_started
restart: true
required: true
restart: unless-stopped
radarr:
image: lscr.io/linuxserver/radarr:latest
network_mode: "service:vpn"
container_name: radarr
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${DOWNLOAD_ETC_PATH}/radarr:/config
- /mnt/tmpMedia/app_data/transmission/downloads/incomplete:/downloads/incomplete #optional
- /mnt/tmpMedia/app_data/jellyfin/media:/soupclown/localmedia
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
sonarr:
image: lscr.io/linuxserver/sonarr:latest
network_mode: "service:vpn"
container_name: sonarr
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${DOWNLOAD_ETC_PATH}/sonarr:/config
- /mnt/tmpMedia/app_data/jellyfin/media:/soupclown/localmedia
- /mnt/tmpMedia/app_data/transmission/downloads/incomplete:/downloads/incomplete #optional
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
bitmagnet:
image: ghcr.io/bitmagnet-io/bitmagnet:latest
container_name: bitmagnet
restart: unless-stopped
network_mode: "service:vpn"
environment:
- POSTGRES_PASSWORD=${DOWNLOAD_BITMAGENT_DB_PASS}
- TMDB_API_KEY=${TMDB_API_KEY}
volumes:
- ${SOUPCLOWN_ETC_PATH}/downloadin/bitmagnet/etc:/root/.config/bitmagnet
command:
- worker
- run
- --keys=http_server
- --keys=queue_server
# disable the next line to run without DHT crawler
- --keys=dht_crawler
depends_on:
postgres:
condition: service_healthy
postgres:
image: postgres:16-alpine
container_name: bitmagnet-postgres
network_mode: "service:vpn"
volumes:
- ${SOUPCLOWN_ETC_PATH}/downloadin/bitmagnet/postgresdata:/var/lib/postgresql/data
restart: unless-stopped
environment:
- POSTGRES_PASSWORD=${DOWNLOAD_BITMAGENT_DB_PASS}
- POSTGRES_DB=bitmagnet
- PGUSER=postgres
shm_size: 1g
healthcheck:
test:
- CMD-SHELL
- pg_isready
start_period: 20s
interval: 10s
seerr:
image: ghcr.io/seerr-team/seerr:latest
init: true
container_name: seerr
restart: unless-stopped
ports:
- 5055:5055
environment:
- TZ=${TZ}
- LOG_LEVEL=debug
volumes:
- ${SOUPCLOWN_ETC_PATH}/seer:/app/config
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
start_period: 20s
timeout: 3s
interval: 15s
retries: 3
-75
View File
@@ -1,75 +0,0 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich-server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of IMMICH_UPLOAD_LOCATION in the .env file
- ${IMMICH_UPLOAD_LOCATION}:/data
- /mnt/mega/immich/old-album:/old-album
- /etc/localtime:/etc/localtime:ro
env_file:
- /etc/.soupclown.env
ports:
- "2283:2283"
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich-machine-learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- /etc/.soupclown.env
restart: always
healthcheck:
disable: false
redis:
container_name: immich-redis
image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich-postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${IMMICH_DB_PASSWORD}
POSTGRES_USER: ${IMMICH_DB_USERNAME}
POSTGRES_DB: ${IMMICH_DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: "--data-checksums"
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of IMMICH_DB_DATA_LOCATION in the .env file
- ${IMMICH_DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:
-11
View File
@@ -1,11 +0,0 @@
services:
navidrome:
container_name: navidrome
image: deluan/navidrome:latest
user: 1000:1000
ports:
- "4533:4533"
volumes:
- /root/navidrome:/data
- /mnt/mega/beetsMusic:/music
restart: unless-stopped
Generated
+3 -4
View File
@@ -25,16 +25,15 @@
]
},
"locked": {
"lastModified": 1716729631,
"narHash": "sha256-IerjU5GUeKc0eW9FPOdlPveSGJ2ZrO+lIfuHPUmUF2I=",
"lastModified": 1776950293,
"narHash": "sha256-t6KMARLILjPuTBSRoYanUxV+FU50IFZ7L5XVdOcdtaY=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "fc4492181833eaaa7a26a8081c0615d95792d825",
"rev": "6837e0d6c5eda81fd26308489799fbf83a160465",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-23.05",
"repo": "home-manager",
"type": "github"
}
+14 -4
View File
@@ -8,13 +8,23 @@
import-tree.url = "github:vic/import-tree";
home-manager = {
url = "github:nix-community/home-manager/release-23.05";
url = "github:nix-community/home-manager";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = inputs@{ flake-parts, nixos-hardware, import-tree, nixpkgs, ... }:
flake-parts.lib.mkFlake { inherit inputs; } (top@{ config, withSystem, moduleWithSystem, ... }: {
outputs = inputs@{
flake-parts,
nixos-hardware,
import-tree,
home-manager,
nixpkgs,
...
}:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.home-manager.flakeModules.home-manager
];
flake = {
nixosConfigurations.qmoran-laptop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
@@ -36,7 +46,7 @@
systems = [
"x86_64-linux"
];
});
};
}
+1
View File
@@ -38,5 +38,6 @@
git
tailscale
rclone
mergerfs
];
}
+3
View File
@@ -35,6 +35,9 @@
screen
jellyfin-desktop
renpy
bun
wine
beets
];
};
+15 -2
View File
@@ -30,13 +30,26 @@
options = [ "fmask=0077" "dmask=0077" ];
};
fileSystems."/mnt/tmpMedia" =
fileSystems."/mnt/disks/disk1" =
{
device = "/dev/disk/by-uuid/fa20e116-e04e-4f3e-bf5a-c2e2c1fad610";
fsType = "ext4";
options = ["noatime" "nodiratime" "noauto"];
options = ["noatime" "nodiratime"];
};
fileSystems."/mnt/disks/disk2" =
{
device = "/dev/disk/by-uuid/887500d5-1d4d-4080-84e3-5ef424c9f310";
fsType = "ext4";
options = ["noatime" "nodiratime"];
};
fileSystems."/storage" = {
fsType = "fuse.mergerfs";
device = "/mnt/disks/*";
options = ["cache.files=partial" "dropcacheonclose=true" "category.create=mfs"];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/cf4cff49-15d7-4145-86c2-8be30e71fe4c"; }
];
+4
View File
@@ -0,0 +1,4 @@
{
"v": "v1",
"data": {}
}