49 Commits

Author SHA1 Message Date
qdev 8193ec801a adding gitea and document caddyfile 2026-05-11 12:12:28 -04:00
qdev 78709ad392 reorg files and add some services 2026-05-09 19:35:08 -04:00
qdev 8c991a4926 Add metube, refactor docker-compose setup + wine 2026-05-09 15:03:46 -04:00
qdev f13c393406 work in progress :/ 2026-05-03 20:53:13 -04:00
qdev e32c5ec76f get rid of old compose IG 2026-05-03 15:19:49 -04:00
qdev 241bd493f3 add navidrome 2026-05-03 15:19:18 -04:00
qdev 967c9cd62b quick test 2026-05-03 15:10:47 -04:00
qdev 6dddda44f6 I'm not having any fun 2026-04-29 12:28:28 -04:00
qdev f6d2efb6d8 cool 2026-04-29 00:01:29 -04:00
qdev ce2ba6da6c cleanup and config work 🤷 2026-04-29 00:00:28 -04:00
qdev 9772d15baa rm autobrr 2026-04-28 23:11:44 -04:00
qdev 32302fae5e add autobrr 2026-04-28 22:44:34 -04:00
qdev f58c878045 fix pubkey setting? 2026-04-27 21:47:48 -04:00
qdev dad6a6901f ig this is how it format 2026-04-27 15:48:11 -04:00
qdev 3deff372b7 re-add dietpi 2026-04-27 15:43:33 -04:00
qdev d9def86b06 stringify output 2026-04-26 22:56:46 -04:00
qdev 565630cfe4 config json thing 2026-04-26 22:53:01 -04:00
qdev eda66b7e69 messing with more just riffin at this point 2026-04-26 21:48:08 -04:00
qdev 0fecb99e8e workin on config stuff and interface things 2026-04-26 20:05:36 -04:00
qdev b89bfa5315 small rms 2026-04-25 18:31:22 -04:00
qdev 85fee78990 workin on my own service/backup thing 2026-04-25 18:25:55 -04:00
qdev 3b5db89dc0 add bun again 2026-04-25 17:34:43 -04:00
qdev 6364eea9c2 fix incorrect transmission download volume path 2026-04-25 14:37:14 -04:00
qdev 3ae70f02d6 add new media drive 2026-04-25 14:09:08 -04:00
qdev b7f549a067 I think fix mounts, untested :3 2026-04-24 19:26:45 -04:00
qdev 759811ab68 home manager? 2026-04-23 10:12:47 -04:00
qdev 0122f2955c - update mounts
- disable navidrome
2026-04-17 15:20:47 -04:00
qdev c4a7aec364 add packages, renpy, jellyfin-desktop, rm script 2026-04-13 13:18:51 -04:00
qdev cb984df47c moving media local :( 2026-04-12 19:04:01 -04:00
qdev d013ac354d remove transfers config 2026-04-11 16:45:51 -04:00
qdev 584b303142 update torrent upload script 2026-04-11 14:51:48 -04:00
qdev 32338c14ec upload downloadin file mapping 2026-04-11 14:51:37 -04:00
qdev a13cfe0b31 fix invalid config for mount and add screen package for quin 2026-04-11 11:44:41 -04:00
qdev 614a90c4c4 add move for sonarr folder too 2026-04-11 11:41:37 -04:00
qdev 7e04f6a936 update filesystem mount(?) 2026-04-11 11:35:08 -04:00
qdev a801685f55 add upload script for complete torrents 2026-04-11 11:03:12 -04:00
qdev 0735605395 re up navidrome 2026-04-07 19:00:56 -04:00
qdev ec27991b23 take seerr outta vpn and add nomadnet to nix 2026-04-07 18:59:44 -04:00
qdev 5fc2d21405 got rnsd and nomadnet working on dietpi with ansible, now can ez build :3 2026-04-07 00:24:59 -04:00
qdev 9d08cd4b85 some work on configuring rns on dietpi 2026-04-06 23:13:19 -04:00
qdev f3d39b04d2 meh 2026-04-05 22:01:33 -04:00
qdev 1e9b31f67d Merge branch 'tryingFlakeParts' 2026-04-05 22:00:43 -04:00
qdev 877677d79f import tree is cool 2026-04-05 22:00:18 -04:00
qdev 5d63a6463d fix import thing 2026-04-05 20:58:38 -04:00
qdev 7e4255bf76 remove unneeded thing, it already does that! 2026-04-05 19:33:34 -04:00
qdev d29e73e6aa so far nothing work :( 2026-04-05 19:31:46 -04:00
qdev 56e2a38f82 organize 2026-04-05 16:13:37 -04:00
qdev 87a14e9db1 add codium 2026-04-04 15:21:14 -04:00
qdev 3568382402 add disk to server 2026-04-04 12:55:47 -04:00
62 changed files with 1020 additions and 1009 deletions
+8
View File
@@ -0,0 +1,8 @@
{
"recommendations": [
"paisleysoftworks.renpywarp",
"luquedaniel.languague-renpy",
"jnoortheen.nix-ide",
"matthewpi.caddyfile-support"
]
}
+63
View File
@@ -0,0 +1,63 @@
# Public reverse-proxy config; every upstream is a Tailscale MagicDNS host.
# NOTE(review): some vhosts target qmoran-desktop-1 and others qmoran-desktop —
# confirm both upstream machines are intended and reachable.
# Jellyfin (8096 is its default HTTP port)
jf.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:8096
}
# Navidrome (compose maps it on 4533)
nd.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:4533
}
paperless.soupclown.com {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:8000
}
immich.soupclown.com {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:2283
}
seerr.soupclown.com {
reverse_proxy qmoran-desktop-1.tailc7e587.ts.net:5055
}
# Apex site: serves the Matrix well-known discovery documents with open CORS.
soupclown.com {
header /.well-known/matrix/* {
Access-Control-Allow-Origin *
Content-Type application/json
# NOTE(review): 806400 s is ~9.3 days — confirm 604800 (one week) wasn't meant.
Cache-Control public,max-age=806400
}
# Matrix client-server well-known
handle /.well-known/matrix/client {
respond `{
"m.homeserver": {
"base_url": "https://matrix.soupclown.com"
},
"org.matrix.msc3575.proxy": {
"url": "https://matrix.soupclown.com"
}
}` 200
}
# Matrix server-server well-known
handle /.well-known/matrix/server {
respond `{
"m.server": "matrix.soupclown.com:443"
}` 200
}
# Matrix Support contact information (MSC1929)
handle /.well-known/matrix/support {
respond `{
"contacts": [
{
"matrix_id": "@qmoran:soupclown.com",
"email_address": "",
"role": "m.role.admin"
}
]
}` 200
}
}
# Matrix homeserver upstream on :6167, exposed on the matrix subdomain and on
# the apex's :8448 federation port (matches the well-known m.server answer).
matrix.soupclown.com, soupclown.com:8448 {
reverse_proxy qmoran-desktop.tailc7e587.ts.net:6167
}
+5
View File
@@ -0,0 +1,5 @@
# Ansible static inventory.
[soupclown1]
# Public VPS (the Caddy reverse-proxy host), accessed as root.
root@187.77.193.76
[pt1]
# DietPi LoRa/Reticulum node on the LAN.
dietpi@192.168.68.54
+27
View File
@@ -0,0 +1,27 @@
---
# Playbook for the "pt1" DietPi host driving several RNode LoRa radios.
# The `radios` var is consumed by the dietpi role's rns config template:
# one RNodeInterface stanza is rendered per entry.
- hosts: pt1
  become: true # canonical boolean instead of YAML-1.1 truthy `yes`
  roles:
    - dietpi
  vars:
    radios:
      B407:
        name: B407
        serialpath: /dev/serial/by-id/usb-1a86_USB_Single_Serial_54FC052298-if00
      # Quoted: an unquoted all-digit key/value would be parsed as an integer.
      "5678":
        name: "5678"
        serialpath: /dev/serial/by-id/usb-1a86_USB_Single_Serial_54F7017826-if00
      D77E:
        name: D77E
        serialpath: /dev/serial/by-id/usb-1a86_USB_Single_Serial_54F7017033-if00
      XXXX:
        # Quoted on purpose: the rns template writes this string verbatim into
        # the Reticulum config (`enabled = no`); a bare `no` would become false.
        enabled: "no"
        name: XXXX
        serialpath: /dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0
      # C123:
      #   name: C123
      #   serialpath: /dev/ttyACM1
      #   frequency: 915000000
      #   bandwidth: 125000
      #   txpower: 14
+8
View File
@@ -0,0 +1,8 @@
---
# Role entry point: Python tooling first, then the Reticulum stack (RNS),
# then the nomadnet daemon on top of it.
- name: Install pyenv
  import_tasks: pyenv.yaml

- name: Install Reticulum Network Stack (RNS)
  import_tasks: rns.yaml

- name: Install nomadnet (lxmf message daemon)
  import_tasks: nomadnet.yaml
+28
View File
@@ -0,0 +1,28 @@
---
# Deploy and start a systemd unit for the nomadnet daemon.
- name: Create systemd service for nomadnet
  copy:
    dest: /etc/systemd/system/nomadnet.service
    content: |
      [Unit]
      Description=Nomadnet Daemon
      After=multi-user.target

      [Service]
      Type=simple
      Restart=always
      RestartSec=3
      User=root
      # NOTE(review): not an absolute path; systemd only resolves bare
      # executable names on v239+ — confirm, or use the full install path.
      ExecStart=nomadnet -d

      [Install]
      WantedBy=multi-user.target

- name: Reload systemd
  systemd:
    daemon_reload: true # canonical boolean instead of truthy `yes`

- name: Start nomadnet service after config update
  systemd:
    name: nomadnet.service
    enabled: true
    state: started
+59
View File
@@ -0,0 +1,59 @@
---
# Build dependencies + pyenv bootstrap.
# NOTE(review): pyenv is installed for root (/root/.pyenv, tasks run with
# become) but the PATH snippets below are written to /home/dietpi — confirm
# which user is actually meant to use pyenv.
- name: Install build environment
  become: true
  apt:
    update_cache: true
    cache_valid_time: 3600
    pkg:
      - git
      - build-essential
      - libssl-dev
      - zlib1g-dev
      - libbz2-dev
      - libreadline-dev
      - libsqlite3-dev
      - curl
      - libncursesw5-dev
      - xz-utils
      - tk-dev
      - libxml2-dev
      - libxmlsec1-dev
      - libffi-dev
      - liblzma-dev
      - python3-pip
      - python3-venv

- name: Check if pyenv is installed
  stat:
    path: /root/.pyenv
  register: pyenv_installed

- name: Download pyenv install script
  get_url:
    url: https://pyenv.run
    dest: /tmp/install_pyenv.sh
    mode: "0755" # quoted: a bare 0755 is YAML-1.1 octal (int 493)
  when: not pyenv_installed.stat.exists

- name: Run install script
  shell: /tmp/install_pyenv.sh
  args:
    creates: /root/.pyenv # idempotence guard in addition to the `when`
  when: not pyenv_installed.stat.exists

- name: Add pyenv to .bashrc
  blockinfile:
    dest: /home/dietpi/.bashrc
    block: |
      export PYENV_ROOT="$HOME/.pyenv"
      command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
      eval "$(pyenv init -)"
    marker: "# {mark} ANSIBLE MANAGED BLOCK - pyenv"
    create: true

- name: Add pyenv to .profile
  blockinfile:
    dest: /home/dietpi/.profile
    block: |
      export PYENV_ROOT="$HOME/.pyenv"
      command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
      eval "$(pyenv init -)"
    marker: "# {mark} ANSIBLE MANAGED BLOCK - pyenv"
    create: true
+93
View File
@@ -0,0 +1,93 @@
---
# Install the Reticulum stack (rns + lxmf + nomadnet) via pip and manage the
# rnsd systemd unit. The unit is stopped before the config deploy and started
# again afterwards so it always boots with the freshly rendered config.
- name: Install the rns package
  ansible.builtin.pip:
    name: rns
    state: present
    break_system_packages: true # canonical boolean instead of truthy `yes`

- name: Install the lxmf package
  ansible.builtin.pip:
    name: lxmf
    state: present
    break_system_packages: true

- name: Install the nomadnet package
  ansible.builtin.pip:
    name: nomadnet
    state: present
    break_system_packages: true

- name: Create systemd service for rns
  copy:
    dest: /etc/systemd/system/rns.service
    content: |
      [Unit]
      Description=Reticulum Network Stack Daemon
      After=multi-user.target

      [Service]
      Type=simple
      Restart=always
      RestartSec=3
      User=root
      ExecStart=rnsd --service

      [Install]
      WantedBy=multi-user.target

- name: Reload systemd
  systemd:
    daemon_reload: true

- name: Ensure rns service is configured
  systemd:
    name: rns.service
    enabled: true
    state: stopped # intentionally stopped here; restarted after the config deploy

- name: Ensure Reticulum config directory exists
  file:
    path: /root/.reticulum
    state: directory
    mode: "0755"

- name: Deploy rns configuration file (using block for friendly file updates)
  copy:
    dest: /root/.reticulum/config
    content: |
      [reticulum]
      enable_transport = Yes
      share_instance = Yes
      instance_name = {{ ansible_hostname }}
      discover_interfaces = Yes
      panic_on_interface_error = No
      respond_to_probes = Yes

      [logging]
      loglevel = 6

      [interfaces]
      [[Default Interface]]
      type = AutoInterface
      enabled = Yes
      {% for key, radio in radios.items() %}
      [[RNode LoRa {{ radio.name }}]]
      type = RNodeInterface
      enabled = {{ radio.enabled | default('yes') }}
      port = {{ radio.serialpath | default('/dev/ttyACM0') }}
      frequency = {{ radio.frequency | default(915000000) }}
      bandwidth = {{ radio.bandwidth | default(125000) }}
      txpower = {{ radio.txpower | default(7) }}
      spreadingfactor = {{ radio.spreadingfactor | default(8) }}
      codingrate = {{ radio.codingrate | default(5) }}
      id_callsign = {{ radio.name | default(key) }}
      id_interval = {{ radio.id_interval | default(600) }}
      {% endfor %}

- name: Start rns service after config update
  systemd:
    name: rns.service
    enabled: true
    state: started
+34
View File
@@ -0,0 +1,34 @@
# dependencies (bun install)
node_modules

# output
out
dist
*.tgz

# code coverage
coverage
*.lcov

# logs
logs
# fixed: the bun-init template ships these with `*` mangled into `_`
# (`_.log`, `report.[0-9]_...`), which matches nothing useful
*.log
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# caches
.eslintcache
.cache
*.tsbuildinfo

# IntelliJ based IDEs
.idea

# Finder (MacOS) folder config
.DS_Store
+34
View File
@@ -0,0 +1,34 @@
{
"lockfileVersion": 1,
"configVersion": 1,
"workspaces": {
"": {
"name": "soupclown",
"dependencies": {
"commander": "^14.0.3",
"zod": "^4.3.6",
},
"devDependencies": {
"@types/bun": "latest",
},
"peerDependencies": {
"typescript": "^5",
},
},
},
"packages": {
"@types/bun": ["@types/bun@1.3.13", "", { "dependencies": { "bun-types": "1.3.13" } }, "sha512-9fqXWk5YIHGGnUau9TEi+qdlTYDAnOj+xLCmSTwXfAIqXr2x4tytJb43E9uCvt09zJURKXwAtkoH4nLQfzeTXw=="],
"@types/node": ["@types/node@25.6.0", "", { "dependencies": { "undici-types": "~7.19.0" } }, "sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ=="],
"bun-types": ["bun-types@1.3.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-QXKeHLlOLqQX9LgYaHJfzdBaV21T63HhFJnvuRCcjZiaUDpbs5ED1MgxbMra71CsryN/1dAoXuJJJwIv/2drVA=="],
"commander": ["commander@14.0.3", "", {}, "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw=="],
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
"undici-types": ["undici-types@7.19.2", "", {}, "sha512-qYVnV5OEm2AW8cJMCpdV20CDyaN3g0AjDlOGf1OW4iaDEx8MwdtChUp4zu4H0VP3nDRF/8RKWH+IPp9uW0YGZg=="],
"zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="],
}
}
+3
View File
@@ -0,0 +1,3 @@
// CLI entry point: hand control straight to the command-line runner.
import { RUN_CLI } from "./src/interface/cli";

RUN_CLI();
+16
View File
@@ -0,0 +1,16 @@
{
"name": "soupclown",
"module": "index.ts",
"type": "module",
"private": true,
"devDependencies": {
"@types/bun": "latest"
},
"peerDependencies": {
"typescript": "^5"
},
"dependencies": {
"commander": "^14.0.3",
"zod": "^4.3.6"
}
}
+30
View File
@@ -0,0 +1,30 @@
import { Command } from "commander";
import { HOST } from "./host";

/**
 * Build and run the `soupclown` command-line interface.
 * Currently exposes a single `host <action>` subcommand; only the
 * `init` action does anything beyond logging.
 */
export function RUN_CLI() {
  const program = new Command();

  program
    .name("soupclown")
    .description("Soupclown system");

  program
    .command("host <action>")
    .action(async (action, opts) => {
      console.log(opts);
      switch (action) {
        case "init": {
          console.log("init action!");
          console.log(`HOSTNAME: ${await HOST.getHostname()}`);
          await HOST.initSystem();
          break;
        }
        default: {
          console.log("fool!");
          break;
        }
      }
    });

  program.parse();
}
+75
View File
@@ -0,0 +1,75 @@
import z from "zod";
import { HOST } from "./host";

// Config file lives next to the repo root, keyed by hostname.
const DEFAULT_CONFIG_PATH = "../../state.soupclown.json";

const hostConfigSchema = z.object({
  configurationPath: z.string(),
  services: z.array(
    z.object({
      name: z.string(),
      desiredState: z.enum(["up", "down"]),
    }),
  ),
});
export type HOST_CONFIG_SCHEMA_T = z.infer<typeof hostConfigSchema>;

// BUG FIX: the on-disk format is JSON, and JSON.parse can only ever produce a
// plain object — the previous z.map() schema rejected every real config file
// (always threw FAILED_CONFIG_SCHEMA_PARSE) and JSON.stringify on a Map wrote
// "{}". z.record() parses and serializes the same data correctly.
const CONFIG_SCHEMA = z.object({
  v: z.literal("v1"),
  data: z.record(z.string(), hostConfigSchema),
});
export type CONFIG_SCHEMA_T = z.infer<typeof CONFIG_SCHEMA>;

export class SC_CONFIG_C {
  // Per-host values copied out of the loaded config (declared here; they were
  // previously assigned in _loadConfigFile without a declaration, which is a
  // compile error under `strict`).
  public configurationPath?: string;
  public services?: HOST_CONFIG_SCHEMA_T["services"];

  constructor(
    protected runningConfig = {
      v: "v1",
      data: {},
    } as CONFIG_SCHEMA_T,
  ) {}

  /**
   * Read and schema-validate a config file.
   * Throws the string codes "FAILED_SCHEMA_JSON_PARSE" /
   * "FAILED_CONFIG_SCHEMA_PARSE" on failure.
   */
  public static async loadConfigFile(path = DEFAULT_CONFIG_PATH) {
    const configFile = Bun.file(path);
    let fileJsonData: any = null;
    try {
      fileJsonData = await configFile.json();
    } catch (e) {
      throw "FAILED_SCHEMA_JSON_PARSE";
    }
    const configParseJsonResult = CONFIG_SCHEMA.safeParse(fileJsonData);
    if (!configParseJsonResult.success) {
      throw "FAILED_CONFIG_SCHEMA_PARSE";
    }
    return configParseJsonResult.data;
  }

  /** Load the config (best effort) then write it back out. */
  public static async init(path = DEFAULT_CONFIG_PATH) {
    const newConfig = new SC_CONFIG_C();
    await newConfig._loadConfigFile(path);
    await newConfig._writeConfigFile(path);
    return newConfig;
  }

  private async _loadConfigFile(path = DEFAULT_CONFIG_PATH) {
    try {
      this.runningConfig = await SC_CONFIG_C.loadConfigFile(path);
      const hostconfig = this.runningConfig.data[await HOST.getHostname()];
      if (hostconfig) {
        this.configurationPath = hostconfig.configurationPath;
        this.services = hostconfig.services;
      }
    } catch {
      // Deliberate best-effort: a missing/invalid file leaves the defaults.
      console.error("Failed to load config, assuming you know what you're doing");
    }
  }

  private async _writeConfigFile(path = DEFAULT_CONFIG_PATH) {
    await Bun.write(path, JSON.stringify(this.runningConfig, null, 2));
  }
}

// NOTE(review): host.ts imports this module while this module imports HOST —
// a circular dependency kept alive by this top-level await. Confirm intended.
export const SC_CONFIG = await SC_CONFIG_C.init();
+39
View File
@@ -0,0 +1,39 @@
import { $ } from "bun";
import z, { parse } from "zod";

// Shape of one line of `docker ps --format json` output (Docker's CLI emits
// one JSON object per container, newline-delimited).
const CLI_ContainerInfoSchema = z.object({
  ID: z.string(),
  Image: z.string(),
  CreatedAt: z.string(),
  State: z.string(),
  Names: z.string(),
});
type CLI_ContainerInfo = z.infer<typeof CLI_ContainerInfoSchema>;

// Internal (soupclown) view of a container — defined but not populated here.
const SC_ContainerInfo = z.object({
  id: z.string(),
  name: z.string(),
  image: z.string(),
  state: z.unknown(),
  created: z.date(),
});
type SC_ContainerInfoT = z.infer<typeof SC_ContainerInfo>;

/** List all containers (`docker ps -a`), validating each output line. */
async function PS() {
  const output = $`docker ps -a --format json`;
  const containers: CLI_ContainerInfo[] = [];
  for await (const line of output.lines()) {
    // Skip anything shorter than an empty JSON object (blank trailing lines).
    if (line.length < "{}".length) {
      continue;
    }
    containers.push(CLI_ContainerInfoSchema.parse(JSON.parse(line)));
  }
  return containers;
}

export const SC_DOCKER = {
  PS,
};
+16
View File
@@ -0,0 +1,16 @@
import { $ } from "bun";
import { SC_CONFIG } from "./config";

/** Resolve this machine's hostname via the `hostname` binary, trimmed. */
async function getHostname() {
  return (await $`hostname`.quiet().text()).trim();
}

/** Placeholder for host bootstrap work (directory creation etc.). */
async function initSystem() {
  // NOTE(review): config.ts imports HOST from this module while this module
  // imports SC_CONFIG — a circular dependency that is currently harmless only
  // because the line below is commented out. Confirm before re-enabling.
  // await $`mkdir -p ${SC_CONFIG.configurationPath}`
}

export const HOST = {
  getHostname,
  initSystem,
};
+29
View File
@@ -0,0 +1,29 @@
{
"compilerOptions": {
// Environment setup & latest features
"lib": ["ESNext"],
"target": "ESNext",
"module": "Preserve",
"moduleDetection": "force",
"jsx": "react-jsx",
"allowJs": true,
// Bundler mode
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"noEmit": true,
// Best practices
"strict": true,
"skipLibCheck": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"noImplicitOverride": true,
// Some stricter flags (disabled by default)
"noUnusedLocals": false,
"noUnusedParameters": false,
"noPropertyAccessFromIndexSignature": false
}
}
+3 -1
View File
@@ -1,9 +1,11 @@
AUTO_SETUP_GLOBAL_PASSWORD=someGenericPassword
AUTO_SETUP_NET_HOSTNAME=pt1
AUTO_SETUP_HEADLESS=1
AUTO_SETUP_AUTOMATED=1
AUTO_SETUP_INSTALL_SOFTWARE_ID=137 152
AUTO_SETUP_SSH_PUBKEY=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAkhSg+CLjIYSZ+lTNkChYAP7uxpPrl1TvVPwCfYgSoa
AUTO_SETUP_SSH_PUBKEY=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAkhSg+CLjIYSZ+lTNkChYAP7uxpPrl1TvVPwCfYgSoa
AUTO_SETUP_SSH_PUBKEY=ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINhzgTeNWuHeznJJlRXRysKA3msMnUvALMkADOIJyGwb
AUTO_SETUP_LOCALE=C.UTF-8
AUTO_SETUP_KEYBOARD_LAYOUT=us
+1
View File
@@ -0,0 +1 @@
/etc/.soupclown.env
+1
View File
@@ -0,0 +1 @@
!.env
+1
View File
@@ -0,0 +1 @@
{}
+315
View File
@@ -0,0 +1,315 @@
services:
  ### START DOWNLOAD
  vpn:
    container_name: vpn
    profiles:
      - download
    image: qmcgaw/gluetun:v3.40
    cap_add:
      - NET_ADMIN # Quin does not like these
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - ${PATH_ETC}/gluetun:/gluetun
    environment:
      - VPN_SERVICE_PROVIDER=private internet access
      - SERVER_REGIONS=Netherlands
      - OPENVPN_USER=${PIA_USERNAME}
      - OPENVPN_PASSWORD=${PIA_PASSWORD}
      - UPDATER_PERIOD=24h
    ports:
      # Port mappings are quoted: on YAML 1.1 loaders an unquoted
      # HOST:CONTAINER pair whose second number is < 60 parses as a
      # base-60 integer (e.g. 222:22 -> 13342).
      - "9091:9091" # Transmission
      - "51413:51413" # Transmission
      - "51413:51413/udp" # Transmission
      - "9696:9696" # Prowlarr
      - "7878:7878" # Radarr
      - "8989:8989" # Sonarr
      - "8686:8686" # Lidarr
      - "3333:3333" # bitmagnet API and WebUI port
      - "3334:3334/tcp" # bitmagnet BitTorrent ports
      - "3334:3334/udp" # bitmagnet BitTorrent ports
      # - "5055:5055" # seerr
      # - "8112:8112" # Deluge
      # - "6881:6881" # Deluge
      # - "6881:6881/udp" # Deluge
      # - "58846:58846" # idk Deluge
      - "7474:7474" # Autobrr
    restart: unless-stopped

  transmission:
    container_name: transmission
    profiles:
      - download
    image: lscr.io/linuxserver/transmission:latest
    network_mode: "service:vpn"
    environment:
      - TZ=${TZ}
      - PUID=1000
      - PGID=1000
    volumes:
      - ${PATH_ETC}/transmission:/config
      - /storage/transmission/downloads:/downloads
    restart: unless-stopped

  prowlarr:
    container_name: prowlarr
    profiles:
      - download
    image: lscr.io/linuxserver/prowlarr:latest
    network_mode: "service:vpn"
    environment:
      - TZ=${TZ}
      - PUID=1000
      - PGID=1000
    volumes:
      - ${PATH_ETC}/prowlarr:/config
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped

  radarr:
    container_name: radarr
    profiles:
      - download
    image: lscr.io/linuxserver/radarr:latest
    network_mode: "service:vpn"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - ${PATH_ETC}/radarr:/config
      - /storage/media:/media # Access to media library
      - /storage/transmission/downloads:/downloads # Access to transmission downloads
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
      prowlarr:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped

  sonarr:
    container_name: sonarr
    profiles:
      - download
    image: lscr.io/linuxserver/sonarr:latest
    network_mode: "service:vpn"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - ${PATH_ETC}/sonarr:/config
      - /storage/media:/media # Access to media library
      - /storage/transmission/downloads:/downloads # Access to transmission downloads
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
      prowlarr:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped

  lidarr:
    container_name: lidarr
    profiles:
      - download
    image: lscr.io/linuxserver/lidarr:latest
    network_mode: "service:vpn"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=${TZ}
    volumes:
      - ${PATH_ETC}/lidarr:/config
      - /storage/media:/media # Access to media library
      - /storage/transmission/downloads:/downloads # Access to transmission downloads
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
      prowlarr:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped

  bitmagnet:
    container_name: bitmagnet
    profiles:
      - download
    image: ghcr.io/bitmagnet-io/bitmagnet:latest
    restart: unless-stopped
    network_mode: "service:vpn"
    environment:
      # NOTE(review): "BITMAGENT" (sic) — keep spelling in sync with the .env file.
      - POSTGRES_PASSWORD=${BITMAGENT_DB_PASS}
      - TMDB_API_KEY=${TMDB_API_KEY}
    volumes:
      - ${PATH_ETC}/bitmagnet:/root/.config/bitmagnet
    command:
      - worker
      - run
      - --keys=http_server
      - --keys=queue_server
      # disable the next line to run without DHT crawler
      - --keys=dht_crawler
    depends_on:
      postgres:
        condition: service_healthy

  postgres:
    container_name: bitmagnet-postgres
    profiles:
      - download
    image: postgres:16-alpine
    network_mode: "service:vpn"
    volumes:
      - ${PATH_ETC}/bitmagnet-postgres:/var/lib/postgresql/data
    restart: unless-stopped
    environment:
      - POSTGRES_PASSWORD=${BITMAGENT_DB_PASS}
      - POSTGRES_DB=bitmagnet
      - PGUSER=postgres
    shm_size: 1g
    healthcheck:
      test:
        - CMD-SHELL
        - pg_isready
      start_period: 20s
      interval: 10s

  seerr:
    container_name: seerr
    profiles:
      - download
    image: ghcr.io/seerr-team/seerr:latest
    init: true
    ports:
      - "5055:5055"
    environment:
      - TZ=${TZ}
      - LOG_LEVEL=debug
    volumes:
      # NOTE(review): path says "seer" while the service is "seerr" — confirm intended.
      - ${PATH_ETC}/seer:/app/config
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
      start_period: 20s
      timeout: 3s
      interval: 15s
      retries: 3
    restart: unless-stopped

  lidatube:
    container_name: lidatube
    profiles:
      - download
    image: thewicklowwolf/lidatube:latest
    volumes:
      - ${PATH_ETC}/lidatube:/lidatube/config
      - /storage/lidatube:/lidatube/downloads
      - /etc/localtime:/etc/localtime:ro
    environment:
      - attempt_lidarr_import=True
    ports:
      - "5000:5000"
    restart: unless-stopped

  aurral:
    container_name: aurral
    profiles:
      - download
    image: ghcr.io/lklynet/aurral:latest
    ports:
      - "3001:3001"
    environment:
      - DOWNLOAD_FOLDER=${DL_FOLDER:-./data/downloads}
    volumes:
      - ${PATH_ETC}/aurral:/app/backend/data
      - /storage/aurral:/app/downloads
    restart: unless-stopped
  ### END DOWNLOAD

  ### START APP
  navidrome:
    profiles:
      - app
    container_name: navidrome
    image: deluan/navidrome:latest
    user: "1000:1000" # quoted so it stays a UID:GID string
    ports:
      - "4533:4533"
    volumes:
      - ${PATH_ETC}/navidrome:/data
      - /storage/media/Music:/music
    restart: unless-stopped

  metube:
    profiles:
      - app
    image: ghcr.io/alexta69/metube
    container_name: metube
    ports:
      - "8081:8081"
    volumes:
      - ./bin/metube-options.json:/config/metube-options.json # Use config from repo :)
      - /storage/metube/downloads:/downloads
    environment:
      - YTDL_OPTIONS_FILE=/config/metube-options.json
    restart: unless-stopped

  gitea:
    profiles:
      - app
    container_name: gitea
    image: docker.gitea.com/gitea:1.26.1
    ports:
      - "3000:3000"
      # FIX: this one MUST be quoted — an unquoted 222:22 is a YAML 1.1
      # sexagesimal integer (222*60+22 = 13342), not a port mapping.
      - "222:22"
    volumes:
      - ${PATH_ETC}/gitea-data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=${GITEA_DB_USERNAME}
      - GITEA__database__PASSWD=${GITEA_DB_PASSWORD}
      - GITEA__database__HOST=giteadb:5432
    restart: unless-stopped
    depends_on:
      - giteadb

  giteadb:
    profiles:
      - app
    container_name: gitea-postgres
    image: docker.io/library/postgres:14
    environment:
      - POSTGRES_USER=${GITEA_DB_USERNAME}
      - POSTGRES_PASSWORD=${GITEA_DB_PASSWORD}
      - POSTGRES_DB=gitea
    volumes:
      - ${PATH_ETC}/gitea-postgres:/var/lib/postgresql/data
    healthcheck:
      test:
        - CMD-SHELL
        - pg_isready
      start_period: 20s
      interval: 10s
    restart: unless-stopped
  ### END APP
Generated
+53 -4
View File
@@ -1,5 +1,23 @@
{
"nodes": {
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1775087534,
"narHash": "sha256-91qqW8lhL7TLwgQWijoGBbiD4t7/q75KTi8NxjVmSmA=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "3107b77cd68437b9a76194f0f7f9c55f2329ca5b",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
@@ -7,20 +25,34 @@
]
},
"locked": {
"lastModified": 1716729631,
"narHash": "sha256-IerjU5GUeKc0eW9FPOdlPveSGJ2ZrO+lIfuHPUmUF2I=",
"lastModified": 1776950293,
"narHash": "sha256-t6KMARLILjPuTBSRoYanUxV+FU50IFZ7L5XVdOcdtaY=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "fc4492181833eaaa7a26a8081c0615d95792d825",
"rev": "6837e0d6c5eda81fd26308489799fbf83a160465",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-23.05",
"repo": "home-manager",
"type": "github"
}
},
"import-tree": {
"locked": {
"lastModified": 1773693634,
"narHash": "sha256-BtZ2dtkBdSUnFPPFc+n0kcMbgaTxzFNPv2iaO326Ffg=",
"owner": "vic",
"repo": "import-tree",
"rev": "c41e7d58045f9057880b0d85e1152d6a4430dbf1",
"type": "github"
},
"original": {
"owner": "vic",
"repo": "import-tree",
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1773533765,
@@ -53,9 +85,26 @@
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1774748309,
"narHash": "sha256-+U7gF3qxzwD5TZuANzZPeJTZRHS29OFQgkQ2kiTJBIQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "333c4e0545a6da976206c74db8773a1645b5870a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"home-manager": "home-manager",
"import-tree": "import-tree",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs"
}
+37 -25
View File
@@ -4,38 +4,50 @@
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
nixos-hardware.url = "github:NixOS/nixos-hardware/master";
flake-parts.url = "github:hercules-ci/flake-parts";
import-tree.url = "github:vic/import-tree";
home-manager = {
url = "github:nix-community/home-manager/release-23.05";
url = "github:nix-community/home-manager";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, home-manager, nixos-hardware, ... }@inputs: {
nixosConfigurations.qmoran-laptop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./nix/qlhc.nix
./nix/common.nix
./nix/user-quin.nix
./nix/gui1.nix
./nix/mega.nix
nixos-hardware.nixosModules.framework-11th-gen-intel
outputs = inputs@{
flake-parts,
nixos-hardware,
import-tree,
home-manager,
nixpkgs,
...
}:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.home-manager.flakeModules.home-manager
];
flake = {
nixosConfigurations.qmoran-laptop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
(import-tree ./modules)
nixos-hardware.nixosModules.framework-11th-gen-intel
./nix/qlhc.nix
];
};
nixosConfigurations.qmoran-desktop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
(import-tree ./modules)
./nix/qdhc.nix
./nix/jf-server.nix
];
};
};
systems = [
"x86_64-linux"
];
};
nixosConfigurations.qmoran-desktop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./nix/qdhc.nix
./nix/common.nix
./nix/user-quin.nix
./nix/gui1.nix
./nix/mega.nix
./nix/jf-server.nix
];
};
};
}
+6
View File
@@ -18,6 +18,10 @@
"flakes"
];
nixpkgs.config.permittedInsecurePackages = [
"python3.12-ecdsa-0.19.1" # I'm sure this is fine (just don't use python for anything important like usual)
];
i18n.extraLocaleSettings = {
LC_ADDRESS = "en_US.UTF-8";
LC_IDENTIFICATION = "en_US.UTF-8";
@@ -33,5 +37,7 @@
environment.systemPackages = with pkgs; [
git
tailscale
rclone
mergerfs
];
}
View File
View File
@@ -28,6 +28,16 @@
gimp
kicad-small
rpi-imager
vscodium-fhs
ansible_2_18
usbutils
python313Packages.nomadnet
screen
jellyfin-desktop
renpy
bun
wine
beets
];
};
-13
View File
@@ -1,13 +0,0 @@
# It's NixOS!
Hello MtV and welcome to my crib. These are all the things pertaining to my nixos stuff.
- [./common.nix](./common.nix) is all the common things that hosts will have configured
- Services
- locale settings
- timezone
- [./gui1](./gui1.nix) all config things for a linux UX experience, KDE Plasma in this instance, but perhaps there could be a gui2 someday...
- [./qlhc.nix](./qlhc.nix) this stands for quins-laptop-hardware-configuraion. I'll find a better way to do this stuff later.
- [./user-quin](./user-quin.nix) my personal user account, here for all to see.
- packages
- groups
+21 -1
View File
@@ -30,6 +30,26 @@
options = [ "fmask=0077" "dmask=0077" ];
};
fileSystems."/mnt/disks/disk1" =
{
device = "/dev/disk/by-uuid/fa20e116-e04e-4f3e-bf5a-c2e2c1fad610";
fsType = "ext4";
options = ["noatime" "nodiratime"];
};
fileSystems."/mnt/disks/disk2" =
{
device = "/dev/disk/by-uuid/887500d5-1d4d-4080-84e3-5ef424c9f310";
fsType = "ext4";
options = ["noatime" "nodiratime"];
};
fileSystems."/storage" = {
fsType = "fuse.mergerfs";
device = "/mnt/disks/*";
options = ["cache.files=partial" "dropcacheonclose=true" "category.create=mfs"];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/cf4cff49-15d7-4145-86c2-8be30e71fe4c"; }
];
@@ -79,4 +99,4 @@
users.users.qmoran.packages = with pkgs; [ nvtopPackages.nvidia ];
hardware.nvidia-container-toolkit.enable = true;
virtualisation.docker.daemon.settings.features.cdi = true;
}
}
-24
View File
@@ -1,24 +0,0 @@
{ pkgs, lib, ... }:
let
mountPoint = "/mnt/mega";
remoteName = "data";
bucketName = "enc";
configFile = "/etc/rclone.conf";
in
{
environment.systemPackages = with pkgs;[
rclone
];
systemd.mounts = lib.singleton {
where = mountPoint;
what = "${remoteName}:${bucketName}";
type = "rclone";
# I think we need to be able to set a timeout here but nix is too new
options = "_netdev,allow-other,vfs-cache-mode=full,vfs-read-chunk-size=512M,vfs-read-chunk-size-limit=1G,vfs-write-back=48h,vfs-cache-max-age=3h,config=${configFile},vvv,daemon-wait=0";
};
#systemd.automounts = lib.singleton {
# where = mountPoint;
# wantedBy = [ "multi-user.target" ];
#};
}
-23
View File
@@ -1,23 +0,0 @@
# Ansible host setup
### About
This is made with the intent to be reuseable when any hosts on a network need to be configured back to a common state.
I've actually decided that this will only get the system configs and lower level configs in place for larger tools to then use the host for service orchistration.
# Tasks
- Install dependencies
- Create users
- Write env file
- Install Docker
- Install Rclone
## Variables:
```
- soupclown_users: list (Creates a user for each user and then also sets them as sudoer)
- name: <username>
isMod: true | false (sets as sudoer)
- soupclown_drive_configs: (not upgraded yet)
```
-44
View File
@@ -1,44 +0,0 @@
---
- name: Install mergerfs if not present
apt:
name: mergerfs
state: present
update_cache: yes
- name: Update mergerfs systemd service with drive paths
template:
src: mergerfs.service.j2
dest: /etc/systemd/system/mergerfs.service
mode: "0644"
owner: root
group: root
- name: Ensure fstab is updated with drive configurations
lineinfile:
regexp: "^PARTUUID={{ item.partuuid }}"
path: /etc/fstab
line: "PARTUUID={{ item.partuuid }} {{ mount_point }}/{{ item.name }} {{ item.fs_type }} defaults,auto,nofail 0 2"
state: present
loop: "{{ drive_configs }}"
- name: Ensure mount points exist and have appropriate permissions
file:
path: "{{ mount_point }}/{{ item.name }}"
state: directory
mode: "0755"
owner: root
group: root
loop: "{{ drive_configs }}"
- name: Systemctl daemon-reload
command: systemctl daemon-reload
- name: Mount all filesystems
command: mount -a
ignore_errors: yes
- name: Start and enable mergerfs service
systemd:
name: mergerfs
state: started
enabled: yes
-7
View File
@@ -1,7 +0,0 @@
---
- hosts: qmoran-desktop
become: yes
roles:
- oneenv
vars_files:
- secrets.yaml
@@ -1,93 +0,0 @@
# ============================================================================
# CTOP Installation Task
# ============================================================================
#
# ## Overview
# This task sequence installs **ctop** (a top-like interface for container metrics)
# on the target system using the azlux repository.
#
# ## Prerequisites
# - Target system must be Debian/Ubuntu-based
# - Root or sudo privileges required
# - Internet connectivity to download packages and GPG keys
#
# ## Workflow
# 1. **Prerequisite Check**: Verifies if ctop is already installed via marker file
# 2. **Dependency Installation**: Installs required system packages
# - ca-certificates (SSL/TLS support)
# - curl (download utilities)
# - gnupg (GPG key management)
# - lsb-release (OS release detection)
# 3. **Repository Setup**:
# - Adds azlux GPG key for package verification
# - Validates GPG key integrity
# - Adds azlux apt repository with signed packages
# - Refreshes apt cache
# 4. **Package Installation**: Installs docker-ctop package
#
# ## Key Features
# - ✅ Idempotent: Skips execution if ctop is already installed
# - ✅ GPG Validation: Ensures repository authenticity
# - ✅ Error Handling: Fails playbook if GPG key or repo validation fails
# - ✅ Architecture-Aware: Uses ansible_architecture variable for multi-arch support
#
# ## Variables Used
# - `ansible_architecture`: Target system CPU architecture (auto-detected)
# - `ctop_installed_check`: Registration variable tracking installation status
# - `apt_key_finger_output`: GPG key validation results
# - `azlux_repo_check`: Repository addition verification results
#
# ## Failure Points
# - GPG key import fails or key not found in fingerprint output
# - Repository addition fails or cannot be verified
# - Package installation fails
#
# ## Notes
# - Uses `signed-by` parameter for secure apt repository configuration (modern approach)
# - Creates marker file at `/var/ctop_installed` to track installation state
# - `lsb_release -cs` dynamically determines Debian/Ubuntu codename
# - Ensure that the system is updated before running this playbook for best results
- name: Check if ctop is installed
stat:
path: /var/ctop_installed
register: ctop_installed_check
- name: Install prerequisites for ctop
apt:
name:
- ca-certificates
- curl
- gnupg
- lsb-release
state: present
update_cache: yes
when: not ctop_installed_check.stat.exists
- name: Add azlux GPG key for ctop
apt_key:
url: https://azlux.fr/repo.gpg.key
state: present
keyring: /usr/share/keyrings/azlux-archive-keyring.gpg
when: not ctop_installed_check.stat.exists
- name: Add azlux repository for ctop
shell: |
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/azlux-archive-keyring.gpg trusted=yes] http://packages.azlux.fr/debian stable main" | sudo tee /etc/apt/sources.list.d/azlux.list >/dev/null
when: not ctop_installed_check.stat.exists
- name: Update apt cache after adding azlux repository
command: apt-get update
when: not ctop_installed_check.stat.exists
- name: Install ctop
apt:
name: docker-ctop
state: present
when: not ctop_installed_check.stat.exists
- name: Create marker file to indicate ctop installation
file:
path: /var/ctop_installed
state: touch
when: not ctop_installed_check.stat.exists
@@ -1,21 +0,0 @@
- name: Check if lazydocker is installed
stat:
path: /var/lazydocker_installed
register: lazydocker_installed_check
- name: Download install script for lazydocker
get_url:
url: https://raw.githubusercontent.com/jesseduffield/lazydocker/master/scripts/install_update_linux.sh
dest: /tmp/install_lazydocker.sh
mode: "0755"
when: not lazydocker_installed_check.stat.exists
- name: Run lazydocker installer script
command: /tmp/install_lazydocker.sh
when: not lazydocker_installed_check.stat.exists
- name: Create file indicating lazydocker is installed
file:
path: /var/lazydocker_installed
state: touch
when: not lazydocker_installed_check.stat.exists
@@ -1,34 +0,0 @@
# get the installer script
# curl --proto '=https' --tlsv1.2 -fsSL https://get.opentofu.org/install-opentofu.sh -o install-opentofu.sh
- name: Check if opentofu is installed
stat:
path: /var/opentofu_installed
register: opentofu_installed_check
- name: Download installer script
get_url:
url: https://get.opentofu.org/install-opentofu.sh
dest: /tmp/install-opentofu.sh
mode: "0755"
when: not opentofu_installed_check.stat.exists
- name: Make installer script executable
command: chmod +x /tmp/install-opentofu.sh
when: not opentofu_installed_check.stat.exists
- name: Install opentofu
command: /tmp/install-opentofu.sh --install-method deb
when: not opentofu_installed_check.stat.exists
- name: Remove installer script
file:
path: /tmp/install-opentofu.sh
state: absent
when: not opentofu_installed_check.stat.exists
- name: Create file indicating opentofu is installed
file:
path: /var/opentofu_installed
state: touch
when: not opentofu_installed_check.stat.exists
@@ -1,8 +0,0 @@
- name: Install opentofu
import_tasks: install-opentofu.yaml
- name: Install lazydocker
import_tasks: install-lazydocker.yaml
- name: Install ctop
import_tasks: install-ctop.yaml
-2
View File
@@ -1,2 +0,0 @@
- name: Install Reticulum Network Stack (RNS)
import_tasks: rns.yaml
-138
View File
@@ -1,138 +0,0 @@
---
# - name: Install python3 packages
# pip:
# name: "{{ item }}"
# state: present
# executable: pip3
# with_items:
# - rns
# - lxmf
- name: Create systemd service for rns
copy:
dest: /etc/systemd/system/rns.service
content: |
[Unit]
Description=Reticulum Network Stack Daemon
After=multi-user.target
[Service]
Type=simple
Restart=always
RestartSec=3
User=root
ExecStart=rnsd --service
[Install]
WantedBy=multi-user.target
- name: Reload systemd
systemd:
daemon_reload: yes
- name: Ensure rns service is enabled and started
systemd:
name: rns.service
enabled: yes
state: started
- name: Stop service for config file update
systemd:
name: rns.service
state: stopped
- name: Ensure Reticulum config directory exists
file:
path: /root/.reticulum
state: directory
mode: "0755"
- name: Deploy rns configuration file (using block for friendly file updates)
copy:
dest: /root/.reticulum/config
content: |
[reticulum]
enable_transport = True
share_instance = Yes
instance_name = B407
discover_interfaces = Yes
panic_on_interface_error = No
# If you're connecting to a large external network, you
# can use one or more external blackhole list to block
# spammy and excessive announces onto your network. This
# funtionality is especially useful if you're hosting public
# entrypoints or gateways. The list source below provides a
# functional example, but better, more timely maintained
# lists probably exist in the community.
# blackhole_sources = 521c87a83afb8f29e4455e77930b973b
[logging]
# Valid log levels are 0 through 7:
# 0: Log only critical information
# 1: Log errors and lower log levels
# 2: Log warnings and lower log levels
# 3: Log notices and lower log levels
# 4: Log info and lower (this is the default)
# 5: Verbose logging
# 6: Debug logging
# 7: Extreme logging
loglevel = 4
# The interfaces section defines the physical and virtual
# interfaces Reticulum will use to communicate on. This
# section will contain examples for a variety of interface
# types. You can modify these or use them as a basis for
# your own config, or simply remove the unused ones.
[interfaces]
[[Default Interface]]
type = AutoInterface
enabled = Yes
[[RNode LoRa Interface]]
type = RNodeInterface
enabled = yes
port = /dev/ttyACM0
# Set frequency to 915 MHz (US ISM Band)
frequency = 915000000
# Set LoRa bandwidth to 125 KHz
bandwidth = 125000
# Set TX power to 7 dBm (5 mW)
txpower = 7
# Select spreading factor 8. Valid
# range is 7 through 12, with 7
# being the fastest and 12 having
# the longest range.
spreadingfactor = 8
# Select coding rate 5. Valid range
# is 5 throough 8, with 5 being the
# fastest, and 8 the longest range.
codingrate = 5
# You can configure the RNode to send
# out identification on the channel with
# a set interval by configuring the
# following two parameters.
id_callsign = B407
id_interval = 600
# For certain homebrew RNode interfaces
# with low amounts of RAM, using packet
# flow control can be useful. By default
# it is disabled.
# flow_control = False
# It is possible to limit the airtime
# utilisation of an RNode by using the
# following two configuration options.
# The short-term limit is applied in a
# window of approximately 15 seconds,
# and the long-term limit is enforced
# over a rolling 60 minute window. Both
# options are specified in percent.
# airtime_limit_long = 1.5
# airtime_limit_short = 33
- name: Start rns service after config update
systemd:
name: rns.service
state: started
-66
View File
@@ -1,66 +0,0 @@
# add block to /etc/udev/rules.d/70-pcpanel.rules
# SUBSYSTEM=="usb", ATTRS{idVendor}=="04D8", ATTRS{idProduct}=="eb52", TAG+="uaccess"
# SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c4", TAG+="uaccess"
# SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c5", TAG+="uaccess"
- name: Check if we've already set up pcpanel
stat:
path: /etc/udev/rules.d/70-pcpanel.rules
register: pcpanel_rules_file
- name: Check if we've installed the deb file before
stat:
path: /var/pcpanel_installed
register: pcpanel_deb_file
- name: Ensure pulseaudio is installed
apt:
name: "{{ item }}"
state: present
update_cache: yes
with_items:
- pulseaudio
- pulseaudio-utils
when: not pcpanel_rules_file.stat.exists
- name: Create required directories for pcpanel
file:
path: /etc/udev/rules.d
state: directory
mode: "0755"
when: not pcpanel_rules_file.stat.exists
- name: Add udev rules for pcpanel
copy:
content: |
# SUBSYSTEM=="usb", ATTRS{idVendor}=="04D8", ATTRS{idProduct}=="eb52", TAG+="uaccess"
SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c4", TAG+="uaccess"
SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c5", TAG+="uaccess"
dest: /etc/udev/rules.d/70-pcpanel.rules
mode: "0644"
owner: root
group: root
when: not pcpanel_rules_file.stat.exists
- name: Reload udev rules
shell: udevadm control --reload-rules && udevadm trigger
when: not pcpanel_rules_file.stat.exists
- name: Download pcpanel deb file
get_url:
url: "https://github.com/nvdweem/PCPanel/releases/download/v1.7.1/pcpanel_1.7.1_amd64.deb"
dest: /tmp/pcpanel_1.7.1_amd64.deb
mode: "0644"
when: not pcpanel_deb_file.stat.exists
- name: Install pcpanel deb file
apt:
deb: /tmp/pcpanel_1.7.1_amd64.deb
when: not pcpanel_deb_file.stat.exists
- name: Create file to indicate pcpanel installed
file:
path: /var/pcpanel_installed
state: touch
mode: "0644"
when: not pcpanel_deb_file.stat.exists
@@ -1,18 +0,0 @@
soupclown_mod_group_name: soupclown_moderator
soupclown_users:
- name: dbowen
isMod: false
soupclown_drive_configs:
- name: d1
partuuid: ea8eb756-01
fs_type: ext4
- name: d2
partuuid: d9892c39-c6f2-4090-bccc-1b976f85c762
fs_type: ext4
- name: DATA
partuuid: 9f9dfead-c8bd-40bc-a012-7c3fa996e610
fs_type: ext4
@@ -1,13 +0,0 @@
- name: Create soupclown moderator group
group:
name: "{{ soupclown_mod_group_name }}"
state: present
- name: For each user in config, ensure user exists and if isMod true, add to moderator group
user:
name: "{{ item.name }}"
state: present
groups: "{{ [soupclown_mod_group_name] if item.isMod else [] }}"
password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') | password_hash('sha512') }}"
update_password: on_create
loop: "{{ soupclown_users }}"
@@ -1,47 +0,0 @@
---
- name: Check if our custom file indicating Docker is installed exists
stat:
path: /var/docker_installed
register: docker_installed_check
- name: Uninstall old Docker versions if present
apt:
name:
- docker
- docker-engine
- docker.io
- containerd
- runc
state: absent
purge: yes
update_cache: yes
when: not docker_installed_check.stat.exists
- name: Install required packages for Docker
apt:
name:
- gpg
- ca-certificates
- curl
- gnupg
- lsb-release
state: present
update_cache: yes
when: not docker_installed_check.stat.exists
# curl -fsSL https://get.docker.com -o get-docker.sh
- name: Get temporary Docker installation script
get_url:
url: https://get.docker.com
dest: /tmp/get-docker.sh
mode: "0755"
when: not docker_installed_check.stat.exists
- name: Install Docker using the official installation script
command: sh /tmp/get-docker.sh
when: not docker_installed_check.stat.exists
- name: Create file indicating Docker is installed
file:
path: /var/docker_installed
state: touch
@@ -1,42 +0,0 @@
# NOTE: Never use apt for rclone, use official script
- name: Remove defualt packages
apt:
name:
- firefox
- thunderbird
state: absent
- name: Install common packages
apt:
name:
- btop
- htop
- desktop-file-utils
- git
- gh
- ssh-import-id
- python3-venv
- python3-pip
- name: Install snap packages
snap:
name: "{{ item }}"
state: present
with_items:
- vivaldi
- signal-desktop
- name: Install python3 packages
pip:
name: "{{ item }}"
state: present
break_system_packages: yes
executable: pip3
with_items:
- pyyaml
- cryptography
- passlib
- name: Install Docker
import_tasks: docker-install.yaml
@@ -1,9 +0,0 @@
---
- name: Install all dependencies
import_tasks: install-deps.yaml
- name: Create users
import_tasks: create-users.yaml
- name: Install rclone (MEGA)
import_tasks: rclone-mega-install.yaml
@@ -1,71 +0,0 @@
- name: Check if rclone is installed
stat:
path: /var/rclone_installed
register: rclone_installed_check
- name: Install rclone (mega)
command:
# This command is for sure not working but I ran it manually and it worked. So I blame ansible
cmd: "curl https://rclone.org/install.sh | sudo bash ; touch /var/rclone_installed"
creates: /var/rclone_installed
when: not rclone_installed_check.stat.exists
- name: Create required directories
file:
path: "{{ item }}"
state: directory
mode: "0770"
with_items:
- /root/.config/rclone
- /mnt/mega
- name: Create rclone config
copy:
content: |
[mega]
type = mega
user = tnuu9h362@mozmail.com
pass = {{ rclone_mega_key }}
[data]
type = crypt
remote = mega:enc
password = {{ rclone_encPass_one }}
password2 = {{ rclone_encPass_two }}
dest: /root/.config/rclone/rclone.conf
- name: Write service file for rclone mount
copy:
content: |
[Unit]
Description=Rclone mount service
After=local-fs.target network.target
[Service]
Type=simple
User=root
KillMode=control-group
ExecStart=rclone mount data:enc /mnt/mega -vv \
--vfs-cache-mode full \
--allow-other \
--umask 0 \
--dir-perms 0777 \
--file-perms 0777 \
--dir-cache-time 300h
ExecStop=umount /mnt/mega
Restart=on-failure
[Install]
WantedBy=default.target
dest: /etc/systemd/system/rclone.service
mode: "0644"
owner: root
group: root
- name: Systemctl daemon-reload
command: systemctl daemon-reload
- name: Enable and start rclone service
systemd:
name: rclone
state: started
enabled: yes
-10
View File
@@ -1,10 +0,0 @@
; qmoran-laptop (personal laptop)
[qmoran-laptop]
qmoran@100.118.33.8 ansible_ssh_private_key_file=~/.ssh/po/qmoran@qmoran-laptop_ed25519
[qmoran-desktop]
qmoran@qmoran-desktop-1.tailc7e587.ts.net
[po1]
qmoran@po1
-2
View File
@@ -1,2 +0,0 @@
[all]
root@187.77.193.76
-3
View File
@@ -1,3 +0,0 @@
immich.env
downloadin.env
matrix.env
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
# "navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml down
done
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
# "navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml pull
done
-12
View File
@@ -1,12 +0,0 @@
#!/run/current-system/sw/bin/bash
SERVICE_NAMES=(
# "paperless"
# "immich"
# "navidrome"
"downloadin"
# "matrix"
)
for SERVICE in "${SERVICE_NAMES[@]}"; do
docker compose --env-file /etc/.soupclown.env -f $(pwd)/${SERVICE}-compose.yaml up -d
done
-167
View File
@@ -1,167 +0,0 @@
services:
vpn:
container_name: vpn
image: qmcgaw/gluetun:v3.40
cap_add:
- NET_ADMIN # Quin does not like these
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- ${DOWNLOAD_ETC_PATH}/gluetun:/gluetun
environment:
- VPN_SERVICE_PROVIDER=private internet access
- SERVER_REGIONS=Netherlands
- OPENVPN_USER=${PIA_USERNAME}
- OPENVPN_PASSWORD=${PIA_PASSWORD}
- UPDATER_PERIOD=24h
ports:
- 9091:9091 # Transmission
- 51413:51413 # Transmission
- 51413:51413/udp # Transmission
- 9696:9696 # Prowlarr
- 7878:7878 # Radarr
- 8989:8989 # Sonarr
- 3333:3333 # bitmagnet API and WebUI port
- 3334:3334/tcp # bitmagnet BitTorrent ports
- 3334:3334/udp # bitmagnet BitTorrent ports
- 5055:5055 # seerr
# - 8112:8112 # Deluge
# - 6881:6881 # Deluge
# - 6881:6881/udp # Deluge
# - 58846:58846 # idk Deluge
restart: unless-stopped
transmission:
image: lscr.io/linuxserver/transmission:latest
container_name: transmission
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${DOWNLOAD_ETC_PATH}/transmission:/config
- ${DOWNLOAD_IN_PROGRESS_PATH}:/downloads
- /mnt/mega/jellyfinMedia:/media
restart: unless-stopped
prowlarr:
container_name: prowlarr
image: lscr.io/linuxserver/prowlarr:latest
network_mode: "service:vpn"
environment:
- TZ=${TZ}
- PUID=1000
- PGID=1000
volumes:
- ${DOWNLOAD_ETC_PATH}/prowlarr:/config
depends_on:
vpn:
condition: service_started
restart: true
required: true
restart: unless-stopped
radarr:
image: lscr.io/linuxserver/radarr:latest
network_mode: "service:vpn"
container_name: radarr
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${DOWNLOAD_ETC_PATH}/radarr:/config
- ${DOWNLOAD_IN_PROGRESS_PATH}:/downloads #optional
- /mnt/mega/jellyfinMedia:/media
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
sonarr:
image: lscr.io/linuxserver/sonarr:latest
network_mode: "service:vpn"
container_name: sonarr
environment:
- PUID=1000
- PGID=1000
- TZ=${TZ}
volumes:
- ${DOWNLOAD_ETC_PATH}/sonarr:/config
- ${DOWNLOAD_IN_PROGRESS_PATH}:/downloads #optional
- /mnt/mega/jellyfinMedia:/media
depends_on:
vpn:
condition: service_started
restart: true
required: true
prowlarr:
condition: service_started
restart: true
required: true
restart: unless-stopped
bitmagnet:
image: ghcr.io/bitmagnet-io/bitmagnet:latest
container_name: bitmagnet
restart: unless-stopped
network_mode: "service:vpn"
environment:
- POSTGRES_PASSWORD=${DOWNLOAD_BITMAGENT_DB_PASS}
- TMDB_API_KEY=${TMDB_API_KEY}
volumes:
- ${SOUPCLOWN_ETC_PATH}/downloadin/bitmagnet/etc:/root/.config/bitmagnet
command:
- worker
- run
- --keys=http_server
- --keys=queue_server
# disable the next line to run without DHT crawler
- --keys=dht_crawler
depends_on:
postgres:
condition: service_healthy
postgres:
image: postgres:16-alpine
container_name: bitmagnet-postgres
network_mode: "service:vpn"
volumes:
- ${SOUPCLOWN_ETC_PATH}/downloadin/bitmagnet/postgresdata:/var/lib/postgresql/data
restart: unless-stopped
environment:
- POSTGRES_PASSWORD=${DOWNLOAD_BITMAGENT_DB_PASS}
- POSTGRES_DB=bitmagnet
- PGUSER=postgres
shm_size: 1g
healthcheck:
test:
- CMD-SHELL
- pg_isready
start_period: 20s
interval: 10s
seerr:
image: ghcr.io/seerr-team/seerr:latest
init: true
container_name: seerr
network_mode: "service:vpn"
restart: unless-stopped
environment:
- TZ=${TZ}
- LOG_LEVEL=debug
volumes:
- ${SOUPCLOWN_ETC_PATH}/seer:/app/config
healthcheck:
test: wget --no-verbose --tries=1 --spider http://localhost:5055/api/v1/status || exit 1
start_period: 20s
timeout: 3s
interval: 15s
retries: 3
-75
View File
@@ -1,75 +0,0 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich-server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of IMMICH_UPLOAD_LOCATION in the .env file
- ${IMMICH_UPLOAD_LOCATION}:/data
- /mnt/mega/immich/old-album:/old-album
- /etc/localtime:/etc/localtime:ro
env_file:
- /etc/.soupclown.env
ports:
- "2283:2283"
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich-machine-learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- /etc/.soupclown.env
restart: always
healthcheck:
disable: false
redis:
container_name: immich-redis
image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich-postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${IMMICH_DB_PASSWORD}
POSTGRES_USER: ${IMMICH_DB_USERNAME}
POSTGRES_DB: ${IMMICH_DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: "--data-checksums"
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of IMMICH_DB_DATA_LOCATION in the .env file
- ${IMMICH_DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:
-11
View File
@@ -1,11 +0,0 @@
services:
navidrome:
container_name: navidrome
image: deluan/navidrome:latest
user: 1000:1000
ports:
- "4533:4533"
volumes:
- /root/navidrome:/data
- /mnt/mega/beetsMusic:/music
restart: unless-stopped
+4
View File
@@ -0,0 +1,4 @@
{
"v": "v1",
"data": {}
}
+1 -1
View File
@@ -1,3 +1,3 @@
#!/usr/bin/env bash
sudo nixos-rebuild --flake .#$(hostname) switch
sudo nixos-rebuild --flake . switch