Cleaning up

This commit is contained in:
quinm0
2026-02-23 16:39:06 -05:00
commit affbfcd116
43 changed files with 1224 additions and 0 deletions
+56
View File
@@ -0,0 +1,56 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
node_modules
/.pnp
.pnp.js
# testing
/coverage
.venv/
# database
/prisma/db.sqlite
/prisma/db.sqlite-journal
db.sqlite
# next.js
.next/
out/
next-env.d.ts
# production
build
credentials.json
tokens/
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# local env files
# do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables
.env
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
# idea files
.idea
# NOTE(review): `.env*` below already matches the `.env` and `.env*.local`
# entries above -- those earlier entries are redundant but harmless.
.env*
.flaskenv*
# re-include these two env files despite the `.env*` pattern above
!.env.project
!.env.vault
dist/
+86
View File
@@ -0,0 +1,86 @@
#!/bin/bash
# Install (or remove) a desktop entry, icon, and AppArmor profile for an AppImage.
#
# Usage:
#   script <path/to/App.AppImage>            install
#   script <path/to/App.AppImage> --remove   remove desktop entry + icons
#
# NOTE(review): writing the AppArmor profile requires root, while the desktop
# entry and icon are written under $HOME -- running the whole script via sudo
# installs the desktop entry for root. Confirm the intended invocation.
set -e
set -o pipefail

APPIMAGE_PATH=$1
if [ -z "$APPIMAGE_PATH" ]; then
  echo "Missing argument: appimage"
  exit 1
fi
if [ ! -f "$APPIMAGE_PATH" ]; then
  echo "File not found: $APPIMAGE_PATH"
  exit 1
fi

TEMP_SQUASHFS_PATH=$(mktemp -d)
APPIMAGE_FULLPATH=$(readlink -e "$APPIMAGE_PATH")
APPIMAGE_FILENAME=$(basename "$APPIMAGE_PATH")
APP_NAME="${APPIMAGE_FILENAME%.*}"
DESKTOP_ENTRY_PATH="${HOME}/.local/share/applications/$APP_NAME.desktop"
ICON_FOLDER="${HOME}/.local/share/icons"
mkdir -p "${ICON_FOLDER}"

# --remove mode: delete the desktop entry and any icons we copied, then stop.
if [ "$2" == "--remove" ]; then
  rm -f "$DESKTOP_ENTRY_PATH"
  find "${ICON_FOLDER}" -maxdepth 1 -type f -name "$APP_NAME.*" -delete
  echo "Removed"
  exit 0
fi

# Extract the AppImage into a temp dir so an icon can be picked out of it.
pushd "$TEMP_SQUASHFS_PATH"
"$APPIMAGE_FULLPATH" --appimage-extract > /dev/null
cd squashfs-root/

# Let the user choose an icon from the top level of the extracted image.
echo "Choose icon: "
mapfile -t FILENAMES < <(find -L . -maxdepth 1 -type f \( -iname '*.png' -o -iname '*.svg' \))
i=1
for filename in "${FILENAMES[@]}"
do
  printf " %d) %s\n" "$i" "$filename"
  i=$((i + 1))
done
read -r SELECTED_INDEX
ICON_SRC=${FILENAMES[$((SELECTED_INDEX - 1))]}
ICON_EXT="${ICON_SRC##*.}"
ICON_DST="${ICON_FOLDER}/$APP_NAME.$ICON_EXT"
cp "$ICON_SRC" "$ICON_DST"

cat <<EOT > "$DESKTOP_ENTRY_PATH"
[Desktop Entry]
Name=$APP_NAME
StartupWMClass=$APP_NAME
Exec=$APPIMAGE_FULLPATH
Icon=$ICON_DST
Type=Application
Terminal=false
EOT

# Give the app a named allow-all AppArmor profile so it is not "unconfined".
cat <<EOT > "/etc/apparmor.d/$APP_NAME"
# This profile allows everything and only exists to give the
# application a name instead of having the label "unconfined"
abi <abi/3.0>,
include <tunables/global>
profile $APP_NAME $APPIMAGE_FULLPATH flags=(default_allow) {
  userns,
  # Site-specific additions and overrides. See local/README for details.
  include if exists <local/$APP_NAME>
}
EOT

# BUG FIX: the original called `popd` twice for a single `pushd`; the second
# popd fails on an empty directory stack and, with `set -e`, aborted the
# script before cleanup and the apparmor reload. Pop exactly once.
popd
rm -rf "$TEMP_SQUASHFS_PATH"
systemctl reload apparmor.service
echo "Created"
+23
View File
@@ -0,0 +1,23 @@
# Ansible host setup
### About
This is made with the intent to be reusable whenever any hosts on a network need to be configured back to a common state.
I've decided that this will only put the system-level and lower-level configs in place, so that larger tools can then use the host for service orchestration.
# Tasks
- Install dependencies
- Create users
- Write env file
- Install Docker
- Install Rclone
## Variables:
```
- soupclown_users: list (Creates each listed user and also sets them as sudoer)
- name: <username>
isMod: true | false (sets as sudoer)
- soupclown_drive_configs: (not upgraded yet)
```
+44
View File
@@ -0,0 +1,44 @@
---
# Set up mergerfs storage: install the package, deploy the systemd unit from a
# template, register each drive from `drive_configs` in /etc/fstab, create the
# mount points, mount everything, and enable the mergerfs service.
- name: Install mergerfs if not present
  apt:
    name: mergerfs
    state: present
    update_cache: true

- name: Update mergerfs systemd service with drive paths
  template:
    src: mergerfs.service.j2
    dest: /etc/systemd/system/mergerfs.service
    mode: "0644"
    owner: root
    group: root

- name: Ensure fstab is updated with drive configurations
  lineinfile:
    regexp: "^PARTUUID={{ item.partuuid }}"
    path: /etc/fstab
    line: "PARTUUID={{ item.partuuid }} {{ mount_point }}/{{ item.name }} {{ item.fs_type }} defaults,auto,nofail 0 2"
    state: present
  loop: "{{ drive_configs }}"

- name: Ensure mount points exist and have appropriate permissions
  file:
    path: "{{ mount_point }}/{{ item.name }}"
    state: directory
    mode: "0755"
    owner: root
    group: root
  loop: "{{ drive_configs }}"

# Use the systemd module instead of shelling out to `systemctl daemon-reload`.
- name: Systemctl daemon-reload
  systemd:
    daemon_reload: true

# nofail drives may legitimately be absent, hence ignore_errors.
- name: Mount all filesystems
  command: mount -a
  ignore_errors: true

- name: Start and enable mergerfs service
  systemd:
    name: mergerfs
    state: started
    enabled: true
+3
View File
@@ -0,0 +1,3 @@
[dietpi-inv]
; B407 Raspberry pi node
root@192.168.68.52 ansible_ssh_private_key_file=~/.ssh/id_ed25519
+4
View File
@@ -0,0 +1,4 @@
---
# Apply the dietpi role to every host in the dietpi-inv inventory group.
- hosts: dietpi-inv
  roles:
    - dietpi
+28
View File
@@ -0,0 +1,28 @@
---
# Configure zsh + oh-my-zsh for each user defined in `users` on qmoran-laptop.
- name: Playbook
  hosts: qmoran-laptop
  become: true
  vars:
    omz_install_zsh: true
    users:
      - name: "qmoran"
        group: "qmoran"
        settings: |
          export PATH="/usr/local/sbin:$PATH"
          alias desktop="ssh qmoran@100.96.19.102"
          alias po1="ssh qmoran@100.70.86.70"
  tasks:
    - name: Run oh-my-zsh.
      include_role:
        name: "oh-my-zsh"
      vars:
        omz_user: "{{ item }}"
        # Only create `.zshrc` for user 'qmoran'; item.settings will be
        # appended to `.zshrc` for the user 'qmoran'.
        # The comparison already yields a boolean; `| ternary(true, false)`
        # was redundant and has been dropped.
        omz_zshrc_create: "{{ item.name == 'qmoran' }}"
        omz_plugins:
          # - "autojump"
          - "git"
      loop: "{{ users }}"
+7
View File
@@ -0,0 +1,7 @@
---
# Apply the devtools role to qmoran-desktop, with vault-encrypted secrets.
# (Truthy normalized: `yes` -> `true` per YAML 1.2 / ansible-lint.)
- hosts: qmoran-desktop
  become: true
  roles:
    - devtools
  vars_files:
    - secrets.yaml
@@ -0,0 +1,87 @@
# ============================================================================
# CTOP Installation Task
# ============================================================================
#
# Installs **ctop** (a top-like interface for container metrics) from the
# azlux repository on a Debian/Ubuntu-based target. Requires root and
# internet connectivity.
#
# Workflow:
#   1. Check the marker file /var/ctop_installed; skip everything if present.
#   2. Install prerequisites (ca-certificates, curl, gnupg, lsb-release).
#   3. Add the azlux GPG key to a dedicated keyring and register the apt
#      repository with `signed-by` pointing at that keyring.
#   4. Refresh the apt cache and install the docker-ctop package.
#   5. Create the marker file so subsequent runs are no-ops.
#
# Variables:
#   - ctop_installed_check: stat result used as the idempotence guard.
- name: Check if ctop is installed
  stat:
    path: /var/ctop_installed
  register: ctop_installed_check

- name: Install prerequisites for ctop
  apt:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present
    update_cache: true
  when: not ctop_installed_check.stat.exists

- name: Add azlux GPG key for ctop
  apt_key:
    url: https://azlux.fr/repo.gpg.key
    state: present
    keyring: /usr/share/keyrings/azlux-archive-keyring.gpg
  when: not ctop_installed_check.stat.exists

# SECURITY FIX: dropped `trusted=yes` from the sources line -- it disabled
# signature verification and defeated the `signed-by` keyring added above.
# Also dropped the redundant `sudo` (the play already runs privileged).
- name: Add azlux repository for ctop
  shell: |
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/azlux-archive-keyring.gpg] http://packages.azlux.fr/debian stable main" | tee /etc/apt/sources.list.d/azlux.list >/dev/null
  when: not ctop_installed_check.stat.exists

- name: Update apt cache after adding azlux repository
  command: apt-get update
  when: not ctop_installed_check.stat.exists

- name: Install ctop
  apt:
    name: docker-ctop
    state: present
  when: not ctop_installed_check.stat.exists

# BUG FIX: the marker file checked at the top was never created, so every
# task above re-ran on each play (the original header even claimed the file
# was created). Create it once installation succeeds.
- name: Create file indicating ctop is installed
  file:
    path: /var/ctop_installed
    state: touch
  when: not ctop_installed_check.stat.exists
@@ -0,0 +1,21 @@
# Install lazydocker via the upstream install script, guarded by a marker
# file at /var/lazydocker_installed so it only runs once.
- name: Check if lazydocker is installed
  stat:
    path: /var/lazydocker_installed
  register: lazydocker_installed_check

- name: Download install script for lazydocker
  get_url:
    url: https://raw.githubusercontent.com/jesseduffield/lazydocker/master/scripts/install_update_linux.sh
    dest: /tmp/install_lazydocker.sh
    mode: "0755"
  when: not lazydocker_installed_check.stat.exists

- name: Run lazydocker installer script
  command: /tmp/install_lazydocker.sh
  when: not lazydocker_installed_check.stat.exists

# CONSISTENCY: install-opentofu.yaml removes its downloaded installer; do the
# same here instead of leaving the script in /tmp.
- name: Remove installer script
  file:
    path: /tmp/install_lazydocker.sh
    state: absent
  when: not lazydocker_installed_check.stat.exists

- name: Create file indicating lazydocker is installed
  file:
    path: /var/lazydocker_installed
    state: touch
  when: not lazydocker_installed_check.stat.exists
@@ -0,0 +1,34 @@
# Install OpenTofu via the official installer script, guarded by a marker
# file at /var/opentofu_installed so it only runs once.
# Manual equivalent:
# curl --proto '=https' --tlsv1.2 -fsSL https://get.opentofu.org/install-opentofu.sh -o install-opentofu.sh
- name: Check if opentofu is installed
  stat:
    path: /var/opentofu_installed
  register: opentofu_installed_check

- name: Download installer script
  get_url:
    url: https://get.opentofu.org/install-opentofu.sh
    dest: /tmp/install-opentofu.sh
    mode: "0755"
  when: not opentofu_installed_check.stat.exists

# NOTE: the separate `chmod +x` task was removed -- get_url above already
# sets mode "0755", so it was a redundant extra command.
- name: Install opentofu
  command: /tmp/install-opentofu.sh --install-method deb
  when: not opentofu_installed_check.stat.exists

- name: Remove installer script
  file:
    path: /tmp/install-opentofu.sh
    state: absent
  when: not opentofu_installed_check.stat.exists

- name: Create file indicating opentofu is installed
  file:
    path: /var/opentofu_installed
    state: touch
  when: not opentofu_installed_check.stat.exists
@@ -0,0 +1,8 @@
# Entry point for the devtools role: installs each tool in sequence.
- name: Install opentofu
  import_tasks: install-opentofu.yaml
- name: Install lazydocker
  import_tasks: install-lazydocker.yaml
- name: Install ctop
  import_tasks: install-ctop.yaml
+2
View File
@@ -0,0 +1,2 @@
- name: Install Reticulum Network Stack (RNS)
import_tasks: rns.yaml
+138
View File
@@ -0,0 +1,138 @@
---
# Deploy the Reticulum Network Stack (rns): install a systemd unit for rnsd,
# write /root/.reticulum/config, and (re)start the service.
#
# NOTE(review): the service is enabled and started *before* the config file
# is deployed, then stopped, reconfigured, and started again -- on first
# provisioning rnsd briefly runs with an auto-generated default config.
# Confirm that ordering is intentional.
# - name: Install python3 packages
#   pip:
#     name: "{{ item }}"
#     state: present
#     executable: pip3
#   with_items:
#     - rns
#     - lxmf
# NOTE(review): ExecStart uses a bare `rnsd`; systemd only resolves
# non-absolute ExecStart paths on newer versions (v239+). Older systemd
# requires an absolute path (e.g. /usr/local/bin/rnsd) -- verify on target.
- name: Create systemd service for rns
  copy:
    dest: /etc/systemd/system/rns.service
    content: |
      [Unit]
      Description=Reticulum Network Stack Daemon
      After=multi-user.target
      [Service]
      Type=simple
      Restart=always
      RestartSec=3
      User=root
      ExecStart=rnsd --service
      [Install]
      WantedBy=multi-user.target
- name: Reload systemd
  systemd:
    daemon_reload: yes
- name: Ensure rns service is enabled and started
  systemd:
    name: rns.service
    enabled: yes
    state: started
# Stop the daemon before rewriting its config so it does not race the update.
- name: Stop service for config file update
  systemd:
    name: rns.service
    state: stopped
- name: Ensure Reticulum config directory exists
  file:
    path: /root/.reticulum
    state: directory
    mode: "0755"
- name: Deploy rns configuration file (using block for friendly file updates)
  copy:
    dest: /root/.reticulum/config
    content: |
      [reticulum]
      enable_transport = True
      share_instance = Yes
      instance_name = B407
      discover_interfaces = Yes
      panic_on_interface_error = No
      # If you're connecting to a large external network, you
      # can use one or more external blackhole list to block
      # spammy and excessive announces onto your network. This
      # funtionality is especially useful if you're hosting public
      # entrypoints or gateways. The list source below provides a
      # functional example, but better, more timely maintained
      # lists probably exist in the community.
      # blackhole_sources = 521c87a83afb8f29e4455e77930b973b
      [logging]
      # Valid log levels are 0 through 7:
      # 0: Log only critical information
      # 1: Log errors and lower log levels
      # 2: Log warnings and lower log levels
      # 3: Log notices and lower log levels
      # 4: Log info and lower (this is the default)
      # 5: Verbose logging
      # 6: Debug logging
      # 7: Extreme logging
      loglevel = 4
      # The interfaces section defines the physical and virtual
      # interfaces Reticulum will use to communicate on. This
      # section will contain examples for a variety of interface
      # types. You can modify these or use them as a basis for
      # your own config, or simply remove the unused ones.
      [interfaces]
      [[Default Interface]]
      type = AutoInterface
      enabled = Yes
      [[RNode LoRa Interface]]
      type = RNodeInterface
      enabled = yes
      port = /dev/ttyACM0
      # Set frequency to 915 MHz (US ISM Band)
      frequency = 915000000
      # Set LoRa bandwidth to 125 KHz
      bandwidth = 125000
      # Set TX power to 7 dBm (5 mW)
      txpower = 7
      # Select spreading factor 8. Valid
      # range is 7 through 12, with 7
      # being the fastest and 12 having
      # the longest range.
      spreadingfactor = 8
      # Select coding rate 5. Valid range
      # is 5 throough 8, with 5 being the
      # fastest, and 8 the longest range.
      codingrate = 5
      # You can configure the RNode to send
      # out identification on the channel with
      # a set interval by configuring the
      # following two parameters.
      id_callsign = B407
      id_interval = 600
      # For certain homebrew RNode interfaces
      # with low amounts of RAM, using packet
      # flow control can be useful. By default
      # it is disabled.
      # flow_control = False
      # It is possible to limit the airtime
      # utilisation of an RNode by using the
      # following two configuration options.
      # The short-term limit is applied in a
      # window of approximately 15 seconds,
      # and the long-term limit is enforced
      # over a rolling 60 minute window. Both
      # options are specified in percent.
      # airtime_limit_long = 1.5
      # airtime_limit_short = 33
- name: Start rns service after config update
  systemd:
    name: rns.service
    state: started
+66
View File
@@ -0,0 +1,66 @@
# Set up the PCPanel hardware mixer: udev rules for unprivileged device
# access, pulseaudio, and the upstream .deb release. Reference rules:
# add block to /etc/udev/rules.d/70-pcpanel.rules
# SUBSYSTEM=="usb", ATTRS{idVendor}=="04D8", ATTRS{idProduct}=="eb52", TAG+="uaccess"
# SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c4", TAG+="uaccess"
# SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c5", TAG+="uaccess"
- name: Check if we've already set up pcpanel
  stat:
    path: /etc/udev/rules.d/70-pcpanel.rules
  register: pcpanel_rules_file
- name: Check if we've installed the deb file before
  stat:
    path: /var/pcpanel_installed
  register: pcpanel_deb_file
# NOTE(review): this is gated on the *udev rules* marker, so once the rules
# file exists pulseaudio is never (re)checked -- confirm that is intended.
- name: Ensure pulseaudio is installed
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - pulseaudio
    - pulseaudio-utils
  when: not pcpanel_rules_file.stat.exists
- name: Create required directories for pcpanel
  file:
    path: /etc/udev/rules.d
    state: directory
    mode: "0755"
  when: not pcpanel_rules_file.stat.exists
# The eb52 rule is deliberately written commented-out into the rules file.
- name: Add udev rules for pcpanel
  copy:
    content: |
      # SUBSYSTEM=="usb", ATTRS{idVendor}=="04D8", ATTRS{idProduct}=="eb52", TAG+="uaccess"
      SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c4", TAG+="uaccess"
      SUBSYSTEM=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="a3c5", TAG+="uaccess"
    dest: /etc/udev/rules.d/70-pcpanel.rules
    mode: "0644"
    owner: root
    group: root
  when: not pcpanel_rules_file.stat.exists
- name: Reload udev rules
  shell: udevadm control --reload-rules && udevadm trigger
  when: not pcpanel_rules_file.stat.exists
- name: Download pcpanel deb file
  get_url:
    url: "https://github.com/nvdweem/PCPanel/releases/download/v1.7.1/pcpanel_1.7.1_amd64.deb"
    dest: /tmp/pcpanel_1.7.1_amd64.deb
    mode: "0644"
  when: not pcpanel_deb_file.stat.exists
- name: Install pcpanel deb file
  apt:
    deb: /tmp/pcpanel_1.7.1_amd64.deb
  when: not pcpanel_deb_file.stat.exists
- name: Create file to indicate pcpanel installed
  file:
    path: /var/pcpanel_installed
    state: touch
    mode: "0644"
  when: not pcpanel_deb_file.stat.exists
@@ -0,0 +1,18 @@
# Default variables for the soupclown role.
soupclown_mod_group_name: soupclown_moderator
# Users to create; isMod controls membership in the moderator group.
soupclown_users:
  - name: dbowen
    isMod: false
# Drives referenced by the storage tasks (fstab entries + mount points).
soupclown_drive_configs:
  - name: d1
    partuuid: ea8eb756-01
    fs_type: ext4
  - name: d2
    partuuid: d9892c39-c6f2-4090-bccc-1b976f85c762
    fs_type: ext4
  - name: DATA
    partuuid: 9f9dfead-c8bd-40bc-a012-7c3fa996e610
    fs_type: ext4
@@ -0,0 +1,13 @@
# Create the moderator group, then each configured user. Passwords are
# random, hashed, and only set on creation (update_password: on_create).
- name: Create soupclown moderator group
  group:
    name: "{{ soupclown_mod_group_name }}"
    state: present

# BUG FIX: without `append: true` the user module *replaces* the user's
# supplementary groups with this list -- an empty list for non-mods silently
# stripped every other group membership. The stated intent ("add to
# moderator group") requires append semantics.
- name: For each user in config, ensure user exists and if isMod true, add to moderator group
  user:
    name: "{{ item.name }}"
    state: present
    groups: "{{ [soupclown_mod_group_name] if item.isMod else [] }}"
    append: true
    password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') | password_hash('sha512') }}"
    update_password: on_create
  loop: "{{ soupclown_users }}"
@@ -0,0 +1,47 @@
---
# Install Docker via the official get.docker.com script, guarded by a marker
# file at /var/docker_installed so the whole sequence only runs once.
- name: Check if our custom file indicating Docker is installed exists
  stat:
    path: /var/docker_installed
  register: docker_installed_check

- name: Uninstall old Docker versions if present
  apt:
    name:
      - docker
      - docker-engine
      - docker.io
      - containerd
      - runc
    state: absent
    purge: true
    update_cache: true
  when: not docker_installed_check.stat.exists

- name: Install required packages for Docker
  apt:
    name:
      - gpg
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present
    update_cache: true
  when: not docker_installed_check.stat.exists

# Manual equivalent: curl -fsSL https://get.docker.com -o get-docker.sh
- name: Get temporary Docker installation script
  get_url:
    url: https://get.docker.com
    dest: /tmp/get-docker.sh
    mode: "0755"
  when: not docker_installed_check.stat.exists

- name: Install Docker using the official installation script
  command: sh /tmp/get-docker.sh
  when: not docker_installed_check.stat.exists

# CONSISTENCY FIX: this task was missing the `when` guard the rest of the
# sequence uses; without it, `state: touch` re-touched the marker and
# reported "changed" on every play.
- name: Create file indicating Docker is installed
  file:
    path: /var/docker_installed
    state: touch
  when: not docker_installed_check.stat.exists
@@ -0,0 +1,42 @@
# Base package setup: strip unwanted defaults, install common CLI tools,
# snaps, python packages, and Docker.
# NOTE: Never use apt for rclone, use official script
- name: Remove default packages
  apt:
    name:
      - firefox
      - thunderbird
    state: absent

- name: Install common packages
  apt:
    name:
      - btop
      - htop
      - desktop-file-utils
      - git
      - gh
      - ssh-import-id
      - python3-venv
      - python3-pip
    state: present

- name: Install snap packages
  snap:
    name: "{{ item }}"
    state: present
  loop:
    - vivaldi
    - signal-desktop

# break_system_packages is needed on PEP 668 "externally managed" systems.
- name: Install python3 packages
  pip:
    name: "{{ item }}"
    state: present
    break_system_packages: yes
    executable: pip3
  loop:
    - pyyaml
    - cryptography
    - passlib

- name: Install Docker
  import_tasks: docker-install.yaml
@@ -0,0 +1,9 @@
---
# Entry point for the soupclown role: dependencies, users, then rclone.
- name: Install all dependencies
  import_tasks: install-deps.yaml
- name: Create users
  import_tasks: create-users.yaml
- name: Install rclone (MEGA)
  import_tasks: rclone-mega-install.yaml
@@ -0,0 +1,71 @@
# Install rclone via the official script, write the MEGA + crypt remote
# config (secrets come from vaulted vars), and run the mount as a systemd
# service on /mnt/mega.
- name: Check if rclone is installed
  stat:
    path: /var/rclone_installed
  register: rclone_installed_check

# BUG FIX: this previously used the `command` module, which does not invoke a
# shell, so the `|` pipe and `;` separator were passed to curl as literal
# arguments and the task could never work (the original comment said as
# much). `shell` runs the line through /bin/sh as intended.
- name: Install rclone (mega)
  shell:
    cmd: "curl https://rclone.org/install.sh | sudo bash ; touch /var/rclone_installed"
    creates: /var/rclone_installed
  when: not rclone_installed_check.stat.exists

- name: Create required directories
  file:
    path: "{{ item }}"
    state: directory
    mode: "0770"
  loop:
    - /root/.config/rclone
    - /mnt/mega

# NOTE(review): the MEGA account e-mail is committed here in plain text;
# only the password/crypt keys come from vaulted vars.
- name: Create rclone config
  copy:
    content: |
      [mega]
      type = mega
      user = tnuu9h362@mozmail.com
      pass = {{ rclone_mega_key }}
      [data]
      type = crypt
      remote = mega:enc
      password = {{ rclone_encPass_one }}
      password2 = {{ rclone_encPass_two }}
    dest: /root/.config/rclone/rclone.conf

- name: Write service file for rclone mount
  copy:
    content: |
      [Unit]
      Description=Rclone mount service
      After=local-fs.target network.target
      [Service]
      Type=simple
      User=root
      KillMode=control-group
      ExecStart=rclone mount data:enc /mnt/mega -vv \
        --vfs-cache-mode full \
        --allow-other \
        --umask 0 \
        --dir-perms 0777 \
        --file-perms 0777 \
        --dir-cache-time 300h
      ExecStop=umount /mnt/mega
      Restart=on-failure
      [Install]
      WantedBy=default.target
    dest: /etc/systemd/system/rclone.service
    mode: "0644"
    owner: root
    group: root

# Use the systemd module instead of shelling out to systemctl.
- name: Systemctl daemon-reload
  systemd:
    daemon_reload: true

- name: Enable and start rclone service
  systemd:
    name: rclone
    state: started
    enabled: true
+30
View File
@@ -0,0 +1,30 @@
$ANSIBLE_VAULT;1.1;AES256
35333866366632636535643265303930333238373736323863663930383864643861613638353135
3363643465343935306530376262376566303861346464380a356135316234363437636663636237
30363465383361656638313038326262623032613561373936363139343066636334316166373439
3834363166383931660a303833383930333839366465376563623939636664626231653930623634
65653263613263613539333866386536396431383562396561326530326434666532656337616439
34663336313836646530343665323830333233366139383034363530303530376532623963623132
35653330616666396565353263666632633636613931643861623732306363663830373936356664
30373337383739636561383964316664363466626536663636376237336635333330653738643232
64313365643763656339313863303662336466363862343839313739636130343838663337643439
39393632376539383735353236396633363664653665393834386635666437316431386633333731
63656238383736303336653938613266626231306561353035306634663938306435346437303231
35396464366130646464646261626534653933366161656534356530616637663135336664353731
34366163396361363665653031303066623234343831333334313862613665656166373063643464
63313935343166663231343736323933616463633738323032383137363666383531613563366365
31666239303665326230666264376439323032343862636165316339396638643837366562626665
63643066336332373365663030613133643061316333343062356531643835316437303465626162
33373166636163643830373639366634316364666237363832646636656634643534616432343734
30313562643832653530623462383938653963323036636365626264363130666434383535666331
30376366333564323637666136393834396138313034643464306439636134626136316464383936
61383432356534326335616433373237633338633936323836356561343933356562653934303334
64303566636335363864663166373833616363616330613362616534346532326134653430656632
35626439663662623961356235616239613237333564316662393333326162626466613737343361
32343338663065626666613735666166386438383261616535393032346236656532623462663662
61643330336430636632386662623336663363623535323861346433626666386331616363653634
62313162396431343135653336656665313536343033623635663832353037383032353861663064
31316131303563303139343763656539613034643330663030386438643938646237326137396233
32356161616639643930373763626164623531643736316363623731333935303330306632646139
63303661613537396439656134353835646232363163363664616462356337386662303062646261
64316366633133386437373339323333633433393763323531346330663839396130
+10
View File
@@ -0,0 +1,10 @@
; qmoran-laptop (personal laptop)
[qmoran-laptop]
qmoran@100.118.33.8 ansible_ssh_private_key_file=~/.ssh/po/qmoran@qmoran-laptop_ed25519
[qmoran-desktop]
qmoran@qmoran-desktop
[po1]
qmoran@po1
+2
View File
@@ -0,0 +1,2 @@
[all]
root@187.77.193.76
+2
View File
@@ -0,0 +1,2 @@
immich.env
downloadin.env
+6
View File
@@ -0,0 +1,6 @@
#!/bin/bash
# Bring every service stack down by running each <service>-down.sh.
# NOTE: assumes the current working directory is this scripts directory.
SERVICE_NAMES=("paperless" "jellyfin" "immich" "navidrome" "deluge" "downloadin")
for SERVICE in "${SERVICE_NAMES[@]}"; do
  # Quote the command substitution so a path containing spaces still works.
  bash "$(pwd)/${SERVICE}-down.sh"
done
+6
View File
@@ -0,0 +1,6 @@
#!/bin/bash
# Bring every service stack up by running each <service>-up.sh.
# NOTE: assumes the current working directory is this scripts directory.
# NOTE(review): unlike services-down, "deluge" is absent here -- presumably
# it is started as part of the downloadin stack; confirm this is intended.
SERVICE_NAMES=("paperless" "jellyfin" "immich" "navidrome" "downloadin")
for SERVICE in "${SERVICE_NAMES[@]}"; do
  # Quote the command substitution so a path containing spaces still works.
  bash "$(pwd)/${SERVICE}-up.sh"
done
+121
View File
@@ -0,0 +1,121 @@
# Download stack: all torrent/indexer services share the VPN container's
# network namespace, so their ports are published on the vpn service.
#
# Environment variables (from downloadin.env):
# PIA_USERNAME: your PIA username
# PIA_PASSWORD: your PIA password
# ETC_PATH: path to store config files, e.g. /etc/services
# DOWNLOAD_PATH: path to store in-progress downloads
# STORAGE_PATH: path to store completed downloads
# STORAGE_MINIMUM_FREE_SPACE_GB*: minimum free space in GB for storage
# DOWNLOAD_MINIMUM_FREE_SPACE_GB*: minimum free space in GB for in-progress downloads
# * = not implemented
services:
  vpn:
    container_name: vpn
    image: qmcgaw/gluetun:v3.40
    cap_add:
      - NET_ADMIN # Quin does not like these
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - ${ETC_PATH}/gluetun:/gluetun
    environment:
      - VPN_SERVICE_PROVIDER=private internet access
      - SERVER_REGIONS=Netherlands
      - OPENVPN_USER=${PIA_USERNAME}
      - OPENVPN_PASSWORD=${PIA_PASSWORD}
      - UPDATER_PERIOD=24h
    # Port mappings are quoted: unquoted host:container pairs are plain
    # scalars that YAML 1.1 parsers can misread (sexagesimal trap).
    ports:
      # Deluge
      - "8112:8112"
      - "6881:6881"
      - "6881:6881/udp"
      - "58846:58846" # optional
      # Prowlarr
      - "9696:9696"
      # Radarr
      - "7878:7878"
      # Sonarr
      - "8989:8989"
    restart: unless-stopped
  deluge:
    container_name: deluge
    image: lscr.io/linuxserver/deluge:latest
    network_mode: "service:vpn"
    environment:
      - TZ=America/New_York
      - PUID=1000
      - PGID=1000
    volumes:
      - ${ETC_PATH}/deluge:/config
      - ${DOWNLOAD_PATH}:/downloads
      - ${STORAGE_PATH}:/storage
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped
  prowlarr:
    container_name: prowlarr
    image: lscr.io/linuxserver/prowlarr:latest
    network_mode: "service:vpn"
    environment:
      - TZ=America/New_York
      - PUID=1000
      - PGID=1000
    volumes:
      - ${ETC_PATH}/prowlarr:/config
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped
  radarr:
    image: lscr.io/linuxserver/radarr:latest
    network_mode: "service:vpn"
    container_name: radarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ${ETC_PATH}/radarr:/config
      - ${STORAGE_PATH}/Movies:/Movies #optional
      - ${DOWNLOAD_PATH}:/downloads #optional
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
      prowlarr:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped
  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    network_mode: "service:vpn"
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ${ETC_PATH}/sonarr:/config
      - ${STORAGE_PATH}/Shows:/tv #optional
      - ${DOWNLOAD_PATH}:/downloads #optional
    depends_on:
      vpn:
        condition: service_started
        restart: true
        required: true
      prowlarr:
        condition: service_started
        restart: true
        required: true
    restart: unless-stopped
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Stop the downloadin stack (vpn, deluge, prowlarr, radarr, sonarr).
# $(pwd) is quoted so a working directory containing spaces still works.
docker compose --env-file "$(pwd)/downloadin.env" -f downloadin-compose.yaml down
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Start the downloadin stack (vpn, deluge, prowlarr, radarr, sonarr).
# $(pwd) is quoted so a working directory containing spaces still works.
docker compose --env-file "$(pwd)/downloadin.env" -f downloadin-compose.yaml up -d
+75
View File
@@ -0,0 +1,75 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
  immich-server:
    container_name: immich-server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - ${UPLOAD_LOCATION}:/data
      # Local customization: expose an old album from the MEGA rclone mount.
      - /mnt/mega/immich/old-album:/old-album
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - immich.env
    ports:
      - "2283:2283"
    depends_on:
      - redis
      - database
    restart: always
    healthcheck:
      disable: false
  immich-machine-learning:
    container_name: immich-machine-learning
    # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - immich.env
    restart: always
    healthcheck:
      disable: false
  redis:
    container_name: immich-redis
    image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always
  database:
    container_name: immich-postgres
    image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: "--data-checksums"
      # Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
      # DB_STORAGE_TYPE: 'HDD'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    shm_size: 128mb
    restart: always
volumes:
  model-cache:
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Stop the Immich stack.
# $(pwd) is quoted so a working directory containing spaces still works.
docker compose --env-file "$(pwd)/immich.env" -f immich-compose.yaml down
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Start the Immich stack.
# $(pwd) substitutions are quoted so a path containing spaces still works.
docker compose --env-file "$(pwd)/immich.env" -f "$(pwd)/immich-compose.yaml" up -d
+32
View File
@@ -0,0 +1,32 @@
# Jellyfin media server with NVIDIA hardware acceleration.
services:
  jellyfin:
    image: jellyfin/jellyfin
    container_name: jellyfin
    # Run as uid:gid 1000:1000 so media files stay owned by the host user.
    user: 1000:1000
    ports:
      - 8096:8096/tcp # HTTP web UI
      - 7359:7359/udp # client auto-discovery
    environment:
      - TZ=America/New_York
    volumes:
      - /mnt/mega/jellyfin-etc:/config
      - /mnt/mega/jellyfinMedia/cache:/cache
      - type: bind
        source: /mnt/mega/jellyfinMedia
        target: /media
    # NOTE(review): `runtime: nvidia`, the explicit /dev/nvidia* device list,
    # and the `deploy` GPU reservation below all request GPU access -- this
    # looks redundant; confirm which mechanism the target Docker needs.
    runtime: nvidia
    devices:
      - /dev/nvidia-caps:/dev/nvidia-caps
      - /dev/nvidia0:/dev/nvidia0
      - /dev/nvidiactl:/dev/nvidiactl
      - /dev/nvidia-modeset:/dev/nvidia-modeset
      - /dev/nvidia-uvm:/dev/nvidia-uvm
      - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: "always"
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Stop the Jellyfin stack (run from the directory containing the compose file).
docker compose -f jellyfin-compose.yaml down
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Start the Jellyfin stack (run from the directory containing the compose file).
docker compose -f jellyfin-compose.yaml up -d
+11
View File
@@ -0,0 +1,11 @@
# Navidrome music server, serving the shared media library's Music folder.
services:
  navidrome:
    container_name: navidrome
    image: deluan/navidrome:latest
    # Run as uid:gid 1000:1000 so data files stay owned by the host user.
    user: 1000:1000
    ports:
      - "4533:4533"
    volumes:
      - /root/navidrome:/data
      - /mnt/mega/jellyfinMedia/Music:/music
    restart: unless-stopped
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Stop the Navidrome stack.
# $(pwd) is quoted so a working directory containing spaces still works.
docker compose -f "$(pwd)/navidrome-compose.yaml" down
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Start the Navidrome stack.
# $(pwd) is quoted so a working directory containing spaces still works.
docker compose -f "$(pwd)/navidrome-compose.yaml" up -d
+40
View File
@@ -0,0 +1,40 @@
###############################################################################
# Paperless-ngx settings                                                      #
###############################################################################
# See http://docs.paperless-ngx.com/configuration/ for all available options.
# The UID and GID of the user used to run paperless in the container. Set this
# to your UID and GID on the host so that you have write access to the
# consumption directory.
#USERMAP_UID=1000
#USERMAP_GID=1000
PAPERLESS_CONSUMPTION_DIR=/usr/src/paperless/consume
# See the documentation linked above for all options. A few commonly adjusted settings
# are provided below.
# This is required if you will be exposing Paperless-ngx on a public domain
# (if doing so please consider security measures such as reverse proxy)
# NOTE(review): paperless-compose.yaml also sets PAPERLESS_URL in its
# `environment:` block, which takes precedence over this value.
PAPERLESS_URL=http://qmoran-desktop:8000
# Adjust this key if you plan to make paperless available publicly. It should
# be a very long sequence of random characters. You don't need to remember it.
# NOTE(review): this secret key is committed to version control -- rotate it
# and load it from an untracked file or secret store before exposing publicly.
PAPERLESS_SECRET_KEY=faluw4hltilweui4thueiuraew4hro7wlrhalweifuafsuhelwu3erhaiwuwehfiufhaluhrfawle98f7aspdhf98apwh3raw9i3raw9efhawepfufaopudspifushf98eup4h89qra8wfqjw39rjfap
# Use this variable to set a timezone for the Paperless Docker containers. Defaults to UTC.
PAPERLESS_TIME_ZONE=America/New_York
# The default language to use for OCR. Set this to the language most of your
# documents are written in.
PAPERLESS_OCR_LANGUAGE=eng
# Additional languages to install for text recognition, separated by a whitespace.
# Note that this is different from PAPERLESS_OCR_LANGUAGE (default=eng), which defines
# the language used for OCR.
# The container installs English, German, Italian, Spanish and French by default.
# See https://packages.debian.org/search?keywords=tesseract-ocr-&searchon=names
# for available languages.
#PAPERLESS_OCR_LANGUAGES=tur ces
+62
View File
@@ -0,0 +1,62 @@
# Docker Compose file for running paperless from the Docker Hub.
# This file contains everything paperless needs to run.
# Paperless supports amd64, arm and arm64 hardware.
#
# All compose files of paperless configure paperless in the following way:
#
# - Paperless is (re)started on system boot, if it was running before shutdown.
# - Docker volumes for storing data are managed by Docker.
# - Folders for importing and exporting files are created in the same directory
#   as this file and mounted to the correct folders inside the container.
# - Paperless listens on port 8000.
#
# In addition to that, this Docker Compose file adds the following optional
# configurations:
#
# - Instead of SQLite (default), PostgreSQL is used as the database server.
#
# To install and update paperless with this file, do the following:
#
# - Copy this file as 'docker-compose.yml' and the files 'docker-compose.env'
#   and '.env' into a folder.
# - Run 'docker compose pull'.
# - Run 'docker compose up -d'.
#
# For more extensive installation and update instructions, refer to the
# documentation.
services:
  broker:
    container_name: paperless-redis
    image: docker.io/library/redis:8
    restart: unless-stopped
    volumes:
      - /root/paperless/redisdata:/data
  db:
    container_name: paperless-postgres
    image: docker.io/library/postgres:18
    restart: unless-stopped
    volumes:
      - /root/paperless/pgdata:/var/lib/postgresql
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      # NOTE(review): hardcoded DB password; acceptable only because the
      # database is not published outside the compose network -- confirm.
      POSTGRES_PASSWORD: paperless
  webserver:
    container_name: paperless-web
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    restart: unless-stopped
    depends_on:
      - db
      - broker
    ports:
      - "8000:8000"
    volumes:
      - /mnt/mega/paperless_data:/usr/src/paperless/data
      - /mnt/mega/paperless_media:/usr/src/paperless/media
      - /mnt/mega/paperless/export:/usr/src/paperless/export
      - /mnt/mega/paperless/consume:/usr/src/paperless/consume
    env_file: paperless-compose.env
    environment:
      # These values override any duplicates set in paperless-compose.env
      # (e.g. PAPERLESS_URL is also defined there).
      PAPERLESS_URL: https://paperless.soupclown.com
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Stop the Paperless stack.
# $(pwd) substitutions are quoted so a path containing spaces still works.
docker compose --env-file "$(pwd)/paperless-compose.env" -f "$(pwd)/paperless-compose.yaml" down
+2
View File
@@ -0,0 +1,2 @@
#!/bin/bash
# Start the Paperless stack.
# $(pwd) substitutions are quoted so a path containing spaces still works.
docker compose --env-file "$(pwd)/paperless-compose.env" -f "$(pwd)/paperless-compose.yaml" up -d