Compare commits

...

169 commits

Author SHA1 Message Date
ebbd106e0a Enable hsts for nextcloud 2024-12-14 03:53:59 +01:00
660f9561cf Store TrackMania in directory 2024-12-14 03:49:46 +01:00
ae101e2eb2 Fix path of nextcloud service proxy 2024-12-14 03:45:41 +01:00
a0874d9f37 Add jellyseerr by default 2024-12-13 02:43:32 +01:00
7df90179c9 Improve Nextcloud nginx config 2024-12-13 01:51:47 +01:00
99fa900c1a Fix incorrect git ssh script 2024-12-11 04:18:13 +01:00
4161fce8d7 Link gitea to forgejo 2024-12-11 04:18:00 +01:00
896c77559a Set up authentication automatically 2024-12-11 04:17:48 +01:00
ce70e66c70 Make forgejo work under Arch Linux 2024-12-11 03:30:36 +01:00
84818c2bac Refactor jellyfin nginx config 2024-12-11 02:27:49 +01:00
c349a2d5be Name forgejo user properly 2024-12-11 01:30:36 +01:00
56f7f4228b Fix ssh key permission error 2024-12-11 01:17:54 +01:00
cf717d7a50 Inherit profile script title properly 2024-12-11 01:13:02 +01:00
f0fef82fa1 Prevent PowerShell errors 2024-12-11 01:12:52 +01:00
db247315cf Fix malformed yq scripts 2024-12-08 18:04:59 +01:00
93284f2016 Disable auto login after installation 2024-12-08 17:27:03 +01:00
9a911cb93b Reboot system as root 2024-12-08 17:18:59 +01:00
da1870c250 Adjust boot label for server 2024-12-08 16:50:16 +01:00
97e372ae08 Fix permission issues 2024-12-08 06:15:43 +01:00
a8e0f37416 Return exit code properly 2024-12-08 05:48:20 +01:00
c1483d29e1 Fix typo 2024-12-08 05:20:42 +01:00
e435ce8c48 Prevent unnecessary config values 2024-12-08 04:56:41 +01:00
71a301ff0f Remove bash profile during installation script 2024-12-08 04:46:53 +01:00
2e087298a3 Normalize exit code of setup actions 2024-12-08 04:46:13 +01:00
37f3b3980b Fix malformed output 2024-12-08 03:07:54 +01:00
9007570d94 Install systemd-networkd during setup 2024-12-08 02:49:45 +01:00
73734293fd Enable DNS on the server 2024-12-08 02:49:28 +01:00
fad6cbe1d2 Fix incorrect configuration 2024-12-08 01:57:18 +01:00
bcdd7b0d8a Install networkmanager only if necessary 2024-12-08 01:47:04 +01:00
fa295f3b1a Allow configuring networkd 2024-12-08 01:03:06 +01:00
6a5ec8d5de Install NetworkManager only if enabled 2024-12-08 00:05:44 +01:00
19871a2e49 Allow configuring systemd-networkd 2024-12-07 23:45:31 +01:00
ab8a6f8609 Allow choosing between NetworkManager and systemd-networkd 2024-12-07 23:38:17 +01:00
52fd59c017 Set profiles of btrfs volume 2024-12-07 23:02:03 +01:00
02dd399284 Forcefully write btrfs 2024-12-07 22:40:59 +01:00
7c3297b993 Collect all affected devices 2024-12-07 22:18:10 +01:00
ccbf77ce3f Fix outdated config names 2024-12-07 21:46:53 +01:00
1944356d05 Add support for partitioning btrfs volumes 2024-12-07 21:14:58 +01:00
5e7cfccff6 Add scripts for installing btrfs 2024-12-07 21:14:36 +01:00
a4fe8d78dc Remove trailing slashes properly 2024-12-07 21:09:18 +01:00
605c32e467 Fix incorrect config name 2024-12-07 20:03:58 +01:00
9caedac094 Create mount script in fileSystems module 2024-12-07 19:28:42 +01:00
f1ed876cc1 Add a separate option for mounts 2024-12-07 19:11:41 +01:00
3790fd267e Allow partitions without format 2024-12-07 18:45:44 +01:00
70a02aa8b9 Add btrfs support 2024-12-07 18:41:08 +01:00
0df6d5ece3 Remove unnecessary code 2024-12-07 18:36:54 +01:00
b16e1d420e Rename filesystem directory 2024-12-07 18:36:19 +01:00
5ffe98a56d Remove redundancy 2024-12-07 17:50:10 +01:00
7b5564a632 Store device scripts in scripts option 2024-12-07 17:49:57 +01:00
68e1990c87 Refactor terminology 2024-12-07 17:43:53 +01:00
0ce36394ac Move the script to the fileSystems module 2024-12-07 17:35:31 +01:00
ed4fa1a33d Split up disk script into multiple scripts 2024-12-07 16:12:04 +01:00
7b3b7fcdbc Remove separate os disk declaration 2024-12-07 16:11:19 +01:00
ce13e03ef1 Restructure partition config 2024-12-07 12:24:10 +01:00
77d8c350b7 Remove duplicated code 2024-12-06 16:22:05 +01:00
055955a1bf Pause tmux session on error 2024-12-06 13:52:26 +01:00
569204babe Fix permission issue 2024-12-06 13:51:08 +01:00
f3ff0cb0db Add missing location comment 2024-12-06 13:50:58 +01:00
f8eed79dd6 Disable cloud sync on the server 2024-12-06 13:40:04 +01:00
e527b33d82 Restore permissions in nixpkgs 2024-12-06 00:55:51 +01:00
19e9aecc5b Copy the .git directory to the setup environment 2024-12-05 22:36:14 +01:00
7f60744056 Ensure the nix channel directory exists 2024-12-05 20:31:56 +01:00
f3b6f3be23 Keep .git directory of nixpkgs 2024-12-05 19:40:41 +01:00
1e6bf93191 Install nginx throughout the setup 2024-12-05 18:27:10 +01:00
ad36fe02b3 Update to the most recent nixpkgs-unstable commit 2024-12-05 18:22:54 +01:00
656f21e849 Allow commits as nix versions 2024-12-05 18:19:18 +01:00
d6105a22b8 Fix permission issue 2024-12-05 18:18:57 +01:00
d0f7f6e4f9 Enable grub on the server 2024-12-05 17:08:41 +01:00
1051fe01c1 Ignore exit code of string-split0 2024-12-05 16:34:24 +01:00
6f5d13f07f Prevent unnecessary config fetches 2024-12-05 16:01:41 +01:00
eac4dba453 Update the nixpkgs version 2024-12-05 15:43:54 +01:00
87b032ed70 Add openssh as a dependency 2024-12-05 14:59:28 +01:00
800a6ed542 Install nixpkgs during setup 2024-12-05 14:13:52 +01:00
4ba2c1b1a5 Prompt backup config during install 2024-12-05 10:40:59 +01:00
ee4906cf4d Install valhalla dependencies during setup 2024-12-05 10:33:06 +01:00
8e02826f4b Adjust hook description 2024-12-05 10:25:28 +01:00
20c1f1558f Fetch missing variables 2024-12-05 10:13:35 +01:00
8b2d6a8a6a Add a function for initializing the OS 2024-12-05 10:09:35 +01:00
53c35a5742 Run OS setup first 2024-12-05 09:59:04 +01:00
4f446cc189 Create directories for ssh files 2024-12-05 09:53:31 +01:00
aebc398989 Create ssh key only if necessary 2024-12-05 09:47:32 +01:00
d52ce97c6c Fix missing variable 2024-12-05 09:45:49 +01:00
ff05734cdf Partition drives after action setup 2024-12-05 09:45:08 +01:00
a94a589c77 Create missing script 2024-12-05 09:41:33 +01:00
3758baba8b Remove unnecessary function 2024-12-05 09:41:20 +01:00
406c25b3d3 Get mount dir for preparing nix 2024-12-05 09:38:38 +01:00
45b0ed910c Fix non-existent variable 2024-12-05 09:27:53 +01:00
1c452512ef Create function for bootstrapping dependencies 2024-12-05 09:25:52 +01:00
500ab697f6 Fix incorrect repo path 2024-12-04 19:06:32 +01:00
ca2b623e1a Allow configuring backup during setup 2024-12-04 19:02:05 +01:00
a7b9572165 Make config function name more concise 2024-12-04 18:56:17 +01:00
972de35629 Use pre-existing setup user if existent 2024-12-04 18:35:15 +01:00
562c55b78f Add function for configuring backup server 2024-12-04 18:33:30 +01:00
801e010efb Enable anki-sync on the server 2024-12-04 18:31:35 +01:00
6f68f021a3 Create new ssh key implicitly 2024-12-04 18:26:45 +01:00
c75228380a Add missing docker services 2024-12-04 17:02:54 +01:00
4c23b600f2 Format files 2024-12-04 17:02:11 +01:00
3607765313 Ignore inexistent backup archives 2024-12-04 17:02:01 +01:00
a46ea0cacc Load backup user variable dynamically 2024-12-04 17:00:36 +01:00
5411281d95 Fix collabora service 2024-12-04 16:59:30 +01:00
86576476ba Add default port for terraria 2024-12-04 16:59:01 +01:00
0b2d39ea7c Fix websocket access for wekan 2024-12-04 16:58:51 +01:00
30254ea01c Configure wekan service properly 2024-12-04 16:58:41 +01:00
67ccef8474 Remove custom wekan user 2024-12-04 16:58:18 +01:00
84dc19dd95 Fix overcomplicated location config 2024-12-04 16:57:58 +01:00
2b34819227 Remove unnecessary proxy config 2024-12-04 16:57:40 +01:00
8f09b02fc2 Allow specifying comments for locations 2024-12-04 16:57:25 +01:00
1c93d4b3f4 Allow custom schemes for location settings 2024-12-04 16:55:56 +01:00
9dc93b1b8e Insert comment before location block 2024-12-04 16:54:54 +01:00
e0f4c7e74a Store rtorrent session files in volume 2024-12-04 16:53:24 +01:00
5384b38613 Normalize docker volume paths 2024-12-04 16:52:32 +01:00
0413612bce Update rtorrent and privoxy to proton community cli 2024-12-04 16:50:29 +01:00
f5001a28b7 Remove unnecessary parameter 2024-12-04 16:48:51 +01:00
9634789027 Generate extra server config with full domain 2024-12-04 16:48:35 +01:00
6d221cf07c Update script for new cgroup version 2024-12-04 16:47:11 +01:00
d2522d42a3 Fix typos 2024-12-04 16:46:57 +01:00
ba371f6417 Fix permission issues 2024-12-04 16:44:01 +01:00
7b53d0b506 Enable nix service immediately 2024-12-04 14:41:49 +01:00
351d3070d2 Normalize location of proton data 2024-11-29 02:20:26 +01:00
6abc2240be Format all files 2024-11-29 01:13:58 +01:00
046627eca4 Fix handling users with no docker services 2024-11-28 22:12:44 +01:00
084897f5ee Fix invalid git cli call 2024-11-28 22:12:15 +01:00
86fecfd3e4 Add missing function argument 2024-11-28 22:11:56 +01:00
c533b66c4e Inherit all env variables in the tmux session 2024-11-28 22:11:27 +01:00
8c9c6cd411 Back up teamspeak properly 2024-11-28 20:03:08 +01:00
91fee52e81 Force creation of an ssh key for backups 2024-11-27 05:52:42 +01:00
080cca791f Preserve PATH for setup actions 2024-11-27 05:24:53 +01:00
c9ba881ab2 Add scripts for installing a terraria server 2024-11-27 05:10:42 +01:00
f1f4364c63 Back up extra files using getExtraBackupPatterns 2024-11-27 04:07:14 +01:00
8bcce1c5a1 Remove world mounts for minecraft services 2024-11-27 04:00:20 +01:00
786e422aa6 Simplify xaseco command handling 2024-11-27 03:49:04 +01:00
53d140394a Silence unnecessary output 2024-11-27 03:48:43 +01:00
eb2436c562 Allow specifying xaseco operators and admins 2024-11-27 03:48:31 +01:00
fa98fc8f5b Normalize display name of vaultwarden mailer 2024-11-27 03:48:00 +01:00
a314e502e0 Store all game data of TrackMania service 2024-11-27 03:47:41 +01:00
59f0c8e254 Add declaration of transmission service 2024-11-27 03:47:21 +01:00
f36f26daeb Leave user of wekan container unchanged 2024-11-27 03:47:02 +01:00
c024066139 Store domain names in overrides 2024-11-27 03:41:26 +01:00
9b7f9275c9 Normalize the save path of service data 2024-11-27 03:02:59 +01:00
38d8eeedde Improve the format of the docker files 2024-11-27 02:59:07 +01:00
9a3d7d4f68 Rename secrets files to overrides 2024-11-27 02:54:44 +01:00
df74ac2337 Store Jellyfin domain in secrets 2024-11-26 16:01:38 +01:00
6fabcdc304 Allow specifying overrides for docker services 2024-11-26 15:47:38 +01:00
30c1618318 Allow specifying additional patterns to back up 2024-11-26 15:44:49 +01:00
8101b9e817 Configure forgejo runner by default 2024-11-26 15:42:18 +01:00
6a442f8392 Store all forgejo files in data/ 2024-11-26 15:42:07 +01:00
c5e15c21e4 Set forgejo domain by default 2024-11-26 15:41:51 +01:00
422f36f523 Reduce redundancy of drone setup 2024-11-26 14:54:52 +01:00
b2d6524b0b Fix order of properties in drone templates 2024-11-26 12:43:21 +01:00
d80dbadc7b Load enabled services from machine config 2024-11-25 19:11:14 +01:00
5589b6534b Ensure nix channels are functioning 2024-11-25 15:19:13 +01:00
9ed3aa6fe4 Force running actions as setup-user 2024-11-25 15:19:01 +01:00
a6414a60a2 Skip backup questions if specified 2024-11-23 17:26:12 +01:00
822008f459 Create backup whitelist for docker-compose 2024-11-23 16:57:11 +01:00
84e5a9d7c0 Add a profile for the server 2024-11-22 17:31:18 +01:00
bc914ff88e Add options for enabling docker-compose services 2024-11-22 17:31:08 +01:00
db0b8637ab Add scripts for installing woodpecker 2024-11-22 17:29:45 +01:00
a9f62c3648 Add scripts for installing wekan 2024-11-22 01:54:50 +01:00
d5bc49759b Add scripts for installing TrackMania server 2024-11-20 00:33:09 +01:00
8cd59ae6e9 Replace yq with go-yq 2024-11-20 00:30:08 +01:00
fe99897a2a Add scripts for installing TeamSpeak 2024-11-13 18:18:15 +01:00
5fd56d46fa Add scripts for installing ryot 2024-11-13 17:39:21 +01:00
5c37ec4649 Add scripts for installing nextcloud 2024-11-13 16:25:39 +01:00
941c89e896 Restructure service overrides 2024-11-13 16:22:57 +01:00
e2b08c77db Add scripts for installing Minecraft 2024-11-10 13:00:23 +01:00
83a5e144da Add settings for allowing VPN access in lxc 2024-11-10 12:56:48 +01:00
e6c28188e5 Improve look & feel of lxc config 2024-11-10 12:56:34 +01:00
9507bb1034 Add scripts for installing jellyfin 2024-11-10 12:33:36 +01:00
a538a3d3cf Add a script for installing forgejo 2024-11-07 23:58:06 +01:00
136 changed files with 4287 additions and 1045 deletions

View file

@@ -5,17 +5,16 @@ fi
~/.automated_script.sh
if bash -c "ls /sys/class/backlight/*/max_brightness" > /dev/null 2>&1
then
cat /sys/class/backlight/*/max_brightness > /sys/class/backlight/*/brightness
if bash -c "ls /sys/class/backlight/*/max_brightness" >/dev/null 2>&1; then
cat /sys/class/backlight/*/max_brightness >/sys/class/backlight/*/brightness
fi
cd "/root/PortValhalla" || exit
git diff -p -R --no-ext-diff --no-color --diff-filter=M \
| grep -E "^(diff|(old|new) mode)" --color=never \
| sed "/^diff/{ x; d; }; x; /./{ p; z; }; x;" \
| git apply
git diff -p -R --no-ext-diff --no-color --diff-filter=M |
grep -E "^(diff|(old|new) mode)" --color=never |
sed "/^diff/{ x; d; }; x; /./{ p; z; }; x;" |
git apply
loadkeys de_CH-latin1
./scripts/Arch/OS/setup.fish

View file

@@ -16,9 +16,8 @@ begin
and begin
git -C "$projectDir" ls-files
git -C "$projectDir" ls-files --exclude-standard --others
end | \
rsync --files-from=/dev/stdin --exclude={.gitignore,README.md,scripts,LICENSE,valhalla.patch} "$dir/.." "$contextRoot"
end &> /dev/null
end | rsync --files-from=/dev/stdin --exclude={.gitignore,README.md,scripts,LICENSE,valhalla.patch} "$dir/.." "$contextRoot"
end &>/dev/null
and git -C "$contextRoot" diff
end

View file

@@ -2,5 +2,5 @@
# Updates the patch to be applicable to Arch's current `releng` template.
begin
set -l dir (status dirname)
"$dir/show-diff.fish" > "$dir/valhalla.patch"
"$dir/show-diff.fish" >"$dir/valhalla.patch"
end

View file

@@ -1,23 +1,22 @@
diff --git a/airootfs/root/.zlogin b/airootfs/root/.zlogin
index bf6bc8f..bdbe55c 100644
index bf6bc8f..e71dc26 100644
--- a/airootfs/root/.zlogin
+++ b/airootfs/root/.zlogin
@@ -4,3 +4,18 @@ if grep -Fqa 'accessibility=' /proc/cmdline &> /dev/null; then
@@ -4,3 +4,17 @@ if grep -Fqa 'accessibility=' /proc/cmdline &> /dev/null; then
fi
~/.automated_script.sh
+
+if bash -c "ls /sys/class/backlight/*/max_brightness" > /dev/null 2>&1
+then
+ cat /sys/class/backlight/*/max_brightness > /sys/class/backlight/*/brightness
+if bash -c "ls /sys/class/backlight/*/max_brightness" >/dev/null 2>&1; then
+ cat /sys/class/backlight/*/max_brightness >/sys/class/backlight/*/brightness
+fi
+
+cd "/root/PortValhalla" || exit
+
+git diff -p -R --no-ext-diff --no-color --diff-filter=M \
+ | grep -E "^(diff|(old|new) mode)" --color=never \
+ | sed "/^diff/{ x; d; }; x; /./{ p; z; }; x;" \
+ | git apply
+git diff -p -R --no-ext-diff --no-color --diff-filter=M |
+ grep -E "^(diff|(old|new) mode)" --color=never |
+ sed "/^diff/{ x; d; }; x; /./{ p; z; }; x;" |
+ git apply
+
+loadkeys de_CH-latin1
+./scripts/Arch/OS/setup.fish

View file

@@ -45,6 +45,7 @@
valhalla = {
DerGeret = import ./profiles/machines/manuel/DerGeret/Arch/config.nix;
ManuSurface = import ./profiles/machines/manuel/ManuSurface/Arch/config.nix;
server = import ./profiles/machines/manuel/server.nix;
};
};
}

View file

@@ -5,8 +5,10 @@ let
overlay = [ ];
};
property = (builtins.getEnv "PROPERTY");
processor = if (builtins.stringLength property > 0) then
processor =
if (builtins.stringLength property > 0) then
(_: lib.attrsets.getAttrFromPath (lib.strings.splitString "." property) _)
else
(_: _);
in _: processor (lib.evalModules { modules = [ _ ]; }).config
in
_: processor (lib.evalModules { modules = [ _ ]; }).config

120 lib/modules/fileSystems.nix Normal file
View file

@@ -0,0 +1,120 @@
{ lib, config, ... }:
let
inherit (lib) types mkOption;
cfg = config.valhalla.fileSystems;
mountType = types.submodule (
{ config, name, ... }: {
options = {
device = mkOption {
type = types.nullOr types.str;
description = "The device to mount.";
default = null;
};
mountPoint = mkOption {
type = types.str;
description = "The path to mount the device to.";
default = name;
};
fsType = mkOption {
type = types.nullOr types.str;
description = "The file system type of the mount.";
default = null;
};
options = mkOption {
type = types.listOf types.str;
description = "The options of the mount.";
default = [ ];
};
};
}
);
in
{
imports = [
./fileSystems/btrfs.nix
./fileSystems/disks.nix
];
options = {
valhalla = {
fileSystems = {
rootDir = mkOption {
type = types.str;
description = "The root of the installation directory to mount disks into.";
default = "/mnt";
};
mounts = mkOption {
type = types.attrsOf mountType;
description = "The devices to mount.";
default = { };
};
script = mkOption {
type = types.str;
description = "The script for preparing the system's mounts.";
};
};
};
};
config = {
valhalla = {
fileSystems = {
script =
let
inherit (lib.strings) normalizePath removeSuffix;
devices = (builtins.attrValues cfg.diskSetup.devices);
mountScript = lib.strings.concatLines (
(builtins.concatMap
(
_: [
"partprobe 2> /dev/null || true"
"udevadm trigger"
(builtins.concatStringsSep " " (
[ "sudo" "mount" "--mkdir" ] ++
(lib.optionals (_.fsType == "ntfs") [ "-t" "ntfs3" ]) ++
[
(builtins.concatStringsSep " " (builtins.map (_: "-o ${_}") _.options))
(_.device)
(removeSuffix "/" (normalizePath "/${cfg.rootDir}/${_.mountPoint}"))
]
))
]
)
(builtins.attrValues cfg.mounts))
);
affected = (builtins.map (_: _.deviceVariable) devices) ++
(builtins.concatMap
(_: builtins.map (_: lib.escapeShellArg _) _.devices)
(builtins.attrValues cfg.btrfs.volumes));
in
''
#!/bin/bash
set -o errexit
${cfg.diskSetup.scripts.init}
${lib.strings.concatLines (lib.optionals ((builtins.length devices) > 0) [
''echo "$(tput setaf 3)=== WARNING ====$(tput sgr0)"''
(''echo "Continuing this script will alter the partitions of ''
+ (lib.strings.concatStringsSep ", " (lib.lists.init affected))
+ (if (builtins.length affected) > 1 then " and " else "") + (lib.lists.last affected) + ''"'')
''
if ! fish ${./fileSystems/confirm.fish} "Are you sure you want to continue?" "n"; then
exit 1
fi
''
])}
${cfg.diskSetup.scripts.partition}
${cfg.btrfs.script}
${mountScript}
${cfg.diskSetup.scripts.swap}
'';
};
};
};
}

View file

@@ -0,0 +1,108 @@
{ lib, config, ... }:
let
inherit (lib) types mkOption;
cfg = config.valhalla.fileSystems.btrfs;
profileType = types.enum [
"raid0"
"raid1"
"raid1c3"
"raid1c4"
"raid5"
"raid6"
"raid10"
"dup"
"single"
];
volumeType = types.submodule (
{ config, name, ... }: {
options = {
mountPoint = mkOption {
type = types.nullOr types.str;
description = "The path to mount the volume to.";
default = null;
};
devices = mkOption {
type = types.listOf types.str;
description = "The devices of the btrfs volume.";
};
label = mkOption {
type = types.nullOr types.str;
description = "The label of the volume.";
default = name;
};
dataProfile = mkOption {
type = types.nullOr profileType;
description = "The data profile.";
default = null;
};
metadataProfile = mkOption {
type = types.nullOr profileType;
description = "The metadata profile.";
default = null;
};
};
}
);
in
{
options = {
valhalla = {
fileSystems.btrfs = {
volumes = mkOption {
type = types.attrsOf volumeType;
description = "The btrfs volumes of the system.";
default = { };
};
script = mkOption {
type = types.str;
description = "The script for creating the btrfs volumes.";
default = lib.strings.concatLines (
builtins.map
(
_: builtins.concatStringsSep " " (
[ "mkfs.btrfs" "--force" ] ++
(lib.optionals (_.metadataProfile != null) [ "--metadata" "${_.metadataProfile}" ]) ++
(lib.optionals (_.dataProfile != null) [ "--data" "${_.dataProfile}" ]) ++
(lib.optionals (_.label != null) [ "--label" "${_.label}" ]) ++
_.devices
)
)
(builtins.attrValues cfg.volumes)
);
};
};
};
};
config = {
valhalla = {
linux.programs.btrfs = lib.optionalAttrs
(builtins.any
(_: (builtins.length _.devices) > 1)
(builtins.attrValues cfg.volumes))
{
enable = true;
pools = true;
};
fileSystems.mounts = lib.attrsets.concatMapAttrs
(
name: volume:
if (volume.mountPoint != null) then {
${volume.mountPoint} = {
device = builtins.elemAt volume.devices 0;
fsType = "btrfs";
};
} else { }
)
cfg.volumes;
};
};
}

View file

@@ -22,7 +22,7 @@ function chooseDisk -a outFile message selectScript
end
end
select "$header" "$outFile" "$message" "No valid disk found!" "$(string collect $disks)" false
select "$header" "$outFile" "$message" "No valid device found!" "$(string collect $disks)" false
and begin
set -l disk (string split -n " " (cat "$outFile"))
echo "/dev/$disk[1]" >$outFile

View file

@@ -0,0 +1,375 @@
{ lib, config, ... }:
let
inherit (lib) types mkOption;
fs = import ./fs.nix;
cfg = config.valhalla.fileSystems;
deviceListVarName = "myDevices";
isSwap = partition: builtins.elem partition.type [ fs.swap 19 ];
probeScript = builtins.concatStringsSep "\n" [
"partprobe 2> /dev/null || true"
"udevadm trigger"
];
mkDeviceType = types.submodule (
{ config, name, ... }: {
options = {
id = mkOption {
type = types.str;
description = "The internal identifier of the device.";
internal = true;
};
wipe = mkOption {
type = types.bool;
description = "A value indicating whether the device should be wiped.";
default = !(lib.lists.any (_: _.keepExisting) (builtins.attrValues config.partitions));
};
name = mkOption {
type = types.nullOr types.str;
description = "The name of the device.";
default = name;
};
path = mkOption {
type = types.nullOr types.str;
description = "The path to the device.";
default =
if config.name == null then
null
else
"/dev/${config.name}";
};
deviceScript = mkOption {
type = types.str;
description = "A command for loading the device path into the device variable";
internal = true;
};
deviceVariable = mkOption {
type = types.str;
description = "The name of the variable holding the name of the disk";
internal = true;
};
partitions = mkOption {
type = types.attrsOf (types.nullOr partitionType);
description = "The partitions of the disk.";
default = { };
};
scripts = {
init = mkOption {
type = types.str;
description = "A script for loading the device path into the device variable";
};
partition = mkOption {
type = types.str;
description = "A script for partitioning and formatting the device.";
};
};
};
config =
let
deviceVarName = "${deviceListVarName}[${config.id}]";
deviceVar = "\${${deviceVarName}}";
deviceSelector = ''
result="$(mktemp)"
fish ${./choose-device.fish} "$result" "Please select the \"${name}\" device:" ${./select.fish}
${deviceVarName}="$(cat "$result")"
'';
partitions = lib.lists.sortOn (_: _.index)
(builtins.filter (_: _ != null)
(builtins.attrValues config.partitions));
mkType = type:
lib.strings.escapeShellArg (
if builtins.isInt type then
"${lib.trivial.toHexString type}"
else
type
);
fdiskCommand = arguments: "sudo sfdisk ${arguments}";
fdiskScript = script: args: append:
"echo ${script} | ${
fdiskCommand "${builtins.concatStringsSep " " args} ${
if append then "--append" else ""
} ${deviceVar}"
}";
appendScript = index: script: fdiskScript script [ "-N" (builtins.toString index) ] true;
cleanup = lib.strings.concatLines (builtins.map
(partition: "${fdiskCommand "--delete ${deviceVar} ${toString partition.index}"} || true")
(lib.lists.sortOn
(partition: partition.index * -1)
(builtins.filter (_: !_.keepExisting) partitions)));
fdiskCommands = lib.strings.concatLines
(lib.optionals config.wipe [
cleanup
(fdiskScript "label: gpt" [ ] false)
] ++ (builtins.concatMap
(
partition:
let
inherit (partition) format index keepExisting label sizeScript type;
partVarName = "myPartition";
partVar = "\${${partVarName}}";
sizeOption = ''
${sizeScript} | sed -e "s/.*[^[:space:]]/size=\0/"
'';
formatScripts = {
${fs.ext4} = "mkfs.ext4 -F ${partVar}";
${fs.btrfs} = "mkfs.btrfs --force ${partVar}";
${fs.swap} = "mkswap ${partVar}";
${fs.ntfs} = "mkfs.ntfs -F ${partVar}";
${fs.fat32} = "mkfs.fat -F 32 ${partVar}";
};
labelScripts = {
${fs.ext4} = label: "e2label ${partVar} ${label}";
${fs.btrfs} = label: "btrfs filesystem label ${partVar} ${label}";
${fs.swap} = label: "swaplabel ${partVar} --label ${label}";
${fs.ntfs} = label: "ntfslabel ${partVar} ${label}";
${fs.fat32} = label: "fatlabel ${partVar} ${label}";
};
create = lib.strings.concatLines ([
(appendScript index ''${toString index}: "$(${sizeOption})" type=${mkType type}'')
probeScript
] ++ (lib.optionals (format != null) [
"sudo ${formatScripts.${format}}"
]));
fallback = ''
if ! { ls "${partVar}" 2>&1; } > /dev/null
then
${create}
fi
'';
in
[
''local diskPath="$(find -L /dev/disk/by-diskseq -samefile ${deviceVar})"''
''local ${partVarName}="$diskPath-part${toString index}"''
(if keepExisting then fallback else create)
] ++ (lib.optionals (format != null) [
"sudo ${labelScripts.${format} label}"
])
)
partitions));
fixType = lib.strings.concatLines (builtins.concatMap
(
partition:
lib.optional
(partition.keepExisting && !(builtins.isNull partition.type))
''sudo sfdisk --part-type ${deviceVar} ${toString partition.index} ${mkType partition.type}''
)
partitions);
in
{
id = "disk-${name}";
deviceVariable = deviceVar;
scripts = {
init =
if config.path == null then ''
${deviceSelector}
'' else ''
${deviceVarName}=${config.path}
if [ ! -b ${deviceVar} ]; then
function fallback() {
echo "Couldn't find the specified disk \"${deviceVar}\"."
if fish ${./confirm.fish} "Do you want to choose a different \"${name}\" disk?"; then
${deviceSelector}
else
exit 1
fi
}
fallback
fi
'';
partition = lib.mkDefault ''
function partition() {
${if (!config.wipe) then cleanup else ""}
${probeScript}
${fdiskCommands}
${fixType}
}
partition
'';
};
};
}
);
partitionType = types.submodule (
{ name, config, ... }: {
options = {
index = mkOption {
type = types.int;
description = "The index of the partition.";
};
label = mkOption {
type = types.str;
description = "The label of the partition.";
default = name;
};
keepExisting = mkOption {
type = types.bool;
description = "A value indicating whether the partition should be left untouched if it already exists.";
default = false;
};
type = mkOption {
type = types.nullOr (types.either types.str types.int);
description = "The type of the partition.";
default = null;
};
format = mkOption {
type = types.nullOr (types.enum (builtins.attrValues fs));
description = "The file system format of the partition.";
default =
if (isSwap config) then
fs.swap
else
null;
};
size = mkOption {
type = types.nullOr types.str;
description = "The size of the partition.";
default = null;
};
sizeScript = mkOption {
type = types.str;
description = "A script for printing the size to the console.";
internal = true;
};
useSwap = mkOption {
type = types.bool;
description = "A value indicating whether this partition should be used as swap.";
default = isSwap config;
};
mountPoint = mkOption {
type = types.nullOr types.str;
description = "The mountpoint of the partition.";
default = null;
};
mountOptions = mkOption {
type = types.listOf types.str;
description = "The options to apply to the mount.";
default = [ ];
};
};
config = {
sizeScript = (if isSwap config then
''echo "$(cat /proc/meminfo | awk -F " " '/^MemTotal/ { print $2 }' | awk '{ print int((($1 / 1024 / 1024) * 0.75) + 0.5)}')"G''
else
"echo ${lib.strings.escapeShellArg (toString config.size)}");
};
}
);
in
{
options = {
valhalla = {
fileSystems = {
diskSetup = {
devices = mkOption {
type = types.attrsOf (mkDeviceType);
description = "The disk devices to format.";
default = { };
};
scripts = {
init = mkOption {
type = types.str;
description = "The script for initializing the disk partitioning script.";
};
partition = mkOption {
type = types.str;
description = "The script for partitioning the disks.";
};
swap = mkOption {
type = types.str;
description = "The script for enabling swap devices.";
};
};
};
};
};
};
config = {
valhalla = {
fileSystems = {
mounts = (lib.attrsets.concatMapAttrs
(
name: device:
lib.attrsets.concatMapAttrs
(
name: partition:
if partition.mountPoint != null then {
${partition.mountPoint} = {
device = "/dev/disk/by-label/${partition.label}";
fsType = partition.format;
options = partition.mountOptions;
};
} else { }
)
device.partitions
)
cfg.diskSetup.devices);
diskSetup = {
scripts =
let
partPath = part: "/dev/disk/by-label/${part.label}";
disks = ((builtins.attrValues cfg.diskSetup.devices));
partitions = (builtins.concatMap (_: (builtins.attrValues _.partitions)) disks);
in
{
init = lib.strings.concatLines (builtins.map (_: _.scripts.init) disks);
partition = lib.strings.concatLines (builtins.map (_: _.scripts.partition) disks);
swap = lib.strings.concatLines (
(builtins.map
(
_: ''
${probeScript}
sudo swapon ${partPath _}
''
)
(builtins.filter (_: _.useSwap) partitions))
);
};
};
};
};
};
}

View file

@@ -1,5 +1,6 @@
{
ext4 = "ext4";
btrfs = "btrfs";
swap = "swap";
ntfs = "ntfs";
fat32 = "fat32";

View file

@@ -3,7 +3,8 @@ let
inherit (lib) mkOption types;
optionalAttrs = lib.attrsets.optionalAttrs;
hw = config.valhalla.hardware;
in {
in
{
options = {
valhalla = {
hardware = {

View file

@@ -4,7 +4,8 @@ in {
options = {
valhalla = mkOption {
type = types.submodule (
{ extendModules, ... }: let
{ extendModules, ... }:
let
osVariant = extendModules {
modules = [
({ config, ... }: {
@@ -35,7 +36,8 @@
};
windowsVariant = osVariant.extendModules { };
in {
in
{
options = {
linux = mkOption {
inherit (linuxVariant) type;
@@ -51,7 +53,8 @@ in {
visible = "shallow";
};
};
});
}
);
description = "Configuration for PortValhalla.";
default = { };

View file

@@ -1,5 +0,0 @@
{ ... }: {
imports = [
./partition/disks.nix
];
}

View file

@@ -1,360 +0,0 @@
{ lib, config, ... }:
let
inherit (lib) types mkOption;
fs = import ./fs.nix;
diskListVarName = "myDisks";
isSwap = partition: builtins.elem partition.type [ fs.swap 19 ];
probeScript = builtins.concatStringsSep "\n" [
"partprobe 2> /dev/null || true"
"udevadm trigger"
];
mkDiskType = osDisk: types.submodule (
{ config, name, ... }: {
options = {
id = mkOption {
type = types.str;
description = "The internal identifier of the disk.";
internal = true;
};
wipe = mkOption {
type = types.bool;
description = "A value indicating whether the disk should be wiped.";
default = !(lib.lists.any (_: _.keepExisting) (builtins.attrValues config.partitions));
};
deviceName = mkOption {
type = types.nullOr types.str;
description = "The name of the device.";
default = if osDisk then null else name;
};
devicePath = mkOption {
type = if osDisk then
types.nullOr types.str
else
types.str;
description = "The path to the device.";
default = if osDisk && config.deviceName == null then
null
else
"/dev/${config.deviceName}";
};
deviceScript = mkOption {
type = types.str;
description = "A command for loading the device path into the device variable";
internal = true;
};
deviceVariable = mkOption {
type = types.str;
description = "The name of the variable holding the name of the disk";
internal = true;
};
partitions = mkOption {
type = types.attrsOf (types.nullOr partitionType);
description = "The partitions of the disk.";
default = { };
};
script = mkOption {
type = types.str;
description = "The script for formatting the disk.";
};
};
config = let
diskVarName = "${diskListVarName}[${config.id}]";
diskVar = "\${${diskVarName}}";
diskSelector = ''
result="$(mktemp)"
fish ${./choose-disk.fish} "$result" "Which disk do you wish to install the OS on?" ${./select.fish}
${diskVarName}="$(cat "$result")"
'';
partitions = lib.lists.sortOn (_: _.index)
(builtins.filter (_: _ != null)
(builtins.attrValues config.partitions));
mkType = type:
lib.strings.escapeShellArg (
if builtins.isInt type then
"${lib.trivial.toHexString type}"
else
type);
fdiskCommand = arguments: "sudo sfdisk ${arguments}";
fdiskScript = script: args: append:
"echo ${script} | ${
fdiskCommand "${builtins.concatStringsSep " " args} ${
if append then "--append" else ""
} ${diskVar}"
}";
wipeScript = script: fdiskScript script [] false;
appendScript = index: script: fdiskScript script ["-N" (builtins.toString index)] true;
cleanup = lib.strings.concatLines (builtins.map
(partition: "${fdiskCommand "--delete ${diskVar} ${toString partition.index}"} || true")
(lib.lists.sortOn
(partition: partition.index * -1)
(builtins.filter (_: !_.keepExisting) partitions)));
fdiskCommands = lib.strings.concatLines
(lib.optionals config.wipe [
cleanup
(wipeScript "label: gpt")
] ++ (builtins.concatMap (
partition:
let
inherit (partition) format index keepExisting label sizeScript type;
partVarName = "myPartition";
partVar = "\${${partVarName}}";
sizeOption = ''
${sizeScript} | sed -e "s/.*[^[:space:]]/size=\0/"
'';
formatScripts = {
${fs.ext4} = "mkfs.ext4 -F ${partVar}";
${fs.swap} = "mkswap ${partVar}";
${fs.ntfs} = "mkfs.ntfs -F ${partVar}";
${fs.fat32} = "mkfs.fat -F 32 ${partVar}";
};
labelScripts = {
${fs.ext4} = label: "e2label ${partVar} ${label}";
${fs.swap} = label: "swaplabel ${partVar} --label ${label}";
${fs.ntfs} = label: "ntfslabel ${partVar} ${label}";
${fs.fat32} = label: "fatlabel ${partVar} ${label}";
};
create = lib.strings.concatLines [
(appendScript index ''${toString index}: "$(${sizeOption})" type=${mkType type}'')
probeScript
"sudo ${formatScripts.${format}}"
];
fallback = ''
if ! { ls "${partVar}" 2>&1; } > /dev/null
then
${create}
fi
'';
in [
''local diskPath="$(find -L /dev/disk/by-diskseq -samefile ${diskVar})"''
''local ${partVarName}="$diskPath-part${toString index}"''
(if keepExisting then fallback else create)
"sudo ${labelScripts.${format} label}"
]) partitions));
fixType = lib.strings.concatLines (builtins.concatMap (
partition:
lib.optional
(partition.keepExisting && !(builtins.isNull partition.type))
''sudo sfdisk --part-type ${diskVar} ${toString partition.index} ${mkType partition.type}'')
partitions);
in {
id = if osDisk then "os" else "disk-${name}";
deviceVariable = diskVar;
deviceScript = if osDisk && config.devicePath == null then ''
${diskSelector}
'' else ''
${diskVarName}=${config.devicePath}
${if osDisk then ''
if [ ! -b ${diskVar} ]; then
function fallback() {
echo "Couldn't find the specified disk \"${diskVar}\"."
if fish ${./confirm.fish} "Do you want to install the OS on another disk?"; then
${diskSelector}
else
exit 1
fi
}
fallback
fi
'' else
""}
'';
script = lib.mkDefault ''
function partition() {
${if (!config.wipe) then cleanup else ""}
${probeScript}
${fdiskCommands}
${fixType}
}
partition
'';
};
});
partitionType = types.submodule (
{ name, config, ... }: {
options = {
index = mkOption {
type = types.int;
description = "The index of the partition.";
};
label = mkOption {
type = types.str;
description = "The label of the partition.";
default = name;
};
keepExisting = mkOption {
type = types.bool;
description = "A value indicating whether the partition should be left untouched if it already exists.";
default = false;
};
type = mkOption {
type = types.nullOr (types.either types.str types.int);
description = "The type of the partition.";
default = null;
};
format = mkOption {
type = types.enum (builtins.attrValues fs);
description = "The file system format of the partition.";
default = if (isSwap config) then
fs.swap
else
throw ("Partition format not specified.");
};
size = mkOption {
type = types.nullOr types.str;
description = "The size of the partition.";
default = null;
};
sizeScript = mkOption {
type = types.str;
description = "A script for printing the size to the console.";
internal = true;
};
useSwap = mkOption {
type = types.bool;
description = "A value indicating whether this partition should be used as swap.";
default = isSwap config;
};
mountPoint = mkOption {
type = types.nullOr types.str;
description = "The mountpoint of the partition.";
default = null;
};
mountOptions = mkOption {
type = types.listOf types.str;
description = "The options to apply to the mount.";
default = [ ];
};
};
config = {
sizeScript = (if isSwap config then
''echo "$(cat /proc/meminfo | awk -F " " '/^MemTotal/ { print $2 }' | awk '{ print int((($1 / 1024 / 1024) * 0.75) + 0.5)}')"G''
else
"echo ${lib.strings.escapeShellArg (toString config.size)}");
};
});
in {
options = {
valhalla = {
partition = {
rootDir = mkOption {
type = types.str;
description = "The root of the installation directory to mount disks into.";
default = "/mnt";
};
os = mkOption {
type = mkDiskType true;
description = "The partition layout of the OS disk.";
};
disks = mkOption {
type = types.attrsOf (mkDiskType false);
description = "The additional disks to format.";
default = { };
};
script = mkOption {
type = types.str;
description = "The script for partitioning the system's disks.";
};
};
};
};
config = {
valhalla = {
partition = {
script = lib.mkDefault (let
cfg = config.valhalla.partition;
inherit (cfg) os rootDir;
inherit (lib.strings) normalizePath;
partPath = part: "/dev/disk/by-label/${part.label}";
disks = ([ os ] ++ (builtins.attrValues cfg.disks));
partitions = (builtins.concatMap (_: (builtins.attrValues _.partitions)) disks);
mountScript = lib.strings.concatLines (builtins.concatMap (
_: [
probeScript
(builtins.concatStringsSep " " ([
"sudo"
"mount"
"--mkdir"
] ++ (lib.optionals (_.format == "ntfs") [
"-t" "ntfs3"
]) ++ [
(builtins.concatStringsSep " " (builtins.map (_: "-o ${_}") _.mountOptions))
(partPath _)
(normalizePath "/${rootDir}/${_.mountPoint}")
]))
]) (lib.lists.sortOn
(_: normalizePath "/${_.mountPoint}")
(builtins.filter (_: _.mountPoint != null) partitions)));
swapScript = lib.strings.concatLines (builtins.map (
_: ''
${probeScript}
sudo swapon ${partPath _}
'') (builtins.filter (_: _.useSwap) partitions));
in lib.strings.concatLines ([
"#!/bin/bash"
"set -o errexit"
]
++ (builtins.map (_: _.deviceScript) disks)
++ lib.optionals ((builtins.length disks) > 0) [
''echo "$(tput setaf 3)==== WARNING ====$(tput sgr0)"''
(''echo "Continuing this script will alter the partitions of ''
+ (lib.strings.concatStringsSep ", " (builtins.map (_: "${_.deviceVariable}") (lib.lists.init disks)))
+ (if (builtins.length disks) > 1 then " and " else "") + (lib.lists.last disks).deviceVariable + ''"'')
''
if ! fish ${./confirm.fish} "Are you sure you want to continue?" "n"; then
exit 1
fi
''
] ++ (builtins.map (_: _.script) disks) ++ [
mountScript
swapScript
]));
};
};
};
}

View file

@@ -5,88 +5,103 @@ let
mkUsersOption = programs: osConfig: mkOption {
type = types.attrsOf (types.submodule (
{ ... }: {
{ config, ... }: {
options = {
inherit programs;
};
config = {
programs = builtins.mapAttrs (
name: config: {
enable = mkDefault config.enable;
}) osConfig.programs;
programs = lib.attrsets.concatMapAttrs
(
name: program:
if (builtins.elem name (builtins.attrNames config.programs)) then {
${name} = {
enable = mkDefault program.enable;
};
}));
} else { }
)
osConfig.programs;
};
}
));
};
mkPrograms = infos: builtins.foldl' (programs: info:
mkPrograms = infos: builtins.foldl'
(programs: info:
programs // {
${builtins.elemAt info 0} = {
enable = mkEnableOption (builtins.elemAt info 1);
};
}) { } infos;
})
{ }
infos;
programs = mkPrograms [
["aliae" "aliae"]
["brave" "Brave Browser"]
["discord" "Discord"]
["docker" "docker"]
["firefox" "Firefox Web Browser"]
["openssh" "OpenSSH"]
["osu!lazer" "osu!lazer"]
["pennywise" "Pennywise"]
["powershell" "PowerShell Core"]
["retroarch" "RetroArch"]
["steam" "Steam"]
["thunderbird" "Thunderbird"]
["vscode" "Visual Studio Code"]
["zoxide" "zoxide"]
[ "aliae" "aliae" ]
[ "brave" "Brave Browser" ]
[ "discord" "Discord" ]
[ "firefox" "Firefox Web Browser" ]
[ "openssh" "OpenSSH" ]
[ "osu!lazer" "osu!lazer" ]
[ "pennywise" "Pennywise" ]
[ "powershell" "PowerShell Core" ]
[ "retroarch" "RetroArch" ]
[ "steam" "Steam" ]
[ "thunderbird" "Thunderbird" ]
[ "vscode" "Visual Studio Code" ]
[ "zoxide" "zoxide" ]
];
linuxPrograms = mkPrograms [
["bash" "Bash"]
["fish" "fish"]
["icedtea" "IcedTea"]
["grub" "GRUB"]
["logo-ls" "logo-ls"]
["lutris" "Lutris"]
["minegrub-theme" "Minegrub Theme"]
["nodejs-n" "n"]
["nuke-usb" "nuke-usb"]
["nvidia-dkms" "Nvidia Drivers"]
["plasma" "Plasma"]
["pyenv" "pyenv"]
["sddm" "SDDM"]
["vim" "Vim"]
["virt-manager" "Virtual Machine Manager"]
["waydroid" "Waydroid"]
["xone" "xone"]
[ "bash" "Bash" ]
[ "fish" "fish" ]
[ "icedtea" "IcedTea" ]
[ "grub" "GRUB" ]
[ "logo-ls" "logo-ls" ]
[ "lutris" "Lutris" ]
[ "minegrub-theme" "Minegrub Theme" ]
[ "networkmanager" "NetworkManager" ]
[ "nginx" "nginx" ]
[ "nodejs-n" "n" ]
[ "nuke-usb" "nuke-usb" ]
[ "nvidia-dkms" "Nvidia Drivers" ]
[ "plasma" "Plasma" ]
[ "pyenv" "pyenv" ]
[ "sddm" "SDDM" ]
[ "vim" "Vim" ]
[ "virt-manager" "Virtual Machine Manager" ]
[ "waydroid" "Waydroid" ]
[ "xone" "xone" ]
];
windowsPrograms = mkPrograms [
["lghub" "Logitech G Hub"]
["maniaplanet" "ManiaPlanet"]
["msedge-redirect" "MSEdgeRedirect"]
["nvs" "Node Version Switcher"]
["osu!" "Osu!"]
["posh-git" "posh-git"]
["putty" "PuTTY"]
["rewasd" "reWASD"]
["terminal-icons" "Terminal Icons"]
["tm-nations-forever" "TrackMania Nations Forever"]
["tm-united-forever" "TrackMania United Forever"]
["tobii-gamehub" "Tobii Game Hub"]
["tobii-ghost" "Tobii Ghost"]
["ubiquiti-unifi-controller" "Ubiquiti UniFi Controller"]
["visualstudio" "Visual Studio"]
["winscp" "WinSCP"]
[ "lghub" "Logitech G Hub" ]
[ "maniaplanet" "ManiaPlanet" ]
[ "msedge-redirect" "MSEdgeRedirect" ]
[ "nvs" "Node Version Switcher" ]
[ "osu!" "Osu!" ]
[ "posh-git" "posh-git" ]
[ "putty" "PuTTY" ]
[ "rewasd" "reWASD" ]
[ "terminal-icons" "Terminal Icons" ]
[ "tm-nations-forever" "TrackMania Nations Forever" ]
[ "tm-united-forever" "TrackMania United Forever" ]
[ "tobii-gamehub" "Tobii Game Hub" ]
[ "tobii-ghost" "Tobii Ghost" ]
[ "ubiquiti-unifi-controller" "Ubiquiti UniFi Controller" ]
[ "visualstudio" "Visual Studio" ]
[ "winscp" "WinSCP" ]
];
in {
in
{
imports = [
./programs/btrfs.nix
./programs/docker.nix
./programs/git.nix
./programs/nextcloud.nix
./programs/oh-my-posh.nix
./programs/rclone.nix
./programs/systemd-networkd.nix
];
options = {

View file

@@ -0,0 +1,14 @@
{ lib, ... }:
let
inherit (lib) mkEnableOption;
in
{
options = {
valhalla = {
linux.programs.btrfs = {
enable = mkEnableOption "btrfs tools";
pools = mkEnableOption "btrfs pool support in bootloaders";
};
};
};
}

View file

@@ -0,0 +1,47 @@
{ lib, ... }:
let
inherit (lib) mkEnableOption mkOption types;
commonOptions = {
enable = mkEnableOption "docker";
};
in
{
options = {
valhalla = {
programs.docker = commonOptions;
users = mkOption {
type = types.attrsOf (types.submodule (
{ ... }: {
options = {
programs.docker = commonOptions;
};
}
));
};
linux = {
programs = {
docker = {
services = {
anki-sync.enable = mkEnableOption "Anki Sync server";
drone.enable = mkEnableOption "drone server";
forgejo.enable = mkEnableOption "Forgejo server";
jellyfin.enable = mkEnableOption "Jellyfin media server";
minecraft.enable = mkEnableOption "Minecraft server";
nextcloud.enable = mkEnableOption "Nextcloud server";
ryot.enable = mkEnableOption "ryot server";
teamspeak.enable = mkEnableOption "TeamSpeak server";
terraria.enable = mkEnableOption "Terraria server";
trackmania.enable = mkEnableOption "TrackMania server";
vaultwarden.enable = mkEnableOption "Vaultwarden server";
wekan.enable = mkEnableOption "Wekan server";
woodpecker.enable = mkEnableOption "Woodpecker CI server";
};
};
};
};
};
};
}

View file

@@ -31,7 +31,8 @@ let
default = { };
};
};
in {
in
{
options = {
valhalla = {
programs.git = gitOption;
@@ -42,7 +43,8 @@ in {
options = {
programs.git = gitOption;
};
}));
}
));
};
};
};

View file

@@ -31,7 +31,8 @@ let
default = [ ];
};
};
in {
in
{
options = {
valhalla = {
programs.nextcloud = commonOptions;
@@ -42,7 +43,8 @@ in {
options = {
programs.nextcloud = commonOptions;
};
}));
}
));
};
windows.users = mkOption {
@@ -51,7 +53,8 @@ in {
options = {
programs.nextcloud = userOptions;
};
}));
}
));
};
};
};

View file

@@ -16,7 +16,8 @@ let
default = lib.strings.removeSuffix ".omp" (lib.strings.removeSuffix ".json" (builtins.baseNameOf config.source));
};
};
});
}
);
commonOptions = {
enable = mkEnableOption "Oh My Posh";
@@ -35,7 +36,8 @@ let
default = [ ];
};
};
in {
in
{
options = {
valhalla = {
programs.oh-my-posh = commonOptions;
@@ -46,7 +48,8 @@ in {
options = {
programs.oh-my-posh = userOptions;
};
}));
}
));
};
};
};

View file

@@ -16,7 +16,8 @@ let
default = null;
};
};
});
}
);
commonOptions = {
enable = mkEnableOption "rclone";
@@ -29,7 +30,8 @@ let
default = { };
};
};
in {
in
{
options = {
valhalla.linux = {
programs.rclone = commonOptions;
@@ -40,7 +42,8 @@ in {
options = {
programs.rclone = userOptions;
};
}));
}
));
};
};
};

View file

@@ -0,0 +1,26 @@
{ lib, config, ... }:
let
inherit (lib) mkEnableOption mkOption types;
in
{
options = {
valhalla = {
linux.programs.systemd-networkd = {
enable = mkEnableOption "systemd-networkd";
networks = mkOption {
type = types.attrsOf types.attrs;
description = "The networks to configure.";
};
networkFiles = mkOption {
type = types.attrsOf types.str;
description = "The files for configuring the networks.";
default = builtins.mapAttrs
(name: network: lib.generators.toINI { listsAsDuplicateKeys = true; } network)
config.valhalla.linux.programs.systemd-networkd.networks;
};
};
};
};
}

View file

@@ -1,6 +1,6 @@
{ lib, ... }:
let inherit (lib) mkOption types;
in {
let inherit (lib) mkOption types;
in {
imports = [
./programs.nix
];
@@ -14,11 +14,16 @@
cfg = config;
inherit (cfg.software) coding desktopExperience essential gaming server socialMedia;
mkPrograms = programs: builtins.foldl' (
mkPrograms = programs: builtins.foldl'
(
programs: name: programs // {
${name}.enable = true;
}) {} programs;
in {
}
)
{ }
programs;
in
{
options = {
software = {
essential = mkOption {
@@ -109,6 +114,7 @@
"vim"
])) // (optionalAttrs desktopExperience (mkPrograms [
"icedtea"
"networkmanager"
"plasma"
"sddm"
"waydroid"
@@ -144,7 +150,8 @@
"tm-united-forever"
]));
};
});
};
};
}
);
};
};
}

View file

@@ -25,7 +25,8 @@ let
default = [ ];
};
};
});
}
);
linuxUserType = types.submodule (
{ ... }: {
@@ -36,7 +37,8 @@ let
default = null;
};
};
});
}
);
winUserType = types.submodule (
{ ... }: {
@@ -47,8 +49,10 @@ let
default = false;
};
};
});
in {
}
);
in
{
options = {
valhalla = {
users = mkOption {

View file

@@ -2,10 +2,10 @@
let inherit (lib) mkOption types;
in {
imports = [
./fileSystems.nix
./hardware.nix
./i18n.nix
./os.nix
./partition.nix
./programs.nix
./software.nix
./users.nix

View file

@@ -2,7 +2,8 @@
let
inherit (lib) mkDefault mkEnableOption mkIf mkOption types;
capitalize = (import ../text.nix { inherit lib; }).capitalize;
in {
in
{
options = {
valhalla = {
windows = {

View file

@@ -25,7 +25,7 @@
};
};
partition.os.partitions = {
fileSystems.diskSetup.devices.OS.partitions = {
# Keep Windows' boot partition
Boot.keepExisting = true;

View file

@@ -1,12 +1,12 @@
{ lib, config, ... }:
let fs = import ../../../../lib/modules/partition/fs.nix;
let fs = import ../../../../lib/modules/fileSystems/fs.nix;
in {
imports = [ ../defaults.nix ];
config = {
valhalla = {
partition = {
os = {
fileSystems.diskSetup.devices = {
OS = {
partitions = {
Boot = {
index = 1;
@@ -37,7 +37,8 @@ in {
keyboardLayout = "ch";
i18n = {
localeSettings = let defaultLocale = "en_US.UTF-8";
localeSettings =
let defaultLocale = "en_US.UTF-8";
in {
LANG = "de_CH.UTF-8";
LANGUAGE = defaultLocale;
@@ -58,7 +59,8 @@ in {
linux.programs.grub.enable = true;
programs = {
git = let defaultBranch = "main";
git =
let defaultBranch = "main";
in {
inherit defaultBranch;

View file

@@ -0,0 +1,127 @@
{ lib, config, ... }:
let fs = import ../../../lib/modules/fileSystems/fs.nix;
in {
imports = [ ../../users/manuel/config.nix ];
config = {
valhalla = {
boot.label = "Arch";
fileSystems = {
diskSetup.devices = {
OS = {
path = "/dev/sda";
partitions = {
Boot = {
index = 1;
type = "uefi";
size = "+1G";
format = fs.fat32;
mountPoint = config.valhalla.boot.efiMountPoint;
};
Swap = {
index = 2;
type = "swap";
};
OS = {
index = 3;
label = lib.mkDefault config.valhalla.boot.label;
type = "linux";
};
};
};
};
btrfs = {
volumes = {
OS = {
mountPoint = "/";
devices = [ "/dev/sda3" "/dev/sdb" ];
metadataProfile = "raid1";
dataProfile = "single";
};
};
};
};
hostname = "nuth.ch";
timeZone = "Europe/Zurich";
keyMap = "de_CH-latin1";
keyboardLayout = "ch";
i18n = {
localeSettings =
let defaultLocale = "en_US.UTF-8";
in {
LANG = "de_CH.UTF-8";
LANGUAGE = defaultLocale;
LC_MESSAGE = defaultLocale;
};
};
software = {
essential = true;
server = true;
};
programs.docker.enable = true;
linux.programs = {
grub.enable = true;
systemd-networkd = {
enable = true;
networks =
let device = "enp0s31f6";
in {
${device} = {
Match = {
Name = device;
};
Network = {
Address = "2a01:4f8:10b:2644::2/64";
Gateway = [
"94.130.48.193"
"fe80::1"
];
DNS = [
"5.9.164.112"
"1.1.1.1"
];
};
Address = {
Address = "94.130.48.251";
Peer = "94.130.48.193/32";
};
};
};
};
docker = {
services = {
anki-sync.enable = true;
drone.enable = true;
forgejo.enable = true;
jellyfin.enable = true;
minecraft.enable = true;
nextcloud.enable = true;
ryot.enable = true;
teamspeak.enable = true;
terraria.enable = true;
trackmania.enable = true;
vaultwarden.enable = true;
wekan.enable = true;
woodpecker.enable = true;
};
};
};
};
};
}

View file

@@ -29,10 +29,12 @@
windows.users.manuel = {
programs = {
nextcloud = {
folderSyncs = let
folderSyncs =
let
localPath = "C:/tools/RetroArch-Win64";
remotePath = "/Saved Games/RetroArch";
in [
in
[
{
remotePath = "${remotePath}/Saves";
localPath = "${localPath}/saves";

View file

@@ -20,11 +20,11 @@ begin
sudo sed -i \
-e "/esp=/{" \
-e "a esp=$(echo "$efiDir" | string escape)" \
-e "d" \
-e d \
-e "}" \
-e "/bootloader_id=/{" \
-e "a bootloader_id=$(echo "$label" | string escape)" \
-e "d" \
-e d \
-e "}" \
/etc/secureboot.conf

View file

@@ -1,11 +1,7 @@
#!/bin/env fish
begin
set -l dir (status dirname)
function installValhallaDeps -V dir
source "$dir/../lib/software.fish"
and pacinst fish git jq nix sudo tmux
end
source "$dir/../lib/dependencies.fish"
function getDeploymentScript -V dir
echo "$dir/../lib/deploy.fish"
@@ -16,7 +12,6 @@ begin
waitNetwork
or exit
sudo systemctl enable --now nix-daemon
and source "$dir/../lib/software.fish"
and source "$dir/../Software/base-devel/main.fish"
and source "$dir/../Software/pacman/main.fish"
@@ -27,7 +22,7 @@ begin
and yayinst \
linux-headers \
pacman-contrib \
yq
go-yq
end
source "$dir/../../Common/OS/install.fish"

View file

@@ -1,6 +1,8 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l autologinConfig /etc/systemd/system/getty@tty1.service.d/autologin.conf
source "$dir/../lib/dependencies.fish"
source "$dir/../../Common/OS/setup.fish"
source "$dir/../../lib/settings.fish"
@@ -8,13 +10,13 @@ begin
arch-chroot $argv
end
function installValhallaDeps -S
pacstrap -K "$mountDir" fish git jq nix sudo tmux
function bootstrapSetup -S
pacstrap -K (getOSConfig fileSystems.rootDir) fish
end
function installDrivers -S
if isOSEnabled hardware.surfaceBook
pacstrap -K "$mountDir" linux-firmware-marvell
pacstrap -K (getOSConfig fileSystems.rootDir) linux-firmware-marvell
end
end
@@ -22,9 +24,30 @@ begin
echo "$dir/install.fish"
end
function initOS -V dir
source "$dir/../lib/dependencies.fish"
source "$dir/../../lib/wait-network.fish"
set -l mountDir (getOSConfig fileSystems.rootDir)
waitNetwork
and pacman-key --init
and pacman-key --populate
and pacstrap -K "$mountDir" \
base \
linux \
linux-firmware \
man-db \
man-pages \
texinfo
installValhallaDeps "$mountDir"
end
function setupOS -S -V dir -S
source "$dir/../../lib/hooks.fish"
source "$dir/../../lib/wait-network.fish"
set -l mountDir (getOSConfig fileSystems.rootDir)
waitNetwork
and begin
@@ -39,20 +62,12 @@ begin
timedatectl set-timezone "$timezone"
end
and pacman-key --init
and pacman-key --populate
and pacstrap -K "$mountDir" \
base \
linux \
linux-firmware \
networkmanager \
man-db \
man-pages \
texinfo
and genfstab -U "$mountDir" >>"$mountDir/etc/fstab"
and arch-chroot "$mountDir" systemctl enable NetworkManager
and if isProgramEnabled "networkmanager"
and pacstrap -K "$mountDir" networkmanager
arch-chroot "$mountDir" systemctl enable NetworkManager
end
and if set -q timezone
arch-chroot "$mountDir" ln -sf "/usr/share/zoneinfo/$timezone" /etc/localtime
@@ -61,7 +76,7 @@ begin
and arch-chroot "$mountDir" hwclock --systohc
and begin
getOSConfig i18n.localeSettings --json | \
getOSConfig i18n.localeSettings --json |
jq --raw-output '[.[] | split(".") | .[0]] | unique | join("\\\\|")'
end | begin
read LOCALES
@@ -70,7 +85,7 @@ begin
end
and begin
getOSConfig i18n.localeSettings --json | \
getOSConfig i18n.localeSettings --json |
jq --raw-output '[keys[] as $key | "\($key)=\(.[$key])"] | join("\n")'
end | arch-chroot "$mountDir" tee /etc/locale.conf >/dev/null
@@ -80,6 +95,8 @@
and echo (getOSConfig hostname) | arch-chroot "$mountDir" tee /etc/hostname >/dev/null
and runInOS fish "$tempDir/../Software/systemd-networkd/main.fish"
and runInOS fish "$tempDir/../Software/btrfs/main.fish"
and arch-chroot "$mountDir" mkinitcpio -P
and runInOS fish "$tempDir/../Software/grub/main.fish"
@@ -95,16 +112,20 @@
end
end
function autologin -S
set -l file "/etc/systemd/system/getty@tty1.service.d/autologin.conf"
arch-chroot "$mountDir" mkdir -p (dirname "$file")
function autologin -S -V autologinConfig
set -l mountDir (getOSConfig fileSystems.rootDir)
arch-chroot "$mountDir" mkdir -p (dirname "$autologinConfig")
and begin
printf %s\n \
"[Service]" \
"ExecStart=" \
"ExecStart=-/sbin/agetty -o '-p -f -- \\u' --noclear --autologin root %I \$TERM"
end | arch-chroot "$mountDir" tee "$file" >/dev/null
end | arch-chroot "$mountDir" tee "$autologinConfig" >/dev/null
end
function getAutologinDisableCommand -V autologinConfig
echo "rm -rf $(string escape (dirname "$autologinConfig"))"
end
if not type -q getInstallerScript

View file

@@ -0,0 +1,19 @@
#!/bin/env fish
begin
set -l dir (status dirname)
source "$dir/../../lib/software.fish"
function installSW
pacinst btrfs-progs
end
function configureSW -V dir
source "$dir/../../../lib/settings.fish"
if isOSEnabled "programs.btrfs.pools"
echo "HOOKS+=(btrfs)" | sudo tee /etc/mkinitcpio.conf.d/btrfs.conf >/dev/null
end
end
runInstaller $argv
end

View file

@@ -10,6 +10,8 @@ begin
docker-compose \
docker-buildx \
docker-scan
installSWBase $argv
end
runInstaller $argv

View file

@@ -0,0 +1,14 @@
#!/bin/env fish
begin
set -l dir (status dirname)
source "$dir/../../../lib/software.fish"
inherit "$dir/../../../Common/Software/systemd-networkd/main.fish"
function configureSW -V dir
sudo systemctl enable systemd-networkd
sudo systemctl enable systemd-resolved
configureSWBase $argv
end
runInstaller $argv
end

View file

@@ -0,0 +1,11 @@
function installValhallaDeps -V dir -a mountDir
source "$dir/../lib/software.fish"
set -l args
if [ -n "$mountDir" ]
set -a args --root "$mountDir"
end
and pacinst $args fish git jq nix openssh sudo tmux
sudo systemctl enable --now nix-daemon
end

View file

@@ -31,7 +31,7 @@ function deploySoftware -d "Deploys a the specified software action" -a action
source "$dir/../Software/xone/main.fish" $argv
end
and for component in (getOSConfig hardware.components --json | jq '.[]' --raw-output0 | string split0)
and for component in (getOSConfig hardware.components --json | jq '.[]' --raw-output0 | string split0 || true)
switch "$component"
case "Logitech G903"
source "$dir/../../Common/Drivers/Logitech G903/main.fish" $argv
@@ -71,6 +71,8 @@ function deploySoftware -d "Deploys a the specified software action" -a action
end
and source "$dir/../../Common/Software/bash/main.fish" $argv
and source "$dir/../Software/systemd-networkd/main.fish" $argv
and source "$dir/../Software/btrfs/main.fish" $argv
and source "$dir/../../Common/Software/nuke-usb/main.fish" $argv
and source "$dir/../Software/sudo/main.fish" $argv
and source "$dir/../Software/aliae/main.fish" $argv
@@ -147,10 +149,13 @@ function deploySoftware -d "Deploys a the specified software action" -a action
and yayinst propertree-git # mac .plist config file editor
end
and if isProgramEnabled "thunderbird" && $isInstall
and if isProgramEnabled thunderbird && $isInstall
yayinst thunderbird
end
# Server
and source "$dir/../Software/nginx/main.fish" $argv
# School & Studies
and if collectionActive school && $isInstall
yayinst \
@@ -200,7 +205,7 @@ function deploySoftware -d "Deploys a the specified software action" -a action
audius-client-bin
end
and if isProgramEnabled "nextcloud"
and if isProgramEnabled nextcloud
yayinst nextcloud-client
end
end
@@ -284,7 +289,7 @@ function deploySoftware -d "Deploys a the specified software action" -a action
yayinst osu-lazer-bin
end
and if isProgramEnabled "retroarch"
and if isProgramEnabled retroarch
yayinst libretro
end
end

View file

@@ -4,6 +4,7 @@ source "$dir/../../lib/action.fish"
function backupAction -V dir
source "$dir/../../lib/hooks.fish"
source "$dir/../../lib/restoration.fish"
if not type -q getDeploymentScript
function getDeploymentScript
@@ -13,31 +14,7 @@ function backupAction -V dir
end
set -l deployScript (getDeploymentScript)
if fish "$dir/../../../lib/modules/partition/confirm.fish" "Do you wish to store the backup on an SSH server?" n
read -xP "Please specify the host name of the SSH server: " VALHALLA_BACKUP_SERVER
read -xP "Please specify the port of the SSH server (default 22): " VALHALLA_BACKUP_SERVER_PORT
read -xP "Please specify the name of the user to log in to the SSH server: " VALHALLA_BACKUP_SERVER_USER
if [ -z "$VALHALLA_BACKUP_SERVER_PORT" ]
set -x VALHALLA_BACKUP_SERVER_PORT 22
end
if [ -n "$VALHALLA_BACKUP_SERVER_USER" ]
set -x VALHALLA_BACKUP_SERVER "$VALHALLA_BACKUP_SERVER_USER@$VALHALLA_BACKUP_SERVER"
end
echo
echo "$(tput setaf 3)==== WARNING ====$(tput sgr0)"
echo "For a seamless experience, please make sure that you are able to establish an unattended ssh connection using key authentication."
echo
echo "$(tput bold)This command should succeed without user interaction:$(tput sgr0)"
echo "ssh -o PasswordAuthentication=no -p $VALHALLA_BACKUP_SERVER_PORT $VALHALLA_BACKUP_SERVER true"
read -P "Press enter once you're done: "
echo
end
read -xP "Please specify the path to the directory to save the backup to: " VALHALLA_BACKUP_DIR
initBackupConfig --action backup
runHook backupSoftware || begin
echo "Backing up software..."
@ -49,9 +26,9 @@ function backupAction -V dir
runHook backupUsers || begin
if [ -n "$deployScript" ]
for name in (getUsers | jq '.[]' --raw-output0 | string split0)
for name in (getUsers | jq '.[]' --raw-output0 | string split0 || true)
echo "Backing up user `$name`..."
and source $deployScript userBackup $name
and source $deployScript userBackup --user $name
end
end
end

View file

@ -4,6 +4,7 @@ source "$dir/../../lib/action.fish"
function installAction -V dir
source "$dir/../../lib/hooks.fish"
source "$dir/../../lib/restoration.fish"
if not type -q getDeploymentScript
function getDeploymentScript
@ -13,6 +14,7 @@ function installAction -V dir
end
set -l deployScript (getDeploymentScript)
initBackupConfig --action restore
runHook initialize || true
and runHook installOS || true
@ -32,9 +34,9 @@ function installAction -V dir
if [ -n "$deployScript" ]
source "$dir/../../lib/settings.fish"
for name in (getUsers | jq '.[]' --raw-output0 | string split0)
for name in (getUsers | jq '.[]' --raw-output0 | string split0 || true)
echo "Configuring user `$name`..."
and source $deployScript userConfig $name
and source $deployScript userConfig --user $name
end
end
end
@ -52,7 +54,7 @@ function installAction -V dir
and echo "This machine will reboot in 5 seconds..."
and echo "Press CTRL-C to abort..."
and sleep 5
and systemctl reboot -i
and sudo systemctl reboot -i
end
runSetupUserAction installAction

View file

@ -6,12 +6,12 @@ function runSetup
set -l projectName (basename "$projectRoot")
set -l PROJECT_CLONE_ROOT "/opt/$(basename "$projectName")"
function setupAction -V projectRoot -V PROJECT_CLONE_ROOT
function setupAction -V dir -V projectRoot -V PROJECT_CLONE_ROOT
source "$dir/../../lib/hooks.fish"
source "$dir/../../lib/nix.fish"
source "$dir/../../lib/restoration.fish"
source "$dir/../../lib/settings.fish"
set -l mountDir (getOSConfig partition.rootDir)
set -l script (mktemp)
chmod +x "$script"
set -l mountDir (getOSConfig fileSystems.rootDir)
if not type -q runChroot
function runChroot -S
@ -19,6 +19,12 @@ function runSetup
end
end
if not type -q getAutologinDisableCommand
function getAutologinDisableCommand
echo "true"
end
end
function getCloneFile -S -a path
set -l relativeDir (realpath --relative-to "$projectRoot" "$dir")
set -l relativePath (realpath --relative-to "$dir" "$path")
@ -39,38 +45,73 @@ function runSetup
"$argv"
end
initBackupConfig --action restore
echo "Partitioning drives..."
begin
set -l script (mktemp)
chmod +x "$script"
and getOSConfig fileSystems.script >"$script"
and "$script"
and rm "$script"
end
if [ -n "$VALHALLA_BACKUP_SERVER_KEY" ]
set -l knownHosts /root/.ssh/known_hosts
mkdir -p (dirname "$mountDir$VALHALLA_BACKUP_SERVER_KEY")
mkdir -p (dirname "$mountDir$knownHosts")
cp "$VALHALLA_BACKUP_SERVER_KEY" "$mountDir$VALHALLA_BACKUP_SERVER_KEY"
cp $knownHosts "$mountDir$knownHosts"
end
and echo "Cloning project..."
and source "$dir/../../lib/copy-repo.fish" "$mountDir$PROJECT_CLONE_ROOT"
runChroot "$mountDir" git config --system --add safe.directory "$PROJECT_CLONE_ROOT"
and begin
set -l path "$mountDir/$nixPkgsDir"
and mkdir -p (dirname "$path")
and cp -r "$nixPkgsDir" "$path"
and git -C "$path" reset --hard
end
and runHook initOS "Please set up a function `initOS` for initializing the mounted OS and installing valhalla dependencies"
and runChroot "$mountDir" git config --system --add safe.directory "$PROJECT_CLONE_ROOT"
and runHook --force bootstrapSetup "Please set up a function `bootstrapSetup` for installing `fish` into the mounted OS"
and runHook setupOS
and echo "Preparing auto-login..."
and runHook --force autologin "Please set up a function `autologin` for setting up autologin for the `root` user"
and begin
set -l profile "/root/.bash_profile"
begin
set -l profile (string escape "$profile")
set -l tmp (string escape "$profile""_")
set -l script (string escape (getCloneFile (getInstallerScript)))
wrapScript (
printf "%s\n" \
"mv $profile $tmp" \
(wrapScript (
string join " " \
"CONFIG_NAME=$(string escape "$CONFIG_NAME")" \
(string escape $script))
end | runChroot "$mountDir" tee /root/.bash_profile >/dev/null
(begin
for var in \
CONFIG_NAME \
VALHALLA_BACKUP_DISABLED \
VALHALLA_BACKUP_DIR \
VALHALLA_BACKUP_SERVER \
VALHALLA_BACKUP_SERVER_PORT \
VALHALLA_BACKUP_SERVER_USER \
VALHALLA_BACKUP_SERVER_KEY
echo "$var=$(string escape "$$var")"
end
function prepareNix
source "$dir/../../lib/nix.fish"
# Copy `nixpkgs` channel
mkdir -p (dirname "$mountDir/$nixPkgsDir")
cp -r "$nixPkgsDir" "$mountDir/$nixPkgsDir"
end) \
(string escape $script) "&&")) \
"rm $tmp &&" \
"$(getAutologinDisableCommand) ||" \
"mv $tmp $profile"
end | runChroot "$mountDir" tee "$profile" >/dev/null
end
function actionPreRun
echo "Partitioning drives..."
and getOSConfig partition.script >"$script"
and "$script"
and rm "$script"
end
function actionPostRun

View file

@ -5,7 +5,7 @@ set -l users (getUsers)
echo "Creating users..."
for name in (echo "$users" | jq '.[]' --raw-output0 | string split0)
for name in (echo "$users" | jq '.[]' --raw-output0 | string split0 || true)
echo "Creating user `$name`..."
function getUserInfo -V name -a config

View file

@ -27,7 +27,7 @@ $null = New-Module {
Value = "$Value";
} `
-User $User `
".alias |= [((. // [])[] | select(.name != env.Name))] + [{ name: env.Name, value: env.Value }]";
'.alias |= [((. // [])[] | select(.name != env(Name)))] + [{ "name": strenv(Name), "value": strenv(Value) }]';
}
<#
@ -53,7 +53,7 @@ $null = New-Module {
Value = "$Value";
} `
-User $User `
".env |= [((. // [])[] | select(.name != env.Name))] + [{ name: env.Name, value: env.Value }]";
'.env |= [((. // [])[] | select(.name != env(Name)))] + [{ "name": strenv(Name), "value": strenv(Value) }]';
}
<#
@ -92,7 +92,7 @@ $null = New-Module {
}
sudo @using:flags pwsh -CommandWithArgs 'Get-Content -Raw $args[0]' $using:path | Set-Content $file;
yq -yi $using:Script $file;
yq -i $using:Script $file;
sudo @using:flags pwsh -CommandWithArgs 'Set-Content $args[0] -Value $args[1]' $using:path (Get-Content -Raw $file);
Remove-Item $file;
} | Receive-Job -Wait;

View file

@ -6,7 +6,7 @@ begin
function configureSW -V dir
source "$dir/../bash/profile.fish"
source "$dir/../fish/profile.fish"
set -l file (pwsh -CommandWithArgs '. $args[0]; Get-GlobalConfigPath' "$dir/Constants.ps1")
set -l file (pwsh -NoProfile -CommandWithArgs '. $args[0]; Get-GlobalConfigPath' "$dir/Constants.ps1")
sudo install -Dm644 "$dir/aliae.yml" "$file"
begin

View file

@ -33,7 +33,7 @@ function installExtension -d "Installs a Chromium extension for the browser with
sudo chmod -R u+w "$extensionDir"
sudo mkdir -p "$policyDir"
for manifest in (find "$policyDir" -name "*.json" -print0 | string split0)
for manifest in (find "$policyDir" -name "*.json" -print0 | string split0 || true)
if [ (cat "$manifest" | jq -r ".$property") = "$destination" ]
sudo rm "$manifest"
end

View file

@ -12,5 +12,19 @@ begin
sudo usermod -aG docker "$_flag_user"
end
function installSWDependencies -V dir -V args
source "$dir/../../../lib/settings.fish"
set -la argv $args
argparse -i "name=" "user=" -- $argv
set -l services (getProgramConfig --name "$_flag_name" --json | jq '.services')
for service in (echo "$services" | jq '. // {} | keys[]' --raw-output0 | string split0 || true)
if echo "$services" | SERVICE=$service jq --exit-status ".[env.SERVICE].enable" >/dev/null
fish "$dir/services/$service/main.fish" $argv
end
end
end
runInstaller $argv
end

View file

@ -2,8 +2,5 @@ services:
anki:
image: yangchuansheng/anki-sync-server
restart: unless-stopped
extends:
file: docker-compose.secrets.yml
service: anki
volumes:
- ./data:/ankisyncdir

View file

@ -2,18 +2,18 @@
begin
set -l name anki
set -l dir (status dirname)
set -l source "$dir/docker-compose.secrets.yml"
set -l source "$dir/docker-compose.overrides.yml"
source "$dir/../service.fish"
function installSW -V dir -V source
set -l pw (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
set -l userKey ".services.anki.environment.SYNC_USER1"
set -l user (yq --raw-output "$userKey" "$source" | sed "s/:.*\$/:$pw/")
set -l user (yq "$userKey" "$source" | sed "s/:.*\$/:$pw/")
initializeServiceInstallation $argv
sudo cp "$dir/docker-compose.yml" (getServiceRoot $argv)
sudo cp "$dir/docker-compose.base.yml" (getServiceRoot $argv)
USER=$user yq "$userKey = env.USER" "$source" | \
sudo tee (getServiceSecretsConfig $argv) >/dev/null
USER=$user yq "$userKey = env(USER)" "$source" |
sudo tee (getServiceOverrides $argv) >/dev/null
installDockerService $argv
end

View file

@ -0,0 +1,4 @@
include:
- path:
- docker-compose.base.yml
- docker-compose.overrides.yml

View file

@ -0,0 +1,23 @@
services:
ci:
build:
context: .
dockerfile: ci.Dockerfile
restart: unless-stopped
env_file:
- server.common.env
docker:
image: drone/drone-runner-docker
restart: unless-stopped
env_file: runner.common.env
environment:
DRONE_RUNNER_NAME: docker-runner
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ssh:
image: drone/drone-runner-ssh
restart: unless-stopped
env_file:
- runner.common.env
environment:
DRONE_RUNNER_NAME: ssh-runner

View file

@ -1,35 +1,19 @@
services:
ci-template:
build:
context: .
dockerfile: ci.Dockerfile
extends:
file: docker-compose.secrets.yml
service: template
restart: unless-stopped
env_file:
- server.common.env
file: docker-compose.core.yml
service: ci
env_file: []
environment: {}
docker-template:
image: drone/drone-runner-docker
restart: unless-stopped
extends:
file: docker-compose.secrets.yml
service: template
file: docker-compose.core.yml
service: docker
depends_on: []
env_file:
- runner.common.env
environment:
DRONE_RUNNER_NAME: docker-runner
volumes:
- /var/run/docker.sock:/var/run/docker.sock
env_file: []
ssh-template:
image: drone/drone-runner-ssh
restart: unless-stopped
extends:
file: docker-compose.secrets.yml
service: template
file: docker-compose.core.yml
service: ssh
depends_on: []
env_file:
- runner.common.env
environment:
DRONE_RUNNER_NAME: ssh-runner
env_file: []

View file

@ -10,16 +10,16 @@ begin
function installSW -V dir -V environments -V source
set -l root (getServiceRoot $argv)
set -l config "$root/docker-compose.yml"
set -l secrets (getServiceSecretsConfig $argv)
set -l ciTemplate (yq (getServiceKey ci-template) "$source")
set -l dockerTemplate (yq (getServiceKey docker-template) "$source")
set -l sshTemplate (yq (getServiceKey ssh-template) "$source")
set -l config "$root/docker-compose.base.yml"
set -l overrides (getServiceOverrides $argv)
set -l ciTemplate (yq -oj (getServiceKey ci-template) "$source")
set -l dockerTemplate (yq -oj (getServiceKey docker-template) "$source")
set -l sshTemplate (yq -oj (getServiceKey ssh-template) "$source")
initializeServiceInstallation $argv
echo "{}" | sudo tee "$config" >/dev/null
echo "{}" | sudo tee "$secrets" >/dev/null
echo "{}" | sudo tee "$overrides" >/dev/null
cp "$dir"/{ci.Dockerfile,.dockerignore} "$root"
sudo cp "$dir"/{ci.Dockerfile,docker-compose.core.yml,.dockerignore} "$root"
echo "DRONE_JSONNET_ENABLED=true" | sudo tee "$root/server.common.env" >/dev/null
echo "DRONE_RUNNER_CAPACITY=2" | sudo tee "$root/runner.common.env" >/dev/null
@ -32,7 +32,7 @@ begin
set -l runners ssh docker
set -l services ci $runners
set -l tmpConfig (mktemp)
set -l tmpSecrets (mktemp)
set -l tmpOverrides (mktemp)
set -l ciName "$name-ci"
set -l sshName "$name-ssh-runner"
set -l dockerName "$name-docker-runner"
@ -41,7 +41,7 @@ begin
set -l dockerEnv
cp "$config" "$tmpConfig"
cp "$secrets" "$tmpSecrets"
cp "$overrides" "$tmpOverrides"
for serviceName in $services
set -l file (mktemp)
@ -62,53 +62,32 @@ begin
echo "DRONE_RPC_SECRET=$secret" | sudo tee "$root/$SECRET_ENV" >/dev/null
yq "$ciKey = $ciTemplate" "$tmpConfig" | \
ENTRY="./data/$name:/data" yq "$ciKey.volumes = [ env.ENTRY ]" | \
yq "$ciKey.env_file |= . + [ env.SECRET_ENV ]" | \
PROTO=https yq "$ciEnv.DRONE_SERVER_PROTO = env.PROTO" | \
HOST=(getServiceDomain "$subdomain" "$domain") yq "$ciEnv.DRONE_SERVER_HOST = env.HOST" | \
yq "$dockerKey = $dockerTemplate" | \
yq "$sshKey = $sshTemplate" | \
yq "$dockerKey.depends_on = [ env.CI_NAME ]" | \
yq -y "." | \
yq "$ciKey = $ciTemplate" "$tmpConfig" |
ENTRY="./data/$name:/data" yq "$ciKey.volumes = [ env(ENTRY) ]" |
yq "$ciKey.env_file |= . + [ env(SECRET_ENV) ]" |
PROTO=https yq "$ciEnv.DRONE_SERVER_PROTO = env(PROTO)" |
HOST=(getServiceDomain "$subdomain" "$domain") yq "$ciEnv.DRONE_SERVER_HOST = env(HOST)" |
yq "$dockerKey = $dockerTemplate" |
yq "$sshKey = $sshTemplate" |
yq "$dockerKey.depends_on = [ env(CI_NAME) ]" |
sudo tee "$config" >/dev/null
for key in $dockerKey $sshKey
set -l file (mktemp)
yq "$key.depends_on = [ env.CI_NAME ]" "$config" | \
yq "$key.env_file |= . + [ env.RUNNER_ENV, env.SECRET_ENV ]" | \
yq -y "." | \
yq "$key.depends_on = [ env(CI_NAME) ]" "$config" |
yq "$key.env_file |= . + [ env(RUNNER_ENV), env(SECRET_ENV) ]" |
tee "$file" >/dev/null
sudo cp "$file" "$config"
rm "$file"
end
PORT="127.0.0.1:1337:80" yq "$ciKey.ports = [ env.PORT ]" "$tmpSecrets" | \
yq -y "." | \
sudo tee "$secrets" >/dev/null
PORT="127.0.0.1:1337:80" yq "$ciKey.ports = [ env(PORT) ]" "$tmpOverrides" |
sudo tee "$overrides" >/dev/null
end
end
set -l services (yq --raw-output0 ".services | keys[]" "$config" | string split0)
for service in $services
set -l file (mktemp)
set -l key "$(getServiceKey "$service").extends"
cat "$config" | if string match "*-ci" "$service" >/dev/null
SERVICE="$service" yq "$key.service |= env.SERVICE" "$config"
else
yq "del($key)" "$config"
end | \
yq -y "." | \
tee "$file" >/dev/null
sudo cp "$file" "$config"
rm "$file"
end
installDockerService $argv
end
@ -133,8 +112,8 @@ begin
printf "%s\0" "$name-ci" /
end
function getBackupArgs
printf "%s\n" --hidden --no-ignore "data|\.secrets?\." (getServiceRoot $argv)
function getExtraBackupPatterns
echo "\.secret\.env\$"
end
runInstaller --force $argv

View file

@ -0,0 +1,54 @@
services:
forgejo:
image: codeberg.org/forgejo/forgejo:7
restart: unless-stopped
depends_on:
- db
environment:
USER_UID: 1337
USER_GID: 1337
FORGEJO__database__DB_TYPE: mysql
FORGEJO__database__HOST: db
FORGEJO__database__LOG_SQL: "false"
FORGEJO__repository__DEFAULT_BRANCH: main
FORGEJO__server__SSH_DOMAIN: "%(DOMAIN)s"
FORGEJO__server__ROOT_URL: https://%(DOMAIN)s/
FORGEJO__server__DISABLE_SSH: "false"
FORGEJO__server__LFS_START_SERVER: "true"
FORGEJO__service__REGISTER_MANUAL_CONFIRM: "true"
FORGEJO__actions__ENABLED: "true"
FORGEJO__openid__ENABLE_OPENID_SIGNUP: "false"
FORGEJO__cron.git_gc_repos__ENABLED: "true"
FORGEJO__cron.gc_lfs__ENABLED: "true"
volumes:
- ./data/forgejo:/data
- config:/data/gitea/conf
- /etc/timezone:/etc/timezone:ro
- /srv/git/.ssh:/data/git/.ssh
db:
image: mariadb
restart: unless-stopped
environment:
MARIADB_RANDOM_ROOT_PASSWORD: "yes"
MARIADB_AUTO_UPGRADE: "yes"
volumes:
- ./data/db:/var/lib/mysql
command:
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_unicode_ci
bridge:
image: shenxn/protonmail-bridge
restart: unless-stopped
volumes:
- ./data/bridge:/root
runner:
image: gitea/act_runner
restart: unless-stopped
depends_on:
- forgejo
volumes:
- ./data/act:/data
- /var/run/docker.sock:/var/run/docker.sock
volumes:
config: {}

View file

@ -0,0 +1,8 @@
services:
forgejo:
environment: {}
ports:
- 127.0.0.1:1337:3000
- 127.0.0.1:1338:22
db: {}
runner: {}

View file

@ -0,0 +1,109 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l user git
set -l domain git
set -l server "$domain" ""
set -l service "forgejo"
source "$dir/../service.fish"
function getSSHPortKey -V service
echo "$(getServiceKey "$service").ports[1]"
end
function installSW -V dir -V domain -V server -V service
set -l root (getServiceRoot $argv)
set -l overrides (getServiceOverrides $argv)
set -l source "$dir/$(basename "$overrides")"
set -l pw (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
set -l port (getRandomPort)
initializeServiceInstallation $argv
sudo cp "$dir/docker-compose.base.yml" "$root"
set port (yq (getSSHPortKey) "$source" | mutatePort "$port")
DOMAIN=(getServiceDomain $server) DB=Git USER=forgejo PW=$pw begin
set -l gitEnv "$(getServiceKey "$service").environment"
set -l actEnv "$(getServiceKey "runner").environment"
set -l dbEnv "$(getServiceKey "db").environment"
PORT=$port yq "$(getSSHPortKey) = env(PORT)" "$source" |
yq "$gitEnv.FORGEJO__server__DOMAIN = env(DOMAIN)" |
yq "$gitEnv.FORGEJO__database__NAME = env(DB)" |
yq "$gitEnv.FORGEJO__database__USER = env(USER)" |
yq "$gitEnv.FORGEJO__database__PASSWD = env(PW)" |
yq "$dbEnv.MARIADB_DATABASE = env(DB)" |
yq "$dbEnv.MARIADB_USER = env(USER)" |
yq "$dbEnv.MARIADB_PASSWORD = env(PW)" |
URL="https://$DOMAIN/" yq "$actEnv.GITEA_INSTANCE_URL = env(URL)" |
sudo tee "$overrides" >/dev/null
end
installDockerService $argv
end
function configureSW -V dir -V user -V service
set -l uid
set -l gid
set -l port
set -l file (mktemp)
set -l root (getServiceRoot $argv)
set -l home /srv/git
set -l sshDir "$home/.ssh"
set -l keyFile "$sshDir/id_rsa"
set -l dir "$root/data"
set -l bin /usr/local/bin/forgejo
set -l config "$root/docker-compose.base.yml"
set -l overrides (getServiceOverrides $argv)
set -l envKey "$(getServiceKey "$service").environment"
configureDockerService $argv
cp "$config" "$file"
and sudo useradd \
--system \
--shell /bin/bash \
--comment 'Git Version Control' \
$user
set uid (id -u $user)
set gid (id -g $user)
and sudo usermod -d "$home" "$user"
and yq "$envKey.USER_UID = $uid" "$file" |
yq "$envKey.USER_GID = $gid" |
sudo tee "$config" >/dev/null
rm "$file"
and sudo mkdir -p "$dir"
and sudo mkdir -p "$home"
and sudo chown -R $uid:$gid "$dir"
and sudo chown -R $uid:$gid "$home"
and sudo -u "#$uid" mkdir -p "$sshDir"
and sudo -u "#$uid" ssh-keygen -t rsa -b 4096 -C "Forgejo Host Key" -f "$keyFile" -N ""
and sudo -u "#$uid" cat "$keyFile.pub" | sudo -u "#$uid" tee -a "$sshDir/authorized_keys"
and sudo chmod 600 "$sshDir/authorized_keys"
set port (yq (getSSHPortKey) "$overrides" | extractPort)
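# Shim installed on the host: it relays SSH git commands to the container's forwarded SSH port.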
begin
printf "%s\n" \
"#!/bin/sh" \
"ssh -p $port -o StrictHostKeyChecking=no git@127.0.0.1 \"SSH_ORIGINAL_COMMAND=\\\"\$SSH_ORIGINAL_COMMAND\\\" \$0 \$@\""
end | sudo tee "$bin" >/dev/null
sudo chmod +x "$bin"
sudo ln -s "$bin" "$(dirname "$bin")/gitea"
end
function getServiceServers -V server
printf "%s\0" $server
end
function getServiceLocations
argparse -i "name=" -- $argv
printf "%s\0" "$_flag_name" /
end
runInstaller --force $argv
end

View file

@ -0,0 +1,6 @@
config/
downloads/
media/
docker-compose.yml
*.env
*.Dockerfile

View file

@ -0,0 +1,151 @@
services:
jellyfin:
image: jellyfin/jellyfin
restart: unless-stopped
user: 1337:1337
hostname: Jellyfin
volumes:
- ./data/config/jellyfin:/config
- cache:/cache
- ./data/media:/media
jellyseerr:
image: fallenbagel/jellyseerr
restart: unless-stopped
user: 1337:1337
environment:
LOG_LEVEL: debug
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/config/jellyseerr:/app/config
radarr:
image: linuxserver/radarr
restart: unless-stopped
environment:
PUID: 1337
PGID: 1337
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/config/radarr:/config
- ./data/media/movies:/movies
- ./data/downloads:/downloads
sonarr:
image: linuxserver/sonarr
restart: unless-stopped
environment:
PUID: 1337
PGID: 1337
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/config/sonarr:/config
- ./data/media/series:/tv
- ./data/downloads:/downloads
lidarr:
image: linuxserver/lidarr
restart: unless-stopped
environment:
PUID: 1337
PGID: 1337
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/config/lidarr:/config
- ./data/media/music:/music
- ./data/downloads:/downloads
prowlarr:
image: linuxserver/prowlarr
restart: unless-stopped
environment:
PUID: 1337
PGID: 1337
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/config/prowlarr:/config
flaresolverr:
image: flaresolverr/flaresolverr
restart: unless-stopped
environment:
LOG_LEVEL: info
LOG_HTML: "false"
CAPTCHA_SOLVER: none
privoxy:
build:
context: .
dockerfile_inline: |
FROM walt3rl/proton-privoxy
RUN apk --update add ip6tables
restart: unless-stopped
volumes:
- /etc/localtime:/etc/localtime:ro
devices:
- /dev/net/tun
cap_add:
- NET_ADMIN
flood:
image: jesec/flood
restart: unless-stopped
user: 1337:1337
command: --baseuri /flood
--rundir /flood
--allowedpath /downloads
--rtsocket /rtorrent/.local/share/rtorrent/rtorrent.sock
volumes:
- ./data/config/flood:/flood
- ./data/downloads:/downloads
- rtorrent:/rtorrent
rtorrent:
build:
dockerfile: ./rtorrent.Dockerfile
context: .
restart: unless-stopped
hostname: rtorrent
environment:
PUID: 1337
PGID: 1337
PHOME: /config
MAX_UPTIME: 43200
command: -o ratio.enable=
-o ratio.min.set=200
-o ratio.max.set=10000
-o directory.default.set=/downloads
-o 'method.set=group.seeding.ratio.command, "d.close = ; d.erase = "'
volumes:
- /etc/localtime:/etc/localtime:ro
- rtorrent:/config
- ./data/downloads:/downloads
- ./data/proton:/proton
devices:
- /dev/net/tun
cap_add:
- NET_ADMIN
# transmission:
# build:
# dockerfile: ./transmission.Dockerfile
# context: .
# restart: unless-stopped
# hostname: transmission
# env_file:
# - ./proton.env
# environment:
# PUID: 1337
# PGID: 1337
# PVPN_TIER: 1337
# TZ: Europe/Zurich
# TRANSMISSION_WEB_HOME: /transmission
# TRANSMISSION_WEB_USER: "scott"
# TRANSMISSION_WEB_PASS: "tiger"
# MAX_UPTIME: -1
# command: --no-incomplete-dir
# --download-dir /downloads
# ports:
# - 127.0.0.1:1337:9091
# volumes:
# - ./config/transmission:/config
# - ./downloads:/downloads
# - /etc/localtime:/etc/localtime:ro
# devices:
# - /dev/net/tun
# cap_add:
# - NET_ADMIN
volumes:
cache: {}
rtorrent: {}

View file

@ -0,0 +1,23 @@
services:
jellyfin:
environment: {}
ports:
- 127.0.0.1:1337:8096
jellyseerr:
ports:
- 127.0.0.1:1337:5055
radarr:
ports:
- 127.0.0.1:1337:7878
sonarr:
ports:
- 127.0.0.1:1337:8989
lidarr:
ports:
- 127.0.0.1:1337:8686
prowlarr:
ports:
- 127.0.0.1:1337:9696
flood:
ports:
- 127.0.0.1:1337:3000

View file

@ -0,0 +1,175 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l user jellyfin
set -l domain media
set -l server "$domain" ""
set -l suggest "jellyseerr"
set -l servarr radarr sonarr lidarr prowlarr
set -l flood flood
set -l service $user
source "$dir/../service.fish"
function installSW -V dir -V domain -V server -V service
set -l root (getServiceRoot $argv)
set -l overrides (getServiceOverrides $argv)
set -l source "$dir/$(basename "$overrides")"
set -l port (getRandomPort)
set -l servarrKeys
initializeServiceInstallation $argv
sudo cp "$dir/docker-compose.base.yml" "$root"
sudo cp "$dir/.dockerignore" "$root"
sudo cp "$dir/pvpn-cli.py" "$root"
sudo cp "$dir/rtorrent.Dockerfile" "$root"
sudo cp "$source" "$overrides"
installDockerService $argv
end
function configureSW -V dir -V user -V domain -V service -V servarr -V flood
set -l uid
set -l gid
set -l port
set -l file (mktemp)
set -l root (getServiceRoot $argv)
set -l config "$root/docker-compose.base.yml"
set -l overrides (getServiceOverrides $argv)
set -l envKey "$(getServiceKey "$service").environment"
configureDockerService $argv
and sudo useradd \
--system \
--shell /bin/false \
--comment 'Jellyfin server' \
--create-home \
$user
set uid (id -u $user)
set gid (id -g $user)
for name in $service $flood
set -l userKey "$(getServiceKey "$name").user"
cp "$config" "$file"
USER=$uid:$gid yq "$userKey = env(USER)" "$file" | sudo tee "$config" >/dev/null
end
for name in $servarr rtorrent
set -l envKey "$(getServiceKey "$name").environment"
sudo cp "$config" "$file"
and yq "$envKey.PUID = $uid" "$file" |
yq "$envKey.PGID = $gid" |
sudo tee "$config" >/dev/null
end
cp "$overrides" "$file"
URL="https://$(getServiceDomain "$domain" "")/" yq "$(getServiceKey "$service").environment.JELLYFIN_PublishedServerUrl = env(URL)" "$file" |
sudo tee "$overrides" >/dev/null
for dir in "$root"/data/{downloads,config/{,jellyfin,flood,radarr,sonarr,lidarr,prowlarr},media/{,movies,series,music}}
sudo mkdir -p "$dir"
and sudo chown -R $uid:$gid "$dir"
end
rm "$file"
end
function getServiceServers -V server
printf "%s\0" $server
end
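# Emits NUL-separated (service, location, comment) triplets which the nginx config generator turns into proxy locations.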
function getServiceLocations -V suggest -V servarr -V flood
argparse -i "name=" -- $argv
printf "%s\0" \
"$_flag_name" / "" \
"$suggest" "/suggest" "Jellyseerr" (
for app in $servarr
printf "%s\n" "$app" "/$app" ""
printf "%s\n" "$app" "/$app/signalr" ""
end) \
flood "/flood/"
end
function getServiceLocationConfig -a domain s location -V service -V flood
if [ "$s" = "$service" ]
set -l argv $argv[4..]
printf "%s\n" \
"location = / {" \
'return 302 $scheme://$host/web/;' \
"}"
getServiceDefaultProxy $domain $s "$location" --comment "Proxy main Jellyfin traffic" $argv
getServiceDefaultProxy $domain $s "= /web/" --path "/web/index.html" --comment "Proxy main Jellyfin traffic" $argv
getServiceDefaultProxy $domain $s /socket --comment "Proxy Jellyfin Websockets traffic" $argv
else if [ "$s" = "$flood" ]
getServiceDefaultProxy $argv
printf "%s\n" \
"location = /flood {" \
'return 302 $scheme://$host$uri/$is_args$args;' \
"}"
else
getServiceDefaultProxy $argv
end
end
function getExtraLocationSettings -a domain s location -V service -V suggest -V servarr
set -l wsConfig (
printf "%s\n" \
'# Websocket' \
"proxy_http_version 1.1;" \
'proxy_set_header Upgrade $http_upgrade;' \
'proxy_set_header Connection "upgrade";')
if [ "$s" = "$service" ]
if [ "$location" = / ]
printf "%s\n" \
"# Disable buffering when the nginx proxy gets very resource heavy upon streaming" \
"proxy_buffering off;"
else if [ "$location" = /socket ]
echo "$wsConfig"
end
else if [ "$s" = "$suggest" ]
printf "%s\n" \
"" \
'set $app \'suggest\';' \
"# Remove /suggest path to pass to the app" \
'rewrite ^/suggest/?(.*)$ /$1 break;' \
"" \
"#Redirect location headers" \
'proxy_redirect ^ /$app;' \
'proxy_redirect /setup /$app/setup;' \
'proxy_redirect /login /$app/login;' \
"" \
"# Sub filters to replace hardcoded paths" \
'proxy_set_header Accept-Encoding "";' \
'sub_filter_once off;' \
'sub_filter_types *;' \
'sub_filter \'</head>\' \'<script language="javascript">(()=>{var t="$app";let e=history.pushState;history.pushState=function a(){arguments[2]&&!arguments[2].startsWith("/"+t)&&(arguments[2]="/"+t+arguments[2]);let s=e.apply(this,arguments);return window.dispatchEvent(new Event("pushstate")),s};let a=history.replaceState;history.replaceState=function e(){arguments[2]&&!arguments[2].startsWith("/"+t)&&(arguments[2]="/"+t+arguments[2]);let s=a.apply(this,arguments);return window.dispatchEvent(new Event("replacestate")),s},window.addEventListener("popstate",()=>{console.log("popstate")})})();</script></head>\';' \
'sub_filter \'href="/"\' \'href="/$app"\';' \
'sub_filter \'href="/login"\' \'href="/$app/login"\'' \
'sub_filter \'href:"/"\' \'href:"/$app"\';' \
'sub_filter \'\/_next\' \'\/$app\/_next\';' \
'sub_filter \'/_next\' \'/$app/_next\';' \
'sub_filter \'/api/v1\' \'/$app/api/v1\';' \
'sub_filter \'/login/plex/loading\' \'/$app/login/plex/loading\';' \
'sub_filter \'/images/\' \'/$app/images/\';' \
'sub_filter \'/android-\' \'/$app/android-\';' \
'sub_filter \'/apple-\' \'/$app/apple-\';' \
'sub_filter \'/favicon\' \'/$app/favicon\';' \
'sub_filter \'/logo_\' \'/$app/logo_\';' \
'sub_filter \'/site.webmanifest\' \'/$app/site.webmanifest\';'
else if contains "$s" $servarr
if string match --regex ".*/signalr^" "$location" >/dev/null
echo "$wsConfig"
end
end
end
function getExtraBackupPatterns
echo "^proton\.env\$"
end
runInstaller --force $argv
end

View file

@ -0,0 +1,100 @@
from argparse import ArgumentParser
from os import chmod, environ
from os.path import dirname
from random import choice
from re import M
import shlex
import subprocess
import sys
from protonvpn_cli import connection
from protonvpn_cli.constants import PASSFILE
from protonvpn_cli.utils import check_init, get_fastest_server, get_servers, set_config_value, pull_server_data
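# Re-runs the upstream proton entrypoint with the assembled CLI arguments and exits with its return code.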
def run_proton(args):
exit(
subprocess.run(
["proton"],
cwd="/app",
env=dict(
environ,
PVPN_CMD_ARGS=" ".join(args))).returncode)
environ["PVPN_USERNAME"] = environ["PVPN_USERNAME"] + (environ["PVPN_TAGS"] or "")
with open(PASSFILE, "w") as f:
f.write("{0}\n{1}".format(environ["PVPN_USERNAME"], environ["PVPN_PASSWORD"]))
chmod(PASSFILE, 0o600)
check_init()
set_config_value("USER", "username", environ["PVPN_USERNAME"])
set_config_value("USER", "tier", environ["PVPN_TIER"])
set_config_value("USER", "default_protocol", environ["PVPN_PROTOCOL"])
set_config_value("USER", "initialized", 1)
args = sys.argv[1:]
if not args:
args = shlex.split(environ.get("PVPN_CMD_ARGS") or "")
environ["PVPN_CMD_ARGS"] = ""
parser = ArgumentParser(exit_on_error=False)
subParsers = parser.add_subparsers(dest="command")
initParser = subParsers.add_parser("init", aliases=["i"])
connectParser = subParsers.add_parser("connect", aliases=["c"])
for aliases in [
["-f", "--fastest"],
["-r", "--random"],
["-s", "--streaming"],
["--sc"],
["--p2p"],
["--tor"]
]:
connectParser.add_argument(*aliases, action="store_true")
connectParser.add_argument("--cc")
parsedArgs = None
try:
parsedArgs = parser.parse_args(args)
except:
pass
if parsedArgs is not None and (
len(
list(
filter(
lambda item: item[1] not in [False, None],
vars(parsedArgs).items()))) > 1):
def match(server):
features = list()
if parsedArgs.streaming:
pass
if parsedArgs.sc:
pass
if parsedArgs.p2p:
pass
if parsedArgs.tor:
pass
return (parsedArgs.cc is None or server.exit_country.lower() == parsedArgs.cc.lower()) and (
all(feature in server.features for feature in features))
pull_server_data(force=True)
servers = list(filter(lambda server: match(server), get_servers()))
if len(servers) > 0:
if parsedArgs.fastest or not parsedArgs.random:
server = get_fastest_server(servers)
else:
server = choice(servers)
run_proton(["connect", server["Name"]])
else:
raise Exception(
f"Unable to find a server matching the specified criteria {args[1:]}!")
else:
run_proton(args)

View file

@ -0,0 +1,122 @@
FROM walt3rl/proton-privoxy AS proton
FROM jesec/rtorrent AS rtorrent
FROM debian
ARG PVPN_CLI_VER=2.2.12
ARG USERNAME=proton
ENV PVPN_USERNAME= \
PVPN_USERNAME_FILE= \
PVPN_PASSWORD= \
PVPN_PASSWORD_FILE= \
PVPN_TIER=2 \
PVPN_PROTOCOL=udp \
PVPN_TAGS="+pmp" \
PVPN_CMD_ARGS="connect --p2p --random" \
PVPN_DEBUG= \
HOST_NETWORK= \
DNS_SERVERS_OVERRIDE= \
PUID=1000 \
PGID=1000 \
PHOME=/home/${USERNAME} \
NATPMP_TIMEOUT=60 \
NATPMP_INTERVAL= \
MAX_UPTIME=
WORKDIR /root
COPY --from=rtorrent / /
RUN apt-get update -y \
&& apt-get upgrade -y \
&& apt-get install -y \
git \
iproute2 \
iptables \
natpmpc \
openvpn \
pipenv \
procps \
python3 \
python3-pip \
python3-setuptools \
sudo \
&& rm -rf /var/lib/apt/lists
RUN pip3 install --break-system-packages git+https://github.com/Rafficer/linux-cli-community.git@v$PVPN_CLI_VER#egg=protonvpn-cli
RUN mkdir /app
COPY --from=proton /app/proton-privoxy/run /app/proton
COPY --from=proton /root/.pvpn-cli/pvpn-cli.cfg.clean /root/.pvpn-cli/pvpn-cli.cfg
RUN \
sed -i \
-e "/^exec privoxy/d" \
-e "/^ln -s/d" \
/app/proton \
&& install -t /usr/local/bin /app/proton \
&& rm /app/proton
RUN printf "%s\n" \
"python3 /app/pvpn-cli.py \"\$@\"" > ./pvpn-cli \
&& install -Dm 755 ./pvpn-cli /usr/local/bin \
&& rm ./pvpn-cli
RUN printf "%s\n" \
"#!/bin/bash" \
"groupadd --gid \$PGID ${USERNAME} > /dev/null" \
"useradd --create-home --home-dir \$PHOME ${USERNAME} --uid \$PUID -g ${USERNAME} 2>/dev/null" \
"chown ${USERNAME} \$PHOME" \
'[ ! -z "$1" ] && [ "$1" = "init" ] && export PVPN_CMD_ARGS="$@"' \
'if [ -z "$PVPN_USERNAME" ] && [ -z "$PVPN_USERNAME_FILE" ]; then' \
" echo 'Error: Either env var \$PVPN_USERNAME or \$PVPN_USERNAME_FILE is required.'" \
"exit 1" \
"fi" \
"" \
'if [ -z "$PVPN_PASSWORD" ] && [ -z "$PVPN_PASSWORD_FILE" ]; then' \
"echo 'Error: Either env var \$PVPN_PASSWORD or \$PVPN_PASSWORD_FILE is required.'" \
"exit 1" \
"fi" \
"" \
'[ -f "$PVPN_USERNAME_FILE" ] && PVPN_USERNAME=$(cat "$PVPN_USERNAME_FILE")' \
'[ -f "$PVPN_PASSWORD_FILE" ] && PVPN_PASSWORD=$(cat "$PVPN_PASSWORD_FILE")' \
"pvpn-cli || exit" \
'ip link show proton0 > /dev/null 2>&1 || exit' \
'fallback="$(expr ${NATPMP_TIMEOUT} \* 3 / 4)"' \
'export NATPMP_INTERVAL="${NATPMP_INTERVAL:-$fallback}"' \
'echo "Opening a port using NAT-PMP for $NATPMP_TIMEOUT seconds…"' \
'output="$(natpmpc -a 0 0 tcp "$NATPMP_TIMEOUT")"' \
'natpmpc -a 0 0 udp "$NATPMP_TIMEOUT"' \
'port="$(echo "$output" | grep -m 1 " public port [[:digit:]]\+ " | sed "s/.* public port \([[:digit:]]\+\).*/\\1/")"' \
'echo "Port $port has been opened for P2P data transfer!"' \
'echo "The NAT-PMP port forwarding will be updated every $NATPMP_INTERVAL seconds"' \
'export PEERPORT="$port"' \
"{" \
" while true" \
" do" \
' echo "Refreshing NAT-PMP port forwarding…"' \
' natpmpc -a 0 0 udp "$NATPMP_TIMEOUT"' \
' natpmpc -a 0 0 tcp "$NATPMP_TIMEOUT"' \
' echo "NAT-PMP port forwarding has been refreshed!"' \
' sleep "$NATPMP_INTERVAL"' \
" done" \
"} &" \
"set -m" \
'[ ${MAX_UPTIME:-0} -gt 0 ] && {' \
' sudo -iu '"${USERNAME}"' rtorrent -o network.port_range.set=$port-$port,system.daemon.set=true $@ &' \
' pid=$!' \
' sleep "$MAX_UPTIME"' \
' kill -9 $pid' \
'} || {' \
' sudo -u '"${USERNAME}"' rtorrent -o network.port_range.set=$port-$port,system.daemon.set=true $@' \
'}' > ./rtorrent-entrypoint \
&& install -Dm 755 ./rtorrent-entrypoint /usr/local/bin \
&& rm ./rtorrent-entrypoint
COPY pvpn-cli.py /app/pvpn-cli.py
#RUN apt-get update -y \
# && apt-get install -y sudo
# RUN echo "${USERNAME} ALL=(ALL:ALL) NOPASSWD: ALL" >> /etc/sudoers
VOLUME [ "/proton" ]
ENTRYPOINT [ "rtorrent-entrypoint" ]

View file

@ -0,0 +1,30 @@
services:
legacy:
image: itzg/minecraft-server
restart: unless-stopped
environment:
UID: 1337
GID: 1337
EULA: "TRUE"
ports:
- 25565:25565
volumes:
- /etc/localtime:/etc/localtime:ro
- ./mods:/mods
- ./config/legacy:/config
- ./data/legacy:/data
- ./legacy.properties:/data/server.properties
bedrock:
image: itzg/minecraft-bedrock-server
restart: unless-stopped
environment:
UID: 1337
GID: 1337
EULA: "TRUE"
ports:
- 19132:19132/udp
- 19133:19133/udp
volumes:
- /etc/localtime:/etc/localtime:ro
- ./data/bedrock:/data
- ./bedrock.properties:/data/server.properties

View file

@ -0,0 +1,73 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l user minecraft
source "$dir/../service.fish"
function installSW -V dir -V domain -V server -V service
initializeServiceInstallation $argv
sudo cp "$dir/docker-compose.yml" (getServiceRoot $argv)
installDockerService $argv
end
function configureSW -V dir -V user -V service
set -l uid
set -l gid
set -l file (mktemp)
set -l root (getServiceRoot $argv)
set -l dirs "$root"/{mods,{data,config}/{legacy,bedrock}}
set -l files "$root"/{legacy,bedrock}.properties
set -l config "$root/docker-compose.yml"
configureDockerService $argv
and sudo useradd \
--system \
--shell /bin/false \
--comment 'Minecraft server' \
--groups docker \
$user
set uid (id -u $user)
set gid (id -g $user)
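# Rewrite the UID/GID environment of every service in the compose file so the containers run as the minecraft user.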
for service in (yq -0 ".services | keys[]" "$config" | string split0 || true)
set -l envKey "$(getServiceKey "$service").environment"
cp "$config" "$file"
and yq "$envKey.UID = $uid" "$file" |
yq "$envKey.GID = $gid" |
sudo tee "$config" >/dev/null
end
and for dir in $dirs
sudo mkdir -p "$dir"
end
and begin
set -l file
for file in $files
sudo mkdir -p (dirname "$file")
sudo touch "$file"
end
end
and for item in $dirs $files
sudo chown -R $uid:$gid "$item"
end
rm "$file"
end
function getServiceServers -V server
end
function getServiceLocations
end
function getExtraBackupPatterns
echo "\.properties\$|^(config|mods)\$"
end
runInstaller --force $argv
end

View file

@ -0,0 +1 @@
*

View file

@ -0,0 +1,3 @@
FROM redis
ENV REDIS_HOST_PASSWORD default-password
CMD ["sh", "-c", "exec redis-server --requirepass \"${REDIS_HOST_PASSWORD}\""]

View file

@ -0,0 +1,33 @@
FROM nextcloud:fpm
# Workaround for Nextcloud image not including `bz2`
RUN apt-get update && \
apt-get install -y \
libbz2-dev \
libfcgi-bin && \
rm -rf /var/lib/apt/lists/* && \
docker-php-ext-install bz2
RUN apt-get update && \
curl -fsSL https://deb.nodesource.com/setup_lts.x | bash - && \
apt-get install -y nodejs && \
rm -rf /var/lib/apt/lists/*
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libmagickcore-6.q16-6-extra \
libxss1 \
libx11-xcb1 \
wget && \
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \
sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list' && \
apt-get update && \
apt-get install -y google-chrome-stable --no-install-recommends && \
apt-get remove -y wget gnupg && \
rm -rf /var/lib/apt/lists/* && \
rm /etc/apt/sources.list.d/google.list
RUN curl -Lo /usr/local/bin/php-fpm-healthcheck https://raw.githubusercontent.com/renatomefi/php-fpm-healthcheck/v0.5.0/php-fpm-healthcheck && \
chmod +x /usr/local/bin/php-fpm-healthcheck
RUN npm install --global pageres-cli --unsafe-perm

View file

@ -0,0 +1,107 @@
services:
web:
image: nginx
extends:
file: docker-compose.core.yml
service: web
volumes:
- ./nginx/web.conf:/etc/nginx/nginx.conf
- ./nginx/logs/nginx:/var/log/nginx
depends_on:
core:
condition: service_healthy
installer:
extends:
file: docker-compose.core.yml
service: setup
image: nextcloud:fpm
restart: on-failure
entrypoint:
- bash
- -c
- /entrypoint.sh php-fpm & while ! echo '' 2>/dev/null >/dev/tcp/127.0.0.1/9000; do sleep 1; done; kill -9 $!; true;
db:
image: mariadb:lts
restart: unless-stopped
env_file: db.env
environment:
MARIADB_RANDOM_ROOT_PASSWORD: "yes"
MARIADB_MYSQL_LOCALHOST_USER: 1
volumes:
- ./data/db:/var/lib/mysql
command:
- --innodb_read_only_compressed=OFF
healthcheck:
test: [CMD, healthcheck.sh, --su-mysql, --connect, --innodb_initialized]
start_period: 1m
start_interval: 10s
interval: 1m
timeout: 5s
retries: 3
cache:
build:
context: .
dockerfile: cache.Dockerfile
restart: unless-stopped
env_file:
- cache.env
volumes:
- ./data/cache:/data
healthcheck:
test: [CMD, bash, -c, echo '' > /dev/tcp/127.0.0.1/6379]
interval: 5s
timeout: 3s
retries: 5
core:
extends:
file: docker-compose.core.yml
service: nextcloud
build:
context: .
dockerfile: cloud.Dockerfile
cap_add:
- SYS_ADMIN
depends_on: &nextcloud-conditions
db:
condition: service_healthy
cache:
condition: service_healthy
installer:
condition: service_completed_successfully
restart: true
healthcheck:
test: [CMD, bash, -c, php-fpm-healthcheck]
start_period: 1m
start_interval: 10s
interval: 1m
timeout: 5s
retries: 5
cron:
extends:
file: docker-compose.core.yml
service: nextcloud
image: nextcloud:fpm
depends_on:
<<: *nextcloud-conditions
entrypoint: /cron.sh
bridge:
image: shenxn/protonmail-bridge
restart: unless-stopped
volumes:
- ./data/bridge:/root
turn:
image: instrumentisto/coturn
restart: unless-stopped
collabora:
image: collabora/code
restart: unless-stopped
environment:
dictionaries: de_CH de en
extra_params: '--o:logging.level_startup=warning --o:ssl.enable=true --o:ssl.termination=true --o:user_interface.mode=notebookbar'
volumes:
- /etc/localtime:/etc/localtime:ro
cap_add:
- MKNOD
volumes:
webroot: {}

View file

@ -0,0 +1,32 @@
services:
web:
restart: unless-stopped
volumes:
- webroot:/var/www/html:z
- ./php.ini:/usr/local/etc/php/conf.d/nextcloud.ini
- ./data/cloud/apps:/var/www/html/custom_apps
- ./data/cloud/config:/var/www/html/config
- ./data/cloud/data:/var/www/html/data
- ./data/cloud/themes:/var/www/html/themes
- ./data/public:/public
- ../jellyfin/downloads:/downloads
setup:
extends:
service: web
environment:
FCGI_STATUS_PATH: "/fpm-status"
env_file:
- db.env
- cache.env
volumes:
- ./fpm/nextcloud.conf:/usr/local/etc/php-fpm.d/zz-nextcloud.conf
- ./fpm/status.conf:/usr/local/etc/php-fpm.d/zz-status.conf
nextcloud:
extends:
service: setup
env_file:
- nextcloud.env
environment:
MYSQL_HOST: db
REDIS_HOST: cache
TRUSTED_PROXIES: 172.16.0.0/12

View file

@ -0,0 +1,12 @@
services:
web:
ports:
- 127.0.0.1:1337:80
turn:
ports:
- 1337:3478/tcp
- 1337:3478/udp
command: [-n, --log-file=stdout, --min-port=49160, --max-port=49200, --use-auth-secret]
collabora:
ports:
- 127.0.0.1:1337:9980

View file

@ -0,0 +1,5 @@
[www]
; pm.max_children = 200
; pm.start_servers = 50
; pm.min_spare_servers = 50
; pm.max_spare_servers = 150

View file

@ -0,0 +1 @@
pm.status_path = /fpm-status

View file

@ -0,0 +1,163 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l turn turn
set -l domain cloud ""
set -l service web
set -l office collabora
set -l officeDomain office ""
set -l server $service $domain
set -l services \
$server \
$office $officeDomain
source "$dir/../service.fish"
function installSW -V dir -V domain -V service -V turn -V office -V officeDomain
set -l genPW __generatePW
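# Password generator backed by keepassxc-cli; the length defaults to 32 characters.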
function $genPW -a length
if [ -z "$length" ]
set length 32
end
nix-shell -p keepassxc --run "keepassxc-cli generate --length $length"
end
set -l file (mktemp)
set -l root (getServiceRoot $argv)
set -l source "$dir/docker-compose.core.yml"
set -l core "$root/$(basename "$source")"
set -l domain (getServiceDomain $domain)
set -l overrides (getServiceOverrides $argv)
set -l overridesSource "$dir/$(basename "$overrides")"
set -l turnKey "$(getServiceKey "$turn")"
set -l portKey "$turnKey.ports[1]"
set -l officeEnv "$(getServiceKey "$office").environment"
set -l dbPW ($genPW)
set -l turnPW ($genPW)
set -l turnPort
set -l redisPW ($genPW)
set -l nextcloudPW ($genPW 64)
initializeServiceInstallation $argv
sudo cp -r "$dir"/{cache.Dockerfile,cloud.Dockerfile,docker-compose.{base,core}.yml,.dockerignore,fpm,nginx,php.ini} "$root"
begin
printf "%s\n" \
"MYSQL_DATABASE=Nextcloud" \
"MYSQL_USER=nextcloud" \
"MYSQL_PASSWORD=$dbPW"
end | sudo tee "$root/db.env" >/dev/null
echo "REDIS_HOST_PASSWORD=$redisPW" | sudo tee "$root/cache.env" >/dev/null
begin
printf "%s\n" \
"NEXTCLOUD_ADMIN_USER=admin" \
"NEXTCLOUD_ADMIN_PASSWORD=$nextcloudPW" \
"NEXTCLOUD_TRUSTED_DOMAINS=$domain" \
"OVERWRITEPROTOCOL=https" \
"OVERWRITEHOST=$domain" \
"OVERWRITECLIURL=https://$domain"
end | sudo tee "$root/nextcloud.env" >/dev/null
PROTO="https" DOMAIN="$domain" begin
set -l key "$turnKey.command"
PW="--static-auth-secret=$turnPW" \
DOMAIN="--realm=$domain" \
yq "$key |= . + [env(PW), env(DOMAIN)]" "$overridesSource" |
DOMAIN=(getServiceDomain $officeDomain) yq "$officeEnv.server_name = env(DOMAIN)" |
URL="https://$(string escape --style regex "$DOMAIN"):443" yq "$officeEnv.aliasgroup1 = env(URL)" |
sudo tee "$overrides" >/dev/null
end
installDockerService $argv
set turnPort (yq (getServicePortKey "$turn") "$overrides" | extractPort)
set turnPort (yq "$portKey" "$overrides" | mutatePort "$turnPort")
cp "$overrides" "$file"
PORT="$turnPort" yq "$portKey = env(PORT)" "$file" | sudo tee "$overrides" >/dev/null
rm "$file"
end
function configureSW -V dir
configureDockerService $argv
end
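# $services is a flat list of (service, subdomain, domain) triplets; the helpers below pick the entries belonging to each server index.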
function getServiceServers -V services
for i in (seq 1 3 (count $services))
printf "%s\0" $services[(math $i + 1)] $services[(math $i + 2)]
end
end
function getServiceLocations -V services -V office -a index
set -l i (math (math (math $index - 1) / 2 "*" 3) + 1)
set -l name $services[$i]
if [ "$name" != "$office" ]
printf "%s\0" "$name" / ""
else
printf "%s\0" \
"$name" "^~ /browser" "static files" \
"$name" "^~ /hosting/discovery" "WOPI discovery URL" \
"$name" "^~ /hosting/capabilities" Capabilities \
"$name" "~ ^/cool/(.*)/ws\$" "main websocket" \
"$name" "~ ^/(c|l)ool" "download, presentation and image upload" \
"$name" "^~ /cool/adminws" "Admin Console websocket"
end
end
function getServiceLocationConfig -a domain s location -V service -V office
if [ "$s" = "$office" ]
set -l argv $argv[4..]
getServiceDefaultProxy $domain $s "$location" --scheme https $argv
else
getServiceDefaultProxy $argv
if [ "$s" = "$service" ]
printf "%s\n" \
"server_tokens off;" \
"" \
"client_max_body_size 512M;" \
"client_body_timeout 300s;" \
"fastcgi_buffers 64 4K;" \
"" \
"# enable gzip but do not remove ETag headers" \
"gzip on;" \
"gzip_vary on;" \
"gzip_comp_level 4;" \
"gzip_min_length 256;" \
"gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;" \
"gzip_types application/atom+xml text/javascript application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/wasm application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;" \
"" \
"# Pagespeed is not supported by Nextcloud, so if your server is built" \
"# with the `ngx_pagespeed` module, uncomment this line to disable it." \
"#pagespeed off;" \
"" \
"# The settings allows you to optimize the HTTP2 bandwidth." \
"# See https://blog.cloudflare.com/delivering-http-2-upload-speed-improvements/" \
"# for tuning hints" \
"client_body_buffer_size 512k;"
end
end
end
function getExtraLocationSettings -a domain s location -V service
if string match --regex '^(~ \^|\^~ )/cool/(.*)ws\$?$' "$location" >/dev/null
printf "%s\n" \
'# Websocket' \
"proxy_http_version 1.1;" \
'proxy_set_header Upgrade $http_upgrade;' \
'proxy_set_header Connection "upgrade";'
end
end
function getExtraBackupPatterns
echo "^(cache|db|nextcloud)\.env\$"
end
runInstaller --force $argv
end

View file

@ -0,0 +1,199 @@
worker_processes 8;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
server_tokens off;
keepalive_timeout 65;
upstream php-handler {
server core:9000;
#server unix:/run/php/php8.2-fpm.sock;
}
# Set the `immutable` cache control options only for assets with a cache busting `v` argument
map $arg_v $asset_immutable {
"" "";
default "immutable";
}
server {
listen 80;
# Path to the root of the domain
root /var/www/html;
# Prevent nginx HTTP Server Detection
server_tokens off;
# HSTS settings
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload" always;
# set max upload size and increase upload timeout:
client_max_body_size 512M;
client_body_timeout 300s;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml text/javascript application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/wasm application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
# Pagespeed is not supported by Nextcloud, so if your server is built
# with the `ngx_pagespeed` module, uncomment this line to disable it.
#pagespeed off;
# The settings allows you to optimize the HTTP2 bandwidth.
# See https://blog.cloudflare.com/delivering-http-2-upload-speed-improvements/
# for tuning hints
client_body_buffer_size 512k;
# HTTP response headers borrowed from Nextcloud `.htaccess`
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "noindex, nofollow" always;
add_header X-XSS-Protection "1; mode=block" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
# Set .mjs and .wasm MIME types
# Either include it in the default mime.types list
# and include that list explicitly or add the file extension
# only for Nextcloud like below:
include mime.types;
types {
text/javascript mjs;
application/wasm wasm;
}
# Specify how to handle directories -- specifying `/index.php$request_uri`
# here as the fallback means that Nginx always exhibits the desired behaviour
# when a client requests a path that corresponds to a directory that exists
# on the server. In particular, if that directory contains an index.php file,
# that file is correctly served; if it doesn't, then the request is passed to
# the front-end controller. This consistent behaviour means that we don't need
# to specify custom rules for certain paths (e.g. images and other assets,
# `/updater`, `/ocs-provider`), and thus
# `try_files $uri $uri/ /index.php$request_uri`
# always provides the desired behaviour.
index index.php index.html /index.php$request_uri;
# Rule borrowed from `.htaccess` to handle Microsoft DAV clients
location = / {
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Make a regex exception for `/.well-known` so that clients can still
# access it despite the existence of the regex rule
# `location ~ /(\.|autotest|...)` which would otherwise handle requests
# for `/.well-known`.
location ^~ /.well-known {
# The rules in this block are an adaptation of the rules
# in the Nextcloud `.htaccess` that concern `/.well-known`.
location = /.well-known/carddav { return 301 /remote.php/dav/; }
location = /.well-known/caldav { return 301 /remote.php/dav/; }
location /.well-known/acme-challenge { try_files $uri $uri/ =404; }
location /.well-known/pki-validation { try_files $uri $uri/ =404; }
# Let Nextcloud's API for `/.well-known` URIs handle all other
# requests by passing them to the front-end controller.
return 301 /index.php$request_uri;
}
# Rules borrowed from `.htaccess` to hide certain paths from clients
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
# Ensure this block, which passes PHP files to the PHP process, is above the blocks
# which handle static assets (as seen below). If this block is not declared first,
# then Nginx will encounter an infinite rewriting loop when it prepends `/index.php`
# to the URI, resulting in a HTTP 500 error response.
location ~ \.php(?:$|/) {
# Required for legacy support
rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|ocs-provider\/.+|.+\/richdocumentscode(_arm64)?\/proxy) /index.php$request_uri;
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
fastcgi_param front_controller_active true; # Enable pretty urls
fastcgi_pass php-handler;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
fastcgi_max_temp_file_size 0;
}
# Serve static files
location ~ \.(?:css|js|mjs|svg|gif|ico|jpg|png|webp|wasm|tflite|map|ogg|flac)$ {
try_files $uri /index.php$request_uri;
# HTTP response headers borrowed from Nextcloud `.htaccess`
add_header Cache-Control "public, max-age=15778463$asset_immutable";
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "noindex, nofollow" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log off; # Optional: Don't log access to assets
}
location ~ \.(otf|woff2?)$ {
try_files $uri /index.php$request_uri;
expires 7d; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
# Rule borrowed from `.htaccess`
location /remote {
return 301 /remote.php$request_uri;
}
location / {
try_files $uri $uri/ /index.php$request_uri;
}
}
}

View file

@ -0,0 +1,5 @@
memory_limit = 1G
upload_max_filesize = 16G
post_max_size = 1G
max_input_time = 3600
max_execution_time = 3600

View file

@ -0,0 +1,11 @@
services:
ryot:
image: ghcr.io/ignisda/ryot:latest
restart: unless-stopped
volumes:
- ./data:/data
db:
image: postgres
restart: unless-stopped
volumes:
- ./data/db:/var/lib/postgresql/data

View file

@ -0,0 +1,11 @@
services:
ryot:
environment:
DATABASE_URL: postgres://ryot:pw@db/Ryot
ports:
- 127.0.0.1:1337:8000
db:
environment:
POSTGRES_DB: Ryot
POSTGRES_USER: ryot
POSTGRES_PASSWORD: pw

View file

@ -0,0 +1,44 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l domain tracker ""
set -l service ryot
set -l source "$dir/docker-compose.overrides.yml"
source "$dir/../service.fish"
function installSW -V dir -V domain -V service -V source
set -l domain (getServiceDomain $domain)
set -l root (getServiceRoot $argv)
set -l file "$dir/docker-compose.base.yml"
set -l pw (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
set -l envKey ".services.$service.environment"
set -l dbKey "$envKey.DATABASE_URL"
set -l dbUrl (yq "$dbKey" "$source" | sed "s/^\(.*:\/\/.*:\).*\(@.*\/.*\)\$/\1$pw\2/")
initializeServiceInstallation $argv
sudo cp "$file" "$root"
URL=$dbUrl yq "$dbKey = env(URL)" "$source" |
PW=$pw yq ".services.db.environment.POSTGRES_PASSWORD = env(PW)" |
sudo tee (getServiceOverrides $argv) >/dev/null
installDockerService $argv
end
function configureSW -V dir
configureDockerService $argv
end
function getServiceServers -V domain
printf "%s\0" $domain
end
function getServiceLocations -V service
printf "%s\0" $service /
end
function getBackupArgs
printf "%s\n" --hidden --no-ignore . --exclude "docker-compose.yml" (getServiceRoot $argv)
end
runInstaller --force $argv
end

View file

@ -2,11 +2,30 @@
begin
set -l dir (status dirname)
set -l root /usr/local/lib
set -l secretsFile "docker-compose.secrets.yml"
set -l overrides "docker-compose.overrides.yml"
set -l nginxRoot "/etc/nginx/conf.d"
set -l portPattern "^\([.[:digit:]]\+:\)\([[:digit:]]\+\)\(:[[:digit:]]\+\)"
source "$dir/../../../../lib/software.fish"
function getRandomPort
random 49152 65535
end
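# Matches compose port mappings such as "127.0.0.1:1337:80" or "1337:3478/udp"; capture group 2 is the published host port.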
function getPortPattern
echo "^\([.[:digit:]]\+:\)\?\([[:digit:]]\+\)\(:[[:digit:]]\+\(\/tcp\|udp\)\?\)"
end
function __substitutePort -a substitution
sed "s/$(getPortPattern)/$substitution/"
end
function extractPort
__substitutePort "\2"
end
function mutatePort -a port
__substitutePort "\1$port\3"
end
function getServiceName
argparse -i "name=" -- $argv
echo "$_flag_name"
@ -29,11 +48,11 @@ begin
echo "$nginxRoot/$(getServiceName $argv).conf"
end
function getServiceSecretsConfig -V secretsFile
echo "$(getServiceRoot $argv)/$secretsFile"
function getServiceOverrides -V overrides
echo "$(getServiceRoot $argv)/$overrides"
end
function __getServicePortKey -V secretsFile -a name
function getServicePortKey -V overrides -a name
echo "$(getServiceKey "$name").ports[0]"
end
@ -49,66 +68,115 @@ begin
echo "$domain"
end
function initializeServiceInstallation -V nginxRoot
mkdir -p (getServiceRoot $argv)
mkdir -p "$nginxRoot"
mkdir -p (dirname (getServiceSecretsConfig $argv))
function getExtraServerConfig -a domain
end
function installDockerService -V dir -V nginxRoot -V portPattern
set -l config (getServiceSecretsConfig $argv)
set -l servers (getServiceServers $argv | string split0)
function getServiceLocationConfig -a domain service location
getServiceDefaultProxy $domain $service $location $argv
end
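# Renders an nginx location block proxying to the service's published port; --comment, --path, --scheme and --url tweak the generated block.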
function getServiceDefaultProxy -a domain service location
argparse -i "comment=" "path=" "scheme=" "url=" -- $argv
set -l scheme
set -l url
set -l config (getServiceOverrides $argv)
set -l portKey (getServicePortKey "$service")
set -l port (yq "$portKey" "$config" | extractPort)
if [ -n "$_flag_scheme" ]
set scheme "$_flag_scheme"
else
set scheme http
end
if [ -n "$_flag_url" ]
set url "$_flag_url"
else
set url "$scheme://127.0.0.1:$port"
if [ -n "$_flag_path" ]
set url "$url$_flag_path"
end
end
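# Emit a complete nginx location block that proxies to the resolved upstream URL
# and forwards the usual Host / X-Real-IP / X-Forwarded-* headers.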
printf "%s\n" \
(if [ -n "$_flag_comment" ]
echo "# $_flag_comment"
end) \
"location $location {" \
"proxy_pass $url;" \
'proxy_set_header Host $host;' \
'proxy_set_header X-Real-IP $remote_addr;' \
'proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;' \
'proxy_set_header X-Forwarded-Proto $scheme;' \
'proxy_set_header X-Forwarded-Protocol $scheme;' \
'proxy_set_header X-Forwarded-Host $http_host;' \
(getExtraLocationSettings $argv) \
"}"
end
function getExtraLocationSettings -a domain service location
end
function initializeServiceInstallation -V dir -V nginxRoot
set -l root (getServiceRoot $argv)
sudo mkdir -p (getServiceRoot $argv)
sudo mkdir -p "$nginxRoot"
sudo mkdir -p (dirname (getServiceOverrides $argv))
sudo touch "$root/docker-compose.overrides.yml"
sudo cp "$dir/docker-compose.yml" "$root"
end
function installDockerService -V dir -V nginxRoot
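# Rewrite the published host port of every configured service in the overrides file
# to a random high port (49152-65535); mutatePort keeps the bind address and the
# container port intact, e.g. "127.0.0.1:1337:8000" may become "127.0.0.1:54321:8000".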
set -l config (getServiceOverrides $argv)
set -l servers (getServiceServers $argv | string split0 || true)
for i in (seq 1 2 (count $servers))
set -l locations (getServiceLocations $i $argv)
set -l locations (getServiceLocations $i $argv | string split0 || true)
for j in (seq 1 4 (count $locations))
for j in (seq 1 3 (count $locations))
set -l file (mktemp)
set -l port (random 49152 65535)
set -l port (getRandomPort)
set -l service $locations[$j]
set -l portKey (__getServicePortKey "$service")
set -l portKey (getServicePortKey "$service")
set -l exposedPort
sudo mkdir -p (getServiceRoot $argv)
sudo mkdir -p "$nginxRoot"
cp "$config" "$file"
set exposedPort (yq --raw-output "$portKey" "$file" | sed "s/$portPattern/\1$port\3/")
PORT=$exposedPort yq -y "$portKey = env.PORT" "$file" | sudo tee "$config" >/dev/null
set exposedPort (yq "$portKey" "$file" | mutatePort $port)
PORT=$exposedPort yq "$portKey = env(PORT)" "$file" | sudo tee "$config" >/dev/null
end
end
end
function configureDockerService -V portPattern
set -l config (getServiceSecretsConfig $argv)
set -l servers (getServiceServers $argv | string split0)
function configureDockerService
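# Render one nginx server block per (subdomain, domain) pair from getServiceServers
# and one location block per entry returned by getServiceLocations.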
set -l servers (getServiceServers $argv | string split0 || true)
set -l nginxConfig (__getServiceNginxConfig $argv)
for i in (seq 1 2 (count $servers))
set -l domain $servers[(math $i + 1)]
set -l subdomain $servers[(math $i)]
set -l locations (getServiceLocations $i $argv | string split0)
set -l locations (getServiceLocations $i $argv | string split0 || true)
set domain (getServiceDomain "$subdomain" "$domain")
begin
printf "%s\n" \
"server {" \
"listen 80;" \
"server_name $domain;"
"server_name $domain;" \
(getExtraServerConfig $domain $argv)
for j in (seq 1 2 (count $locations))
for j in (seq 1 3 (count $locations))
set -l service $locations[$j]
set -l location $locations[(math $j + 1)]
set -l portKey (__getServicePortKey "$service")
set -l port (yq --raw-output "$portKey" "$config" | sed "s/$portPattern/\2/")
set -l comment $locations[(math $j + 2)]
printf "%s\n" \
"location $location {" \
"proxy_pass http://127.0.0.1:$port;" \
'proxy_set_header Host $host;' \
'proxy_set_header X-Real-IP $remote_addr;' \
'proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;' \
'proxy_set_header X-Forwarded-Proto $scheme;' \
"}"
if [ -n "$comment" ]
set -a argv --comment "$comment"
end
getServiceLocationConfig $domain $service $location $argv
end
echo "}"
@ -118,7 +186,17 @@ begin
sudo systemctl restart nginx
end
function getExtraBackupPatterns
echo ""
end
function getBackupArgs
printf "%s\n" --hidden --no-ignore . --exclude "docker-compose.yml" (getServiceRoot $argv)
set -l extraPatterns (getExtraBackupPatterns)
if [ -n "$extraPatterns" ]
set extraPatterns "|$extraPatterns"
end
printf "%s\n" --base-directory (getServiceRoot $argv) --hidden --no-ignore "^(docker-compose\.overrides\.yml|data)\$$extraPatterns"
end
end

@ -0,0 +1,21 @@
services:
  teamspeak:
    image: teamspeak
    restart: unless-stopped
    depends_on:
      - db
    environment:
      TS3SERVER_DB_PLUGIN: ts3db_mariadb
      TS3SERVER_DB_SQLCREATEPATH: create_mariadb
      TS3SERVER_DB_HOST: db
      TS3SERVER_DB_WAITUNTILREADY: 30
      TS3SERVER_LICENSE: accept
    volumes:
      - ./data/teamspeak:/var/ts3server
  db:
    image: mariadb
    restart: unless-stopped
    environment:
      MARIADB_RANDOM_ROOT_PASSWORD: "yes"
    volumes:
      - ./data/db:/var/lib/mysql

@ -0,0 +1,6 @@
services:
  teamspeak:
    ports:
      - 9987:9987/udp
      - 10011:10011
      - 30033:30033

@ -0,0 +1,44 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l source "$dir/docker-compose.overrides.yml"
source "$dir/../service.fish"
function installSW -V dir -V domain -V source
set -l service teamspeak
set -l pw (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
set -l db TeamSpeak
set -l tsEnv "$(getServiceKey "$service").environment"
set -l dbEnv "$(getServiceKey db).environment"
initializeServiceInstallation $argv
sudo cp "$dir/docker-compose.base.yml" (getServiceRoot $argv)
USER="$service" begin
yq "$tsEnv.TS3SERVER_DB_USER = env(USER)" "$source" |
yq "$dbEnv.MARIADB_USER = env(USER)"
end | \
PW="$pw" begin
yq "$tsEnv.TS3SERVER_DB_PASSWORD = env(PW)" |
yq "$dbEnv.MARIADB_PASSWORD = env(PW)"
end | \
DB="$db" begin
yq "$tsEnv.TS3SERVER_DB_NAME = env(DB)" |
yq "$dbEnv.MARIADB_DATABASE = env(DB)"
end |
sudo tee (getServiceOverrides $argv) >/dev/null
installDockerService $argv
end
function configureSW -V dir
configureDockerService $argv
end
function getServiceServers -V server
end
function getServiceLocations
end
runInstaller --force $argv
end

@ -0,0 +1,10 @@
services:
  terraria:
    image: ryshe/terraria:latest
    restart: unless-stopped
    tty: true
    stdin_open: true
    ports:
      - 7777:7777
    volumes:
      - ./data:/root/.local/share/Terraria/Worlds

@ -0,0 +1,8 @@
services:
  terraria:
    environment: {}
    command:
      -autocreate 1
      -difficulty 1
      -seed "AwesomeSeed"
      -lang "en-US"

@ -0,0 +1,24 @@
#!/bin/env fish
begin
set -l dir (status dirname)
source "$dir/../service.fish"
function installSW -V dir
set -l root (getServiceRoot $argv)
initializeServiceInstallation $argv
sudo cp -rf "$dir"/docker-compose{.base,.overrides}.yml "$root"
installDockerService $argv
end
function configureSW -V dir
configureDockerService $argv
end
function getServiceServers
end
function getServiceLocations
end
runInstaller --force $argv
end

@ -0,0 +1 @@
data/

@ -0,0 +1,71 @@
services:
  game:
    build:
      context: .
      dockerfile: trackmania.Dockerfile
    restart: unless-stopped
    extends:
      file: docker-compose.core.yml
      service: tm
    stdin_open: true
    environment:
      TM_XMLRPC_ALLOWED_REMOTE: xaseco
    volumes:
      - ./data/tm/data:/app/GameData
    healthcheck:
      test: [CMD, bash, -c, echo '' >/dev/tcp/127.0.0.1/5000]
      start_period: 1m
      start_interval: 10s
      interval: 5s
      timeout: 3s
      retries: 5
  downloader:
    extends:
      file: docker-compose.core.yml
      service: xaseco-installer
    volumes:
      - ./initdb.d:/cache/localdb
    healthcheck:
      test: [CMD, bash, -c, ls /cache/localdb/*]
      start_period: 1m
      start_interval: 10s
      interval: 5s
      timeout: 3s
      retries: 5
  xaseco:
    restart: unless-stopped
    extends:
      file: docker-compose.core.yml
      service: xaseco
    depends_on:
      game:
        condition: service_healthy
      db:
        condition: service_healthy
    environment:
      TM_SERVER_HOST: game
      MYSQL_HOST: db
    volumes:
      - ./data/xaseco:/data
  db:
    image: mysql:5
    restart: unless-stopped
    extends:
      file: docker-compose.core.yml
      service: db
    depends_on:
      downloader:
        condition: service_completed_successfully
    command: --sql_mode=""
    environment:
      MYSQL_RANDOM_ROOT_PASSWORD: "yes"
    volumes:
      - ./data/db:/var/lib/mysql
      - ./initdb.d:/docker-entrypoint-initdb.d
    healthcheck:
      test: [CMD, bash, -c, mysqladmin ping -hlocalhost -u "$$MYSQL_USER" -p"$$MYSQL_PASSWORD"]
      timeout: 20s
      retries: 10
volumes:
  xaseco: {}

@ -0,0 +1,19 @@
services:
  tm:
    environment: &tm-config
      TM_SUPERADMIN_PASSWORD: pw
      TM_SERVER_USER: null
      TM_SERVER_PASSWORD: null
    volumes: &tm-volumes
      - ./data/tm/tracks:/app/GameData/Tracks/Challenges
  db:
    environment: &db-config
      MYSQL_USER: xaseco
      MYSQL_PASSWORD: pw
      MYSQL_DATABASE: Aseco
  xaseco:
    environment:
      <<:
        - *tm-config
        - *db-config
    volumes: *tm-volumes

@ -0,0 +1,30 @@
services:
  tm:
    extends:
      file: docker-compose.core.overrides.yml
      service: tm
    volumes: &tm-volumes
      - ./app:/app
  xaseco-installer:
    build: &xaseco-build
      context: .
      dockerfile: xaseco.Dockerfile
    restart: on-failure
    command: "true"
    <<: &xaseco-base
      privileged: true
      volumes: &xaseco-volumes
        - xaseco:/cache
  xaseco:
    extends:
      file: docker-compose.core.overrides.yml
      service: xaseco
    <<:
      - *xaseco-base
      - volumes: *tm-volumes
      - volumes: *xaseco-volumes
    build: *xaseco-build
  db:
    extends:
      file: docker-compose.core.overrides.yml
      service: db

@ -0,0 +1,12 @@
services:
  game:
    command:
      - /game_settings=MatchSettings/Nations/NationsBlue.txt
    environment: {}
    ports:
      - 2350:2350
      - 2350:2350/udp
      - 2353:2353
      - 2353:2353/udp
  xaseco:
    environment: {}

@ -0,0 +1,37 @@
#!/bin/env fish
begin
set -l dir (status dirname)
set -l source "$dir/docker-compose.core.overrides.yml"
source "$dir/../service.fish"
function installSW -V dir -V domain -V source
set -l root (getServiceRoot $argv)
set -l tmPW (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
set -l sqlPW (nix-shell -p keepassxc --run "keepassxc-cli generate --length 32")
initializeServiceInstallation $argv
sudo cp -rf "$dir"/{.dockerignore,docker-compose{.base,.overrides,.core{,.overrides}}.yml,parser.patch,tmforever-entrypoint.sh,trackmania.Dockerfile,xaseco-entrypoint.sh,xaseco.Dockerfile} "$root"
PW="$tmPW" yq "$(getServiceKey "tm").environment.TM_SUPERADMIN_PASSWORD = env(PW)" "$source" |
PW="$sqlPW" yq "$(getServiceKey "db").environment.MYSQL_PASSWORD = env(PW)" |
sudo tee "$root/$(basename "$source")" >/dev/null
installDockerService $argv
end
function configureSW -V dir
configureDockerService $argv
end
function getServiceServers
end
function getServiceLocations
end
function getExtraBackupPatterns
echo "^docker-compose\.core\.overrides\.yml\$"
end
runInstaller --force $argv --name tm-forever
end

@ -0,0 +1,11 @@
--- a/includes/xmlparser.inc.php
+++ b/includes/xmlparser.inc.php
@@ -37,8 +37,6 @@
else
$this->data = $source;
- // escape '&' characters
- $this->data = str_replace('&', '<![CDATA[&]]>', $this->data);
// parse xml file
$parsed = xml_parse($this->parser, $this->data);

@ -0,0 +1,145 @@
#!/bin/bash
set -e
configSource="GameData/Config/dedicated_cfg"
configFile="GameData/Config/live_config.xml"
settingsPath="/dedicated"
authLevelsPath="$settingsPath/authorization_levels/level"
accountPath="$settingsPath/masterserver_account"
serverPath="$settingsPath/server_options"
systemPath="$settingsPath/system_config"
TM_SUPERADMIN_PASSWORD="${TM_SUPERADMIN_PASSWORD:-$(openssl rand -base64 33)}"
TM_ADMIN_PASSWORD="${TM_ADMIN_PASSWORD:-$(openssl rand -base64 33)}"
echo "Checking whether the server is present…"
levels=(
TM_SUPERADMIN_PASSWORD 1
TM_ADMIN_PASSWORD 2
TM_USER_PASSWORD 3
)
accountOption=(
TM_SERVER_USER login
TM_SERVER_PASSWORD password
TM_SERVER_VALIDATION_KEY validation_key
)
serverOptions=(
TM_TITLE name
TM_COMMENT comment
TM_HIDDEN hide_server
\
TM_MAX_PLAYERS max_players
TM_PLAYER_PASSWORD password
\
TM_MAX_SPECTATORS max_spectators
TM_SPECTATOR_PASSWORD password_spectator
\
TM_LADDER_MODE ladder_mode
TM_LADDER_LIMIT_MIN ladder_serverlimit_min
TM_LADDER_LIMIT_MAX ladder_serverlimit_max
\
TM_ENABLE_P2P_UPLOAD enable_p2p_upload
TM_ENABLE_P2P_DOWNLOAD enable_p2p_download
\
TM_CALLVOTE_TIMEOUT callvote_timeout
TM_CALLVOTE_RATIO callvote_ratio
\
TM_ALLOW_CHALLENGE_DOWNLOAD allow_challenge_download
TM_AUTOSAVE_REPLAYS autosave_replays
TM_AUTOSAVE_VALIDATION_REPLAYS autosave_validation_replays
\
TM_REFEREE_PASSWORD referee_password
TM_REFEREE_VALIDATION_MODE referee_validation_mode
\
TM_USE_CHANGING_VALIDATION_SEED use_changing_validation_seed
)
systemOptions=(
TM_BIND_IP bind_ip_address
TM_BIND_PORT server_port
TM_P2P_PORT server_p2p_port
TM_CLIENT_PORT client_port
TM_USE_NAT_UPNP use_nat_upnp
\
TM_XMLRPC_PORT xmlrpc_port
TM_XMLRPC_ALLOWED_REMOTE xmlrpc_allowremote
\
TM_PACKMASK packmask
\
TM_CONNECTION_UPLOADRATE connection_uploadrate
TM_CONNECTION_DOWNLOADRATE connection_downloadrate
\
TM_P2P_CACHE_SIZE p2p_cache_size
\
TM_BLACKLIST_URL blacklist_url
TM_GUESTLIST_FILENAME guestlist_filename
TM_BLACKLIST_FILENAME blacklist_filename
\
TM_ALLOW_SPECTATOR_RELAYS allow_spectator_relays
\
TM_USE_PROXY use_proxy
TM_PROXY_LOGIN proxy_login
TM_PROXY_PASSWORD proxy_password
)
if [[ ! -x "./TrackmaniaServer" ]] || [ ! -f "$configSource".* ]; then
echo "Server not found!"
echo "Downloading TrackMania Forever Dedicated Server…"
file="$(mktemp)"
dir="$(mktemp -d)"
wget http://files2.trackmaniaforever.com/TrackmaniaServer_2011-02-21.zip -O "$file" && unzip -o "$file" -d .
fi
if [ -f "$configSource.xml" ]; then
cp "$configSource.xml" "$configFile"
elif [ -f "$configSource.txt" ]; then
cp "$configSource.txt" "$configFile"
fi
for i in $(seq 0 2 $((${#levels[@]} - 1))); do
var="${levels[$i]}"
index="${levels[$(($i + 1))]}"
password="${!var}"
if [ -n "$password" ]; then
xmlstarlet edit --inplace --update "$authLevelsPath[$index]/password" --value "$password" "$configFile"
fi
done
for i in $(seq 0 2 $((${#accountOption[@]} - 1))); do
var="${accountOption[$i]}"
option="${accountOption[$(($i + 1))]}"
value="${!var}"
if [ -n "$value" ]; then
xmlstarlet edit --inplace --update "$accountPath/$option" --value "$value" "$configFile"
fi
done
for i in $(seq 0 2 $((${#serverOptions[@]} - 1))); do
var="${serverOptions[$i]}"
option="${serverOptions[$(($i + 1))]}"
value="${!var}"
if [ -n "$value" ]; then
xmlstarlet edit --inplace --update "$serverPath/$option" --value "$value" "$configFile"
fi
done
for i in $(seq 0 2 $((${#systemOptions[@]} - 1))); do
var="${systemOptions[$i]}"
option="${systemOptions[$(($i + 1))]}"
value="${!var}"
if [ -n "$value" ]; then
xmlstarlet edit --inplace --update "$systemPath/$option" --value "$value" "$configFile"
fi
done
if [ -z "$TM_LOG_FILES" ]; then
set -- "$@" /nologs
fi
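# Start the dedicated server in the foreground; unbuffer keeps the process from
# block-buffering its output when it is not attached to a terminal.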
unbuffer -p ./TrackmaniaServer /dedicated_cfg="$(basename "$configFile")" /nodaemon $@

@ -0,0 +1,75 @@
FROM debian:11.3
RUN apt-get update -y \
&& apt-get install -y \
expect \
unzip \
wget \
xmlstarlet \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /app
WORKDIR /app
COPY ./tmforever-entrypoint.sh /usr/local/bin/tmforever-foreground
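# Every TM_* variable declared below is read by tmforever-foreground and, when set
# to a non-empty value, written into the dedicated server configuration at startup.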
ENV \
TM_BIND_IP="" \
TM_BIND_PORT="" \
TM_P2P_PORT="" \
TM_CLIENT_PORT="" \
TM_USE_NAT_UPNP="" \
\
TM_XMLRPC_PORT="" \
TM_XMLRPC_ALLOWED_REMOTE="" \
\
TM_SERVER_USER="" \
TM_SERVER_PASSWORD="" \
TM_SERVER_VALIDATION_KEY="" \
\
TM_TITLE="" \
TM_COMMENT="" \
TM_HIDDEN="" \
\
TM_PACKMASK="" \
\
TM_MAX_PLAYERS="" \
TM_PLAYER_PASSWORD="" \
\
TM_MAX_SPECTATORS="" \
TM_SPECTATOR_PASSWORD="" \
\
TM_LADDER_MODE="" \
TM_LADDER_LIMIT_MIN="" \
TM_LADDER_LIMIT_MAX="" \
\
TM_ENABLE_P2P_UPLOAD="" \
TM_ENABLE_P2P_DOWNLOAD="" \
\
TM_CALLVOTE_TIMEOUT="" \
TM_CALLVOTE_RATIO="" \
\
TM_ALLOW_CHALLENGE_DOWNLOAD="" \
TM_AUTOSAVE_REPLAYS="" \
TM_AUTOSAVE_VALIDATION_REPLAYS="" \
\
TM_REFEREE_PASSWORD="" \
TM_REFEREE_VALIDATION_MODE="" \
\
TM_USE_CHANGING_VALIDATION_SEED="" \
\
TM_SUPERADMIN_PASSWORD="" \
TM_ADMIN_PASSWORD="" \
\
TM_CONNECTION_UPLOADRATE="" \
TM_CONNECTION_DOWNLOADRATE="" \
TM_P2P_CACHE_SIZE="" \
TM_BLACKLIST_URL="" \
TM_GUESTLIST_FILENAME="" \
TM_BLACKLIST_FILENAME="" \
TM_ALLOW_SPECTATOR_RELAYS="" \
TM_USE_PROXY="" \
TM_PROXY_LOGIN="" \
TM_PROXY_PASSWORD="" \
\
TM_LOG_FILES=""
ENTRYPOINT [ "tmforever-foreground" ]

@ -0,0 +1,177 @@
#!/bin/bash
export TM_USER="${TM_USER:-SuperAdmin}"
export TM_PASSWORD="${TM_PASSWORD:-$TM_SUPERADMIN_PASSWORD}"
export TM_SERVER_PORT="${TM_SERVER_PORT:-$TM_XMLRPC_PORT}"
export DEDI_USER="${DEDI_USER:-$TM_SERVER_USER}"
export DEDI_PASSWORD="${DEDI_PASSWORD:-$TM_SERVER_PASSWORD}"
overlay="$(mktemp -d)"
upperDir="$overlay/upper"
workDir="$overlay/work"
cache="/cache"
data="/data"
runDir="/opt/xaseco"
entrypoint="$cache/aseco.php"
config="$runDir/config.xml"
adminConfig="$runDir/adminops.xml"
dbConfig="$runDir/localdatabase.xml"
dediConfig="$runDir/dedimania.xml"
pluginConfig="$runDir/plugins.xml"
settingsPath="/settings"
serverPath="$settingsPath/tmserver"
pluginTag="plugin"
pluginPath="/aseco_plugins"
userPattern="^(.+?)(@([[:digit:]]+(.[[:digit:]]+){3}))?\$"
tmOptions=(
TM_SERVER_HOST ip
TM_SERVER_PORT port
TM_USER login
TM_PASSWORD password
TM_TIMEOUT timeout
)
groups=(
ADMINS admins
OPERATORS operators
)
sqlOptions=(
MYSQL_HOST mysql_server
MYSQL_USER mysql_login
MYSQL_PASSWORD mysql_password
MYSQL_DATABASE mysql_database
)
dediOptions=(
DEDI_URL url
DEDI_NAME name
DEDI_USER login
DEDI_PASSWORD password
DEDI_NATION nation
DEDI_LOG_NEWS log_news
DEDI_SHOW_MOTD show_welcome
)
if [ ! -f "$entrypoint" ]; then
file="$(mktemp -u).zip"
root="$(mktemp -d)"
mkdir -p "$(dirname "$file")"
curl -v --insecure https://www.gamers.org/tmn/xaseco_116.zip -o "$file"
unzip -o "$file" -d "$root"
cp -r "$root"/xaseco/* "$cache"
fi
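# Seed config files that are missing from the cache with the defaults shipped in
# newinstall/: PHP includes go to includes/, everything else to the cache root.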
script="$(
printf "%s\n" \
'file="$(basename "$1")";' \
'if [[ "$file" == *".php" ]];' \
' then dir="includes";' \
'else' \
' dir=".";' \
'fi;' \
'if [ ! -f "$0/$dir/$file" ]; then' \
' mkdir -p "$0/$dir";' \
' cp "$1" "$0/$dir";' \
'fi;'
)"
find "$cache/newinstall" -type f -exec bash -c "$script" "$cache" \{\} \;
mount -t tmpfs tmpfs "$overlay"
mkdir -p "$upperDir" "$workDir"
mount -t overlay overlay -o lowerdir="$cache:$data",upperdir="$upperDir",workdir="$workDir" "$runDir"
cd "$runDir"
dos2unix "/root/parser.patch" ./includes/xmlparser.inc.php
patch -p 1 ./includes/xmlparser.inc.php "/root/parser.patch"
if [ -n "$MASTERADMIN_USER" ]; then
loginTag="tmlogin"
ipTag="ipaddress"
adminPath="$settingsPath/aseco/masteradmins"
nodesPath="$adminPath/*"
namePath="$nodesPath[1]"
ipPath="$nodesPath[2]"
if [ "$(xmlstarlet select --template -v "count($nodesPath)" "$config")" -lt 1 ]; then
xmlstarlet edit --inplace --subnode "$adminPath" --type elem -n "$loginTag" "$config"
fi
if [ "$(xmlstarlet select --template -v "count($nodesPath)" "$config")" -lt 2 ] ||
[ "$(xmlstarlet select --template -v "name($ipPath)" "$config")" != "$ipTag" ]; then
xmlstarlet edit --inplace --append "$namePath" --type elem -n "$ipTag" "$config"
fi
xmlstarlet edit --inplace --update "$namePath" --value "$MASTERADMIN_USER" "$config"
if [ -z "$MASTERADMIN_IP" ]; then
xmlstarlet edit --inplace --delete "$ipPath" "$config"
else
xmlstarlet edit --inplace --update "$ipPath" --value "$MASTERADMIN_IP" "$config"
fi
fi
for i in $(seq 0 2 $((${#tmOptions[@]} - 1))); do
var="${tmOptions[$i]}"
option="${tmOptions[$(($i + 1))]}"
value="${!var}"
if [ -n "$value" ]; then
xmlstarlet edit --inplace --update "$serverPath/$option" --value "$value" "$config"
fi
done
for i in $(seq 0 2 $((${#groups[@]} - 1))); do
var="${groups[$i]}"
group="${groups[$(($i + 1))]}"
value="${!var}"
path="/lists/$group"
echo "$value" | while read user; do
name="$(echo "$user" | LC_ALL="C" perl -pe "s/$userPattern/\1/")"
ip="$(echo "$user" | LC_ALL="C" perl -pe "s/$userPattern/\3/")"
xmlstarlet edit --inplace --subnode "$path" --type elem -n "tmlogin" --value "$name" "$adminConfig"
if [ -n "$ip" ]; then
xmlstarlet edit --inplace --subnode "$path" --type elem -n "ipaddress" --value "$ip" "$adminConfig"
fi
done
done
for i in $(seq 0 2 $((${#sqlOptions[@]} - 1))); do
var="${sqlOptions[$i]}"
option="${sqlOptions[$(($i + 1))]}"
value="${!var}"
xmlstarlet edit --inplace --update "$settingsPath/$option" --value "$value" "$dbConfig"
done
for i in $(seq 0 2 $((${#dediOptions[@]} - 1))); do
var="${dediOptions[$i]}"
option="${dediOptions[$(($i + 1))]}"
value="${!var}"
if [ -n "$value" ]; then
xmlstarlet edit --inplace --update "/dedimania/masterserver_account/$option" --value "$value" "$dediConfig"
fi
done
if [ -n "$X1_EXTRA_PLUGINS" ]; then
for plugin in $X1_EXTRA_PLUGINS; do
xmlstarlet edit --inplace --subnode "$pluginPath" --type elem -n "$pluginTag" --value "$plugin" "$pluginConfig"
done
fi
if [ -n "$X1_DISABLED_PLUGINS" ]; then
for i in $(seq 1 "$(xmlstarlet select --template -v "count($pluginPath/$pluginTag)" "$pluginConfig")"); do
for plugin in $X1_DISABLED_PLUGINS; do
path="$pluginPath/$pluginTag[$i]"
if [ "$(xmlstarlet select --template -v "$path/text()" "$pluginConfig")" == "$plugin" ]; then
xmlstarlet edit --inplace --delete "$path" "$pluginConfig"
fi
done
done
fi
$@

@ -0,0 +1,62 @@
FROM alpine:3.14 AS base
RUN apk add xmlstarlet
FROM php:5.5-alpine
USER root
RUN apk update \
&& apk add \
bash \
unzip \
ca-certificates \
# xmlstarlet dependencies
musl \
libxslt \
libxml2 \
perl
RUN update-ca-certificates
RUN docker-php-ext-install mysql
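# xmlstarlet is copied in from the newer Alpine stage above rather than installed
# inside the old PHP 5.5 image itself.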
COPY --from=base /usr/bin/xmlstarlet /usr/bin
COPY ./xaseco-entrypoint.sh /usr/local/bin/xaseco-foreground
COPY ./parser.patch /root/parser.patch
# Server dir
RUN mkdir /cache
# Override dir
RUN mkdir /data
# Merged dir
RUN mkdir -p /opt/xaseco
WORKDIR /opt/xaseco
ENV \
MASTERADMIN_USER="" \
MASTERADMIN_IP="" \
\
ADMINS="" \
OPERATORS="" \
\
X1_EXTRA_PLUGINS="" \
X1_DISABLED_PLUGINS="" \
\
TM_SERVER_HOST="" \
TM_SERVER_PORT="" \
TM_XMLRPC_PORT="" \
TM_TIMEOUT="" \
\
TM_USER="" \
TM_PASSWORD="" \
TM_SUPERADMIN_PASSWORD="" \
\
MYSQL_HOST="" \
MYSQL_USER="" \
MYSQL_PASSWORD="" \
MYSQL_DATABASE="" \
\
DEDI_URL="" \
DEDI_NAME="" \
DEDI_USER="" \
DEDI_PASSWORD="" \
DEDI_NATION="" \
DEDI_LOG_NEWS="" \
DEDI_SHOW_MOTD=""
ENTRYPOINT [ "xaseco-foreground" ]
CMD [ "php", "./aseco.php" ]

Some files were not shown because too many files have changed in this diff.