feat: add LM Studio headless LMS server module

- Custom NixOS module wrapping the lms CLI (daemon + server)
- Supports auto-loading models on startup
- Optional nginx reverse proxy via domain option
- Security hardening (NoNewPrivileges, ProtectSystem, PrivateTmp)
- Follows existing myModules.* convention
- Enabled on this host on port 1234 (no models loaded yet)
Franz Kafka 2026-03-21 20:25:13 +00:00
parent 10d8924106
commit 9f82a7b00e
3 changed files with 185 additions and 0 deletions
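
Once the host is rebuilt, the API can be smoke-tested from the machine itself. This is a minimal sketch, assuming the standard OpenAI-compatible endpoint paths that LM Studio serves and using an illustrative model name; adjust the port if it differs from 1234:

# List the models the server knows about (responds even with nothing loaded)
curl http://localhost:1234/v1/models

# Chat completion against an already-loaded model (model name is only an example)
curl http://localhost:1234/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "openai/gpt-oss-20b", "messages": [{"role": "user", "content": "Hello"}]}'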


@@ -215,6 +215,14 @@
    ollamaUrl = "http://100.64.0.1:11434";
  };

  # === LM Studio Headless LMS Server ===
  myModules.lmstudio-server = {
    enable = true;
    port = 1234;
    # domain = "llm.ashisgreat.xyz"; # Uncomment to add nginx reverse proxy
    # models = [ "openai/gpt-oss-20b" ]; # Uncomment to auto-load models
  };

  # === Backups (Restic + B2) ===
  myModules.backup = {
    enable = true;
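
Before uncommenting the models line above, the model has to be downloaded into the service user's data directory first (the module only auto-loads models that `lms get` has already fetched). A rough sketch of that one-off step; the sudo/HOME invocation and the `lms ls` listing command are assumptions about the CLI, and `lms` is assumed to be on PATH via the lmstudio package:

sudo -u lmstudio HOME=/var/lib/lmstudio lms get openai/gpt-oss-20b   # fetch into /var/lib/lmstudio
sudo -u lmstudio HOME=/var/lib/lmstudio lms ls                       # assumed: lists downloaded models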


@@ -13,5 +13,6 @@
    ./forgejo.nix
    ./headscale.nix
    ./open-webui.nix
    ./lmstudio-server.nix
  ];
}

modules/lmstudio-server.nix (new file, 176 lines)

@@ -0,0 +1,176 @@
# LM Studio Headless LMS Server Module
# Provides: OpenAI-compatible LLM API server via LM Studio's `lms` CLI
#
# Usage:
#   myModules.lmstudio-server = {
#     enable = true;
#     port = 1234;
#     domain = "llm.example.com";        # optional, sets up nginx reverse proxy
#     models = [ "openai/gpt-oss-20b" ]; # auto-load on startup
#   };
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.myModules.lmstudio-server;
in
{
  options.myModules.lmstudio-server = {
    enable = lib.mkEnableOption "LM Studio headless LMS server";

    package = lib.mkOption {
      type = lib.types.package;
      default = pkgs.lmstudio;
      defaultText = lib.literalExpression "pkgs.lmstudio";
      description = "The lmstudio package to use (provides the `lms` CLI).";
    };

    user = lib.mkOption {
      type = lib.types.str;
      default = "lmstudio";
      description = "System user under which the LMS server runs.";
    };

    group = lib.mkOption {
      type = lib.types.str;
      default = "lmstudio";
      description = "System group under which the LMS server runs.";
    };

    home = lib.mkOption {
      type = lib.types.path;
      default = "/var/lib/lmstudio";
      description = "Home directory for the lmstudio user. Models and runtime data are stored here.";
    };

    port = lib.mkOption {
      type = lib.types.port;
      default = 1234;
      description = "Port for the OpenAI-compatible HTTP API server.";
    };

    cors = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Enable CORS support for the HTTP API server.";
    };

    models = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [ ];
      example = [ "openai/gpt-oss-20b" ];
      description = "Models to auto-load on startup. Models must already be downloaded (via `lms get`).";
    };

    domain = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      example = "llm.example.com";
      description = "Public domain name. If set, configures nginx reverse proxy with HTTPS.";
    };

    openFirewall = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Open the configured port in the firewall (only needed without nginx proxy).";
    };
  };

  config = lib.mkIf cfg.enable {
    nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
      "lmstudio"
    ];

    users.users = lib.optionalAttrs (cfg.user == "lmstudio") {
      lmstudio = {
        inherit (cfg) group home;
        isSystemUser = true;
        description = "LM Studio headless LMS server";
      };
    };

    users.groups = lib.optionalAttrs (cfg.group == "lmstudio") {
      lmstudio = { };
    };

    systemd.services.lmstudio-server = {
      description = "LM Studio Headless LMS Server";
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];

      environment.HOME = cfg.home;

      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;

        ExecStartPre = [
          "${pkgs.coreutils}/bin/mkdir -p ${cfg.home}/.lmstudio"
        ];

        ExecStart = pkgs.writeShellScript "lmstudio-server-start" ''
          set -euo pipefail
          LMS="${cfg.package}/bin/lms"

          # Start the daemon
          echo "Starting LM Studio daemon..."
          $LMS daemon up

          # Wait for daemon to be ready
          sleep 2

          # Load requested models
          ${lib.concatMapStringsSep "\n" (model: ''
            echo "Loading model: ${model}..."
            $LMS load "${model}" --yes || echo "Warning: failed to load ${model}, continuing..."
          '') cfg.models}

          # Start the server (foreground)
          SERVER_ARGS="--port ${toString cfg.port}"
          ${lib.optionalString cfg.cors "SERVER_ARGS=\"$SERVER_ARGS --cors\""}
          echo "Starting LM Studio server on port ${toString cfg.port}..."
          exec $LMS server start $SERVER_ARGS
        '';

        ExecStop = "${cfg.package}/bin/lms daemon down";

        Restart = "on-failure";
        RestartSec = "10s";

        NoNewPrivileges = true;
        ProtectSystem = "strict";
        ProtectHome = false;
        ReadWritePaths = [ cfg.home ];
        PrivateTmp = true;
        ProtectKernelTunables = true;
        ProtectControlGroups = true;
        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
      };
    };

    networking.firewall = lib.mkIf cfg.openFirewall {
      allowedTCPPorts = [ cfg.port ];
    };

    # Optional nginx reverse proxy
    myModules.nginx.domains = lib.mkIf (cfg.domain != null) {
      ${cfg.domain} = {
        port = cfg.port;
        extraConfig = ''
          client_max_body_size 100M;
          proxy_read_timeout 600s;
          proxy_send_timeout 600s;
        '';
      };
    };
  };
}
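
After deploying, a quick operational check of the unit and the daemon it manages; treating `lms ps` as a way to list currently loaded models is an assumption about the CLI rather than something the module depends on:

systemctl status lmstudio-server
journalctl -u lmstudio-server -f                 # follow daemon/model-load output from the start script
sudo -u lmstudio HOME=/var/lib/lmstudio lms ps   # assumed: shows models currently loaded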