diff --git a/configuration.nix b/configuration.nix
index 254f638..6d21d70 100644
--- a/configuration.nix
+++ b/configuration.nix
@@ -215,6 +215,14 @@
     ollamaUrl = "http://100.64.0.1:11434";
   };
 
+  # === LM Studio Headless LMS Server ===
+  myModules.lmstudio-server = {
+    enable = true;
+    port = 1234;
+    # domain = "llm.ashisgreat.xyz"; # Uncomment to add nginx reverse proxy
+    # models = [ "openai/gpt-oss-20b" ]; # Uncomment to auto-load models
+  };
+
   # === Backups (Restic + B2) ===
   myModules.backup = {
     enable = true;
diff --git a/modules/default.nix b/modules/default.nix
index a743700..2d274c9 100644
--- a/modules/default.nix
+++ b/modules/default.nix
@@ -13,5 +13,6 @@
     ./forgejo.nix
     ./headscale.nix
     ./open-webui.nix
+    ./lmstudio-server.nix
   ];
 }
diff --git a/modules/lmstudio-server.nix b/modules/lmstudio-server.nix
new file mode 100644
index 0000000..171c4b4
--- /dev/null
+++ b/modules/lmstudio-server.nix
@@ -0,0 +1,185 @@
+# LM Studio Headless LMS Server Module
+# Provides: OpenAI-compatible LLM API server via LM Studio's `lms` CLI
+#
+# Usage:
+#   myModules.lmstudio-server = {
+#     enable = true;
+#     port = 1234;
+#     domain = "llm.example.com"; # optional, sets up nginx reverse proxy
+#     models = [ "openai/gpt-oss-20b" ]; # auto-load on startup
+#   };
+
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.myModules.lmstudio-server;
+in
+{
+  options.myModules.lmstudio-server = {
+    enable = lib.mkEnableOption "LM Studio headless LMS server";
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.lmstudio;
+      defaultText = lib.literalExpression "pkgs.lmstudio";
+      description = "The lmstudio package to use (provides the `lms` CLI).";
+    };
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "lmstudio";
+      description = "System user under which the LMS server runs.";
+    };
+
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = "lmstudio";
+      description = "System group under which the LMS server runs.";
+    };
+
+    home = lib.mkOption {
+      type = lib.types.path;
+      default = "/var/lib/lmstudio";
+      description = "Home directory for the lmstudio user. Models and runtime data are stored here.";
+    };
+
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1234;
+      description = "Port for the OpenAI-compatible HTTP API server.";
+    };
+
+    cors = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = "Enable CORS support for the HTTP API server.";
+    };
+
+    models = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+      example = [ "openai/gpt-oss-20b" ];
+      description = "Models to auto-load on startup. Models must already be downloaded (via `lms get`).";
+    };
+
+    domain = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "llm.example.com";
+      description = "Public domain name. If set, configures nginx reverse proxy with HTTPS.";
+    };
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = "Open the configured port in the firewall (only needed without nginx proxy).";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # NOTE(review): nixpkgs.config.allowUnfreePredicate cannot be merged across
+    # modules; if another module also defines it, evaluation fails. Consider
+    # centralizing the predicate in configuration.nix.
+    nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
+      "lmstudio"
+    ];
+
+    users.users = lib.optionalAttrs (cfg.user == "lmstudio") {
+      lmstudio = {
+        inherit (cfg) group home;
+        isSystemUser = true;
+        description = "LM Studio headless LMS server";
+      };
+    };
+
+    users.groups = lib.optionalAttrs (cfg.group == "lmstudio") {
+      lmstudio = { };
+    };
+
+    # Create the state directories as root before the (unprivileged)
+    # service starts; the service user itself cannot create cfg.home
+    # because its parent (e.g. /var/lib) is root-owned.
+    systemd.tmpfiles.rules = [
+      "d ${cfg.home} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.home}/.lmstudio 0750 ${cfg.user} ${cfg.group} -"
+    ];
+
+    systemd.services.lmstudio-server = {
+      description = "LM Studio Headless LMS Server";
+
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment.HOME = cfg.home;
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+
+        ExecStart = pkgs.writeShellScript "lmstudio-server-start" ''
+          set -euo pipefail
+
+          LMS="${cfg.package}/bin/lms"
+
+          # Start the daemon
+          echo "Starting LM Studio daemon..."
+          $LMS daemon up
+
+          # Wait for daemon to be ready
+          # TODO(review): replace with a readiness poll if the lms CLI offers one
+          sleep 2
+
+          # Load requested models (best effort: a failed load is logged,
+          # not fatal, so one bad entry does not take the server down)
+          ${lib.concatMapStringsSep "\n" (model: ''
+            echo "Loading model: ${model}..."
+            $LMS load "${model}" --yes || echo "Warning: failed to load ${model}, continuing..."
+          '') cfg.models}
+
+          # Start the server (foreground)
+          SERVER_ARGS="--port ${toString cfg.port}"
+          ${lib.optionalString cfg.cors "SERVER_ARGS=\"$SERVER_ARGS --cors\""}
+          echo "Starting LM Studio server on port ${toString cfg.port}..."
+          exec $LMS server start $SERVER_ARGS
+        '';
+
+        ExecStop = "${cfg.package}/bin/lms daemon down";
+
+        Restart = "on-failure";
+        RestartSec = "10s";
+
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = false;
+        ReadWritePaths = [ cfg.home ];
+        PrivateTmp = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+    # Optional nginx reverse proxy
+    myModules.nginx.domains = lib.mkIf (cfg.domain != null) {
+      ${cfg.domain} = {
+        port = cfg.port;
+        extraConfig = ''
+          client_max_body_size 100M;
+          proxy_read_timeout 600s;
+          proxy_send_timeout 600s;
+        '';
+      };
+    };
+  };
+}