newelle: configure the ollama endpoint statically

2025-07-24 08:02:16 +00:00
parent 5552decca6
commit 0758697534

@@ -8,8 +8,6 @@
# - usable, but e.g. the "post" button (and the window close button) is cut off
# - closing the UI does not fully exit the process
# - presumably it's sitting in the background, waiting to be dbus-activated
#
# TODO: configure ollama connection details statically
{ pkgs, ... }:
{
sane.programs.newelle = {
@@ -46,6 +44,37 @@
sandbox.whitelistWayland = true;
sandbox.mesaCacheDir = ".cache/Newelle/mesa";
gsettings."io/github/qwersyk/Newelle" = {
language-model = "ollama";
# the actual gsettings keyfile is HUGE (see ~/.config/glib-2.0/settings/keyfile).
# this is sliced down to the essentials; i'll let Newelle re-populate the omitted data on launch.
# (a builtins.toJSON alternative is sketched after this gsettings block.)
llm-settings = ''
{
"ollama": {
"last_request": 1,
"models-info": {},
"model-library": [
{
"key": "qwen2.5-abliterate:14b",
"title": "qwen2.5-abliterate:14b",
"description": "User added model"
}
],
"endpoint": "http://10.0.10.22:11434",
"models": "[[\"qwen2.5-abliterate:14b\", \"qwen2.5-abliterate:14b\"]]",
"model": "qwen2.5-abliterate:14b"
}
}
'';
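# quick sanity check for the statically-configured endpoint (assumes the
# ollama host above is reachable from this machine; GET /api/tags is the
# standard ollama route that lists the models the server actually serves):
#   curl http://10.0.10.22:11434/api/tags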
welcome-screen-shown = true;
# extensions-settings='{"website-reader": {}, "websearch": {}}'
# tts-voice='{"groq_tts": {"endpoint": "https://api.groq.com/openai/v1/", "instructions": ""}}'
# stt-settings='{"groq_sr": {"endpoint": "https://api.groq.com/openai/v1/"}}'
# llm-settings = <json blob>;
# embedding-settings = <json blob>;
# llm-secondary-settings = <json blob>;
};
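# a minimal sketch (untested) of generating the same llm-settings blob with
# builtins.toJSON instead of a hand-written JSON string, which avoids the
# escaped-quote nesting above. `ollamaEndpoint` and `ollamaModel` are
# hypothetical names, not bindings that exist in this repo:
#   let
#     ollamaEndpoint = "http://10.0.10.22:11434";
#     ollamaModel = "qwen2.5-abliterate:14b";
#   in builtins.toJSON {
#     ollama = {
#       endpoint = ollamaEndpoint;
#       model = ollamaModel;
#       # newelle stores `models` as a doubly-encoded JSON string, so
#       # encode the inner list separately:
#       models = builtins.toJSON [ [ ollamaModel ollamaModel ] ];
#       model-library = [{
#         key = ollamaModel;
#         title = ollamaModel;
#         description = "User added model";
#       }];
#     };
#   }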
persist.byStore.ephemeral = [
".cache/Newelle" # extensions
".config/Newelle" # chat logs