desko: ship a few more ollama models

2025-02-05 16:55:51 +00:00
parent 7a149d8f2f
commit d077036bb6
5 changed files with 49 additions and 7 deletions

View File

@@ -19,7 +19,7 @@ let
modelSources = pkgs.symlinkJoin {
name = "ollama-models";
paths = with pkgs.ollamaPackages; [
-athene-v2-72b-q2_K # untested
+athene-v2-72b-q2_K # very knowledgeable; fairly compliant (briefly lets you know if something's wrong, but still answers)
# aya-8b # it avoids generating code, only text
# codegeex4-9b # it's okaaay, seems to not give wrong code, just incomplete code.
# codegemma-7b # it generates invalid nix code
@@ -30,11 +30,12 @@ let
deepseek-r1-1_5b
deepseek-r1-7b
deepseek-r1-14b
-deepseek-r1-32b
+# deepseek-r1-32b # redundant with abliterated deepseek-r1
deepseek-r1-abliterated-14b
deepseek-r1-abliterated-32b
-dolphin-mistral-7b # UNCENSORED mistral
-dolphin-mixtral-8x7b # UNCENSORED mixtral
+deepseek-r1-abliterated-70b
+dolphin-mistral-7b # UNCENSORED mistral; compliant
+dolphin-mixtral-8x7b # about as fast as a 14b model, similar quality results. uncensored, but still preachy
# falcon2-11b # code examples are lacking
# gemma2-9b # fast, but not great for code
gemma2-27b # generates at 1word/sec, but decent coding results if you can wrangle it
@@ -42,9 +43,10 @@ let
# hermes3-8b # FAST, but unwieldy
# llama3-chatqa-8b # it gets stuck
# llama3_1-70b # generates like 1 word/sec, decent output (comparable to qwen2_5-32b)
-llama3_2-3b
+# llama3_2-3b # redundant with uncensored llama
llama3_2-uncensored-3b
-llama3_3-70b
+# llama3_3-70b # non-compliant; dodges iffy questions
+llama3_3-abliterated-70b # compliant, but slower and not as helpful as deepseek-r1-abliterated-70b
magicoder-7b # it generates valid, if sparse, code
marco-o1-7b # untested
# mistral-7b # it generates invalid code
@@ -61,7 +63,8 @@ let
qwen2_5-abliterate-14b
qwen2_5-abliterate-32b
# qwen2_5-coder-7b # fast, and concise, but generates invalid code
-qwq-32b # untested
+qwq-32b # heavily restricted
+qwq-abliterated-32b
# solar-pro-22b # generates invalid code
# starcoder2-15b-instruct # it gets stuck
# wizardlm2-7b # generates invalid code
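
The hunks above only touch the paths list of the ollama-models symlinkJoin; how that joined store path actually reaches the ollama daemon is not part of this diff. Purely as a sketch (an assumed wiring, not something this commit shows), the upstream NixOS module could be pointed at it through the OLLAMA_MODELS directory that ollama scans for manifests and blobs:

  services.ollama = {
    enable = true;
    # assumption: hand the read-only symlinkJoin output to ollama as its model
    # directory; OLLAMA_MODELS is where ollama looks for manifests/ and blobs/
    environmentVariables.OLLAMA_MODELS = "${modelSources}";
  };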

View File

@@ -0,0 +1,12 @@
# <https://ollama.com/library/deepseek-r1>
# uncensored version of deepseek-r1
{ mkOllamaModel }: mkOllamaModel {
owner = "huihui_ai";
modelName = "deepseek-r1-abliterated";
variant = "70b";
manifestHash = "sha256-UPjQ/pgPFuoVNhCH1QvJ9dQWDmYnXYRK/vd/+ZO4CSA=";
modelBlob = "e30d4a7705b624845a113478563f2a47edb426b1508a8119c90376d433084ee2";
modelBlobHash = "sha256-4w1KdwW2JIRaETR4Vj8qR+20JrFQioEZyQN21DMITuI=";
paramsBlob = "f4d24e9138dd4603380add165d2b0d970bef471fac194b436ebd50e6147c6588";
paramsBlobHash = "sha256-9NJOkTjdRgM4Ct0WXSsNlwvvRx+sGUtDbr1Q5hR8ZYg=";
}
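
Each of the new model files feeds the same fields to mkOllamaModel: the registry owner, the model name and tag (variant), an SRI hash for the manifest, and the hex digest plus SRI hash of each blob the manifest references. The builder itself is not part of this diff; purely as an illustration of what such a helper might look like (an assumption, not the repo's actual mkOllamaModel; URLs and on-disk layout follow ollama's usual registry and manifests/ + blobs/ scheme):

  # illustrative sketch only; the real mkOllamaModel is defined elsewhere in this repo
  { lib, fetchurl, runCommandLocal }:
  { owner, modelName, variant, manifestHash
  , modelBlob, modelBlobHash
  , paramsBlob ? null, paramsBlobHash ? null
  , systemBlob ? null, systemBlobHash ? null
  }:
  let
    registry = "https://registry.ollama.ai/v2/${owner}/${modelName}";
    manifest = fetchurl {
      url = "${registry}/manifests/${variant}";
      hash = manifestHash;
    };
    fetchBlob = digest: hash: fetchurl {
      url = "${registry}/blobs/sha256:${digest}";
      inherit hash;
    };
    # copy a blob into place only when the model file provides it
    installBlob = digest: hash: lib.optionalString (digest != null)
      "cp ${fetchBlob digest hash} $out/blobs/sha256-${digest}";
  in runCommandLocal "ollama-model-${modelName}-${variant}" { } ''
    mkdir -p $out/manifests/registry.ollama.ai/${owner}/${modelName} $out/blobs
    cp ${manifest} $out/manifests/registry.ollama.ai/${owner}/${modelName}/${variant}
    ${installBlob modelBlob modelBlobHash}
    ${installBlob paramsBlob paramsBlobHash}
    ${installBlob systemBlob systemBlobHash}
  ''

The qwq-abliterated file further down passes systemBlob/systemBlobHash instead of a params blob, which is why the blob arguments are optional and default to null in this sketch.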

View File

@@ -0,0 +1,12 @@
# <https://ollama.com/huihui_ai/llama3.3-abliterated>
# uncensored version of llama 3.3
{ mkOllamaModel }: mkOllamaModel {
owner = "huihui_ai";
modelName = "llama3.3-abliterated";
variant = "70b";
manifestHash = "sha256-aUypPUtudmT+2FExojOiJ/ESaEPtHLhqADv8wJofOpc=";
modelBlob = "ca07b492de2cb9534482296866bcad31824907d2cb1b9407f017f5f569915b40";
modelBlobHash = "sha256-yge0kt4suVNEgiloZrytMYJJB9LLG5QH8Bf19WmRW0A=";
paramsBlob = "56bb8bd477a519ffa694fc449c2413c6f0e1d3b1c88fa7e3c9d88d3ae49d4dcb";
paramsBlobHash = "sha256-VruL1HelGf+mlPxEnCQTxvDh07HIj6fjydiNOuSdTcs=";
}

View File

@@ -35,6 +35,7 @@ lib.recurseIntoAttrs (lib.makeScope newScope (self: with self; {
deepseek-r1-32b = callPackage ./deepseek-r1-32b.nix { };
deepseek-r1-abliterated-14b = callPackage ./deepseek-r1-abliterated-14b.nix { };
deepseek-r1-abliterated-32b = callPackage ./deepseek-r1-abliterated-32b.nix { };
+deepseek-r1-abliterated-70b = callPackage ./deepseek-r1-abliterated-70b.nix { };
dolphin-mistral-7b = callPackage ./dolphin-mistral-7b.nix { };
dolphin-mixtral-8x7b = callPackage ./dolphin-mixtral-8x7b.nix { };
falcon2-11b = callPackage ./falcon2-11b.nix { };
@@ -47,6 +48,7 @@ lib.recurseIntoAttrs (lib.makeScope newScope (self: with self; {
llama3_2-3b = callPackage ./llama3_2-3b.nix { };
llama3_2-uncensored-3b = callPackage ./llama3_2-uncensored-3b.nix { };
llama3_3-70b = callPackage ./llama3_3-70b.nix { };
+llama3_3-abliterated-70b = callPackage ./llama3_3-abliterated-70b.nix { };
magicoder-7b = callPackage ./magicoder-7b.nix { };
marco-o1-7b = callPackage ./marco-o1-7b.nix { };
mistral-7b = callPackage ./mistral-7b.nix { };
@@ -64,6 +66,7 @@ lib.recurseIntoAttrs (lib.makeScope newScope (self: with self; {
qwen2_5-abliterate-32b = callPackage ./qwen2_5-abliterate-32b.nix { };
qwen2_5-coder-7b = callPackage ./qwen2_5-coder-7b.nix { };
qwq-32b = callPackage ./qwq-32b.nix { };
+qwq-abliterated-32b = callPackage ./qwq-abliterated-32b.nix { };
solar-pro-22b = callPackage ./solar-pro-22b.nix { };
starcoder2-15b-instruct = callPackage ./starcoder2-15b-instruct.nix { };
wizardlm2-7b = callPackage ./wizardlm2-7b.nix { };

View File

@@ -0,0 +1,12 @@
# <https://ollama.com/huihui_ai/qwq-abliterated>
# uncensored version of qwq
{ mkOllamaModel }: mkOllamaModel {
owner = "huihui_ai";
modelName = "qwq-abliterated";
variant = "32b";
manifestHash = "sha256-EdT/0h+Uwtuluy5HO8EGKmmrl1tNIrSt33KUI3mOefo=";
modelBlob = "6278acec322cac64ac9f5f98578a16e6ca88d361ebb3f54b82a3a5d1b0208a61";
modelBlobHash = "sha256-Ynis7DIsrGSsn1+YV4oW5sqI02Hrs/VLgqOl0bAgimE=";
systemBlob = "50795fcf7f4df9be65090ce980b63e8d82f51b5cbad9492fa504aefa3ad3c9a0";
systemBlobHash = "sha256-UHlfz39N+b5lCQzpgLY+jYL1G1y62UkvpQSu+jrTyaA=";
}
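
For anyone adding yet another model file in this style, a common nixpkgs workflow for filling in the hashes (a general trust-on-first-use pattern, not necessarily how this repo's hashes were produced) is to start from placeholders and copy the real values out of the hash-mismatch errors of the failing fixed-output fetches:

  # hypothetical first draft of a new model file; every name here is a placeholder.
  # Build it once, then replace each lib.fakeHash with the "got: sha256-..." value
  # reported by the failing fetch. The blob digests themselves come from the
  # fetched manifest, which lists them as its layers.
  { lib, mkOllamaModel }: mkOllamaModel {   # lib assumed to be in the callPackage scope
    owner = "huihui_ai";
    modelName = "some-new-model";
    variant = "7b";
    manifestHash = lib.fakeHash;
    modelBlob = "0000000000000000000000000000000000000000000000000000000000000000";
    modelBlobHash = lib.fakeHash;
  }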