Merge branch 'staging-next' into staging

Jan Tojnar 2022-12-25 01:30:47 +01:00
commit 72c37eddec
895 changed files with 23748 additions and 21115 deletions

.github/CODEOWNERS vendored
View File

@ -270,6 +270,7 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# GNOME
/pkgs/desktops/gnome @jtojnar
/pkgs/desktops/gnome/extensions @piegamesde @jtojnar
/pkgs/build-support/make-hardcode-gsettings-patch @jtojnar
# Cinnamon
/pkgs/desktops/cinnamon @mkg20001

View File

@ -55,7 +55,7 @@ Package version upgrades usually allow for simpler commit messages, including at
Pull requests should not be squash merged in order to keep complete commit messages and GPG signatures intact and must not be when the change doesn't make sense as a single commit.
This means that, when addressing review comments in order to keep the pull request in an always mergeable status, you will sometimes need to rewrite your branch's history and then force-push it with `git push --force-with-lease`.
Useful git commands that can help a lot with this are `git commit --patch --amend` and `git rebase --interactive @~3`. For more details consult the git man pages.
Useful git commands that can help a lot with this are `git commit --patch --amend` and `git rebase --interactive`. For more details consult the git man pages or online resources like [git-rebase.io](https://git-rebase.io/) or [The Pro Git Book](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
## Rebasing between branches (i.e. from master to staging)

View File

@ -3,10 +3,31 @@ Turns a manpage reference into a link, when a mapping is defined below.
]]
local man_urls = {
["tmpfiles.d(5)"] = "https://www.freedesktop.org/software/systemd/man/tmpfiles.d.html",
["nix.conf(5)"] = "https://nixos.org/manual/nix/stable/#sec-conf-file",
["systemd.time(7)"] = "https://www.freedesktop.org/software/systemd/man/systemd.time.html",
["journald.conf(5)"] = "https://www.freedesktop.org/software/systemd/man/journald.conf.html",
["logind.conf(5)"] = "https://www.freedesktop.org/software/systemd/man/logind.conf.html",
["networkd.conf(5)"] = "https://www.freedesktop.org/software/systemd/man/networkd.conf.html",
["systemd.automount(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.automount.html",
["systemd.exec(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.exec.html",
["systemd.link(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.link.html",
["systemd.mount(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.mount.html",
["systemd.netdev(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.netdev.html",
["systemd.network(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.network.html",
["systemd.nspawn(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.nspawn.html",
["systemd.path(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.path.html",
["systemd.resource-control(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html",
["systemd.scope(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.scope.html",
["systemd.service(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.service.html",
["systemd.slice(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.slice.html",
["systemd.socket(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.socket.html",
["systemd.timer(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.timer.html",
["systemd.unit(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.unit.html",
["timesyncd.conf(5)"] = "https://www.freedesktop.org/software/systemd/man/timesyncd.conf.html",
["tmpfiles.d(5)"] = "https://www.freedesktop.org/software/systemd/man/tmpfiles.d.html",
["systemd.time(7)"] = "https://www.freedesktop.org/software/systemd/man/systemd.time.html",
["systemd-fstab-generator(8)"] = "https://www.freedesktop.org/software/systemd/man/systemd-fstab-generator.html",
["systemd-networkd-wait-online.service(8)"] = "https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online.service.html",
}
function Code(elem)

View File

@ -7,4 +7,5 @@
</para>
<xi:include href="special/fhs-environments.section.xml" />
<xi:include href="special/mkshell.section.xml" />
<xi:include href="special/darwin-builder.section.xml" />
</chapter>

View File

@ -0,0 +1,60 @@
# darwin.builder {#sec-darwin-builder}
`darwin.builder` provides a way to bootstrap a Linux builder on a macOS machine.
This requires macOS version 12.4 or later.
This also requires that port 22 on your machine is free (since Nix does not
permit specifying a non-default SSH port for builders).
You will also need to be a trusted user for your Nix installation. In other
words, your `/etc/nix/nix.conf` should have something like:
```
extra-trusted-users = <your username goes here>
```
To launch the builder, run the following flake:
```ShellSession
$ nix run nixpkgs#darwin.builder
```
That will prompt you to enter your `sudo` password:
```
+ sudo --reset-timestamp /nix/store/…-install-credentials.sh ./keys
Password:
```
… so that it can install a private key used to `ssh` into the build server.
After that the script will launch the virtual machine:
```
<<< Welcome to NixOS 22.11.20220901.1bd8d11 (aarch64) - ttyAMA0 >>>
Run 'nixos-help' for the NixOS manual.
nixos login:
```
> Note: When you need to stop the VM, type `Ctrl`-`a` + `c` to open the `qemu`
> prompt and then type `quit` followed by `Enter`
To delegate builds to the remote builder, add the following options to your
`nix.conf` file:
```
# - Replace ${ARCH} with either aarch64 or x86_64 to match your host machine
# - Replace ${MAX_JOBS} with the maximum number of builds (pick 4 if you're not sure)
builders = ssh-ng://builder@localhost ${ARCH}-linux /etc/nix/builder_ed25519 ${MAX_JOBS} - - - c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUpCV2N4Yi9CbGFxdDFhdU90RStGOFFVV3JVb3RpQzVxQkorVXVFV2RWQ2Igcm9vdEBuaXhvcwo='
# Not strictly necessary, but this will reduce your disk utilization
builders-use-substitutes = true
```
… and then restart your Nix daemon to apply the change:
```ShellSession
$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
```

View File

@ -3,8 +3,22 @@ let
inherit (pkgs) lib;
inherit (lib) hasPrefix removePrefix;
locationsXml = import ./lib-function-locations.nix { inherit pkgs nixpkgs; };
functionDocs = import ./lib-function-docs.nix { inherit locationsXml pkgs; };
libsets = [
{ name = "asserts"; description = "assertion functions"; }
{ name = "attrsets"; description = "attribute set functions"; }
{ name = "strings"; description = "string manipulation functions"; }
{ name = "versions"; description = "version string functions"; }
{ name = "trivial"; description = "miscellaneous functions"; }
{ name = "lists"; description = "list manipulation functions"; }
{ name = "debug"; description = "debugging functions"; }
{ name = "options"; description = "NixOS / nixpkgs option handling"; }
{ name = "filesystem"; description = "filesystem functions"; }
{ name = "sources"; description = "source filtering functions"; }
{ name = "cli"; description = "command-line serialization functions"; }
];
locationsXml = import ./lib-function-locations.nix { inherit pkgs nixpkgs libsets; };
functionDocs = import ./lib-function-docs.nix { inherit locationsXml pkgs libsets; };
version = pkgs.lib.version;
epub-xsl = pkgs.writeText "epub.xsl" ''

View File

@ -1,30 +1,32 @@
# Generates the documentation for library functions via nixdoc. To add
# another library function file to this list, the include list in the
# file `doc/functions/library.xml` must also be updated.
# Generates the documentation for library functions via nixdoc.
{ pkgs ? import ./.. {}, locationsXml }:
{ pkgs, locationsXml, libsets }:
with pkgs; stdenv.mkDerivation {
name = "nixpkgs-lib-docs";
src = ./../../lib;
src = ../../lib;
buildInputs = [ nixdoc ];
installPhase = ''
function docgen {
nixdoc -c "$1" -d "$2" -f "../lib/$1.nix" > "$out/$1.xml"
# TODO: wrap lib.$1 in <literal>, make nixdoc not escape it
nixdoc -c "$1" -d "lib.$1: $2" -f "$1.nix" > "$out/$1.xml"
echo "<xi:include href='$1.xml' />" >> "$out/index.xml"
}
mkdir -p $out
ln -s ${locationsXml} $out/locations.xml
mkdir -p "$out"
docgen asserts 'Assert functions'
docgen attrsets 'Attribute-set functions'
docgen strings 'String manipulation functions'
docgen trivial 'Miscellaneous functions'
docgen lists 'List manipulation functions'
docgen debug 'Debugging functions'
docgen options 'NixOS / nixpkgs option handling'
docgen filesystem 'Filesystem functions'
docgen sources 'Source filtering functions'
cat > "$out/index.xml" << 'EOF'
<?xml version="1.0" encoding="utf-8"?>
<root xmlns:xi="http://www.w3.org/2001/XInclude">
EOF
${lib.concatMapStrings ({ name, description }: ''
docgen ${name} ${lib.escapeShellArg description}
'') libsets}
echo "</root>" >> "$out/index.xml"
ln -s ${locationsXml} $out/locations.xml
'';
}

View File

@ -1,4 +1,4 @@
{ pkgs ? (import ./.. { }), nixpkgs ? { }}:
{ pkgs, nixpkgs ? { }, libsets }:
let
revision = pkgs.lib.trivial.revisionWithDefault (nixpkgs.revision or "master");
@ -16,9 +16,7 @@ let
subsetname = subsetname;
functions = libDefPos toplib.${subsetname};
})
(builtins.filter
(name: builtins.isAttrs toplib.${name})
(builtins.attrNames toplib));
(builtins.map (x: x.name) libsets);
nixpkgsLib = pkgs.lib;

View File

@ -11,6 +11,7 @@
<xsl:param name="html.script" select="'./highlightjs/highlight.pack.js ./highlightjs/loader.js'" />
<xsl:param name="xref.with.number.and.title" select="0" />
<xsl:param name="use.id.as.filename" select="1" />
<xsl:param name="generate.section.toc.level" select="1" />
<xsl:param name="toc.section.depth" select="0" />
<xsl:param name="admon.style" select="''" />
<xsl:param name="callout.graphics.extension" select="'.svg'" />

View File

@ -8,25 +8,7 @@
Nixpkgs provides a standard library at <varname>pkgs.lib</varname>, or through <code>import &lt;nixpkgs/lib&gt;</code>.
</para>
<!-- These docs are generated via nixdoc. To add another generated
library function file to this list, the file
`lib-function-docs.nix` must also be updated. -->
<xi:include href="./library/generated/asserts.xml" />
<xi:include href="./library/generated/attrsets.xml" />
<xi:include href="./library/generated/strings.xml" />
<xi:include href="./library/generated/trivial.xml" />
<xi:include href="./library/generated/lists.xml" />
<xi:include href="./library/generated/debug.xml" />
<xi:include href="./library/generated/options.xml" />
<xi:include href="./library/generated/filesystem.xml" />
<xi:include href="./library/generated/sources.xml" />
<!-- The index must have a root element to declare namespaces, but we
don't want to include it, so we select all of its children. -->
<xi:include href="./library/generated/index.xml" xpointer="xpointer(/root/*)" />
</section>

View File

@ -232,7 +232,6 @@ androidenv.emulateApp {
platformVersion = "24";
abiVersion = "armeabi-v7a"; # mips, x86, x86_64
systemImageType = "default";
useGoogleAPIs = false;
app = ./MyApp.apk;
package = "MyApp";
activity = "MainActivity";

View File

@ -121,7 +121,6 @@ in buildDotnetModule rec {
dotnet-sdk = dotnetCorePackages.sdk_3_1;
dotnet-runtime = dotnetCorePackages.net_5_0;
dotnetFlags = [ "--runtime linux-x64" ];
executables = [ "foo" ]; # This wraps "$out/lib/$pname/foo" to `$out/bin/foo`.
executables = []; # Don't install any executables.

View File

@ -16,6 +16,8 @@ rec {
Example:
x = { a = { b = 3; }; }
# ["a" "b"] is equivalent to x.a.b
# 6 is a default value to return if the path does not exist in attrset
attrByPath ["a" "b"] 6 x
=> 3
attrByPath ["z" "z"] 6 x
@ -23,6 +25,7 @@ rec {
Type:
attrByPath :: [String] -> Any -> AttrSet -> Any
*/
attrByPath =
# A list of strings representing the attribute path to return from `set`
@ -96,7 +99,7 @@ rec {
=> error: cannot find attribute `z.z'
Type:
getAttrFromPath :: [String] -> AttrSet -> Value
getAttrFromPath :: [String] -> AttrSet -> Any
*/
getAttrFromPath =
# A list of strings representing the attribute path to get from `set`
@ -109,10 +112,7 @@ rec {
/* Map each attribute in the given set and merge them into a new attribute set.
Type:
concatMapAttrs ::
(String -> a -> AttrSet)
-> AttrSet
-> AttrSet
concatMapAttrs :: (String -> a -> AttrSet) -> AttrSet -> AttrSet
Example:
concatMapAttrs
@ -168,8 +168,7 @@ rec {
] { a.b.c = 0; }
=> { a = { b = { d = 1; }; }; x = { y = "xy"; }; }
Type:
updateManyAttrsByPath :: [AttrSet] -> AttrSet -> AttrSet
Type: updateManyAttrsByPath :: [{ path :: [String], update :: (Any -> Any) }] -> AttrSet -> AttrSet
*/
updateManyAttrsByPath = let
# When recursing into attributes, instead of updating the `path` of each
@ -252,6 +251,7 @@ rec {
Example:
attrValues {c = 3; a = 1; b = 2;}
=> [1 2 3]
Type:
attrValues :: AttrSet -> [Any]
*/
@ -341,6 +341,7 @@ rec {
Type:
foldAttrs :: (Any -> Any -> Any) -> Any -> [AttrSets] -> Any
*/
foldAttrs =
# A function, given a value and a collector combines the two.
@ -394,7 +395,7 @@ rec {
{ a = 2; b = 20; }
]
Type:
cartesianProductOfSets :: AttrSet -> [AttrSet]
cartesianProductOfSets :: AttrSet -> [AttrSet]
*/
cartesianProductOfSets =
# Attribute set with attributes that are lists of values
@ -413,7 +414,7 @@ rec {
=> { name = "some"; value = 6; }
Type:
nameValuePair :: String -> Any -> AttrSet
nameValuePair :: String -> Any -> { name :: String, value :: Any }
*/
nameValuePair =
# Attribute name
@ -600,7 +601,7 @@ rec {
=> { }
Type:
optionalAttrs :: Bool -> AttrSet
optionalAttrs :: Bool -> AttrSet -> AttrSet
*/
optionalAttrs =
# Condition under which the `as` attribute set is returned.
@ -646,7 +647,7 @@ rec {
=> { a = ["x" "y"]; b = ["z"] }
Type:
zipAttrsWith :: (String -> [ Any ] -> Any) -> [ AttrSet ] -> AttrSet
zipAttrsWith :: (String -> [ Any ] -> Any) -> [ AttrSet ] -> AttrSet
*/
zipAttrsWith =
builtins.zipAttrsWith or (f: sets: zipAttrsWithNames (concatMap attrNames sets) f sets);
@ -737,7 +738,7 @@ rec {
}
Type:
recursiveUpdate :: AttrSet -> AttrSet -> AttrSet
recursiveUpdate :: AttrSet -> AttrSet -> AttrSet
*/
recursiveUpdate =
# Left attribute set of the merge.
@ -795,6 +796,7 @@ rec {
/* Turns a list of strings into a human-readable description of those
strings represented as an attribute path. The result of this function is
not intended to be machine-readable.
Create a new attribute set with `value` set at the nested attribute location specified in `attrPath`.
Example:
showAttrPath [ "foo" "10" "bar" ]
@ -831,11 +833,11 @@ rec {
If the output does not exist, fallback to `.out` and then to the default.
Example:
getOutput pkgs.openssl
getBin pkgs.openssl
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r"
Type:
getOutput :: Derivation -> String
getBin :: Derivation -> String
*/
getBin = getOutput "bin";
@ -844,11 +846,11 @@ rec {
If the output does not exist, fallback to `.out` and then to the default.
Example:
getOutput pkgs.openssl
getLib pkgs.openssl
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-lib"
Type:
getOutput :: Derivation -> String
getLib :: Derivation -> String
*/
getLib = getOutput "lib";
@ -857,11 +859,11 @@ rec {
If the output does not exist, fallback to `.out` and then to the default.
Example:
getOutput pkgs.openssl
getDev pkgs.openssl
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev"
Type:
getOutput :: Derivation -> String
getDev :: Derivation -> String
*/
getDev = getOutput "dev";
@ -870,15 +872,19 @@ rec {
If the output does not exist, fallback to `.out` and then to the default.
Example:
getOutput pkgs.openssl
getMan pkgs.openssl
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-man"
Type:
getOutput :: Derivation -> String
getMan :: Derivation -> String
*/
getMan = getOutput "man";
/* Pick the outputs of packages to place in `buildInputs` */
/* Pick the outputs of packages to place in `buildInputs`
Type: chooseDevOutputs :: [Derivation] -> [String]
*/
chooseDevOutputs =
# List of packages to pick `dev` outputs from
drvs:
@ -900,6 +906,7 @@ rec {
Type:
recurseIntoAttrs :: AttrSet -> AttrSet
*/
recurseIntoAttrs =
# An attribute set to scan for derivations.
@ -909,7 +916,7 @@ rec {
/* Undo the effect of recurseIntoAttrs.
Type:
recurseIntoAttrs :: AttrSet -> AttrSet
dontRecurseIntoAttrs :: AttrSet -> AttrSet
*/
dontRecurseIntoAttrs =
# An attribute set to not scan for derivations.
@ -919,7 +926,10 @@ rec {
/* `unionOfDisjoint x y` is equal to `x // y // z` where the
attrnames in `z` are the intersection of the attrnames in `x` and
`y`, and all values `assert` with an error message. This
operator is commutative, unlike (//). */
operator is commutative, unlike (//).
Type: unionOfDisjoint :: AttrSet -> AttrSet -> AttrSet
*/
unionOfDisjoint = x: y:
let
intersection = builtins.intersectAttrs x y;
@ -930,9 +940,10 @@ rec {
in
(x // y) // mask;
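
A minimal usage sketch of the behaviour described above (values illustrative, evaluated e.g. in `nix repl` with nixpkgs' `lib` in scope): the union works like `//` for disjoint names, but forcing an attribute that exists in both arguments fails instead of silently preferring the right-hand side.

```nix
lib.attrsets.unionOfDisjoint { a = 1; } { b = 2; }
# => { a = 1; b = 2; }

(lib.attrsets.unionOfDisjoint { a = 1; } { a = 2; }).a
# => evaluation error: the clashing attribute `a` raises a collision error
#    rather than yielding either 1 or 2
```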
# deprecated
# DEPRECATED
zipWithNames = zipAttrsWithNames;
# deprecated
# DEPRECATED
zip = builtins.trace
"lib.zip is deprecated, use lib.zipAttrsWith instead" zipAttrsWith;
}

View File

@ -342,7 +342,10 @@ rec {
else "{" + introSpace
+ libStr.concatStringsSep introSpace (libAttr.mapAttrsToList
(name: value:
"${libStr.escapeNixIdentifier name} = ${go (indent + " ") value};") v)
"${libStr.escapeNixIdentifier name} = ${
builtins.addErrorContext "while evaluating an attribute `${name}`"
(go (indent + " ") value)
};") v)
+ outroSpace + "}"
else abort "generators.toPretty: should never happen (v = ${v})";
in go "";

View File

@ -969,6 +969,11 @@ in mkLicense lset) ({
fullName = "wxWindows Library Licence, Version 3.1";
};
x11 = {
spdxId = "X11";
fullName = "X11 License";
};
xfig = {
fullName = "xfig";
url = "http://mcj.sourceforge.net/authors.html#xfig"; # https is broken

View File

@ -104,8 +104,6 @@ rec {
/* Creates an Option attribute set for an option that specifies the
package a module should use for some purpose.
Type: mkPackageOption :: pkgs -> string -> { default :: [string], example :: null | string | [string] } -> option
The package is specified as a list of strings representing its attribute path in nixpkgs.
Because of this, you need to pass nixpkgs itself as the first argument.
@ -116,6 +114,8 @@ rec {
You can omit the default path if the name of the option is also an attribute path in nixpkgs.
Type: mkPackageOption :: pkgs -> string -> { default :: [string], example :: null | string | [string] } -> option
Example:
mkPackageOption pkgs "hello" { }
=> { _type = "option"; default = «derivation /nix/store/3r2vg51hlxj3cx5vscp0vkv60bqxkaq0-hello-2.10.drv»; defaultText = { ... }; description = "The hello package to use."; type = { ... }; }
@ -221,9 +221,10 @@ rec {
optionAttrSetToDocList' = _: options:
concatMap (opt:
let
name = showOption opt.loc;
docOption = rec {
loc = opt.loc;
name = showOption opt.loc;
inherit name;
description = opt.description or null;
declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false;
@ -234,8 +235,18 @@ rec {
readOnly = opt.readOnly or false;
type = opt.type.description or "unspecified";
}
// optionalAttrs (opt ? example) { example = renderOptionValue opt.example; }
// optionalAttrs (opt ? default) { default = renderOptionValue (opt.defaultText or opt.default); }
// optionalAttrs (opt ? example) {
example =
builtins.addErrorContext "while evaluating the example of option `${name}`" (
renderOptionValue opt.example
);
}
// optionalAttrs (opt ? default) {
default =
builtins.addErrorContext "while evaluating the default value of option `${name}`" (
renderOptionValue (opt.defaultText or opt.default)
);
}
// optionalAttrs (opt ? relatedPackages && opt.relatedPackages != null) { inherit (opt) relatedPackages; };
subOptions =

View File

@ -281,7 +281,7 @@ rec {
=> [ ]
stringToCharacters "abc"
=> [ "a" "b" "c" ]
stringToCharacters "💩"
stringToCharacters "🦄"
=> [ "<EFBFBD>" "<EFBFBD>" "<EFBFBD>" "<EFBFBD>" ]
*/
stringToCharacters = s:

View File

@ -186,6 +186,12 @@
githubId = 7755101;
name = "Aaron Andersen";
};
aaqaishtyaq = {
email = "aaqaishtyaq@gmail.com";
github = "aaqaishtyaq";
githubId = 22131756;
name = "Aaqa Ishtyaq";
};
aaronjanse = {
email = "aaron@ajanse.me";
matrix = "@aaronjanse:matrix.org";
@ -1695,6 +1701,15 @@
fingerprint = "D35E C9CE E631 638F F1D8 B401 6F0E 410D C3EE D02";
}];
};
benjaminedwardwebb = {
name = "Ben Webb";
email = "benjaminedwardwebb@gmail.com";
github = "benjaminedwardwebb";
githubId = 7118777;
keys = [{
fingerprint = "E9A3 7864 2165 28CE 507C CA82 72EA BF75 C331 CD25";
}];
};
benley = {
email = "benley@gmail.com";
github = "benley";
@ -1728,12 +1743,6 @@
githubId = 442623;
name = "Ben Pye";
};
benwbooth = {
email = "benwbooth@gmail.com";
github = "benwbooth";
githubId = 75972;
name = "Ben Booth";
};
berberman = {
email = "berberman@yandex.com";
matrix = "@berberman:mozilla.org";
@ -3932,6 +3941,16 @@
github = "edlimerkaj";
githubId = 71988351;
};
edrex = {
email = "ericdrex@gmail.com";
github = "edrex";
githubId = 14615;
keys = [{
fingerprint = "AC47 2CCC 9867 4644 A9CF EB28 1C5C 1ED0 9F66 6824";
}];
matrix = "@edrex:matrix.org";
name = "Eric Drechsel";
};
ehllie = {
email = "me@ehllie.xyz";
github = "ehllie";
@ -8693,6 +8712,12 @@
githubId = 322214;
name = "Mathnerd314";
};
math-42 = {
email = "matheus.4200@gmail.com";
github = "Math-42";
githubId = 43853194;
name = "Matheus Vieira";
};
matklad = {
email = "aleksey.kladov@gmail.com";
github = "matklad";
@ -11530,6 +11555,15 @@
githubId = 131856;
name = "Arnout Engelen";
};
rafael = {
name = "Rafael";
email = "pr9@tuta.io";
github = "rafa-dot-el";
githubId = 104688305;
keys = [{
fingerprint = "5F0B 3EAC F1F9 8155 0946 CDF5 469E 3255 A40D 2AD6";
}];
};
RaghavSood = {
email = "r@raghavsood.com";
github = "RaghavSood";
@ -12026,7 +12060,7 @@
name = "Russell O'Connor";
};
rodrgz = {
email = "rodrgz@proton.me";
email = "erik@rodgz.com";
github = "rodrgz";
githubId = 53882428;
name = "Erik Rodriguez";

View File

@ -1,5 +1,5 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p coreutils nix gnused -I nixpkgs=.
#! nix-shell -i bash -p coreutils jq nix -I nixpkgs=.
config_file=pkgs/development/haskell-modules/configuration-hackage2nix/transitive-broken.yaml
@ -12,4 +12,4 @@ dont-distribute-packages:
EOF
echo "Regenerating list of transitive broken packages ..."
echo -e $(nix-instantiate --eval --strict maintainers/scripts/haskell/transitive-broken-packages.nix) | sed 's/\"//' | LC_ALL=C.UTF-8 sort -i >> $config_file
nix-instantiate --eval --option restrict-eval true -I . --strict --json maintainers/scripts/haskell/transitive-broken-packages.nix | jq -r . | LC_ALL=C.UTF-8 sort -i >> $config_file

View File

@ -12,5 +12,5 @@ let
(getEvaluating (nixpkgs { config.allowBroken = true; }).haskellPackages);
in
''
${lib.concatMapStringsSep "\n" (x: " - ${x}") brokenDeps}
${lib.concatMapStringsSep "\n" (x: " - ${x}") brokenDeps}
''

View File

@ -11,6 +11,7 @@ compat53,,,,0.7-1,,vcunat
cosmo,,,,,,marsam
coxpcall,,,,1.17.0-1,,
cqueues,,,,,,vcunat
cyan,,,,,,
cyrussasl,https://github.com/JorjBauer/lua-cyrussasl.git,,,,,
digestif,https://github.com/astoff/digestif.git,,,0.2-1,5.3,
dkjson,,,,,,
@ -99,6 +100,7 @@ sqlite,,,,,,
std._debug,https://github.com/lua-stdlib/_debug.git,,,,,
std.normalize,https://github.com/lua-stdlib/normalize.git,,,,,
stdlib,,,,41.2.2,,vyp
teal-language-server,,,http://luarocks.org/dev,,,
tl,,,,,,mephistophiles
vstruct,https://github.com/ToxicFrog/vstruct.git,,,,,
vusted,,,,,,figsoda


View File

@ -755,6 +755,7 @@ with lib.maintainers; {
xfce = {
members = [
romildo
muscaln
];
scope = "Maintain Xfce desktop environment and related packages.";
shortName = "Xfce";

View File

@ -98,6 +98,26 @@
<literal>fetch-ec2-metadata.service</literal>
</para>
</listitem>
<listitem>
<para>
<literal>minio</literal> removed support for its legacy
filesystem backend in
<link xlink:href="https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z">RELEASE.2022-10-29T06-21-33Z</link>.
This means if your storage was created with the old format,
minio will no longer start. Unfortunately, minio doesn't
provide an automatic migration; they only provide
<link xlink:href="https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html">instructions
how to manually convert the node</link>. To facilitate this
migration we keep around the last version that still supports
the old filesystem backend as
<literal>minio_legacy_fs</literal>. Use it via
<literal>services.minio.package = minio_legacy_fs;</literal>
to export your data before switching to the new version. See
the corresponding
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/199318">issue</link>
for more details.
</para>
</listitem>
<listitem>
<para>
<literal>services.sourcehut.dispatch</literal> and the
@ -149,6 +169,16 @@
<literal>llvmPackages_rocm.clang-unwrapped</literal>.
</para>
</listitem>
<listitem>
<para>
The Nginx module now validates the syntax of config files at
build time. For more complex configurations (using
<literal>include</literal> with out-of-store files notably)
you may need to disable this check by setting
<link linkend="opt-services.nginx.validateConfig">services.nginx.validateConfig</link>
to <literal>false</literal>.
</para>
</listitem>
<listitem>
<para>
The EC2 image module previously detected and automatically
@ -269,6 +299,49 @@
remote <literal>PostgreSQL</literal> database.
</para>
</listitem>
<listitem>
<para>
<literal>services.peertube</literal> now requires you to
specify the secret file
<literal>secrets.secretsFile</literal>. It can be generated by
running <literal>openssl rand -hex 32</literal>. Before
upgrading, read the release notes for PeerTube:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<link xlink:href="https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0">Release
v5.0.0</link>
</para>
</listitem>
</itemizedlist>
<para>
And back up your data.
</para>
</listitem>
<listitem>
<para>
The module <literal>services.headscale</literal> was
refactored to be compliant with
<link xlink:href="https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md">RFC
0042</link>. To be precise, this means that the following
things have changed:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Most settings have been migrated under
<link linkend="opt-services.headscale.settings">services.headscale.settings</link>
which is an attribute-set that will be converted into
headscale's YAML config format. This means that the
configuration from
<link xlink:href="https://github.com/juanfont/headscale/blob/main/config-example.yaml">headscale's
example configuration</link> can be directly written as
an attribute set in Nix within this option.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
A new <literal>virtualisation.rosetta</literal> module was

View File

@ -35,6 +35,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`
- `minio` removed support for its legacy filesystem backend in [RELEASE.2022-10-29T06-21-33Z](https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z). This means if your storage was created with the old format, minio will no longer start. Unfortunately, minio doesn't provide an automatic migration; they only provide [instructions how to manually convert the node](https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html). To facilitate this migration we keep around the last version that still supports the old filesystem backend as `minio_legacy_fs`. Use it via `services.minio.package = minio_legacy_fs;` to export your data before switching to the new version. See the corresponding [issue](https://github.com/NixOS/nixpkgs/issues/199318) for more details.
- `services.sourcehut.dispatch` and the corresponding package (`sourcehut.dispatchsrht`) have been removed due to [upstream deprecation](https://sourcehut.org/blog/2022-08-01-dispatch-deprecation-plans/).
- The [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
@ -45,6 +47,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
- The Nginx module now validates the syntax of config files at build time. For more complex configurations (using `include` with out-of-store files notably) you may need to disable this check by setting [services.nginx.validateConfig](#opt-services.nginx.validateConfig) to `false`.
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
@ -59,7 +63,7 @@ In addition to numerous new and upgraded packages, this release has the followin
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are *customizable* (in the sense of user configuration, like vimrc).
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules)
@ -78,6 +82,19 @@ In addition to numerous new and upgraded packages, this release has the followin
- `mastodon` now supports connection to a remote `PostgreSQL` database.
- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`.
Before upgrading, read the release notes for PeerTube:
- [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0)
And back up your data.
- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
- Most settings have been migrated under [services.headscale.settings](#opt-services.headscale.settings) which is an attribute-set that
will be converted into headscale's YAML config format. This means that the configuration from
[headscale's example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
can be directly written as an attribute set in Nix within this option (see the sketch after this list).
- A new `virtualisation.rosetta` module was added to allow running `x86_64` binaries through [Rosetta](https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment) inside virtualised NixOS guests on Apple silicon. This feature works by default with the [UTM](https://docs.getutm.app/) virtualisation [package](https://search.nixos.org/packages?channel=unstable&show=utm&from=0&size=1&sort=relevance&type=packages&query=utm).
- The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically.
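
For the headscale change above, a minimal sketch of the new RFC 0042 style configuration (values illustrative; option names taken from the module diff further down):

```nix
{
  services.headscale = {
    enable = true;
    settings = {
      server_url = "https://headscale.example.com";
      dns_config = {
        base_domain = "example.com";
        nameservers = [ "1.1.1.1" ];
      };
      log.level = "info";
    };
  };
}
```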

View File

@ -324,7 +324,11 @@ in rec {
scriptArgs = mkOption {
type = types.str;
default = "";
description = lib.mdDoc "Arguments passed to the main process script.";
example = "%i";
description = lib.mdDoc ''
Arguments passed to the main process script.
Can contain specifiers (`%` placeholders expanded by systemd, see {manpage}`systemd.unit(5)`).
'';
};
preStart = mkOption {

View File

@ -220,6 +220,20 @@ class Driver:
res = driver.polling_conditions.pop()
assert res is self.condition
def wait(self, timeout: int = 900) -> None:
def condition(last: bool) -> bool:
if last:
rootlog.info(f"Last chance for {self.condition.description}")
ret = self.condition.check(force=True)
if not ret and not last:
rootlog.info(
f"({self.condition.description} failure not fatal yet)"
)
return ret
with rootlog.nested(f"waiting for {self.condition.description}"):
retry(condition, timeout=timeout)
if fun_ is None:
return Poll
else:

View File

@ -1,4 +1,5 @@
from typing import Callable, Optional
from math import isfinite
import time
from .logger import rootlog
@ -14,7 +15,7 @@ class PollingCondition:
description: Optional[str]
last_called: float
entered: bool
entry_count: int
def __init__(
self,
@ -34,14 +35,21 @@ class PollingCondition:
self.description = str(description)
self.last_called = float("-inf")
self.entered = False
self.entry_count = 0
def check(self) -> bool:
if self.entered or not self.overdue:
def check(self, force: bool = False) -> bool:
if (self.entered or not self.overdue) and not force:
return True
with self, rootlog.nested(self.nested_message):
rootlog.info(f"Time since last: {time.monotonic() - self.last_called:.2f}s")
time_since_last = time.monotonic() - self.last_called
last_message = (
f"Time since last: {time_since_last:.2f}s"
if isfinite(time_since_last)
else "(not called yet)"
)
rootlog.info(last_message)
try:
res = self.condition() # type: ignore
except Exception:
@ -69,9 +77,16 @@ class PollingCondition:
def overdue(self) -> bool:
return self.last_called + self.seconds_interval < time.monotonic()
@property
def entered(self) -> bool:
# entry_count should never dip *below* zero
assert self.entry_count >= 0
return self.entry_count > 0
def __enter__(self) -> None:
self.entered = True
self.entry_count += 1
def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore
self.entered = False
assert self.entered
self.entry_count -= 1
self.last_called = time.monotonic()

View File

@ -1,3 +1,4 @@
args@
{ system
, pkgs ? import ../.. { inherit system config; }
# Use a minimal kernel?
@ -5,7 +6,7 @@
# Ignored
, config ? { }
# !!! See comment about args in lib/modules.nix
, specialArgs ? { }
, specialArgs ? throw "legacy - do not use, see error below"
# Modules to add to each VM
, extraConfigurations ? [ ]
}:
@ -13,6 +14,13 @@ let
nixos-lib = import ./default.nix { inherit (pkgs) lib; };
in
pkgs.lib.throwIf (args?specialArgs) ''
testing-python.nix: `specialArgs` is not supported anymore. If you're looking
for the public interface to the NixOS test framework, use `runTest`, and
`node.specialArgs`.
See https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
and https://nixos.org/manual/nixos/unstable/index.html#test-opt-node.specialArgs
''
rec {
inherit pkgs;
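
A minimal sketch of the replacement the error message points to, calling `runTest` with per-node `specialArgs` (`pkgs`, the injected value and the test body are illustrative, not a complete working test):

```nix
nixos-lib.runTest {
  name = "example";
  hostPkgs = pkgs;
  # Per-node specialArgs replaces the removed top-level `specialArgs`.
  node.specialArgs = { inherit (pkgs) hello; };
  nodes.machine = { };
  testScript = ''
    machine.wait_for_unit("multi-user.target")
  '';
}
```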

View File

@ -33,9 +33,13 @@ with lib;
ffmpeg_4 = super.ffmpeg_4-headless;
ffmpeg_5 = super.ffmpeg_5-headless;
gobject-introspection = super.gobject-introspection.override { x11Support = false; };
gpsd = super.gpsd.override { guiSupport = false; };
imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
libextractor = super.libextractor.override { gstreamerSupport = false; gtkSupport = false; };
libva = super.libva-minimal;
limesuite = super.limesuite.override { withGui = false; };
msmtp = super.msmtp.override { withKeyring = false; };
networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };

View File

@ -70,14 +70,12 @@ let
;
# Timeout in syslinux is in units of 1/10 of a second.
# 0 is used to disable timeouts.
# null means max timeout (35996, just under 1h in 1/10 seconds)
# 0 means disable timeout
syslinuxTimeout = if config.boot.loader.timeout == null then
0
35996
else
max (config.boot.loader.timeout * 10) 1;
max = x: y: if x > y then x else y;
config.boot.loader.timeout * 10;
# The configuration file for syslinux.

View File

@ -180,6 +180,7 @@
./programs/hamster.nix
./programs/htop.nix
./programs/iftop.nix
./programs/i3lock.nix
./programs/iotop.nix
./programs/java.nix
./programs/k3b.nix
@ -724,6 +725,7 @@
./services/monitoring/riemann.nix
./services/monitoring/scollector.nix
./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
./services/monitoring/sysstat.nix
./services/monitoring/teamviewer.nix
./services/monitoring/telegraf.nix
@ -874,7 +876,6 @@
./services/networking/miredo.nix
./services/networking/mjpg-streamer.nix
./services/networking/mmsd.nix
./services/networking/mosquitto.nix
./services/networking/monero.nix
./services/networking/morty.nix
./services/networking/mosquitto.nix

View File

@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmwAAAJASuMMnErjD
JwAAAAtzc2gtZWQyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmw
AAAEDIN2VWFyggtoSPXcAFy8dtG1uAig8sCuyE21eMDt2GgJBWcxb/Blaqt1auOtE+F8QU
WrUotiC5qBJ+UuEWdVCbAAAACnJvb3RAbml4b3MBAgM=
-----END OPENSSH PRIVATE KEY-----

View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJBWcxb/Blaqt1auOtE+F8QUWrUotiC5qBJ+UuEWdVCb root@nixos

View File

@ -0,0 +1,140 @@
{ config, lib, pkgs, ... }:
let
keysDirectory = "/var/keys";
user = "builder";
keyType = "ed25519";
in
{ imports = [
../virtualisation/qemu-vm.nix
];
# The builder is not intended to be used interactively
documentation.enable = false;
environment.etc = {
"ssh/ssh_host_ed25519_key" = {
mode = "0600";
source = ./keys/ssh_host_ed25519_key;
};
"ssh/ssh_host_ed25519_key.pub" = {
mode = "0644";
source = ./keys/ssh_host_ed25519_key.pub;
};
};
# DNS fails for QEMU user networking (SLiRP) on macOS. See:
#
# https://github.com/utmapp/UTM/issues/2353
#
# This works around that by using a public DNS server other than the DNS
# server that QEMU provides (normally 10.0.2.3)
networking.nameservers = [ "8.8.8.8" ];
nix.settings = {
auto-optimise-store = true;
min-free = 1024 * 1024 * 1024;
max-free = 3 * 1024 * 1024 * 1024;
trusted-users = [ "root" user ];
};
services.openssh = {
enable = true;
authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
};
system.build.macos-builder-installer =
let
privateKey = "/etc/nix/${user}_${keyType}";
publicKey = "${privateKey}.pub";
# This installCredentials script is written so that it's as easy as
# possible for a user to audit before confirming the `sudo`
installCredentials = hostPkgs.writeShellScript "install-credentials" ''
KEYS="''${1}"
INSTALL=${hostPkgs.coreutils}/bin/install
"''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
"''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
'';
hostPkgs = config.virtualisation.host.pkgs;
script = hostPkgs.writeShellScriptBin "create-builder" ''
KEYS="''${KEYS:-./keys}"
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
fi
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
fi
KEYS="$(nix-store --add "$KEYS")" ${config.system.build.vm}/bin/run-nixos-vm
'';
in
script.overrideAttrs (old: {
meta = (old.meta or { }) // {
platforms = lib.platforms.darwin;
};
});
system.stateVersion = "22.05";
users.users."${user}"= {
isNormalUser = true;
};
virtualisation = {
diskSize = 20 * 1024;
memorySize = 3 * 1024;
forwardPorts = [
{ from = "host"; guest.port = 22; host.port = 22; }
];
# Disable graphics for the builder since users will likely want to run it
# non-interactively in the background.
graphics = false;
sharedDirectories.keys = {
source = "\"$KEYS\"";
target = keysDirectory;
};
# If we don't enable this option then the host will fail to delegate builds
# to the guest, because:
#
# - The host will lock the path to build
# - The host will delegate the build to the guest
# - The guest will attempt to lock the same path and fail because
# the lockfile on the host is visible on the guest
#
# Snapshotting the host's /nix/store as an image isolates the guest VM's
# /nix/store from the host's /nix/store, preventing this problem.
useNixStoreImage = true;
# Obviously the /nix/store needs to be writable on the guest in order for it
# to perform builds.
writableStore = true;
# This ensures that anything built on the guest isn't lost when the guest is
# restarted.
writableStoreUseTmpfs = false;
};
}

View File

@ -20,15 +20,41 @@ in
};
config = mkOption {
type = with types; attrsOf (attrsOf anything);
default = { };
type =
with types;
let
gitini = attrsOf (attrsOf anything);
in
either gitini (listOf gitini) // {
merge = loc: defs:
let
config = foldl'
(acc: { value, ... }@x: acc // (if isList value then {
ordered = acc.ordered ++ value;
} else {
unordered = acc.unordered ++ [ x ];
}))
{
ordered = [ ];
unordered = [ ];
}
defs;
in
[ (gitini.merge loc config.unordered) ] ++ config.ordered;
};
default = [ ];
example = {
init.defaultBranch = "main";
url."https://github.com/".insteadOf = [ "gh:" "github:" ];
};
description = lib.mdDoc ''
Configuration to write to /etc/gitconfig. See the CONFIGURATION FILE
section of git-config(1) for more information.
Configuration to write to /etc/gitconfig. A list can also be
specified to keep the configuration in order. For example, setting
`config` to `[ { foo.x = 42; } { bar.y = 42; }]` will put the `foo`
section before the `bar` section unlike the default alphabetical
order, which can be helpful for sections such as `include` and
`includeIf`. See the CONFIGURATION FILE section of git-config(1) for
more information.
'';
};
@ -48,8 +74,8 @@ in
config = mkMerge [
(mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
environment.etc.gitconfig = mkIf (cfg.config != {}) {
text = generators.toGitINI cfg.config;
environment.etc.gitconfig = mkIf (cfg.config != [ ]) {
text = concatMapStringsSep "\n" generators.toGitINI cfg.config;
};
})
(mkIf (cfg.enable && cfg.lfs.enable) {
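
A minimal sketch of the new list form described in the option above (section contents and the include path are illustrative). Later list elements are emitted after earlier ones, so an `include` section can be forced to the end of `/etc/gitconfig`:

```nix
{
  programs.git = {
    enable = true;
    config = [
      { init.defaultBranch = "main"; }
      { url."https://github.com/".insteadOf = [ "gh:" "github:" ]; }
      { include.path = "/etc/gitconfig.local"; }
    ];
  };
}
```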

View File

@ -0,0 +1,58 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.i3lock;
in {
###### interface
options = {
programs.i3lock = {
enable = mkEnableOption (mdDoc "i3lock");
package = mkOption {
type = types.package;
default = pkgs.i3lock;
defaultText = literalExpression "pkgs.i3lock";
example = literalExpression ''
pkgs.i3lock-color
'';
description = mdDoc ''
Specify which package to use for the i3lock program.
The i3lock package must include an i3lock file or link in its out directory in order for the u2fSupport option to work correctly.
'';
};
u2fSupport = mkOption {
type = types.bool;
default = false;
example = true;
description = mdDoc ''
Whether to enable U2F support in the i3lock program.
U2F enables authentication using a hardware device, such as a security key.
When U2F support is enabled, the i3lock program will set the setuid bit on the i3lock binary and enable the pam u2fAuth service.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
security.wrappers.i3lock = mkIf cfg.u2fSupport {
setuid = true;
owner = "root";
group = "root";
source = "${cfg.package.out}/bin/i3lock";
};
security.pam.services.i3lock.u2fAuth = cfg.u2fSupport;
};
}
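
A minimal usage sketch of the new module (the package choice is illustrative):

```nix
{ pkgs, ... }: {
  programs.i3lock = {
    enable = true;
    package = pkgs.i3lock-color;
    u2fSupport = true;
  };
}
```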

View File

@ -280,7 +280,7 @@ in {
${pkgs.lighthouse}/bin/lighthouse validator_client \
--network ${cfg.network} \
--beacon-nodes ${lib.concatStringsSep "," cfg.validator.beaconNodes} \
--datadir ${cfg.validator.dataDir}/${cfg.network}
--datadir ${cfg.validator.dataDir}/${cfg.network} \
${optionalString cfg.validator.metrics.enable ''--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}''} \
${cfg.extraArgs} ${cfg.validator.extraArgs}
'';

View File

@ -89,6 +89,8 @@ in
SendSIGHUP = true;
TimeoutStopSec = "30s";
KeyringMode = "shared";
Type = "idle";
};
# Don't kill a user session when using nixos-rebuild

View File

@ -47,7 +47,7 @@ in
{
options = {
services.nitter = {
enable = mkEnableOption (lib.mdDoc "If enabled, start Nitter.");
enable = mkEnableOption (lib.mdDoc "Nitter");
package = mkOption {
default = pkgs.nitter;

View File

@ -555,7 +555,7 @@ in {
auto_assign_org_role = mkOption {
description = lib.mdDoc "Default role new users will be auto assigned.";
default = "Viewer";
type = types.enum ["Viewer" "Editor"];
type = types.enum ["Viewer" "Editor" "Admin"];
};
};

View File

@ -68,6 +68,7 @@ let
"smartctl"
"smokeping"
"sql"
"statsd"
"surfboard"
"systemd"
"tor"

View File

@ -0,0 +1,19 @@
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.statsd;
in
{
port = 9102;
serviceOpts = {
serviceConfig = {
ExecStart = ''
${pkgs.prometheus-statsd-exporter}/bin/statsd_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};
};
}
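
A minimal sketch of enabling the new exporter (addresses illustrative); `enable`, `listenAddress`, `port` and `extraFlags` come from the shared Prometheus exporter options:

```nix
{
  services.prometheus.exporters.statsd = {
    enable = true;
    listenAddress = "127.0.0.1";
    port = 9102;
  };
}
```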

View File

@ -11,8 +11,10 @@ let
format = pkgs.formats.toml {};
settings = {
database_url = dbURL;
human_logs = true;
syncstorage = {
database_url = dbURL;
};
tokenserver = {
node_type = "mysql";
database_url = dbURL;
@ -253,8 +255,7 @@ in
serviceConfig = {
User = defaultUser;
Group = defaultUser;
ExecStart = "${cfg.package}/bin/syncstorage --config ${configFile}";
Stderr = "journal";
ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
# hardening

View File

@ -1,15 +1,18 @@
{ config, lib, pkgs, ... }:
with lib;
let
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.services.headscale;
dataDir = "/var/lib/headscale";
runDir = "/run/headscale";
settingsFormat = pkgs.formats.yaml { };
settingsFormat = pkgs.formats.yaml {};
configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
in
{
in {
options = {
services.headscale = {
enable = mkEnableOption (lib.mdDoc "headscale, Open Source coordination server for Tailscale");
@ -51,15 +54,6 @@ in
'';
};
serverUrl = mkOption {
type = types.str;
default = "http://127.0.0.1:8080";
description = lib.mdDoc ''
The url clients will connect to.
'';
example = "https://myheadscale.example.com:443";
};
address = mkOption {
type = types.str;
default = "127.0.0.1";
@ -78,337 +72,346 @@ in
example = 443;
};
privateKeyFile = mkOption {
type = types.path;
default = "${dataDir}/private.key";
description = lib.mdDoc ''
Path to private key file, generated automatically if it does not exist.
'';
};
derp = {
urls = mkOption {
type = types.listOf types.str;
default = [ "https://controlplane.tailscale.com/derpmap/default" ];
description = lib.mdDoc ''
List of urls containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
paths = mkOption {
type = types.listOf types.path;
default = [ ];
description = lib.mdDoc ''
List of file paths containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
autoUpdate = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to automatically update DERP maps on a set frequency.
'';
example = false;
};
updateFrequency = mkOption {
type = types.str;
default = "24h";
description = lib.mdDoc ''
Frequency to update DERP maps.
'';
example = "5m";
};
};
ephemeralNodeInactivityTimeout = mkOption {
type = types.str;
default = "30m";
description = lib.mdDoc ''
Time before an inactive ephemeral node is deleted.
'';
example = "5m";
};
database = {
type = mkOption {
type = types.enum [ "sqlite3" "postgres" ];
example = "postgres";
default = "sqlite3";
description = lib.mdDoc "Database engine to use.";
};
host = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1";
description = lib.mdDoc "Database host address.";
};
port = mkOption {
type = types.nullOr types.port;
default = null;
example = 3306;
description = lib.mdDoc "Database host port.";
};
name = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database name.";
};
user = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database user.";
};
passwordFile = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = lib.mdDoc ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
path = mkOption {
type = types.nullOr types.str;
default = "${dataDir}/db.sqlite";
description = lib.mdDoc "Path to the sqlite3 database file.";
};
};
logLevel = mkOption {
type = types.str;
default = "info";
description = lib.mdDoc ''
headscale log level.
'';
example = "debug";
};
dns = {
nameservers = mkOption {
type = types.listOf types.str;
default = [ "1.1.1.1" ];
description = lib.mdDoc ''
List of nameservers to pass to Tailscale clients.
'';
};
domains = mkOption {
type = types.listOf types.str;
default = [ ];
description = lib.mdDoc ''
Search domains to inject to Tailscale clients.
'';
example = [ "mydomain.internal" ];
};
magicDns = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
Only works if there is at least a nameserver defined.
'';
example = false;
};
baseDomain = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
Defines the base domain to create the hostnames for MagicDNS.
{option}`baseDomain` must be an FQDN, without the trailing dot.
The FQDN of the hosts will be
`hostname.namespace.base_domain` (e.g.
`myhost.mynamespace.example.com`).
'';
};
};
openIdConnect = {
issuer = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
URL to OpenID issuer.
'';
example = "https://openid.example.com";
};
clientId = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
OpenID Connect client ID.
'';
};
clientSecretFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to OpenID Connect client secret file.
'';
};
domainMap = mkOption {
type = types.attrsOf types.str;
default = { };
description = lib.mdDoc ''
Domain map is used to map incoming users (by their email) to
a namespace. The key can be a string, or regex.
'';
example = {
".*" = "default-namespace";
};
};
};
tls = {
letsencrypt = {
hostname = mkOption {
type = types.nullOr types.str;
default = "";
description = lib.mdDoc ''
Domain name to request a TLS certificate for.
'';
};
challengeType = mkOption {
type = types.enum [ "TLS-ALPN-01" "HTTP-01" ];
default = "HTTP-01";
description = lib.mdDoc ''
Type of ACME challenge to use, currently supported types:
`HTTP-01` or `TLS-ALPN-01`.
'';
};
httpListen = mkOption {
type = types.nullOr types.str;
default = ":http";
description = lib.mdDoc ''
When HTTP-01 challenge is chosen, letsencrypt must set up a
verification endpoint, and it will be listening on:
`:http = port 80`.
'';
};
};
certFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to already created certificate.
'';
};
keyFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to key for already created certificate.
'';
};
};
aclPolicyFile = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to a file containing ACL policies.
'';
};
settings = mkOption {
type = settingsFormat.type;
default = { };
description = lib.mdDoc ''
Overrides to {file}`config.yaml` as a Nix attribute set.
This option is ideal for overriding settings not exposed as Nix options.
Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
for possible options.
'';
type = types.submodule {
freeformType = settingsFormat.type;
options = {
server_url = mkOption {
type = types.str;
default = "http://127.0.0.1:8080";
description = lib.mdDoc ''
The url clients will connect to.
'';
example = "https://myheadscale.example.com:443";
};
private_key_path = mkOption {
type = types.path;
default = "${dataDir}/private.key";
description = lib.mdDoc ''
Path to private key file, generated automatically if it does not exist.
'';
};
noise.private_key_path = mkOption {
type = types.path;
default = "${dataDir}/noise_private.key";
description = lib.mdDoc ''
Path to noise private key file, generated automatically if it does not exist.
'';
};
derp = {
urls = mkOption {
type = types.listOf types.str;
default = ["https://controlplane.tailscale.com/derpmap/default"];
description = lib.mdDoc ''
List of urls containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
paths = mkOption {
type = types.listOf types.path;
default = [];
description = lib.mdDoc ''
List of file paths containing DERP maps.
See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
'';
};
auto_update_enable = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to automatically update DERP maps on a set frequency.
'';
example = false;
};
update_frequency = mkOption {
type = types.str;
default = "24h";
description = lib.mdDoc ''
Frequency to update DERP maps.
'';
example = "5m";
};
};
ephemeral_node_inactivity_timeout = mkOption {
type = types.str;
default = "30m";
description = lib.mdDoc ''
Time before an inactive ephemeral node is deleted.
'';
example = "5m";
};
db_type = mkOption {
type = types.enum ["sqlite3" "postgres"];
example = "postgres";
default = "sqlite3";
description = lib.mdDoc "Database engine to use.";
};
db_host = mkOption {
type = types.nullOr types.str;
default = null;
example = "127.0.0.1";
description = lib.mdDoc "Database host address.";
};
db_port = mkOption {
type = types.nullOr types.port;
default = null;
example = 3306;
description = lib.mdDoc "Database host port.";
};
db_name = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database name.";
};
db_user = mkOption {
type = types.nullOr types.str;
default = null;
example = "headscale";
description = lib.mdDoc "Database user.";
};
db_password_file = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/keys/headscale-dbpassword";
description = lib.mdDoc ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
db_path = mkOption {
type = types.nullOr types.str;
default = "${dataDir}/db.sqlite";
description = lib.mdDoc "Path to the sqlite3 database file.";
};
log.level = mkOption {
type = types.str;
default = "info";
description = lib.mdDoc ''
headscale log level.
'';
example = "debug";
};
log.format = mkOption {
type = types.str;
default = "text";
description = lib.mdDoc ''
headscale log format.
'';
example = "json";
};
dns_config = {
nameservers = mkOption {
type = types.listOf types.str;
default = ["1.1.1.1"];
description = lib.mdDoc ''
List of nameservers to pass to Tailscale clients.
'';
};
override_local_dns = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to use [Override local DNS](https://tailscale.com/kb/1054/dns/).
'';
example = true;
};
domains = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc ''
Search domains to inject into Tailscale clients.
'';
example = ["mydomain.internal"];
};
magic_dns = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
Only works if at least one nameserver is defined.
'';
example = false;
};
base_domain = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
Defines the base domain to create the hostnames for MagicDNS.
{option}`base_domain` must be an FQDN, without the trailing dot.
The FQDN of the hosts will be
`hostname.namespace.base_domain` (e.g.
`myhost.mynamespace.example.com`).
'';
};
};
oidc = {
issuer = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
URL to OpenID issuer.
'';
example = "https://openid.example.com";
};
client_id = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
OpenID Connect client ID.
'';
};
client_secret_file = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to OpenID Connect client secret file.
'';
};
domain_map = mkOption {
type = types.attrsOf types.str;
default = {};
description = lib.mdDoc ''
Domain map is used to map incoming users (by their email) to
a namespace. The key can be a string or a regex.
'';
example = {
".*" = "default-namespace";
};
};
};
tls_letsencrypt_hostname = mkOption {
type = types.nullOr types.str;
default = "";
description = lib.mdDoc ''
Domain name to request a TLS certificate for.
'';
};
tls_letsencrypt_challenge_type = mkOption {
type = types.enum ["TLS-ALPN-01" "HTTP-01"];
default = "HTTP-01";
description = lib.mdDoc ''
Type of ACME challenge to use, currently supported types:
`HTTP-01` or `TLS-ALPN-01`.
'';
};
tls_letsencrypt_listen = mkOption {
type = types.nullOr types.str;
default = ":http";
description = lib.mdDoc ''
When the HTTP-01 challenge is chosen, letsencrypt must set up a
verification endpoint, which will listen on
`:http` (port 80).
'';
};
tls_cert_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to an already created certificate.
'';
};
tls_key_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to the key for an already created certificate.
'';
};
acl_policy_path = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Path to a file containing ACL policies.
'';
};
};
};
};
};
};
imports = [
# TODO address + port = listen_addr
(mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
(mkRenamedOptionModule ["services" "headscale" "privateKeyFile"] ["services" "headscale" "settings" "private_key_path"])
(mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
(mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
(mkRenamedOptionModule ["services" "headscale" "derp" "autoUpdate"] ["services" "headscale" "settings" "derp" "auto_update_enable"])
(mkRenamedOptionModule ["services" "headscale" "derp" "updateFrequency"] ["services" "headscale" "settings" "derp" "update_frequency"])
(mkRenamedOptionModule ["services" "headscale" "ephemeralNodeInactivityTimeout"] ["services" "headscale" "settings" "ephemeral_node_inactivity_timeout"])
(mkRenamedOptionModule ["services" "headscale" "database" "type"] ["services" "headscale" "settings" "db_type"])
(mkRenamedOptionModule ["services" "headscale" "database" "path"] ["services" "headscale" "settings" "db_path"])
(mkRenamedOptionModule ["services" "headscale" "database" "host"] ["services" "headscale" "settings" "db_host"])
(mkRenamedOptionModule ["services" "headscale" "database" "port"] ["services" "headscale" "settings" "db_port"])
(mkRenamedOptionModule ["services" "headscale" "database" "name"] ["services" "headscale" "settings" "db_name"])
(mkRenamedOptionModule ["services" "headscale" "database" "user"] ["services" "headscale" "settings" "db_user"])
(mkRenamedOptionModule ["services" "headscale" "database" "passwordFile"] ["services" "headscale" "settings" "db_password_file"])
(mkRenamedOptionModule ["services" "headscale" "logLevel"] ["services" "headscale" "settings" "log" "level"])
(mkRenamedOptionModule ["services" "headscale" "dns" "nameservers"] ["services" "headscale" "settings" "dns_config" "nameservers"])
(mkRenamedOptionModule ["services" "headscale" "dns" "domains"] ["services" "headscale" "settings" "dns_config" "domains"])
(mkRenamedOptionModule ["services" "headscale" "dns" "magicDns"] ["services" "headscale" "settings" "dns_config" "magic_dns"])
(mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ["services" "headscale" "settings" "oidc" "domain_map"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
(mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
(mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
];
config = mkIf cfg.enable {
services.headscale.settings = {
server_url = mkDefault cfg.serverUrl;
listen_addr = mkDefault "${cfg.address}:${toString cfg.port}";
private_key_path = mkDefault cfg.privateKeyFile;
derp = {
urls = mkDefault cfg.derp.urls;
paths = mkDefault cfg.derp.paths;
auto_update_enable = mkDefault cfg.derp.autoUpdate;
update_frequency = mkDefault cfg.derp.updateFrequency;
};
# Turn off update checks since the origin of our package
# is nixpkgs and not GitHub.
disable_check_updates = true;
ephemeral_node_inactivity_timeout = mkDefault cfg.ephemeralNodeInactivityTimeout;
db_type = mkDefault cfg.database.type;
db_path = mkDefault cfg.database.path;
log_level = mkDefault cfg.logLevel;
dns_config = {
nameservers = mkDefault cfg.dns.nameservers;
domains = mkDefault cfg.dns.domains;
magic_dns = mkDefault cfg.dns.magicDns;
base_domain = mkDefault cfg.dns.baseDomain;
};
unix_socket = "${runDir}/headscale.sock";
# OpenID Connect
oidc = {
issuer = mkDefault cfg.openIdConnect.issuer;
client_id = mkDefault cfg.openIdConnect.clientId;
domain_map = mkDefault cfg.openIdConnect.domainMap;
};
tls_letsencrypt_cache_dir = "${dataDir}/.cache";
} // optionalAttrs (cfg.database.host != null) {
db_host = mkDefault cfg.database.host;
} // optionalAttrs (cfg.database.port != null) {
db_port = mkDefault cfg.database.port;
} // optionalAttrs (cfg.database.name != null) {
db_name = mkDefault cfg.database.name;
} // optionalAttrs (cfg.database.user != null) {
db_user = mkDefault cfg.database.user;
} // optionalAttrs (cfg.tls.letsencrypt.hostname != null) {
tls_letsencrypt_hostname = mkDefault cfg.tls.letsencrypt.hostname;
} // optionalAttrs (cfg.tls.letsencrypt.challengeType != null) {
tls_letsencrypt_challenge_type = mkDefault cfg.tls.letsencrypt.challengeType;
} // optionalAttrs (cfg.tls.letsencrypt.httpListen != null) {
tls_letsencrypt_listen = mkDefault cfg.tls.letsencrypt.httpListen;
} // optionalAttrs (cfg.tls.certFile != null) {
tls_cert_path = mkDefault cfg.tls.certFile;
} // optionalAttrs (cfg.tls.keyFile != null) {
tls_key_path = mkDefault cfg.tls.keyFile;
} // optionalAttrs (cfg.aclPolicyFile != null) {
acl_policy_path = mkDefault cfg.aclPolicyFile;
};
# Setup the headscale configuration in a known path in /etc to
@ -416,7 +419,7 @@ in
# for communication.
environment.etc."headscale/config.yaml".source = configFile;
users.groups.headscale = mkIf (cfg.group == "headscale") { };
users.groups.headscale = mkIf (cfg.group == "headscale") {};
users.users.headscale = mkIf (cfg.user == "headscale") {
description = "headscale user";
@ -427,70 +430,68 @@ in
systemd.services.headscale = {
description = "headscale coordination server for Tailscale";
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ configFile ];
after = ["network-online.target"];
wantedBy = ["multi-user.target"];
restartTriggers = [configFile];
environment.GIN_MODE = "release";
script = ''
${optionalString (cfg.database.passwordFile != null) ''
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.database.passwordFile})"
${optionalString (cfg.settings.db_password_file != null) ''
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
''}
${optionalString (cfg.openIdConnect.clientSecretFile != null) ''
export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.openIdConnect.clientSecretFile})"
${optionalString (cfg.settings.oidc.client_secret_file != null) ''
export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.settings.oidc.client_secret_file})"
''}
exec ${cfg.package}/bin/headscale serve
'';
serviceConfig =
let
capabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
in
{
Restart = "always";
Type = "simple";
User = cfg.user;
Group = cfg.group;
serviceConfig = let
capabilityBoundingSet = ["CAP_CHOWN"] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
in {
Restart = "always";
Type = "simple";
User = cfg.user;
Group = cfg.group;
# Hardening options
RuntimeDirectory = "headscale";
# Allow headscale group access so users can be added and use the CLI.
RuntimeDirectoryMode = "0750";
# Hardening options
RuntimeDirectory = "headscale";
# Allow headscale group access so users can be added and use the CLI.
RuntimeDirectoryMode = "0750";
StateDirectory = "headscale";
StateDirectoryMode = "0750";
StateDirectory = "headscale";
StateDirectoryMode = "0750";
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectHostname = true;
ProtectClock = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictNamespaces = true;
RemoveIPC = true;
UMask = "0077";
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectHostname = true;
ProtectClock = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictNamespaces = true;
RemoveIPC = true;
UMask = "0077";
CapabilityBoundingSet = capabilityBoundingSet;
AmbientCapabilities = capabilityBoundingSet;
NoNewPrivileges = true;
LockPersonality = true;
RestrictRealtime = true;
SystemCallFilter = [ "@system-service" "~@privileged" "@chown" ];
SystemCallArchitectures = "native";
RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
};
CapabilityBoundingSet = capabilityBoundingSet;
AmbientCapabilities = capabilityBoundingSet;
NoNewPrivileges = true;
LockPersonality = true;
RestrictRealtime = true;
SystemCallFilter = ["@system-service" "~@privileged" "@chown"];
SystemCallArchitectures = "native";
RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
};
};
};
meta.maintainers = with maintainers; [ kradalby ];
meta.maintainers = with maintainers; [kradalby misterio77];
}

View File

@ -35,6 +35,16 @@ with lib;
systemd.services.rpcbind = {
wantedBy = [ "multi-user.target" ];
# rpcbind performs a check for /var/run/rpcbind.lock at startup
# and will crash if /var/run isn't present. In the stock NixOS
# var.conf tmpfiles configuration file, /var/run is symlinked to
# /run, so rpcbind can hit a race condition in which /var/run
# isn't symlinked yet when rpcbind tries to interact with the path.
# Ordering explicitly after tmpfiles setup ensures that rpcbind can
# start successfully. Using `wants` instead of `requires` avoids
# creating a strict/brittle dependency.
wants = [ "systemd-tmpfiles-setup.service" ];
after = [ "systemd-tmpfiles-setup.service" ];
};
users.users.rpc = {

View File

@ -8,7 +8,7 @@ let
homeDir = "/var/lib/tox-node";
configFile = let
src = "${pkg.src}/dpkg/config.yml";
src = "${pkg.src}/tox_node/dpkg/config.yml";
confJSON = pkgs.writeText "config.json" (
builtins.toJSON {
log-type = cfg.logType;

View File

@ -25,6 +25,22 @@ in
default = false;
description = lib.mdDoc "Whether to build the PSW package in debug mode.";
};
environment = mkOption {
type = with types; attrsOf str;
default = { };
description = lib.mdDoc "Additional environment variables to pass to the AESM service.";
# Example environment variable for `sgx-azure-dcap-client` provider library
example = {
AZDCAP_COLLATERAL_VERSION = "v2";
AZDCAP_DEBUG_LOG_LEVEL = "INFO";
};
};
quoteProviderLibrary = mkOption {
type = with types; nullOr path;
default = null;
example = literalExpression "pkgs.sgx-azure-dcap-client";
description = lib.mdDoc "Custom quote provider library to use.";
};
settings = mkOption {
description = lib.mdDoc "AESM configuration";
default = { };
@ -83,7 +99,6 @@ in
storeAesmFolder = "${sgx-psw}/aesm";
# Hardcoded path AESM_DATA_FOLDER in psw/ae/aesm_service/source/oal/linux/aesm_util.cpp
aesmDataFolder = "/var/opt/aesmd/data";
aesmStateDirSystemd = "%S/aesmd";
in
{
description = "Intel Architectural Enclave Service Manager";
@ -98,8 +113,8 @@ in
environment = {
NAME = "aesm_service";
AESM_PATH = storeAesmFolder;
LD_LIBRARY_PATH = storeAesmFolder;
};
LD_LIBRARY_PATH = makeLibraryPath [ cfg.quoteProviderLibrary ];
} // cfg.environment;
# Make sure any of the SGX application enclave devices is available
unitConfig.AssertPathExists = [

View File

@ -1,11 +1,11 @@
{ config, pkgs, lib, ... }:
let
inherit (lib) any boolToString concatStringsSep isBool isString literalExpression mapAttrsToList mkDefault mkEnableOption mkIf mkOption optionalAttrs types;
inherit (lib) any boolToString concatStringsSep isBool isString mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption optionalAttrs types;
package = pkgs.dolibarr.override { inherit (cfg) stateDir; };
cfg = config.services.dolibarr;
vhostCfg = config.services.nginx.virtualHosts."${cfg.domain}";
vhostCfg = lib.optionalAttrs (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
mkConfigFile = filename: settings:
let
@ -38,7 +38,7 @@ let
force_install_database = cfg.database.name;
force_install_databaselogin = cfg.database.user;
force_install_mainforcehttps = vhostCfg.forceSSL;
force_install_mainforcehttps = vhostCfg.forceSSL or false;
force_install_createuser = false;
force_install_dolibarrlogin = null;
} // optionalAttrs (cfg.database.passwordFile != null) {
@ -183,7 +183,8 @@ in
};
# implementation
config = mkIf cfg.enable {
config = mkIf cfg.enable (mkMerge [
{
assertions = [
{ assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
@ -214,7 +215,7 @@ in
# Security settings
dolibarr_main_prod = true;
dolibarr_main_force_https = vhostCfg.forceSSL;
dolibarr_main_force_https = vhostCfg.forceSSL or false;
dolibarr_main_restrict_os_commands = "${pkgs.mariadb}/bin/mysqldump, ${pkgs.mariadb}/bin/mysql";
dolibarr_nocsrfcheck = false;
dolibarr_main_instance_unique_id = ''
@ -314,7 +315,9 @@ in
users.groups = optionalAttrs (cfg.group == "dolibarr") {
dolibarr = { };
};
users.users."${config.services.nginx.group}".extraGroups = [ cfg.group ];
};
}
(mkIf (cfg.nginx != null) {
users.users."${config.services.nginx.group}".extraGroups = mkIf (cfg.nginx != null) [ cfg.group ];
})
]);
}

View File

@ -161,6 +161,18 @@ in {
description = lib.mdDoc "Configure nginx as a reverse proxy for peertube.";
};
secrets = {
secretsFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/secrets/peertube";
description = lib.mdDoc ''
Secret file for PeerTube.
Generate its content using `openssl rand -hex 32`.
'';
};
};
database = {
createLocally = lib.mkOption {
type = lib.types.bool;
@ -201,7 +213,7 @@ in {
passwordFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/peertube/password-posgressql-db";
example = "/run/keys/peertube/password-postgresql";
description = lib.mdDoc "Password for PostgreSQL database.";
};
};
@ -282,6 +294,11 @@ in {
prevent this.
'';
}
{ assertion = cfg.secrets.secretsFile != null;
message = ''
<option>services.peertube.secrets.secretsFile</option> needs to be set.
'';
}
{ assertion = !(cfg.redis.enableUnixSocket && (cfg.redis.host != null || cfg.redis.port != null));
message = ''
<option>services.peertube.redis.createLocally</option> and redis network connection (<option>services.peertube.redis.host</option> or <option>services.peertube.redis.port</option>) enabled. Disable either of them.
@ -349,6 +366,7 @@ in {
captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
plugins = lib.mkDefault "/var/lib/peertube/storage/plugins/";
well_known = lib.mkDefault "/var/lib/peertube/storage/well_known/";
client_overrides = lib.mkDefault "/var/lib/peertube/storage/client-overrides/";
};
import = {
@ -417,6 +435,10 @@ in {
#!/bin/sh
umask 077
cat > /var/lib/peertube/config/local.yaml <<EOF
${lib.optionalString (cfg.secrets.secretsFile != null) ''
secrets:
peertube: '$(cat ${cfg.secrets.secretsFile})'
''}
${lib.optionalString ((!cfg.database.createLocally) && (cfg.database.passwordFile != null)) ''
database:
password: '$(cat ${cfg.database.passwordFile})'
@ -443,6 +465,7 @@ in {
RestartSec = 20;
TimeoutSec = 60;
WorkingDirectory = cfg.package;
SyslogIdentifier = "peertube";
# User and group
User = cfg.user;
Group = cfg.group;
@ -548,9 +571,14 @@ in {
'';
};
locations."~ ^/plugins/[^/]+(/[^/]+)?/ws/" = {
tryFiles = "/dev/null @api_websocket";
priority = 1230;
};
locations."@api_websocket" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1230;
priority = 1240;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@ -581,7 +609,7 @@ in {
'';
};
locations."~ ^/lazy-static/(avatars|banners)/" = {
locations."^~ /lazy-static/avatars/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.avatars;
priority = 1330;
@ -599,6 +627,26 @@ in {
add_header Cache-Control 'public, max-age=7200';
rewrite ^/lazy-static/avatars/(.*)$ /$1 break;
'';
};
locations."^~ /lazy-static/banners/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.avatars;
priority = 1340;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
add_header Cache-Control 'no-cache';
add_header Content-Type 'text/plain charset=UTF-8';
add_header Content-Length 0;
return 204;
}
${nginxCommonHeaders}
add_header Cache-Control 'public, max-age=7200';
rewrite ^/lazy-static/banners/(.*)$ /$1 break;
'';
};
@ -606,7 +654,7 @@ in {
locations."^~ /lazy-static/previews/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.previews;
priority = 1340;
priority = 1350;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
@ -624,10 +672,34 @@ in {
'';
};
locations."^~ /static/streaming-playlists/private/" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1410;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_limit_rate 5M;
'';
};
locations."^~ /static/webseed/private/" = {
proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
priority = 1420;
extraConfig = ''
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_limit_rate 5M;
'';
};
locations."^~ /static/thumbnails/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.thumbnails;
priority = 1350;
priority = 1430;
extraConfig = ''
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
@ -648,8 +720,14 @@ in {
locations."^~ /static/redundancy/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.redundancy;
priority = 1360;
priority = 1440;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -662,15 +740,14 @@ in {
access_log off;
}
aio threads;
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 800k;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/redundancy/(.*)$ /$1 break;
'';
};
@ -678,8 +755,14 @@ in {
locations."^~ /static/streaming-playlists/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.streaming_playlists;
priority = 1370;
priority = 1450;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -697,20 +780,24 @@ in {
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 5M;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/streaming-playlists/(.*)$ /$1 break;
'';
};
locations."~ ^/static/webseed/" = {
locations."^~ /static/webseed/" = {
tryFiles = "$uri @api";
root = cfg.settings.storage.videos;
priority = 1380;
priority = 1460;
extraConfig = ''
set $peertube_limit_rate 800k;
if ($request_uri ~ -fragmented.mp4$) {
set $peertube_limit_rate 5M;
}
if ($request_method = 'OPTIONS') {
${nginxCommonHeaders}
add_header Access-Control-Max-Age 1728000;
@ -728,11 +815,9 @@ in {
sendfile on;
sendfile_max_chunk 1M;
limit_rate $peertube_limit_rate;
limit_rate_after 5M;
set $peertube_limit_rate 800k;
set $limit_rate $peertube_limit_rate;
rewrite ^/static/webseed/(.*)$ /$1 break;
'';
};

View File

@ -241,7 +241,7 @@ let
configPath = if cfg.enableReload
then "/etc/nginx/nginx.conf"
else configFile;
else finalConfigFile;
execCommand = "${cfg.package}/bin/nginx -c '${configPath}'";
@ -393,6 +393,38 @@ let
);
mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
snakeOilCert = pkgs.runCommand "nginx-config-validate-cert" { nativeBuildInputs = [ pkgs.openssl.bin ]; } ''
mkdir $out
openssl genrsa -des3 -passout pass:xxxxx -out server.pass.key 2048
openssl rsa -passin pass:xxxxx -in server.pass.key -out $out/server.key
openssl req -new -key $out/server.key -out server.csr \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
openssl x509 -req -days 1 -in server.csr -signkey $out/server.key -out $out/server.crt
'';
validatedConfigFile = pkgs.runCommand "validated-nginx.conf" { nativeBuildInputs = [ cfg.package ]; } ''
# nginx absolutely wants to read the certificates even when told to only validate config, so let's provide fake certs
sed ${configFile} \
-e "s|ssl_certificate .*;|ssl_certificate ${snakeOilCert}/server.crt;|g" \
-e "s|ssl_trusted_certificate .*;|ssl_trusted_certificate ${snakeOilCert}/server.crt;|g" \
-e "s|ssl_certificate_key .*;|ssl_certificate_key ${snakeOilCert}/server.key;|g" \
> conf
LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so \
NIX_REDIRECTS="/etc/resolv.conf=/dev/null" \
nginx -t -c $(readlink -f ./conf) > out 2>&1 || true
if ! grep -q "syntax is ok" out; then
echo nginx config validation failed.
echo config was ${configFile}.
echo 'in case of false positive, set `services.nginx.validateConfig` to false.'
echo nginx output:
cat out
exit 1
fi
cp ${configFile} $out
'';
finalConfigFile = if cfg.validateConfig then validatedConfigFile else configFile;
in
{
@ -491,6 +523,17 @@ in
'';
};
validateConfig = mkOption {
# FIXME: re-enable if we can make all of the configurations work.
#default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
default = false;
defaultText = literalExpression "pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform";
type = types.bool;
description = lib.mdDoc ''
Validate the generated nginx config at build time. The check is not very robust and can be disabled in case of false positives. This is notably the case when cross-compiling or when using `include` with files outside of the store.
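In case of a false positive, e.g. an `include` of a file that only exists at
runtime, the check can simply be turned off (sketch):

```nix
services.nginx.validateConfig = false;
```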
'';
};
additionalModules = mkOption {
default = [];
type = types.listOf (types.attrsOf types.anything);
@ -1029,7 +1072,7 @@ in
};
environment.etc."nginx/nginx.conf" = mkIf cfg.enableReload {
source = configFile;
source = finalConfigFile;
};
# This service waits for all certificates to be available
@ -1048,7 +1091,7 @@ in
# certs are updated _after_ config has been reloaded.
before = sslTargets;
after = sslServices;
restartTriggers = optionals (cfg.enableReload) [ configFile ];
restartTriggers = optionals (cfg.enableReload) [ finalConfigFile ];
# Block reloading if not all certs exist yet.
# Happens when config changes add new vhosts/certs.
unitConfig.ConditionPathExists = optionals (sslServices != []) (map (certName: certs.${certName}.directory + "/fullchain.pem") dependentCertNames);

View File

@ -105,7 +105,7 @@ in
services.dbus.packages = with pkgs.cinnamon; [
cinnamon-common
cinnamon-screensaver
nemo
nemo-with-extensions
xapp
];
services.cinnamon.apps.enable = mkDefault true;
@ -154,7 +154,7 @@ in
polkit_gnome
# packages
nemo
nemo-with-extensions
cinnamon-control-center
cinnamon-settings-daemon
libgnomekbd

View File

@ -585,6 +585,8 @@ in
hardware.bluetooth.enable = true;
hardware.pulseaudio.enable = true;
networking.networkmanager.enable = true;
# Required for autorotate
hardware.sensor.iio.enable = lib.mkDefault true;
# Recommendations can be found here:
# - https://invent.kde.org/plasma-mobile/plasma-phone-settings/-/tree/master/etc/xdg

View File

@ -1,4 +1,5 @@
#V1: {
system: string
init: string
initrd?: string
initrdSecrets?: string

View File

@ -19,13 +19,15 @@ let
(builtins.toJSON
{
v1 = {
system = config.boot.kernelPackages.stdenv.hostPlatform.system;
kernel = "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
kernelParams = config.boot.kernelParams;
initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
label = "NixOS ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
inherit (cfg) extensions;
} // lib.optionalAttrs config.boot.initrd.enable {
initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
};
});
@ -54,7 +56,7 @@ let
specialisationInjector =
let
specialisationLoader = (lib.mapAttrsToList
(childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/bootspec/${filename}" ])
(childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/${filename}" ])
children);
in
lib.escapeShellArgs [
@ -66,7 +68,7 @@ let
''
mkdir -p $out/bootspec
${toplevelInjector} | ${specialisationInjector} > $out/bootspec/${filename}
${toplevelInjector} | ${specialisationInjector} > $out/${filename}
'';
validator = pkgs.writeCueValidator ./bootspec.cue {
@ -80,7 +82,7 @@ in
enable = lib.mkEnableOption (lib.mdDoc "Enable generation of RFC-0125 bootspec in $system/bootspec, e.g. /run/current-system/bootspec");
extensions = lib.mkOption {
type = lib.types.attrs;
type = lib.types.attrsOf lib.types.attrs; # <namespace>: { ...namespace-specific fields }
default = { };
description = lib.mdDoc ''
User-defined data that extends the bootspec document.

View File

@ -81,7 +81,7 @@ let
${optionalString (!config.boot.isContainer && config.boot.bootspec.enable) ''
${config.boot.bootspec.writer}
${config.boot.bootspec.validator} "$out/bootspec/${config.boot.bootspec.filename}"
${config.boot.bootspec.validator} "$out/${config.boot.bootspec.filename}"
''}
${config.system.extraSystemBuilderCmds}

View File

@ -205,8 +205,9 @@ let
# Copy ld manually since it isn't detected correctly
cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
# Copy all of the needed libraries
find $out/bin $out/lib -type f | while read BIN; do
# Copy all of the needed libraries in a consistent order so
# duplicates are resolved the same way.
find $out/bin $out/lib -type f | sort | while read BIN; do
echo "Copying libs for executable $BIN"
for LIB in $(${findLibs}/bin/find-libs $BIN); do
TGT="$out/lib/$(basename $LIB)"

View File

@ -503,6 +503,10 @@ in
assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
}
{
assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
}
];
boot = {

View File

@ -109,6 +109,37 @@ in
'';
};
autoPrune = {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to periodically prune Podman resources. If enabled, a
systemd timer will run `podman system prune -f`
as specified by the `dates` option.
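A minimal sketch of enabling periodic pruning (assuming the usual
`virtualisation.podman` option path; values are illustrative, see the
`flags` and `dates` options below):

```nix
virtualisation.podman.autoPrune = {
  enable = true;
  dates = "weekly";
  flags = [ "--all" ];
};
```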
'';
};
flags = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--all" ];
description = lib.mdDoc ''
Any additional flags passed to {command}`podman system prune`.
'';
};
dates = mkOption {
default = "weekly";
type = types.str;
description = lib.mdDoc ''
Specification (in the format described by
{manpage}`systemd.time(7)`) of the time at
which the prune will occur.
'';
};
};
package = lib.mkOption {
type = types.package;
default = podmanPackage;
@ -151,6 +182,23 @@ in
ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
};
systemd.services.podman-prune = {
description = "Prune podman resources";
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
serviceConfig.Type = "oneshot";
script = ''
${cfg.package}/bin/podman system prune -f ${toString cfg.autoPrune.flags}
'';
startAt = lib.optional cfg.autoPrune.enable cfg.autoPrune.dates;
after = [ "podman.service" ];
requires = [ "podman.service" ];
};
systemd.sockets.podman.wantedBy = [ "sockets.target" ];
systemd.sockets.podman.socketConfig.SocketGroup = "podman";

View File

@ -144,7 +144,11 @@
in {
name = "acme";
meta.maintainers = lib.teams.acme.members;
meta = {
maintainers = lib.teams.acme.members;
# Hard timeout in seconds. Average run time is about 7 minutes.
timeout = 1800;
};
nodes = {
# The fake ACME server which will respond to client requests
@ -357,6 +361,30 @@ in {
import time
TOTAL_RETRIES = 20
class BackoffTracker(object):
delay = 1
increment = 1
def handle_fail(self, retries, message) -> int:
assert retries < TOTAL_RETRIES, message
print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
time.sleep(self.delay)
# Only increment after the first try
if retries == 0:
self.delay += self.increment
self.increment *= 2
return retries + 1
backoff = BackoffTracker()
def switch_to(node, name):
# On first switch, this will create a symlink to the current system so that we can
# quickly switch between derivations
@ -404,9 +432,7 @@ in {
assert False
def check_connection(node, domain, retries=3):
assert retries >= 0, f"Failed to connect to https://{domain}"
def check_connection(node, domain, retries=0):
result = node.succeed(
"openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
f" -servername {domain} -connect {domain}:443 < /dev/null 2>&1"
@ -414,13 +440,11 @@ in {
for line in result.lower().split("\n"):
if "verification" in line and "error" in line:
time.sleep(3)
return check_connection(node, domain, retries - 1)
retries = backoff.handle_fail(retries, f"Failed to connect to https://{domain}")
return check_connection(node, domain, retries)
def check_connection_key_bits(node, domain, bits, retries=3):
assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"
def check_connection_key_bits(node, domain, bits, retries=0):
result = node.succeed(
"openssl s_client -CAfile /tmp/ca.crt"
f" -servername {domain} -connect {domain}:443 < /dev/null"
@ -429,13 +453,11 @@ in {
print("Key type:", result)
if bits not in result:
time.sleep(3)
return check_connection_key_bits(node, domain, bits, retries - 1)
retries = backoff.handle_fail(retries, f"Did not find expected number of bits ({bits}) in key")
return check_connection_key_bits(node, domain, bits, retries)
def check_stapling(node, domain, retries=3):
assert retries >= 0, "OCSP Stapling check failed"
def check_stapling(node, domain, retries=0):
# Pebble doesn't provide a full OCSP responder, so just check the URL
result = node.succeed(
"openssl s_client -CAfile /tmp/ca.crt"
@ -445,21 +467,19 @@ in {
print("OCSP Responder URL:", result)
if "${caDomain}:4002" not in result.lower():
time.sleep(3)
return check_stapling(node, domain, retries - 1)
retries = backoff.handle_fail(retries, "OCSP Stapling check failed")
return check_stapling(node, domain, retries)
def download_ca_certs(node, retries=5):
assert retries >= 0, "Failed to connect to pebble to download root CA certs"
def download_ca_certs(node, retries=0):
exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
exit_code_2, _ = node.execute(
"curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
)
if exit_code + exit_code_2 > 0:
time.sleep(3)
return download_ca_certs(node, retries - 1)
retries = backoff.handle_fail(retries, "Failed to connect to pebble to download root CA certs")
return download_ca_certs(node, retries)
start_all()

View File

@ -1,7 +1,7 @@
{ pkgs, lib, ... }: {
name = "aesmd";
meta = {
maintainers = with lib.maintainers; [ veehaitch ];
maintainers = with lib.maintainers; [ trundle veehaitch ];
};
nodes.machine = { lib, ... }: {
@ -25,38 +25,78 @@
# We don't have a real SGX machine in NixOS tests
systemd.services.aesmd.unitConfig.AssertPathExists = lib.mkForce [ ];
specialisation = {
withQuoteProvider.configuration = { ... }: {
services.aesmd = {
quoteProviderLibrary = pkgs.sgx-azure-dcap-client;
environment = {
AZDCAP_DEBUG_LOG_LEVEL = "INFO";
};
};
};
};
};
testScript = ''
with subtest("aesmd.service starts"):
machine.wait_for_unit("aesmd.service")
status, main_pid = machine.systemctl("show --property MainPID --value aesmd.service")
assert status == 0, "Could not get MainPID of aesmd.service"
main_pid = main_pid.strip()
testScript = { nodes, ... }:
let
specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
in
''
def get_aesmd_pid():
status, main_pid = machine.systemctl("show --property MainPID --value aesmd.service")
assert status == 0, "Could not get MainPID of aesmd.service"
return main_pid.strip()
with subtest("aesmd.service runtime directory permissions"):
runtime_dir = "/run/aesmd";
res = machine.succeed(f"stat -c '%a %U %G' {runtime_dir}").strip()
assert "750 aesmd sgx" == res, f"{runtime_dir} does not have the expected permissions: {res}"
with subtest("aesmd.service starts"):
machine.wait_for_unit("aesmd.service")
with subtest("aesm.socket available on host"):
socket_path = "/var/run/aesmd/aesm.socket"
machine.wait_until_succeeds(f"test -S {socket_path}")
machine.succeed(f"test 777 -eq $(stat -c '%a' {socket_path})")
for op in [ "-r", "-w", "-x" ]:
machine.succeed(f"sudo -u sgxtest test {op} {socket_path}")
machine.fail(f"sudo -u nosgxtest test {op} {socket_path}")
main_pid = get_aesmd_pid()
with subtest("Copies white_list_cert_to_be_verify.bin"):
whitelist_path = "/var/opt/aesmd/data/white_list_cert_to_be_verify.bin"
whitelist_perms = machine.succeed(
f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/stat -c '%a' {whitelist_path}"
).strip()
assert "644" == whitelist_perms, f"white_list_cert_to_be_verify.bin has permissions {whitelist_perms}"
with subtest("aesmd.service runtime directory permissions"):
runtime_dir = "/run/aesmd";
res = machine.succeed(f"stat -c '%a %U %G' {runtime_dir}").strip()
assert "750 aesmd sgx" == res, f"{runtime_dir} does not have the expected permissions: {res}"
with subtest("Writes and binds aesm.conf in service namespace"):
aesmd_config = machine.succeed(f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/cat /etc/aesmd.conf")
with subtest("aesm.socket available on host"):
socket_path = "/var/run/aesmd/aesm.socket"
machine.wait_until_succeeds(f"test -S {socket_path}")
machine.succeed(f"test 777 -eq $(stat -c '%a' {socket_path})")
for op in [ "-r", "-w", "-x" ]:
machine.succeed(f"sudo -u sgxtest test {op} {socket_path}")
machine.fail(f"sudo -u nosgxtest test {op} {socket_path}")
assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
'';
with subtest("Copies white_list_cert_to_be_verify.bin"):
whitelist_path = "/var/opt/aesmd/data/white_list_cert_to_be_verify.bin"
whitelist_perms = machine.succeed(
f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/stat -c '%a' {whitelist_path}"
).strip()
assert "644" == whitelist_perms, f"white_list_cert_to_be_verify.bin has permissions {whitelist_perms}"
with subtest("Writes and binds aesm.conf in service namespace"):
aesmd_config = machine.succeed(f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/cat /etc/aesmd.conf")
assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
with subtest("aesmd.service without quote provider library has correct LD_LIBRARY_PATH"):
status, environment = machine.systemctl("show --property Environment --value aesmd.service")
assert status == 0, "Could not get Environment of aesmd.service"
env_by_name = dict(entry.split("=", 1) for entry in environment.split())
assert not env_by_name["LD_LIBRARY_PATH"], "LD_LIBRARY_PATH is not empty"
with subtest("aesmd.service with quote provider library starts"):
machine.succeed('${specialisations}/withQuoteProvider/bin/switch-to-configuration test')
machine.wait_for_unit("aesmd.service")
main_pid = get_aesmd_pid()
with subtest("aesmd.service with quote provider library has correct LD_LIBRARY_PATH"):
ld_library_path = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep LD_LIBRARY_PATH")
assert ld_library_path.startswith("LD_LIBRARY_PATH=${pkgs.sgx-azure-dcap-client}/lib:"), \
"LD_LIBRARY_PATH is not set to the configured quote provider library"
with subtest("aesmd.service with quote provider library has set AZDCAP_DEBUG_LOG_LEVEL"):
azdcp_debug_log_level = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep AZDCAP_DEBUG_LOG_LEVEL")
assert azdcp_debug_log_level == "AZDCAP_DEBUG_LOG_LEVEL=INFO\n", "AZDCAP_DEBUG_LOG_LEVEL is not set to INFO"
'';
}

View File

@ -69,7 +69,7 @@ in {
_3proxy = runTest ./3proxy.nix;
acme = runTest ./acme.nix;
adguardhome = runTest ./adguardhome.nix;
aesmd = runTest ./aesmd.nix;
aesmd = runTestOn ["x86_64-linux"] ./aesmd.nix;
agate = runTest ./web-servers/agate.nix;
agda = handleTest ./agda.nix {};
airsonic = handleTest ./airsonic.nix {};
@ -96,6 +96,7 @@ in {
blockbook-frontend = handleTest ./blockbook-frontend.nix {};
blocky = handleTest ./blocky.nix {};
boot = handleTestOn ["x86_64-linux" "aarch64-linux"] ./boot.nix {};
bootspec = handleTestOn ["x86_64-linux"] ./bootspec.nix {};
boot-stage1 = handleTest ./boot-stage1.nix {};
borgbackup = handleTest ./borgbackup.nix {};
botamusique = handleTest ./botamusique.nix {};
@ -256,6 +257,7 @@ in {
haste-server = handleTest ./haste-server.nix {};
haproxy = handleTest ./haproxy.nix {};
hardened = handleTest ./hardened.nix {};
headscale = handleTest ./headscale.nix {};
healthchecks = handleTest ./web-apps/healthchecks.nix {};
hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };

View File

@ -43,7 +43,7 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
@ -65,7 +65,7 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
@ -86,7 +86,33 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/boot.json")
'';
};
# Check that enabling the initrd creates corresponding entries in the bootspec.
initrd = makeTest {
name = "bootspec-with-initrd";
meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
nodes.machine = {
imports = [ standard ];
environment.systemPackages = [ pkgs.jq ];
# This is most likely the default already, but we want to make it explicit here.
boot.initrd.enable = true;
};
testScript = ''
import json
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
bootspec = json.loads(machine.succeed("jq -r '.v1' /run/current-system/bootspec/boot.json"))
assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain the initrd and initrdSecrets fields when initrd is enabled"
'';
};
@ -107,11 +133,11 @@ in
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /run/current-system/bootspec/boot.json")
machine.succeed("test -e /run/current-system/specialisation/something/bootspec/boot.json")
machine.succeed("test -e /run/current-system/boot.json")
machine.succeed("test -e /run/current-system/specialisation/something/boot.json")
sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/bootspec/boot.json"))
sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/bootspec/boot.json"))
sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/boot.json"))
sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/boot.json"))
assert sp_in_parent == sp_in_fs['v1'], "Bootspecs of the same specialisation are different!"
'';
@ -135,7 +161,7 @@ in
machine.wait_for_unit("multi-user.target")
current_os_release = machine.succeed("cat /etc/os-release")
bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/bootspec/boot.json)")
bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/boot.json)")
assert current_os_release == bootspec_os_release, "Filename referenced by extension has unexpected contents"
'';

nixos/tests/headscale.nix Normal file
View File

@ -0,0 +1,17 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "headscale";
meta.maintainers = with lib.maintainers; [ misterio77 ];
nodes.machine = { ... }: {
services.headscale.enable = true;
environment.systemPackages = [ pkgs.headscale ];
};
testScript = ''
machine.wait_for_unit("headscale")
machine.wait_for_open_port(8080)
# Test basic functionality
machine.succeed("headscale namespaces create test")
machine.succeed("headscale preauthkeys -n test create")
'';
})

View File

@ -64,7 +64,6 @@ let
# wait for reader to be ready
machine.wait_for_file("${readyFile}")
machine.sleep(1)
# send all keys
for key in inputs:
@ -78,9 +77,18 @@ let
with open("${pkgs.writeText "tests.json" (builtins.toJSON tests)}") as json_file:
tests = json.load(json_file)
# These environments used to run in the opposite order, causing the
# following error at openvt startup.
#
# openvt: Couldn't deallocate console 1
#
# This error did not appear in successful runs.
# I don't know the exact cause, but it seems that openvt and X are
# fighting over the virtual terminal. This does not appear to be a problem
# when the X test runs first.
keymap_environments = {
"VT Keymap": "openvt -sw --",
"Xorg Keymap": "DISPLAY=:0 xterm -title testterm -class testterm -fullscreen -e",
"VT Keymap": "openvt -sw --",
}
machine.wait_for_x()

View File

@ -61,7 +61,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
specialisation.reloadWithErrorsSystem.configuration = {
services.nginx.package = pkgs.nginxMainline;
services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
services.nginx.virtualHosts."hello".extraConfig = "access_log /does/not/exist.log;";
};
};
};

View File

@ -1,7 +1,7 @@
import ./make-test-python.nix ({pkgs, lib, ...}:
let
# A filesystem image with a (presumably) bootable debian
debianImage = pkgs.vmTools.diskImageFuns.debian9i386 {
debianImage = pkgs.vmTools.diskImageFuns.debian11i386 {
# os-prober cannot detect systems installed on disks without a partition table
# so we create the disk ourselves
createRootFS = with pkgs; ''

View File

@ -1172,6 +1172,25 @@ let
'';
};
statsd = {
exporterConfig = {
enable = true;
};
exporterTest = ''
wait_for_unit("prometheus-statsd-exporter.service")
wait_for_open_port(9102)
succeed("curl http://localhost:9102/metrics | grep 'statsd_exporter_build_info{'")
succeed(
"echo 'test.udp:1|c' > /dev/udp/localhost/9125",
"curl http://localhost:9102/metrics | grep 'test_udp 1'",
)
succeed(
"echo 'test.tcp:1|c' > /dev/tcp/localhost/9125",
"curl http://localhost:9102/metrics | grep 'test_tcp 1'",
)
'';
};
surfboard = {
exporterConfig = {
enable = true;

View File

@ -49,8 +49,8 @@ let
start_all()
machine.wait_for_unit('graphical.target')
machine.wait_until_succeeds('pgrep -x codium')
codium_running.wait()
with codium_running:
# Wait until vscodium is visible. "File" is in the menu bar.
machine.wait_for_text('Get Started')

View File

@ -41,6 +41,9 @@ import ../make-test-python.nix ({pkgs, ...}:
server = { pkgs, ... }: {
environment = {
etc = {
"peertube/secrets-peertube".text = ''
063d9c60d519597acef26003d5ecc32729083965d09181ef3949200cbe5f09ee
'';
"peertube/password-posgressql-db".text = ''
0gUN0C1mgST6czvjZ8T9
'';
@ -67,6 +70,10 @@ import ../make-test-python.nix ({pkgs, ...}:
localDomain = "peertube.local";
enableWebHttps = false;
secrets = {
secretsFile = "/etc/peertube/secrets-peertube";
};
database = {
host = "192.168.2.10";
name = "peertube_local";

View File

@ -1,3 +1,6 @@
# This file was automatically generated by passthru.fetch-deps.
# Please dont edit it manually, your changes might get overwritten!
{ fetchNuGet }: [
(fetchNuGet { pname = "Avalonia"; version = "0.10.14"; sha256 = "0nn3xgkf7v47dwpnsxjg0b25ifqa4mbq02ja5rvnlc3q2k6k0fxv"; })
(fetchNuGet { pname = "Avalonia.Angle.Windows.Natives"; version = "2.1.0.2020091801"; sha256 = "04jm83cz7vkhhr6n2c9hya2k8i2462xbf6np4bidk55as0jdq43a"; })

View File

@ -2,12 +2,12 @@
let
pname = "plexamp";
version = "4.5.2";
version = "4.6.1";
src = fetchurl {
url = "https://plexamp.plex.tv/plexamp.plex.tv/desktop/Plexamp-${version}.AppImage";
name="${pname}-${version}.AppImage";
sha512 = "/0CW5S5n4xh9FF/Sfxl3H0bNCpbUfz4ik4ptVTIwvEcXw2NoKS5dLxFSTn9lfVZAV+UfzSqcrWH5HknN+o5wQw==";
sha512 = "9wkhSDn7kvj6pqCawTJDBO8HfYe8eEYtAR1Bi9/fxiOBXRYUUHEZzSGLF9QoTVYMuGGHeX35c+QvnA2VsdsWCw==";
};
appimageContents = appimageTools.extractType2 {
@ -33,7 +33,7 @@ in appimageTools.wrapType2 {
meta = with lib; {
description = "A beautiful Plex music player for audiophiles, curators, and hipsters";
homepage = "https://plexamp.com/";
changelog = "https://forums.plex.tv/t/plexamp-release-notes/221280/47";
changelog = "https://forums.plex.tv/t/plexamp-release-notes/221280/48";
license = licenses.unfree;
maintainers = with maintainers; [ killercup synthetica ];
platforms = [ "x86_64-linux" ];

View File

@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
pname = "praat";
version = "6.3.02";
version = "6.3.03";
src = fetchFromGitHub {
owner = "praat";
repo = "praat";
rev = "v${version}";
sha256 = "sha256-sn/GWCw1bxtFjUUKkrPZVOe5qRQ5ATyII52CPHmlB3g=";
sha256 = "sha256-Fb16sx+LVoXuiFASeiaYUMoNgZJXqKTBrUHFd2YXEJ0=";
};
configurePhase = ''

View File

@ -34,13 +34,13 @@ stdenv.mkDerivation {
pname = binName;
# versions are specified in `squeezelite.h`
# see https://github.com/ralph-irving/squeezelite/issues/29
version = "1.9.9.1414";
version = "1.9.9.1419";
src = fetchFromGitHub {
owner = "ralph-irving";
repo = "squeezelite";
rev = "dbe69eb8aa88f644cfb46541d6cef72fa666570d";
hash = "sha256-BN6eBHMMecucfHwzmho3xi1l2O3YnYcBUE321Rl6xrc=";
rev = "226efa300c4cf037e8486bad635e9deb3104636f";
hash = "sha256-ZZWliw1prFbBZMFp0QmXg6MKuHPNuFh2lFxQ8bbuWAM=";
};
buildInputs = [ flac libmad libvorbis mpg123 ]

View File

@ -25,13 +25,13 @@
stdenv.mkDerivation rec {
pname = "tauon";
version = "7.4.6";
version = "7.4.7";
src = fetchFromGitHub {
owner = "Taiko2k";
repo = "TauonMusicBox";
rev = "v${version}";
sha256 = "sha256-G3DDr2ON35ctjPkRMJDjnfDHMHMhR3tlTgJ65DXvzwk=";
sha256 = "sha256-WUHMXsbnNaDlV/5bCOPMadJKWoF5i2UlFf9fcX6GCZ0=";
};
postUnpack = ''

View File

@ -160,6 +160,7 @@ stdenv.mkDerivation rec {
preFixup = ''
gappsWrapperArgs+=(
--prefix GSETTINGS_SCHEMA_DIR : "$out/share/gsettings-schemas/${pname}-${version}/glib-2.0/schemas/"
--prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS"
)
'';

File diff suppressed because it is too large

View File

@ -11,13 +11,13 @@
stdenv.mkDerivation rec {
pname = "fulcrum";
version = "1.8.2";
version = "1.9.0";
src = fetchFromGitHub {
owner = "cculianu";
repo = "Fulcrum";
rev = "v${version}";
sha256 = "sha256-sX9GeY+c/mcsAWApQ0E5LwoXZgWUC4w7YY8/PEzMhl8=";
sha256 = "sha256-HAA5YRShLzxVP9aIP1RdDH09cZqjiZhZOxxc2EVGvx8=";
};
nativeBuildInputs = [ pkg-config qmake ];

View File

@ -29,6 +29,8 @@ stdenv.mkDerivation rec {
rm -r external/{miniupnp,randomx,rapidjson,unbound}
# export patched source for haven-gui
cp -r . $source
# fix build on aarch64-darwin
substituteInPlace CMakeLists.txt --replace "-march=x86-64" ""
'';
nativeBuildInputs = [ cmake pkg-config ];
@ -59,7 +61,5 @@ stdenv.mkDerivation rec {
license = licenses.bsd3;
platforms = platforms.all;
maintainers = with maintainers; [ kim0 ];
# never built on aarch64-darwin since first introduction in nixpkgs
broken = stdenv.isDarwin && stdenv.isAarch64;
};
}

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
# This file was automatically generated by passthru.fetch-deps.
# Please dont edit it manually, your changes might get overwritten!
{ fetchNuGet }: [
(fetchNuGet { pname = "Microsoft.AspNetCore.App.Runtime.linux-x64"; version = "3.1.26"; sha256 = "0rib2121wri6wj6h4f6w4yqw9qp2xsad3ind63fmp1sr649jifyh"; })
(fetchNuGet { pname = "Microsoft.AspNetCore.App.Runtime.osx-x64"; version = "3.1.26"; sha256 = "0z29rrhc87g0bi273lcqd608f7ngd16nv85v8549231yvf99n60x"; })
(fetchNuGet { pname = "Microsoft.AspNetCore.App.Runtime.win-x64"; version = "3.1.26"; sha256 = "0pbm6hpibsvq5w8hyvvllz4qns287x3l8bc07krffv23yfbv8zwy"; })
(fetchNuGet { pname = "Microsoft.AspNetCore.JsonPatch"; version = "3.1.1"; sha256 = "0c0aaz9rlh9chc53dnv5jryp0x0415hipaizrmih3kzwd3fmqpml"; })
(fetchNuGet { pname = "Microsoft.AspNetCore.Mvc.NewtonsoftJson"; version = "3.1.1"; sha256 = "1c2lrlp64kkacnjgdyygr6fqdawk10l8j4qgppii6rq61yjwhcig"; })
(fetchNuGet { pname = "Microsoft.Build"; version = "15.3.409"; sha256 = "0vzq6csp2yys9s96c7i37bjml439rdi47g8f5rzqdr7xf5a1jk81"; })
@ -18,11 +18,6 @@
(fetchNuGet { pname = "Microsoft.Extensions.Logging.Abstractions"; version = "1.0.0"; sha256 = "1sh9bidmhy32gkz6fkli79mxv06546ybrzppfw5v2aq0bda1ghka"; })
(fetchNuGet { pname = "Microsoft.Extensions.Primitives"; version = "2.0.0"; sha256 = "1xppr5jbny04slyjgngxjdm0maxdh47vq481ps944d7jrfs0p3mb"; })
(fetchNuGet { pname = "Microsoft.NETCore.App"; version = "2.0.5"; sha256 = "0qb7k624w7l0zhapdp519ymqg84a67r8zyd8cpj42hywsgb0dqv6"; })
(fetchNuGet { pname = "Microsoft.NETCore.App.Host.osx-x64"; version = "3.1.26"; sha256 = "1vk4dr2z72nmjg2skqvy2m2h5brqp21v51pnd7ldpm7asgr5ck8n"; })
(fetchNuGet { pname = "Microsoft.NETCore.App.Host.win-x64"; version = "3.1.26"; sha256 = "0l5yfnpbd36n38rjlmhsnq4bniq1fcssv4qh8kb9h3qigz40qxj9"; })
(fetchNuGet { pname = "Microsoft.NETCore.App.Runtime.linux-x64"; version = "3.1.26"; sha256 = "1h9b8fwgwbycvn1ngxnpdz3s1zh59wi2iy8n4y2nfkmz2rbldrrm"; })
(fetchNuGet { pname = "Microsoft.NETCore.App.Runtime.osx-x64"; version = "3.1.26"; sha256 = "0y06qz4pgflwia222mljg19nlfmhcg0qs1a8wm3zwj602wzy3nll"; })
(fetchNuGet { pname = "Microsoft.NETCore.App.Runtime.win-x64"; version = "3.1.26"; sha256 = "1half7rywhxb1x19gzddvjqbll4whx9wmwdlk57iy68djas95lmy"; })
(fetchNuGet { pname = "Microsoft.NETCore.DotNetAppHost"; version = "2.0.5"; sha256 = "00bsxdg9c8msjxyffvfi8siqk8v2m7ca8fqy1npv7b2pzg3byjws"; })
(fetchNuGet { pname = "Microsoft.NETCore.DotNetHostPolicy"; version = "2.0.5"; sha256 = "0v5csskiwpk8kz8wclqad8kcjmxr7ik4w99wl05740qvaag3qysk"; })
(fetchNuGet { pname = "Microsoft.NETCore.DotNetHostResolver"; version = "2.0.5"; sha256 = "1sz2fdp8fdwz21x3lr2m1zhhrbix6iz699fjkwiryqdjl4ygd3hw"; })
@ -66,9 +61,6 @@
(fetchNuGet { pname = "runtime.unix.System.Diagnostics.Debug"; version = "4.0.11"; sha256 = "05ndbai4vpqrry0ghbfgqc8xblmplwjgndxmdn1zklqimczwjg2d"; })
(fetchNuGet { pname = "runtime.unix.System.Private.Uri"; version = "4.0.1"; sha256 = "0ic5dgc45jkhcr1g9xmmzjm7ffiw4cymm0fprczlx4fnww4783nm"; })
(fetchNuGet { pname = "runtime.unix.System.Runtime.Extensions"; version = "4.1.0"; sha256 = "0x1cwd7cvifzmn5x1wafvj75zdxlk3mxy860igh3x1wx0s8167y4"; })
(fetchNuGet { pname = "runtime.win.System.Diagnostics.Debug"; version = "4.0.11"; sha256 = "1ylkj4v7aq00svij7aq82d86afpwqgrqf2kpikabxl26p19ry9wm"; })
(fetchNuGet { pname = "runtime.win.System.Runtime.Extensions"; version = "4.1.0"; sha256 = "1zmx2msa04ka8mgh8viahi4pqpp86vdhzij2rg1jg131bwlv59yw"; })
(fetchNuGet { pname = "runtime.win7.System.Private.Uri"; version = "4.0.1"; sha256 = "1ibrwabavdpqapnplm5kh6nz9vgcwv0wn61w1p60v262kif6sglp"; })
(fetchNuGet { pname = "Swashbuckle.AspNetCore"; version = "5.0.0"; sha256 = "0rn2awmzrsrppk97xbbwk4kq1mys9bygb5xhl6mphbk0hchrvh09"; })
(fetchNuGet { pname = "Swashbuckle.AspNetCore.Swagger"; version = "5.0.0"; sha256 = "1341nv8nmh6avs3y7w2szzir5qd0bndxwrkdmvvj3hcxj1126w2f"; })
(fetchNuGet { pname = "Swashbuckle.AspNetCore.SwaggerGen"; version = "5.0.0"; sha256 = "00swg2avqnb38q2bsxljd34n8rpknp74h9vbn0fdnfds3a32cqr4"; })

View File

@ -8,13 +8,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "jove";
version = "4.17.4.8";
version = "4.17.4.9";
src = fetchFromGitHub {
owner = "jonmacs";
repo = "jove";
rev = finalAttrs.version;
sha256 = "sha256-/n/TgVqyG/WeK+/DZqFZCdkQR4SD5+YmljLlzAehMvw=";
sha256 = "sha256-Lo5S3t4vewkpoihVdxa3yRrEzNWeNLHCZHXiLCxOH5o=";
};
nativeBuildInputs = [ makeWrapper ];

View File

@ -64,6 +64,10 @@ stdenv.mkDerivation {
description = "An integrated development environment for Java, C, C++ and PHP";
homepage = "https://netbeans.apache.org/";
license = lib.licenses.asl20;
sourceProvenance = with lib.sourceTypes; [
binaryBytecode
binaryNativeCode
];
maintainers = with lib.maintainers; [ sander rszibele asbachb ];
platforms = lib.platforms.unix;
};

View File

@ -1,9 +1,6 @@
{ lib, stdenv
{ lib
, stdenv
, rtpPath
, vim
, vimCommandCheckHook
, vimGenDocHook
, neovimRequireCheckHook
, toVimPlugin
}:
@ -14,39 +11,46 @@ rec {
overrideAttrs = f: addRtp (drv.overrideAttrs f);
};
buildVimPlugin = attrs@{
name ? "${attrs.pname}-${attrs.version}",
namePrefix ? "vimplugin-",
src,
unpackPhase ? "",
configurePhase ? "",
buildPhase ? "",
preInstall ? "",
postInstall ? "",
path ? ".",
addonInfo ? null,
...
}:
let drv = stdenv.mkDerivation (attrs // {
name = namePrefix + name;
buildVimPlugin =
{ name ? "${attrs.pname}-${attrs.version}"
, namePrefix ? "vimplugin-"
, src
, unpackPhase ? ""
, configurePhase ? ""
, buildPhase ? ""
, preInstall ? ""
, postInstall ? ""
, path ? "."
, addonInfo ? null
, meta ? { }
, ...
}@attrs:
let
drv = stdenv.mkDerivation (attrs // {
name = namePrefix + name;
inherit unpackPhase configurePhase buildPhase addonInfo preInstall postInstall;
inherit unpackPhase configurePhase buildPhase addonInfo preInstall postInstall;
installPhase = ''
runHook preInstall
installPhase = ''
runHook preInstall
target=$out/${rtpPath}/${path}
mkdir -p $out/${rtpPath}
cp -r . $target
target=$out/${rtpPath}/${path}
mkdir -p $out/${rtpPath}
cp -r . $target
runHook postInstall
'';
});
in addRtp (toVimPlugin drv);
runHook postInstall
'';
meta = {
platforms = lib.platforms.all;
} // meta;
});
in
addRtp (toVimPlugin drv);
buildVimPluginFrom2Nix = attrs: buildVimPlugin ({
# vim plugins may override this
buildPhase = ":";
configurePhase =":";
configurePhase = ":";
} // attrs);
}
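For orientation, a minimal sketch of how the refactored buildVimPlugin above might be called from an overlay (the plugin name, owner, revision, and hash are placeholders, not taken from this commit; vimUtils, fetchFromGitHub, and lib are assumed to be in scope):

vimUtils.buildVimPlugin {
  pname = "example-plugin";   # placeholder
  version = "2022-12-24";     # placeholder
  src = fetchFromGitHub {
    owner = "example-owner";                            # placeholder
    repo = "example-plugin";                            # placeholder
    rev = "0000000000000000000000000000000000000000";   # placeholder
    sha256 = lib.fakeSha256;  # replace with the real hash after the first build
  };
  # With the change above, meta.platforms now defaults to lib.platforms.all,
  # so it only needs to be set for platform-specific plugins.
}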

File diff suppressed because it is too large

View File

@ -3,6 +3,17 @@
{ buildGrammar, fetchFromGitHub, fetchFromGitLab, fetchgit }:
{
ada = buildGrammar {
language = "ada";
version = "e9e2ec9";
source = fetchFromGitHub {
owner = "briot";
repo = "tree-sitter-ada";
rev = "e9e2ec9d3b6302e9b455901bec00036e29d1c121";
hash = "sha256-buTQ1GjaJSVy4SPikZq88bifWubyHtPSI4ac7p1/tOg=";
};
meta.homepage = "https://github.com/briot/tree-sitter-ada";
};
agda = buildGrammar {
language = "agda";
version = "80ea622";
@ -27,12 +38,12 @@
};
astro = buildGrammar {
language = "astro";
version = "947e930";
version = "22697b0";
source = fetchFromGitHub {
owner = "virchau13";
repo = "tree-sitter-astro";
rev = "947e93089e60c66e681eba22283f4037841451e7";
hash = "sha256-q1ni++SPbq5y+47fPb6TryMw86gpULwNcXwi5yjXCWI=";
rev = "22697b0e2413464b7abaea9269c5e83a59e39a83";
hash = "sha256-vp2VjkfBEYEUwUCjBlbVjPIB49QIikdFAITzzFLZX+U=";
};
meta.homepage = "https://github.com/virchau13/tree-sitter-astro";
};
@ -104,12 +115,12 @@
};
c_sharp = buildGrammar {
language = "c_sharp";
version = "8e4ec08";
version = "1bd772f";
source = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-c-sharp";
rev = "8e4ec08f1dae1d72f082df0f7e1176772f553d47";
hash = "sha256-BIqfaFFwco3aE65N9tRtawxFEXvaVwQvoMgM3cg10/k=";
rev = "1bd772f69b0db577122533514a239b184650adf2";
hash = "sha256-ySgSavQflqFXHY25DjX16aP4KgK2HFdsQ6HeYaerxNI=";
};
meta.homepage = "https://github.com/tree-sitter/tree-sitter-c-sharp";
};
@ -404,12 +415,12 @@
};
gdscript = buildGrammar {
language = "gdscript";
version = "2a6abda";
version = "a8fa839";
source = fetchFromGitHub {
owner = "PrestonKnopp";
repo = "tree-sitter-gdscript";
rev = "2a6abdaa47fcb91397e09a97c7433fd995ea46c6";
hash = "sha256-YDLPYwWHnwqj7CpgUKRXQYj2a6ZJUKc0bcalVSJ99Ew=";
rev = "a8fa839150d05baaf4d5a472520ee71e9b1d8b3c";
hash = "sha256-uSpic8MKMvl1p7LOHNn5EJGp1iOYkeqcRt7skdROmBo=";
};
meta.homepage = "https://github.com/PrestonKnopp/tree-sitter-gdscript";
};
@ -593,12 +604,12 @@
};
help = buildGrammar {
language = "help";
version = "ce20f13";
version = "61c7505";
source = fetchFromGitHub {
owner = "neovim";
repo = "tree-sitter-vimdoc";
rev = "ce20f13c3f12506185754888feaae3f2ad54c287";
hash = "sha256-XklORrP4ToX4klXFYxMv2s63INWugDyjl3mtLDdUHlg=";
rev = "61c75058299f3d1cf565697e4073d7b2cc6a6d6c";
hash = "sha256-MTossQzmBoHqXu933suYUUyDbmb20uO5oZlV31BYqIA=";
};
meta.homepage = "https://github.com/neovim/tree-sitter-vimdoc";
};
@ -736,23 +747,23 @@
};
jsonnet = buildGrammar {
language = "jsonnet";
version = "768a384";
version = "505f5bd";
source = fetchFromGitHub {
owner = "sourcegraph";
repo = "tree-sitter-jsonnet";
rev = "768a384989391237c6d55ff3d878a0d1e0d2b4fa";
hash = "sha256-kSG0YwtkzGVz8RIYBrE0ZyUMc6YTtQO8XvHHiwy5GL4=";
rev = "505f5bd90053ae895aa3d6f2bac8071dd9abd8b2";
hash = "sha256-XZqywAzM+UCKto/OFn50hhRpEyFVLpFV7Q1Z0NKoPsI=";
};
meta.homepage = "https://github.com/sourcegraph/tree-sitter-jsonnet";
};
julia = buildGrammar {
language = "julia";
version = "36b099e";
version = "f254ff9";
source = fetchFromGitHub {
owner = "tree-sitter";
repo = "tree-sitter-julia";
rev = "36b099e9ea577f64ba53323115028dadd2991d2c";
hash = "sha256-sd6Ue7Ur6Juq2kZbuC/E/gK9JJPVG/5UTToQ+5hdTD0=";
rev = "f254ff9c52e994f629a60821662917d2c6c0e8eb";
hash = "sha256-918mGh91hAl8hx4HoGOXr2BFoDtMz5yPkQRMDrkk1Mg=";
};
meta.homepage = "https://github.com/tree-sitter/tree-sitter-julia";
};
@ -813,12 +824,12 @@
};
lua = buildGrammar {
language = "lua";
version = "f5e84ff";
version = "0fc8996";
source = fetchFromGitHub {
owner = "MunifTanjim";
repo = "tree-sitter-lua";
rev = "f5e84ffc2b06858401e0d2edf5dce009efbe34b3";
hash = "sha256-9ig+F2W6MB5uSS3XFUL2OCW9PKYkb4KPpGN2DWKEdhY=";
rev = "0fc89962b7ff5c7d676b8592c1cbce1ceaa806fd";
hash = "sha256-MbNP1/NKdSkUTydmK819o8vMKUZFan1yE7d227xMsh4=";
};
meta.homepage = "https://github.com/MunifTanjim/tree-sitter-lua";
};
@ -1115,12 +1126,12 @@
};
query = buildGrammar {
language = "query";
version = "0695cd0";
version = "0717de0";
source = fetchFromGitHub {
owner = "nvim-treesitter";
repo = "tree-sitter-query";
rev = "0695cd0760532de7b54f23c667d459b5d1332b44";
hash = "sha256-DwhvOvUb3hNmZTTk5HkZ9X1DCWz+G3+YJ0l/PqLVDdU=";
rev = "0717de07078a20a8608c98ad5f26c208949d0e15";
hash = "sha256-dWWof8rYFTto3A4BfbKTKcNieRbwFdF6xDXW9tQvAqQ=";
};
meta.homepage = "https://github.com/nvim-treesitter/tree-sitter-query";
};
@ -1291,12 +1302,12 @@
};
sql = buildGrammar {
language = "sql";
version = "8dc7fa0";
version = "8635357";
source = fetchFromGitHub {
owner = "derekstride";
repo = "tree-sitter-sql";
rev = "8dc7fa0e51145f0312eedbb5aff9945bd967fb8f";
hash = "sha256-L6mur9BnDzA1mgtsWdyMC52IY9sKwt/xDkfPv2VKPPs=";
rev = "8635357363f8b01931ce6abbe0c937aa73e47bf8";
hash = "sha256-p3R8uGIHyzYnRnKYWpeqMAcuk1xjKlPvB5vllPqUvrs=";
};
generate = true;
meta.homepage = "https://github.com/derekstride/tree-sitter-sql";
@ -1336,12 +1347,12 @@
};
swift = buildGrammar {
language = "swift";
version = "693411c";
version = "f94e1d6";
source = fetchFromGitHub {
owner = "alex-pinkus";
repo = "tree-sitter-swift";
rev = "693411cb5a1167311ccd84708348281630562726";
hash = "sha256-KNmRR2Od2uTOHiENeCXoTKAp2jvzSsEhzqf9WmiL3Vo=";
rev = "f94e1d6dd8f4df6c7c8aa07da625e498ebb27d09";
hash = "sha256-DtcGcywpwCZoGNllZEetE+s1bwMNrRdooGdttAG3Ra0=";
};
generate = true;
meta.homepage = "https://github.com/alex-pinkus/tree-sitter-swift";
@ -1357,6 +1368,16 @@
};
meta.homepage = "https://github.com/RaafatTurki/tree-sitter-sxhkdrc";
};
t32 = buildGrammar {
language = "t32";
version = "f8106fc";
source = fetchgit {
url = "https://codeberg.org/xasc/tree-sitter-t32";
rev = "f8106fcf5a27f905b3d9d55d9cd3e910bea70c60";
hash = "sha256-hKddSekx67Yqm4+LqVaH8Sf1+73RlCnXE6th2FTHB34=";
};
meta.homepage = "https://codeberg.org/xasc/tree-sitter-t32";
};
teal = buildGrammar {
language = "teal";
version = "1ae8c68";
@ -1461,12 +1482,12 @@
};
v = buildGrammar {
language = "v";
version = "66b92a8";
version = "f0aa56e";
source = fetchFromGitHub {
owner = "vlang";
repo = "vls";
rev = "66b92a89ef1e149300df79c0b2a934ad959c8eec";
hash = "sha256-R6Irz3sdyzKH1qWOUwUYK1OKhYs0PUYS/azYn/nb6jk=";
rev = "f0aa56eec7689f08a389c90ad8d3e5e0d18b3cd7";
hash = "sha256-d69SvXDWxeRk2hvou8MhXUpUrSBwYENqFDGpT/Y5UpM=";
};
location = "tree_sitter_v";
meta.homepage = "https://github.com/vlang/vls";

View File

@ -71,7 +71,7 @@ def generate_grammar(item):
repo = "{repo}";"""
case _:
cmd += ["fetchgit", "url"]
cmd += ["fetchgit", "--url", url]
generated += f"""fetchgit {{
url = "{url}";"""

View File

@ -123,6 +123,10 @@ self: super: {
};
});
ChatGPT-nvim = super.ChatGPT-nvim.overrideAttrs (old: {
dependencies = with self; [ nui-nvim plenary-nvim telescope-nvim ];
});
clang_complete = super.clang_complete.overrideAttrs (old: {
# In addition to the arguments you pass to your compiler, you also need to
# specify the path of the C++ std header (if you are using C++).
@ -1147,6 +1151,8 @@ self: super: {
pname = "vim-markdown-composer-bin";
inherit (super.vim-markdown-composer) src version;
cargoSha256 = "sha256-Vie8vLTplhaVU4E9IohvxERfz3eBpd62m8/1Ukzk8e4=";
# tests require network access
doCheck = false;
};
in
super.vim-markdown-composer.overrideAttrs (old: {

View File

@ -1,6 +1,7 @@
repo,branch,alias
https://github.com/euclidianAce/BetterLua.vim/,,
https://github.com/vim-scripts/BufOnly.vim/,,
https://github.com/jackMort/ChatGPT.nvim/,HEAD,
https://github.com/chrisbra/CheckAttach/,,
https://github.com/vim-scripts/Colour-Sampler-Pack/,,
https://github.com/whonore/Coqtail/,,
@ -42,6 +43,7 @@ https://github.com/eikenb/acp/,,
https://github.com/stevearc/aerial.nvim/,,
https://github.com/Numkil/ag.nvim/,,
https://github.com/derekelkins/agda-vim/,,
https://github.com/aduros/ai.vim/,HEAD,
https://github.com/slashmili/alchemist.vim/,,
https://github.com/dense-analysis/ale/,,
https://github.com/vim-scripts/align/,,
@ -240,6 +242,7 @@ https://github.com/lambdalisue/fern.vim/,,
https://github.com/wincent/ferret/,,
https://github.com/j-hui/fidget.nvim/,,
https://github.com/bogado/file-line/,,
https://github.com/glacambre/firenvim/,HEAD,
https://github.com/andviro/flake8-vim/,,
https://github.com/ggandor/flit.nvim/,HEAD,
https://github.com/ncm2/float-preview.nvim/,,
@ -274,6 +277,7 @@ https://github.com/lewis6991/gitsigns.nvim/,,
https://github.com/gregsexton/gitv/,,
https://github.com/gleam-lang/gleam.vim/,,
https://github.com/ellisonleao/glow.nvim/,,
https://github.com/p00f/godbolt.nvim/,HEAD,
https://github.com/roman/golden-ratio/,,
https://github.com/buoto/gotests-vim/,,
https://github.com/rmagatti/goto-preview/,,
@ -335,6 +339,7 @@ https://github.com/qnighy/lalrpop.vim/,,
https://github.com/sk1418/last256/,,
https://github.com/latex-box-team/latex-box/,,
https://github.com/dundalek/lazy-lsp.nvim/,HEAD,
https://github.com/folke/lazy.nvim/,HEAD,
https://github.com/kdheepak/lazygit.nvim/,,
https://github.com/Julian/lean.nvim/,,
https://github.com/leanprover/lean.vim/,,

View File

@ -391,8 +391,7 @@ rec {
} ./neovim-require-check-hook.sh) {};
inherit (import ./build-vim-plugin.nix {
inherit lib stdenv rtpPath vim vimGenDocHook
toVimPlugin vimCommandCheckHook neovimRequireCheckHook;
inherit lib stdenv rtpPath toVimPlugin;
}) buildVimPlugin buildVimPluginFrom2Nix;

View File

@ -1483,8 +1483,8 @@ let
mktplcRef = {
name = "latex-workshop";
publisher = "James-Yu";
version = "9.1.1";
sha256 = "sha256-Xt/z5r9R090Z9nP1v7k+jYm9EOcjy0GfYiYpc7jNid4=";
version = "9.2.0";
sha256 = "sha256-AAADJkMXsKvpEHBH8+TNM0x3CGEGVtf/b+tce297rkw=";
};
meta = with lib; {
changelog = "https://marketplace.visualstudio.com/items/James-Yu.latex-workshop/changelog";

View File

@ -9,28 +9,69 @@
let
inherit (vscode-utils) buildVscodeMarketplaceExtension;
# patch runs on remote machine hence use of which
# links to local node if version is 12
nodeVersion = "16";
# As VS Code executes this code on the remote machine,
# we test whether we can build Node from Nixpkgs;
# otherwise we check whether the globally installed
# Node is usable.
patch = ''
f="$HOME/.vscode-server/bin/$COMMIT_ID/node"
localNodePath=''$(which node)
if [ -x "''$localNodePath" ]; then
localNodeVersion=''$(node -v)
if [ "\''${localNodeVersion:1:2}" = "12" ]; then
echo PATCH: replacing ''$f with ''$localNodePath
rm ''$f
ln -s ''$localNodePath ''$f
# Use Node from nixpkgs for NixOS hosts
#
serverDir="$HOME/.vscode-server/bin/$COMMIT_ID"
serverNode="$serverDir/node"
echo "VS Code Node: $serverNode"
# Check if VS Code Server has a non-working Node or the wrong version of Node
if ! nodeVersion=$($serverNode -v) || [ "\''${nodeVersion:1:2}" != "${nodeVersion}" ]; then
echo "VS Code Node Version: $nodeVersion"
if nix-build "<nixpkgs>" -A nodejs-${nodeVersion}_x --out-link "$serverDir/nix" && [ -e "$serverDir/nix/bin/node" ]; then
nodePath="$serverDir/nix/bin/node"
fi
echo "Node from Nix: $nodePath"
nodeVersion=$($nodePath -v)
echo "Node from Nix Version: $nodeVersion"
if [ "\''${nodeVersion:1:2}" != "${nodeVersion}" ]; then
echo "Getting Node from Nix failed, use Local Node instead"
nodePath=$(which node)
echo "Local Node: $nodePath"
nodeVersion=$($nodePath -v)
echo "Local Node Version: $nodeVersion"
fi
if [ "\''${nodeVersion:1:2}" == "${nodeVersion}" ]; then
echo PATCH: replacing $serverNode with $nodePath
ln -sf $nodePath $serverNode
fi
fi
nodeVersion=$($serverNode -v)
echo "VS Code Node Version: $nodeVersion"
if [ "\''${nodeVersion:1:2}" != "${nodeVersion}" ]; then
echo "Unsupported VS Code Node version: $nodeVersion", quitting
fail_with_exitcode ''${o.InstallExitCode.ServerTransferFailed}
fi
${lib.optionalString useLocalExtensions ''
# Use local extensions
if [ -d $HOME/.vscode/extensions ]; then
if ! test -L "$HOME/.vscode-server/extensions"; then
mkdir -p $HOME/.vscode-server
ln -s $HOME/.vscode/extensions $HOME/.vscode-server/
if [ -e $HOME/.vscode-server/extensions ]; then
mv $HOME/.vscode-server/extensions $HOME/.vscode-server/extensions.bak
fi
mkdir -p $HOME/.vscode-server
ln -s $HOME/.vscode/extensions $HOME/.vscode-server/extensions
fi
''}
#
# Start the server
'';
in
buildVscodeMarketplaceExtension {
@ -43,7 +84,7 @@ buildVscodeMarketplaceExtension {
postPatch = ''
substituteInPlace "out/extension.js" \
--replace "# install extensions" '${patch}'
--replace '# Start the server\n' '${patch}'
'';
meta = with lib; {

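A hedged usage sketch for the useLocalExtensions flag referenced in the patch above (the extension's attribute path and the override call are assumptions, not shown in this diff); when enabled, the patched install script symlinks the remote host's ~/.vscode/extensions into ~/.vscode-server instead of keeping separate copies:

# Hypothetical override; adjust the attribute path to match your setup.
(vscode-extensions.ms-vscode-remote.remote-ssh.override {
  useLocalExtensions = true;
})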
View File

@ -1,7 +1,7 @@
{ stdenv, lib, makeDesktopItem
, unzip, libsecret, libXScrnSaver, libxshmfence, wrapGAppsHook, makeWrapper
, atomEnv, at-spi2-atk, autoPatchelfHook
, systemd, fontconfig, libdbusmenu, glib, buildFHSUserEnvBubblewrap
, systemd, fontconfig, libdbusmenu, glib, buildFHSUserEnvBubblewrap, wayland
# Populate passthru.tests
, tests
@ -66,7 +66,7 @@ let
buildInputs = [ libsecret libXScrnSaver libxshmfence ]
++ lib.optionals (!stdenv.isDarwin) ([ at-spi2-atk ] ++ atomEnv.packages);
runtimeDependencies = lib.optionals stdenv.isLinux [ (lib.getLib systemd) fontconfig.lib libdbusmenu ];
runtimeDependencies = lib.optionals stdenv.isLinux [ (lib.getLib systemd) fontconfig.lib libdbusmenu wayland ];
nativeBuildInputs = [ unzip ]
++ lib.optionals stdenv.isLinux [

View File

@ -18,17 +18,17 @@ let
archive_fmt = if stdenv.isDarwin then "zip" else "tar.gz";
sha256 = {
x86_64-linux = "0f5l720gc47dygbk7mahx7pb088a8bfrnf69j3igvczbnfapx5sy";
x86_64-darwin = "0qmji8bfnqvrxv4yk3rscvns1hk5wfwwdng8jblh5bilf657g1fc";
aarch64-linux = "0qcmcsb97q303izhw8k4242nsb72my1vnf24hsfml4vr76f5qqbd";
aarch64-darwin = "1cc8p5s8vr5bml715yx2lzkqa9q85rziswrhl1d11zagymvswjzn";
armv7l-linux = "086c3wazjk30f8r8dgi0bjsvzcc6sa9d80cy4500pim7rb7s6ppn";
x86_64-linux = "0xdj5v2n34d6p49ng13qr9d4yyyvqr96qv15la2fda9s7n1s659c";
x86_64-darwin = "0jgrf534qy39nki0rfc8lrdbdb8ghzarckd3cx9fzq6bw1p2jy1f";
aarch64-linux = "133577j6i709dq4ircnh2yklylcmy0kgs6lhly7mx8nrag8qi9c1";
aarch64-darwin = "18346igq8b1d0kywy9alvzm0glb46aalznnhr5mql5rhaana92xw";
armv7l-linux = "0l0wvgi981ryqbhyh5qalr8lasyf3pg4pzqs9f9hc75ppk4d6sny";
}.${system} or throwSystem;
in
callPackage ./generic.nix rec {
# Please backport all compatible updates to the stable release.
# This is important for the extension ecosystem.
version = "1.74.1";
version = "1.74.2";
pname = "vscode";
executableName = "code" + lib.optionalString isInsiders "-insiders";

View File

@ -28,6 +28,10 @@ stdenv.mkDerivation {
# This wrapper, and only the wrapper, is in the public domain (PD)
license = licenses.publicDomain;
maintainers = with maintainers; [ ];
# dependency yi-language no longer builds
hydraPlatforms = lib.platforms.none;
broken = true;
};
}

View File

@ -20,21 +20,24 @@
, pugixml
, rapidjson
, vulkan-headers
, wayland
, wxGTK32
, zarchive
, vulkan-loader
, nix-update-script
}:
stdenv.mkDerivation rec {
pname = "cemu";
version = "2.0-17";
version = "2.0-22";
src = fetchFromGitHub {
owner = "cemu-project";
repo = "Cemu";
rev = "v${version}";
hash = "sha256-ryFph55o7s3eiqQ8kx5+3Et5S2U9H5i3fmZTc1CaCnA=";
hash = "sha256-ZQfJHQnT5mV6GC3dO6QV1fGsnyZMYqXiVdBSsimL5yU=";
};
patches = [
@ -68,6 +71,7 @@ stdenv.mkDerivation rec {
pugixml
rapidjson
vulkan-headers
wayland
wxGTK32
zarchive
];
@ -108,13 +112,13 @@ stdenv.mkDerivation rec {
in ''
gappsWrapperArgs+=(
--prefix LD_LIBRARY_PATH : "${lib.makeLibraryPath libs}"
# Force X11 to be used until Wayland is natively supported
# <https://github.com/cemu-project/Cemu/pull/143>
--set GDK_BACKEND x11
)
'';
passthru.updateScript = nix-update-script {
attrPath = pname;
};
meta = with lib; {
description = "Cemu is a Wii U emulator";
homepage = "https://cemu.info";

View File

@ -38,14 +38,14 @@ in
+ lib.optionalString enableQt "-qt"
+ lib.optionalString (!enableQt) "-sdl"
+ lib.optionalString forceWayland "-wayland";
version = "1.13.2";
version = "1.14.1";
src = fetchFromGitHub {
owner = "hrydgard";
repo = "ppsspp";
rev = "v${finalAttrs.version}";
fetchSubmodules = true;
sha256 = "sha256-Ubbl2KCZ4QlWDtTxl4my0nKNGY25DOkD/iEurzVx4gU=";
sha256 = "sha256-WGTPd3xcFk4E/Wf+DEv4pzGYf0dppzV3vUTwrYmZ2YM=";
};
postPatch = ''

View File

@ -49,7 +49,7 @@ let
mkLibretroCore =
{ core
, src ? (getCoreSrc core)
, version ? "unstable-2022-11-21"
, version ? "unstable-2022-12-20"
, ...
}@args:
import ./mkLibretroCore.nix ({

View File

@ -2,12 +2,15 @@
, stdenv
, nixosTests
, enableNvidiaCgToolkit ? false
, withAssets ? false
, withCoreInfo ? false
, withGamemode ? stdenv.isLinux
, withVulkan ? stdenv.isLinux
, withWayland ? stdenv.isLinux
, alsa-lib
, dbus
, fetchFromGitHub
, fetchpatch
, ffmpeg_4
, flac
, freetype
@ -25,11 +28,12 @@
, libxml2
, libXxf86vm
, makeWrapper
, mbedtls
, mbedtls_2
, mesa
, nvidia_cg_toolkit
, pkg-config
, python3
, retroarch-assets
, SDL2
, substituteAll
, udev
@ -55,9 +59,15 @@ stdenv.mkDerivation rec {
};
patches = [
(substituteAll {
src = ./use-fixed-path-for-libretro_core_info.patch;
libretro_info_path = libretro-core-info;
./use-default-values-for-libretro_info_path-assets_directory.patch
# TODO: remove these two patches in the next RetroArch release
(fetchpatch {
url = "https://github.com/libretro/RetroArch/commit/894c44c5ea7f1eada9207be3c29e8d5c0a7a9e1f.patch";
hash = "sha256-ThB6jd9pmsipT8zjehz7znK/s0ofHHCJeEYBKur6sO8=";
})
(fetchpatch {
url = "https://github.com/libretro/RetroArch/commit/c5bfd52159cf97312bb28fc42203c39418d1bbbd.patch";
hash = "sha256-rb1maAvCSUgq2VtJ67iqUY+Fz00Fchl8YGG0EPm0+F0=";
})
];
@ -72,7 +82,7 @@ stdenv.mkDerivation rec {
libGL
libGLU
libxml2
mbedtls
mbedtls_2
python3
SDL2
zlib
@ -100,9 +110,18 @@ stdenv.mkDerivation rec {
configureFlags = [
"--disable-update_cores"
"--disable-builtinmbedtls"
"--enable-systemmbedtls"
"--disable-builtinzlib"
"--disable-builtinflac"
] ++
lib.optionals withAssets [
"--disable-update_assets"
"--with-assets_dir=${retroarch-assets}/share"
] ++
lib.optionals withCoreInfo [
"--disable-update_core_info"
"--with-core_info_dir=${libretro-core-info}/share"
] ++
lib.optionals stdenv.isLinux [
"--enable-dbus"
"--enable-egl"
@ -112,9 +131,16 @@ stdenv.mkDerivation rec {
postInstall = lib.optionalString (runtimeLibs != [ ]) ''
wrapProgram $out/bin/retroarch \
--prefix LD_LIBRARY_PATH ':' ${lib.makeLibraryPath runtimeLibs}
'' +
lib.optionalString enableNvidiaCgToolkit ''
wrapProgram $out/bin/retroarch-cg2glsl \
--prefix PATH ':' ${lib.makeBinPath [ nvidia_cg_toolkit ]}
'';
preFixup = "rm $out/bin/retroarch-cg2glsl";
preFixup = lib.optionalString (!enableNvidiaCgToolkit) ''
rm $out/bin/retroarch-cg2glsl
rm $out/share/man/man6/retroarch-cg2glsl.6*
'';
passthru.tests = nixosTests.retroarch;

View File

@ -14,8 +14,8 @@
"beetle-lynx": {
"owner": "libretro",
"repo": "beetle-lynx-libretro",
"rev": "3d2fcc5a555bea748b76f92a082c40227dff8222",
"sha256": "PpFLi9DIvv8igtAqDPkLfH1CjkbeOumcpNCP7K9C1PY="
"rev": "9c48124dc15604b3eb6892e3616dfb77992a6fd6",
"sha256": "ZXFU4QmjVQVU5bE5TVmGm4gepZpuoS8+p60l+Ha4I9s="
},
"beetle-ngp": {
"owner": "libretro",
@ -26,26 +26,26 @@
"beetle-pce-fast": {
"owner": "libretro",
"repo": "beetle-pce-fast-libretro",
"rev": "cc248db4d2f47d0f255fbc1a3c651df4beb3d835",
"sha256": "euoNldhyEPfC9EgEX201mpSjns2qbCIAow0zmMKTnaE="
"rev": "d4fa4480f17f067c3aba25380717a5aee059f026",
"sha256": "t7OJuqEWec3GvNq9dsmrRhgz+GybBzt1ZO6FwZ9L5yE="
},
"beetle-pcfx": {
"owner": "libretro",
"repo": "beetle-pcfx-libretro",
"rev": "08632fcbc039f70dbd6da5810db9dcc304d7fbde",
"sha256": "G+OUs6k8dwH4BK+0X/g47wbY7Dpb3lT5TslLwPWq6g4="
"rev": "af16dfd8353ed6cf76ef381b98a6a9abf59051ec",
"sha256": "snAA5PCU2NRsCiQtBRYEzczPSGG9OT2jDTrGaPZqhic="
},
"beetle-psx": {
"owner": "libretro",
"repo": "beetle-psx-libretro",
"rev": "798fab9d5bc82dde26442d9b4861d377d4689e31",
"sha256": "wHCUSMdPbIudmNm4XXW/zH6TDz7x9DrMNV/L8H3aO/w="
"rev": "3827fb4bd0d36f0db7b59e0c220524c7daaf0430",
"sha256": "CGNzb6XDPsp+EitkgyvDha9DoZSy+e9JWye0nmCiOns="
},
"beetle-saturn": {
"owner": "libretro",
"repo": "beetle-saturn-libretro",
"rev": "054862a4ccb9b2f1bad9e5b075fc3d1116dc8055",
"sha256": "oL9YPvDGkUs0Tm/rNznnV+Tg5mcvqs1VcGVmz/fDHmw="
"rev": "19ce186783174b93b90845c3f0e1fa1694904912",
"sha256": "mEuv9lrDi/q2ASV9hxYptievupcv4PfUWPYlDcNzXQg="
},
"beetle-snes": {
"owner": "libretro",
@ -56,26 +56,26 @@
"beetle-supafaust": {
"owner": "libretro",
"repo": "supafaust",
"rev": "85b5527231a6ad6f9475c15c8ff1b9d16884cd30",
"sha256": "6ynxRfGYlp7Fuq3XT2uHsR9Uwu7WMIYjclLc0Pf/qNM="
"rev": "75c658cce454e58ae04ea252f53a31c60d61548e",
"sha256": "2fXarVfb5/SYXF8t25/fGNFvODpGas5Bi0hLIbXgB+0="
},
"beetle-supergrafx": {
"owner": "libretro",
"repo": "beetle-supergrafx-libretro",
"rev": "3cfafe8c684a2f4f4532bcf18e25d2f8760ca45d",
"sha256": "hIBUMpXgX5zPi/W1vAhkuxprGfZQ/K5ZrtiswV36EMQ="
"rev": "787772dff157c8fe54b2e16bb770f2c344c8932b",
"sha256": "i4SnjIqA0U88FnaT7fz5fqMyp8FyfNvxxhflOaAv1mA="
},
"beetle-vb": {
"owner": "libretro",
"repo": "beetle-vb-libretro",
"rev": "162918f06d9a705330b2ba128e0d3b65fd1a1bcc",
"sha256": "BtrdDob+B5g8Lq93LUhF7E0uWFUIMZneWFgH0VcsgPE="
"rev": "3e845666d7ce235a071eb306e94074f1a72633bf",
"sha256": "ukKzG+O2o6EAF0l7cmMQOkemJ1oweIpRH5rle1gqaFk="
},
"beetle-wswan": {
"owner": "libretro",
"repo": "beetle-wswan-libretro",
"rev": "16d96f64a32cbe1fa89c40b142298dbd007f2f4d",
"sha256": "LBtOQfVvP70OB6qMnFXtWdJUu7CkkMfSQ0iPGhe7xeI="
"rev": "cccee4217e53e164fd70196e56dfb24b967e5fd8",
"sha256": "RpGYQwDWkfYY0qnrTuAMzVuOSfTX5AZph7FD8ijUggc="
},
"blastem": {
"owner": "libretro",
@ -92,8 +92,8 @@
"bsnes": {
"owner": "libretro",
"repo": "bsnes-libretro",
"rev": "7679cb9618c37c9044158d5cf3da28ef25afa9af",
"sha256": "9ozzXvCAuafcZn9iq91tTq16e2mlYqjwauJUGSbFd+k="
"rev": "dabf6679024124b2f819c79f279dbb85a5263255",
"sha256": "iv8gxC48i8JMzby3vR4eYDViqCwSf8JGlPekQE6AF4c="
},
"bsnes-hd": {
"owner": "DerKoun",
@ -110,8 +110,8 @@
"citra": {
"owner": "libretro",
"repo": "citra",
"rev": "70bf7d8a63b0b501e8f5cff89a86a3e2d4083aa0",
"sha256": "uHWROH6/ZAZygkhEQGNyllncCp2XDCdYwy/CKgGKAcM=",
"rev": "f0b09a5c0cb3767d43f5f8ca12a783012298fd44",
"sha256": "v86R5TLmNNMhuTMCwU3mAAtLK5H0sP//soh4x+cFgTQ=",
"fetchSubmodules": true
},
"desmume": {
@ -129,8 +129,8 @@
"dolphin": {
"owner": "libretro",
"repo": "dolphin",
"rev": "a8188dbc4e63d6c0867ed2196f5125130955f012",
"sha256": "gf9OjeDazDPDnQ9S2+hV4CNxPAkCCaEhJDZF97a1//U="
"rev": "2f4b0f7902257d40a054f60b2c670d6e314f2a04",
"sha256": "9WYWbLehExYbPmGJpguhVFXqFJ9aR6VxzFVChd4QOEg="
},
"dosbox": {
"owner": "libretro",
@ -153,8 +153,8 @@
"fbneo": {
"owner": "libretro",
"repo": "fbneo",
"rev": "a12455af75e60765da134b83051700e0fbe3803a",
"sha256": "ujO9KVn7o6xueeEr5GHfOy7NimwNIvYxgMM9xJvtjvo="
"rev": "ef17049274a21239e5f21198b026dacbb38d7b90",
"sha256": "2N7c5L9grp+Rkhj25SoB9K9rVHq4H9IzU2KSeb1O7/E="
},
"fceumm": {
"owner": "libretro",
@ -189,8 +189,8 @@
"genesis-plus-gx": {
"owner": "libretro",
"repo": "Genesis-Plus-GX",
"rev": "3abf975785fe77267a399cc583ccf1469e081b86",
"sha256": "QdiWKS7j80Sw0L+hf6efmQ40lQi/f95pFLQfoohoUKg="
"rev": "74a2f6521aea975a51f99497b57c5db500d61ed9",
"sha256": "qTNbFXg5QFKSzMOWhDdDfc0FinF/D7n2OruG5zv+ANY="
},
"gpsp": {
"owner": "libretro",
@ -219,8 +219,8 @@
"mame": {
"owner": "libretro",
"repo": "mame",
"rev": "57622367cb780013690d6ef23b2066b500f6ce92",
"sha256": "0iR1JGAhwYXXLnv8BDW1bsxfFywEI82aov2+MHw5w6Q="
"rev": "85581d60bb24fea14542b154aef2c7b624f5b60f",
"sha256": "AUqJAXJCvddv9vPqXt5EZncKNdeLaXoc6xhYWqOMebY="
},
"mame2000": {
"owner": "libretro",
@ -231,14 +231,14 @@
"mame2003": {
"owner": "libretro",
"repo": "mame2003-libretro",
"rev": "dbdda8e7189d63061ac42f502c0cd2dc7f1f8651",
"sha256": "XED/gunYOc+NnQ8YORw/ALP2eCTyvRdIxPiFpNf5nuA="
"rev": "b1cc49cf1d8bbef88b890e1c2a315a39d009171b",
"sha256": "bc4uER92gHf20JjR/Qcetvlu89ZmldJ1DiQphJZt/EA="
},
"mame2003-plus": {
"owner": "libretro",
"repo": "mame2003-plus-libretro",
"rev": "5dd4a30500edc0b00c712750093aa287c9bb4ce2",
"sha256": "Nvm5U6rpsDZdUJONtvZ6YmztuupLaXz2QT0SBJtzO/4="
"rev": "3249de7ceaaa92ee18e93cbd8c2ace9f1ee34c08",
"sha256": "mBF1j4em4e/fKEmPA8MmAZrXXYQiqFfAloOHdMbVq+k="
},
"mame2010": {
"owner": "libretro",
@ -261,14 +261,14 @@
"melonds": {
"owner": "libretro",
"repo": "melonds",
"rev": "5e52c245fb38cabe881fbfa6513280ee44fc5bd8",
"sha256": "jWBZ5wg1dKEgoEV09VTGJ+I4+8uiivAHhpTiD9tPaYg="
"rev": "0e1f06da626cbe67215c3f06f6bdf510dd4e4649",
"sha256": "ax9Vu8+1pNAHWPXrx5QA0n5EsmaJ2T7KJ5Otz8DSZwM="
},
"mesen": {
"owner": "libretro",
"repo": "mesen",
"rev": "9b412c1533a6d7eec7b2904775cbd26c21f02119",
"sha256": "Tf+lWfSU7AuW6Um5TXkWNAeg35W08YkYQwW0Yx3iNTM="
"rev": "c89474c9d87df967d21b7b7d5971dc9475fec028",
"sha256": "cnPNBWXbnCpjgW/wJIboiRBzv3zrHWxpNM1kg09ShLU="
},
"mesen-s": {
"owner": "libretro",
@ -291,20 +291,20 @@
"mupen64plus": {
"owner": "libretro",
"repo": "mupen64plus-libretro-nx",
"rev": "1b67122ff6a923c93a56ff94273e3768a6da5dff",
"sha256": "qORxhy7hXVdGUkQumOmGVXnF1kW0BShMNBVlaRu3a1w="
"rev": "bc241538b9ef85d8b22c392d7699dc73f460e283",
"sha256": "eCosI2yL1HJpHWvZLYZQe6+1rmmyHLFYCY7bX+3hPec="
},
"neocd": {
"owner": "libretro",
"repo": "neocd_libretro",
"rev": "b7d96e794f2dfa500cba46c78cbc3c28349cfd05",
"sha256": "TG5xIqIM0MlHDNtPhyISqo/ctTqemKROwXgoqUsCQ0E="
"rev": "53f5453311a1ac43700fedb2317c810586f9ccf5",
"sha256": "BZBpojShHk+j5wz/d7FnykpX562TgH6PAqTUigE+zUU="
},
"nestopia": {
"owner": "libretro",
"repo": "nestopia",
"rev": "5c360e55d5437ecd3520568ee44cf1af63d4696a",
"sha256": "+1QQc4gVZ5ZHt/I0bjRkW+kbPaeGUNrjbrzUoVz4drM="
"rev": "d30c55052292826836f6dbaa2adc46fdf1a2d93c",
"sha256": "R2Kbtr2EqNUyx5eGBYyyw/ugSxVRM70TP/IsIsU0EZM="
},
"np2kai": {
"owner": "AZO234",
@ -340,34 +340,34 @@
"pcsx2": {
"owner": "libretro",
"repo": "pcsx2",
"rev": "ad7650949e6c8c87cd2c5e278af88e3722a321bc",
"sha256": "iqXCW28werxbZNo1hlDLiD3ywSZ9hvWmxwGPJ5bRZ+w="
"rev": "d2e37b80cfe6f6eecfe0356c7537d8e98bee7a8d",
"sha256": "rHXJG2wGoyNGvxxeZVF/I1CpaSBPUwZNERJtkG/z7MU="
},
"pcsx_rearmed": {
"owner": "libretro",
"repo": "pcsx_rearmed",
"rev": "a4e249a1373cf6269e1e4e0d60105e72210e67d3",
"sha256": "NOz2NQonVWEhEhAgSFHSWv6bmuTPcw0R9ihISlGwkb0="
"rev": "aced3eb3fcaa0fe13c44c4dd196cdab42555fd98",
"sha256": "RzcrSADagi3AIPINQxc36BfMjWjatP/JL6HY744XnZk="
},
"picodrive": {
"owner": "libretro",
"repo": "picodrive",
"rev": "0a4ec83cbfaebb65fb1c40f26ffaf28131f9003b",
"sha256": "NOMQoDmXGrxrquAcSLo6Otcz8bH4gnhqcG/zzet3Dtk=",
"rev": "62873cab5366999207c197e9f55987daee10be4a",
"sha256": "YErmanNczeh6BeanCGllwOoTjXO+9At8l/o4UhIek4o=",
"fetchSubmodules": true
},
"play": {
"owner": "jpd002",
"repo": "Play-",
"rev": "ad3b855c6d8cc62c85e2a5d2f659159fdfaa8d80",
"sha256": "+uTf/xv2JHuNGx0bxFNXf0akRzonzRMT7gSvT2n12+o=",
"rev": "0483fc43da01b5b29883acb2cf1d02d33bba1e30",
"sha256": "OxBQFTQP0L8k0lH88Ey6KWybW912Ehsv7XjWrvFivxo=",
"fetchSubmodules": true
},
"ppsspp": {
"owner": "hrydgard",
"repo": "ppsspp",
"rev": "e654f6937a02f4a2ac8cce3574ab4f2db99f77d4",
"sha256": "LTqRA3KMV/VuQH0eTWjpOqy0U944c4ofPNEsexf93Kc=",
"rev": "1fa2f7a97191d2a73f243bfc464edef69b26f652",
"sha256": "BDX2eHtFbsloC9XYORHwpix8tbRSQUbcoP7DKFIohW4=",
"fetchSubmodules": true
},
"prboom": {
@ -385,8 +385,8 @@
"puae": {
"owner": "libretro",
"repo": "libretro-uae",
"rev": "d9a8dfbde7f6967fea3cffe09cd87e1d79a1a3fd",
"sha256": "uMn9ejknjwGmbc0JOu/xl30z3ff7vpxtA3qr2sv0glI="
"rev": "af9e35383c00980aabb38c929e679704b624dee0",
"sha256": "hp4XOQUKktmUfLtRfVv1Oe1vqHUYu+vagxSSef55APs="
},
"quicknes": {
"owner": "libretro",
@ -439,8 +439,8 @@
"stella": {
"owner": "stella-emu",
"repo": "stella",
"rev": "fa49e034101a22344c7bd01648d514b6cc61ac7f",
"sha256": "Svv+j7/9PvZ6djk2kfpbr9iUC8xqX8B4Plnf43Hj62A="
"rev": "82da36dd685c68b09047d7c835175879edb68653",
"sha256": "y7AOSY2VUe4Jv+wteplvA1ul5iXHoeYQhgycD+nfIuc="
},
"stella2014": {
"owner": "libretro",
@ -451,8 +451,8 @@
"swanstation": {
"owner": "libretro",
"repo": "swanstation",
"rev": "27a224fc9e86e0f061504878d1c0cbf3fd6891af",
"sha256": "5kW9/4gMfyvo3ExlJVivx8LhW5as3Mq5fhlNrIFDUVM="
"rev": "f2e335bfd4751410dfb24d933f762b9a4fd7fdeb",
"sha256": "l3A1Xb6YD+OOTZEF6whst1Kr8fSRnXuIVIUN1BCa2Bw="
},
"tgbdual": {
"owner": "libretro",

View File

@ -14,7 +14,12 @@ stdenvNoCC.mkDerivation rec {
rev = "v${version}";
};
makeFlags = [ "PREFIX=$(out)" ];
makeFlags = [
"PREFIX=$(out)"
# By default this installs into $(PREFIX)/share/libretro/info,
# which is not in RetroArch's core info path
"INSTALLDIR=$(PREFIX)/share/retroarch/cores"
];
dontBuild = true;

Some files were not shown because too many files have changed in this diff