merge master

Kasper Gałkowski 2023-03-14 17:03:07 +01:00
commit e5eb0322a2
2013 changed files with 63230 additions and 45717 deletions

.github/CODEOWNERS

@ -237,6 +237,7 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/pkgs/applications/editors/vim/plugins @figsoda @jonringer
# VsCode Extensions
/pkgs/applications/editors/vscode @superherointj
/pkgs/applications/editors/vscode/extensions @jonringer
# Prometheus exporter modules and tests


@ -27,6 +27,7 @@ jobs:
uses: korthout/backport-action@v1.2.0
with:
# Config README: https://github.com/korthout/backport-action#backport-action
copy_labels_pattern: 'severity:\ssecurity'
pull_description: |-
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.


@ -19,7 +19,7 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
- uses: cachix/cachix-action@v12
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.


@ -28,16 +28,14 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/c473cc8714710179df205b153f4e9fa007107ff9.tar.gz
- name: install editorconfig-checker
run: nix-env -iA editorconfig-checker -f '<nixpkgs>'
- name: Checking EditorConfig
run: |
cat "$HOME/changed_files" | xargs -r editorconfig-checker -disable-indent-size
cat "$HOME/changed_files" | nix-shell -p editorconfig-checker --run 'xargs -r editorconfig-checker -disable-indent-size'
- if: ${{ failure() }}
run: |
echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again."


@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -8,6 +8,7 @@ on:
- master
paths:
- 'doc/**'
- 'lib/**'
jobs:
nixpkgs:
@ -18,7 +19,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup


@ -71,6 +71,7 @@ The main difference between `fetchurl` and `fetchzip` is in how they store the c
- `relative`: Similar to using `git-diff`'s `--relative` flag, only keep changes inside the specified directory, making paths relative to it.
- `stripLen`: Remove the first `stripLen` components of pathnames in the patch.
- `decode`: Pipe the downloaded data through this command before processing it as a patch.
- `extraPrefix`: Prefix pathnames by this string.
- `excludes`: Exclude files matching these patterns (applies after the above arguments).
- `includes`: Include only files matching these patterns (applies after the above arguments).
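For example, a hypothetical sketch combining several of these arguments (the URL is a placeholder and `lib.fakeHash` stands in for the real output hash):

```nix
{ lib, fetchpatch }:

fetchpatch {
  # Hypothetical upstream patch.
  url = "https://github.com/example/project/commit/0123456789abcdef.patch";
  # Keep only hunks that touch src/, making paths relative to it.
  relative = "src";
  # Then prepend this prefix to every path in the patch.
  extraPrefix = "third_party/project/";
  # Applied after the arguments above.
  excludes = [ "*.md" ];
  hash = lib.fakeHash; # replace with the real hash after the first build
}
```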


@ -101,6 +101,7 @@ in
diskSize = "auto";
additionalSpace = "0M"; # Defaults to 512M.
copyChannel = false;
memSize = 2048; # Qemu VM memory size in megabytes. Defaults to 1024M.
}
```
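For context, these attributes are arguments to the image builder; a hypothetical complete invocation (the import path is the conventional one, shown as an assumption) could look like:

```nix
{ pkgs, lib, config, ... }:

import <nixpkgs/nixos/lib/make-disk-image.nix> {
  inherit pkgs lib config;
  format = "qcow2";
  diskSize = "auto";
  additionalSpace = "0M"; # Defaults to 512M.
  copyChannel = false;
  memSize = 2048; # QEMU VM memory size in megabytes. Defaults to 1024.
}
```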


@ -71,8 +71,10 @@ $ nix-env -f '<nixpkgs>' -qaP -A haskell.compiler
haskell.compiler.ghc810 ghc-8.10.7
haskell.compiler.ghc88 ghc-8.8.4
haskell.compiler.ghc90 ghc-9.0.2
haskell.compiler.ghc92 ghc-9.2.4
haskell.compiler.ghc924 ghc-9.2.4
haskell.compiler.ghc925 ghc-9.2.5
haskell.compiler.ghc926 ghc-9.2.6
haskell.compiler.ghc92 ghc-9.2.7
haskell.compiler.ghc942 ghc-9.4.2
haskell.compiler.ghc943 ghc-9.4.3
haskell.compiler.ghc94 ghc-9.4.4
@ -86,13 +88,15 @@ haskell.compiler.ghc924Binary ghc-binary-9.2.4
haskell.compiler.ghc924BinaryMinimal ghc-binary-9.2.4
haskell.compiler.integer-simple.ghc810 ghc-integer-simple-8.10.7
haskell.compiler.integer-simple.ghc8107 ghc-integer-simple-8.10.7
haskell.compiler.integer-simple.ghc884 ghc-integer-simple-8.8.4
haskell.compiler.integer-simple.ghc88 ghc-integer-simple-8.8.4
haskell.compiler.integer-simple.ghc884 ghc-integer-simple-8.8.4
haskell.compiler.native-bignum.ghc90 ghc-native-bignum-9.0.2
haskell.compiler.native-bignum.ghc902 ghc-native-bignum-9.0.2
haskell.compiler.native-bignum.ghc92 ghc-native-bignum-9.2.4
haskell.compiler.native-bignum.ghc924 ghc-native-bignum-9.2.4
haskell.compiler.native-bignum.ghc925 ghc-native-bignum-9.2.5
haskell.compiler.native-bignum.ghc926 ghc-native-bignum-9.2.6
haskell.compiler.native-bignum.ghc92 ghc-native-bignum-9.2.7
haskell.compiler.native-bignum.ghc927 ghc-native-bignum-9.2.7
haskell.compiler.native-bignum.ghc942 ghc-native-bignum-9.4.2
haskell.compiler.native-bignum.ghc943 ghc-native-bignum-9.4.3
haskell.compiler.native-bignum.ghc94 ghc-native-bignum-9.4.4
@ -105,15 +109,15 @@ Each of those compiler versions has a corresponding attribute set built using
it. However, the non-standard package sets are not tested regularly and, as a
result, contain fewer working packages. The corresponding package set for GHC
9.4.4 is `haskell.packages.ghc944`. In fact `haskellPackages` is just an alias
for `haskell.packages.ghc924`:
for `haskell.packages.ghc927`:
```console
$ nix-env -f '<nixpkgs>' -qaP -A haskell.packages.ghc924
haskell.packages.ghc924.a50 a50-0.5
haskell.packages.ghc924.AAI AAI-0.2.0.1
haskell.packages.ghc924.aasam aasam-0.2.0.0
haskell.packages.ghc924.abacate abacate-0.0.0.0
haskell.packages.ghc924.abc-puzzle abc-puzzle-0.2.1
$ nix-env -f '<nixpkgs>' -qaP -A haskell.packages.ghc927
haskell.packages.ghc927.a50 a50-0.5
haskell.packages.ghc927.AAI AAI-0.2.0.1
haskell.packages.ghc927.aasam aasam-0.2.0.0
haskell.packages.ghc927.abacate abacate-0.0.0.0
haskell.packages.ghc927.abc-puzzle abc-puzzle-0.2.1
```
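As an illustration (not part of the manual text), here is a minimal expression that builds a GHC 9.2.7 environment with libraries from the matching package set; the library choices are arbitrary examples:

```nix
with import <nixpkgs> { };

# `ghcWithPackages` wraps GHC so it can see the selected libraries.
haskell.packages.ghc927.ghcWithPackages (ps: with ps; [
  aeson
  containers
])
```

Running `nix-build` on such a file should produce a wrapped GHC that can compile programs against those libraries.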


@ -13,7 +13,7 @@ into your `configuration.nix` or bring them into scope with `nix-shell -p rustc
For other versions such as daily builds (beta and nightly),
use either `rustup` from nixpkgs (which will manage the rust installation in your home directory),
or use a community maintained [Rust overlay](#using-community-rust-overlays).
or use [community maintained Rust toolchains](#using-community-maintained-rust-toolchains).
## `buildRustPackage`: Compiling Rust applications with Cargo {#compiling-rust-applications-with-cargo}
@ -686,31 +686,61 @@ $ cargo build
$ cargo test
```
### Controlling Rust Version Inside `nix-shell` {#controlling-rust-version-inside-nix-shell}
## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains}
To control your rust version (i.e. use nightly) from within `shell.nix` (or
other nix expressions) you can use the following `shell.nix`
::: {.note}
The following projects cannot be used within nixpkgs, since [IFD](#ssec-import-from-derivation) is disallowed there.
To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack.
:::
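As a rough sketch of that hack (the package is hypothetical; only the `RUSTC_BOOTSTRAP` attribute is the point):

```nix
{ lib, rustPlatform }:

rustPlatform.buildRustPackage {
  pname = "uses-nightly-features"; # hypothetical package
  version = "0.1.0";
  src = ./.;
  cargoHash = lib.fakeHash; # replace with the real hash after the first build
  # Pretend to be the compiler's own bootstrap build so that stable rustc
  # accepts `#![feature(...)]` gates. Use sparingly.
  RUSTC_BOOTSTRAP = true;
}
```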
There are two community maintained approaches to Rust toolchain management:
- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
- [fenix](https://github.com/nix-community/fenix)
Despite their names, both projects provide a similar set of packages and overlays under different APIs.
Oxalica's overlay allows you to select a particular Rust version without providing a hash or a flake input,
but it comes with a larger Git repository than fenix.
Fenix also provides rust-analyzer nightly in addition to the Rust toolchains.
Both oxalica's overlay and fenix integrate better with Nix and its caching.
Because of this and their ergonomics, either of those community projects
should be preferred to Mozilla's Rust overlay ([nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla)).
The following documentation demonstrates examples using fenix and oxalica's Rust overlay
with `nix-shell` and for building derivations. More advanced usage, such as with flakes,
is documented in the projects' own repositories.
### Using Rust nightly with `nix-shell` {#using-rust-nightly-with-nix-shell}
Here is a simple `shell.nix` that provides Rust nightly (default profile) using fenix:
```nix
# Latest Nightly
with import <nixpkgs> {};
let src = fetchFromGitHub {
owner = "mozilla";
repo = "nixpkgs-mozilla";
# commit from: 2019-05-15
rev = "9f35c4b09fd44a77227e79ff0c1b4b6a69dff533";
hash = "sha256-18h0nvh55b5an4gmlgfbvwbyqj91bklf1zymis6lbdh75571qaz0=";
};
with import <nixpkgs> { };
let
fenix = callPackage
(fetchFromGitHub {
owner = "nix-community";
repo = "fenix";
# commit from: 2023-03-03
rev = "e2ea04982b892263c4d939f1cc3bf60a9c4deaa1";
hash = "sha256-AsOim1A8KKtMWIxG+lXh5Q4P2bhOZjoUhFWJ1EuZNNk=";
})
{ };
in
with import "${src.out}/rust-overlay.nix" pkgs pkgs;
stdenv.mkDerivation {
mkShell {
name = "rust-env";
buildInputs = [
# Note: to use stable, just replace `nightly` with `stable`
latest.rustChannels.nightly.rust
nativeBuildInputs = [
# Note: to use stable, just replace `default` with `stable`
fenix.default.toolchain
# Add some extra dependencies from `pkgs`
pkg-config openssl
# Example Build-time Additional Dependencies
pkg-config
];
buildInputs = [
# Example Run-time Additional Dependencies
openssl
];
# Set Environment Variables
@ -718,116 +748,66 @@ stdenv.mkDerivation {
}
```
Now run:
Save this to `shell.nix`, then run:
```ShellSession
$ rustc --version
rustc 1.26.0-nightly (188e693b3 2018-03-26)
rustc 1.69.0-nightly (13471d3b2 2023-03-02)
```
to confirm that you are using nightly.
## Using community Rust overlays {#using-community-rust-overlays}
Oxalica's Rust overlay has more complete examples of `shell.nix` (and cross compilation) under its
[`examples` directory](https://github.com/oxalica/rust-overlay/tree/e53e8853aa7b0688bc270e9e6a681d22e01cf299/examples).
There are two community maintained approaches to Rust toolchain management:
- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
- [fenix](https://github.com/nix-community/fenix)
### Using Rust nightly in a derivation with `buildRustPackage` {#using-rust-nightly-in-a-derivation-with-buildrustpackage}
Oxalica's overlay allows you to select a particular Rust version and components.
See [their documentation](https://github.com/oxalica/rust-overlay#rust-overlay) for more
detailed usage.
You can also use Rust nightly to build rust packages using `makeRustPlatform`.
The below snippet demonstrates invoking `buildRustPackage` with a Rust toolchain from oxalica's overlay:
Fenix is an alternative to `rustup` and can also be used as an overlay.
Both oxalica's overlay and fenix integrate better with Nix and its caching.
Because of this and their ergonomics, either of those community projects
should be preferred to Mozilla's Rust overlay (`nixpkgs-mozilla`).
### How to select a specific `rustc` and toolchain version {#how-to-select-a-specific-rustc-and-toolchain-version}
You can consume the oxalica overlay and use it to grab a specific Rust toolchain version.
Here is an example `shell.nix` showing how to grab the current stable toolchain:
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
}
}:
pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
rust-bin.stable.latest.minimal
];
}
```
You can try this out by:
1. Saving that to `shell.nix`
2. Executing `nix-shell --pure --command 'rustc --version'`
As of writing, this prints out `rustc 1.56.0 (09c42c458 2021-10-18)`.
### How to use an overlay toolchain in a derivation {#how-to-use-an-overlay-toolchain-in-a-derivation}
You can also use an overlay's Rust toolchain with `buildRustPackage`.
The below snippet demonstrates invoking `buildRustPackage` with an oxalica overlay selected Rust toolchain:
```nix
with import <nixpkgs> {
with import <nixpkgs>
{
overlays = [
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
};
let
rustPlatform = makeRustPlatform {
cargo = rust-bin.stable.latest.minimal;
rustc = rust-bin.stable.latest.minimal;
};
in
rustPlatform.buildRustPackage rec {
pname = "ripgrep";
version = "12.1.1";
nativeBuildInputs = [
rust-bin.stable.latest.minimal
];
src = fetchFromGitHub {
owner = "BurntSushi";
repo = "ripgrep";
rev = version;
hash = "sha256-1hqps7l5qrjh9f914r5i6kmcz6f1yb951nv4lby0cjnp5l253kps=";
hash = "sha256-+s5RBC3XSgb8omTbUNLywZnP6jSxZBKSS1BmXOjRF8M=";
};
cargoSha256 = "03wf9r2csi6jpa7v5sw5lpxkrk4wfzwmzx7k3991q3bdjzcwnnwp";
cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
doCheck = false;
meta = with lib; {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
license = with licenses; [ mit unlicense ];
maintainers = with maintainers; [ tailhook ];
};
}
```
Follow the steps below to try that snippet.
1. create a new directory
1. save the above snippet as `default.nix` in that directory
1. cd into that directory and run `nix-build`
2. cd into that directory and run `nix-build`
### Rust overlay installation {#rust-overlay-installation}
You can use this overlay by either changing your local nixpkgs configuration,
or by adding the overlay declaratively in a nix expression, e.g. in `configuration.nix`.
For more information see [the manual on installing overlays](#sec-overlays-install).
### Declarative Rust overlay installation {#declarative-rust-overlay-installation}
This snippet shows how to use oxalica's Rust overlay.
Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (builtins.fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
# Further overlays go here
];
};
};
```
Note that this will fetch the latest overlay version when rebuilding your system.
Fenix also has examples with `buildRustPackage`,
[crane](https://github.com/ipetkov/crane),
[naersk](https://github.com/nix-community/naersk),
and cross compilation in its [Examples](https://github.com/nix-community/fenix#examples) section.


@ -1,4 +1,7 @@
{ " " = 32;
{ "\t" = 9;
"\n" = 10;
"\r" = 13;
" " = 32;
"!" = 33;
"\"" = 34;
"#" = 35;


@ -333,6 +333,66 @@ rec {
) (attrNames set)
);
/*
Like builtins.foldl' but for attribute sets.
Iterates over every name-value pair in the given attribute set.
The result of the callback function is often called `acc` for accumulator. It is passed between callbacks from left to right and the final `acc` is the return value of `foldlAttrs`.
Attention:
There is a completely different function
`lib.foldAttrs`
which has nothing to do with this function, despite the similar name.
Example:
foldlAttrs
(acc: name: value: {
sum = acc.sum + value;
names = acc.names ++ [name];
})
{ sum = 0; names = []; }
{
foo = 1;
bar = 10;
}
->
{
sum = 11;
names = ["bar" "foo"];
}
foldlAttrs
(throw "function not needed")
123
{};
->
123
foldlAttrs
(_: _: v: v)
(throw "initial accumulator not needed")
{ z = 3; a = 2; };
->
3
The accumulator doesn't have to be an attrset.
It can be as simple as a number or string.
foldlAttrs
(acc: _: v: acc * 10 + v)
1
{ z = 1; a = 2; };
->
121
Type:
foldlAttrs :: ( a -> String -> b -> a ) -> a -> { ... :: b } -> a
*/
foldlAttrs = f: init: set:
foldl'
(acc: name: f acc name set.${name})
init
(attrNames set);
/* Apply fold functions to values grouped by key.
Example:


@ -250,90 +250,4 @@ rec {
{ testX = allTrue [ true ]; }
*/
testAllTrue = expr: { inherit expr; expected = map (x: true) expr; };
# -- DEPRECATED --
traceShowVal = x: trace (showVal x) x;
traceShowValMarked = str: x: trace (str + showVal x) x;
attrNamesToStr = a:
trace ( "Warning: `attrNamesToStr` is deprecated "
+ "and will be removed in the next release. "
+ "Please use more specific concatenation "
+ "for your uses (`lib.concat(Map)StringsSep`)." )
(concatStringsSep "; " (map (x: "${x}=") (attrNames a)));
showVal =
trace ( "Warning: `showVal` is deprecated "
+ "and will be removed in the next release, "
+ "please use `traceSeqN`" )
(let
modify = v:
let pr = f: { __pretty = f; val = v; };
in if isDerivation v then pr
(drv: "<δ:${drv.name}:${concatStringsSep ","
(attrNames drv)}>")
else if [] == v then pr (const "[]")
else if isList v then pr (l: "[ ${go (head l)}, ]")
else if isAttrs v then pr
(a: "{ ${ concatStringsSep ", " (attrNames a)} }")
else v;
go = x: generators.toPretty
{ allowPrettyValues = true; }
(modify x);
in go);
traceXMLVal = x:
trace ( "Warning: `traceXMLVal` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `traceValFn builtins.toXML`." )
(trace (builtins.toXML x) x);
traceXMLValMarked = str: x:
trace ( "Warning: `traceXMLValMarked` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `traceValFn (x: str + builtins.toXML x)`." )
(trace (str + builtins.toXML x) x);
# trace the arguments passed to function and its result
# maybe rewrite these functions in a traceCallXml like style. Then one function is enough
traceCall = n: f: a: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a));
traceCall2 = n: f: a: b: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b));
traceCall3 = n: f: a: b: c: let t = n2: x: traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c));
traceValIfNot = c: x:
trace ( "Warning: `traceValIfNot` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `if/then/else` and `traceValSeq 1`.")
(if c x then true else traceSeq (showVal x) false);
addErrorContextToAttrs = attrs:
trace ( "Warning: `addErrorContextToAttrs` is deprecated "
+ "and will be removed in the next release. "
+ "Please use `builtins.addErrorContext` directly." )
(mapAttrs (a: v: addErrorContext "while evaluating ${a}" v) attrs);
# example: (traceCallXml "myfun" id 3) will output something like
# calling myfun arg 1: 3 result: 3
# this forces deep evaluation of all arguments and the result!
# note: if result doesn't evaluate you'll get no trace at all (FIXME)
# args should be printed in any case
traceCallXml = a:
trace ( "Warning: `traceCallXml` is deprecated "
+ "and will be removed in the next release. "
+ "Please complain if you use the function regularly." )
(if !isInt a then
traceCallXml 1 "calling ${a}\n"
else
let nr = a;
in (str: expr:
if isFunction expr then
(arg:
traceCallXml (builtins.add 1 nr) "${str}\n arg ${builtins.toString nr} is \n ${builtins.toXML (builtins.seq arg arg)}" (expr arg)
)
else
let r = builtins.seq expr expr;
in trace "${str}\n result:\n${builtins.toXML r}" r
));
}


@ -78,7 +78,7 @@ let
composeManyExtensions makeExtensible makeExtensibleWithCustomName;
inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath
getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
filterAttrsRecursive foldAttrs collect nameValuePair mapAttrs
filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs
mapAttrs' mapAttrsToList concatMapAttrs mapAttrsRecursive mapAttrsRecursiveCond
genAttrs isDerivation toDerivation optionalAttrs
zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
@ -100,7 +100,7 @@ let
escapeShellArg escapeShellArgs
isStorePath isStringLike
isValidPosixName toShellVar toShellVars
escapeRegex escapeXML replaceChars lowerChars
escapeRegex escapeURL escapeXML replaceChars lowerChars
upperChars toLower toUpper addContextFrom splitString
removePrefix removeSuffix versionOlder versionAtLeast
getName getVersion
@ -145,11 +145,10 @@ let
isOptionType mkOptionType;
inherit (self.asserts)
assertMsg assertOneOf;
inherit (self.debug) addErrorContextToAttrs traceIf traceVal traceValFn
traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
traceValSeqFn traceValSeqN traceValSeqNFn traceFnSeqN traceShowVal
traceShowValMarked showVal traceCall traceCall2 traceCall3
traceValIfNot runTests testAllTrue traceCallXml attrNamesToStr;
inherit (self.debug) traceIf traceVal traceValFn
traceSeq traceSeqN traceValSeq
traceValSeqFn traceValSeqN traceValSeqNFn traceFnSeqN
runTests testAllTrue;
inherit (self.misc) maybeEnv defaultMergeArg defaultMerge foldArgs
maybeAttrNullable maybeAttr ifEnable checkFlag getValue
checkReqs uniqList uniqListExt condConcat lazyGenericClosure


@ -81,7 +81,6 @@ in mkLicense lset) ({
apsl10 = {
spdxId = "APSL-1.0";
fullName = "Apple Public Source License 1.0";
url = "https://web.archive.org/web/20040701000000*/http://www.opensource.apple.com/apsl/1.0.txt";
};
apsl20 = {
@ -225,6 +224,12 @@ in mkLicense lset) ({
fullName = "Creative Commons Zero v1.0 Universal";
};
cc-by-nc-nd-30 = {
spdxId = "CC-BY-NC-ND-3.0";
fullName = "Creative Commons Attribution Non Commercial No Derivative Works 3.0 Unported";
free = false;
};
cc-by-nc-sa-20 = {
spdxId = "CC-BY-NC-SA-2.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 2.0";


@ -21,6 +21,7 @@ let
isBool
isFunction
isList
isPath
isString
length
mapAttrs
@ -45,6 +46,9 @@ let
showOption
unknownModule
;
inherit (lib.strings)
isConvertibleWithToString
;
showDeclPrefix = loc: decl: prefix:
" - option(s) with prefix `${showOption (loc ++ [prefix])}' in module `${decl._file}'";
@ -403,7 +407,7 @@ rec {
key = module.key;
module = module;
modules = collectedImports.modules;
disabled = module.disabledModules ++ collectedImports.disabled;
disabled = (if module.disabledModules != [] then [{ file = module._file; disabled = module.disabledModules; }] else []) ++ collectedImports.disabled;
}) initialModules);
# filterModules :: String -> { disabled, modules } -> [ Module ]
@ -412,10 +416,30 @@ rec {
# modules recursively. It returns the final list of unique-by-key modules
filterModules = modulesPath: { disabled, modules }:
let
moduleKey = m: if isString m && (builtins.substring 0 1 m != "/")
then toString modulesPath + "/" + m
else toString m;
disabledKeys = map moduleKey disabled;
moduleKey = file: m:
if isString m
then
if builtins.substring 0 1 m == "/"
then m
else toString modulesPath + "/" + m
else if isConvertibleWithToString m
then
if m?key && m.key != toString m
then
throw "Module `${file}` contains a disabledModules item that is an attribute set that can be converted to a string (${toString m}) but also has a `.key` attribute (${m.key}) with a different value. This makes it ambiguous which module should be disabled."
else
toString m
else if m?key
then
m.key
else if isAttrs m
then throw "Module `${file}` contains a disabledModules item that is an attribute set, presumably a module, that does not have a `key` attribute. This means that the module system doesn't have any means to identify the module that should be disabled. Make sure that you've put the correct value in disabledModules: a string path relative to modulesPath, a path value, or an attribute set with a `key` attribute."
else throw "Each disabledModules item must be a path, string, or a attribute set with a key attribute, or a value supported by toString. However, one of the disabledModules items in `${toString file}` is none of that, but is of type ${builtins.typeOf m}.";
disabledKeys = concatMap ({ file, disabled }: map (moduleKey file) disabled) disabled;
keyFilter = filter (attrs: ! elem attrs.key disabledKeys);
in map (attrs: attrs.module) (builtins.genericClosure {
startSet = keyFilter modules;


@ -110,10 +110,6 @@ rec {
/* Creates an Option attribute set for an option that specifies the
package a module should use for some purpose.
Type: mkPackageOption :: pkgs -> (string|[string]) ->
{ default? :: [string], example? :: null|string|[string], extraDescription? :: string } ->
option
The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.
@ -133,6 +129,8 @@ rec {
If you wish to explicitly provide no default, pass `null` as `default`.
Type: mkPackageOption :: pkgs -> (string|[string]) -> { default? :: [string], example? :: null|string|[string], extraDescription? :: string } -> option
Example:
mkPackageOption pkgs "hello" { }
=> { _type = "option"; default = «derivation /nix/store/3r2vg51hlxj3cx5vscp0vkv60bqxkaq0-hello-2.10.drv»; defaultText = { ... }; description = "The hello package to use."; type = { ... }; }
@ -157,11 +155,11 @@ rec {
# Name for the package, shown in option description
name:
{
# The attribute path where the default package is located
# The attribute path where the default package is located (may be omitted)
default ? name,
# A string or an attribute path to use as an example
# A string or an attribute path to use as an example (may be omitted)
example ? null,
# Additional text to include in the option description
# Additional text to include in the option description (may be omitted)
extraDescription ? "",
}:
let


@ -4,6 +4,8 @@ let
inherit (builtins) length;
asciiTable = import ./ascii-table.nix;
in
rec {
@ -327,9 +329,7 @@ rec {
=> 40
*/
charToInt = let
table = import ./ascii-table.nix;
in c: builtins.getAttr c table;
charToInt = c: builtins.getAttr c asciiTable;
/* Escape occurrence of the elements of `list` in `string` by
prefixing it with a backslash.
@ -355,6 +355,21 @@ rec {
*/
escapeC = list: replaceStrings list (map (c: "\\x${ toLower (lib.toHexString (charToInt c))}") list);
/* Escape the string so it can be safely placed inside a URL
query.
Type: escapeURL :: string -> string
Example:
escapeURL "foo/bar baz"
=> "foo%2Fbar%20baz"
*/
escapeURL = let
unreserved = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
toEscape = builtins.removeAttrs asciiTable unreserved;
in
replaceStrings (builtins.attrNames toEscape) (lib.mapAttrsToList (_: c: "%${fixedWidthString 2 "0" (lib.toHexString c)}") toEscape);
/* Quote string to be used safely within the Bourne shell.
Type: escapeShellArg :: string -> string


@ -140,6 +140,7 @@ rec {
qemuArch =
if final.isAarch32 then "arm"
else if final.isS390 && !final.isS390x then null
else if final.isx86_64 then "x86_64"
else if final.isx86 then "i386"
else final.uname.processor;
@ -193,7 +194,7 @@ rec {
then "${pkgs.runtimeShell} -c '\"$@\"' --"
else if final.isWindows
then "${wine}/bin/wine${lib.optionalString (final.parsed.cpu.bits == 64) "64"}"
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux && final.qemuArch != null
then "${qemu-user}/bin/qemu-${final.qemuArch}"
else if final.isWasi
then "${pkgs.wasmtime}/bin/wasmtime"


@ -22,7 +22,7 @@ let
"x86_64-solaris"
# JS
"js-ghcjs"
"javascript-ghcjs"
# Linux
"aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux"


@ -329,6 +329,9 @@ rec {
# Ghcjs
ghcjs = {
config = "js-unknown-ghcjs";
# This triple is special to GHC/Cabal/GHCJS and not recognized by autotools
# See: https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c
# https://github.com/ghcjs/ghcjs/issues/53
config = "javascript-unknown-ghcjs";
};
}


@ -49,7 +49,7 @@ rec {
isM68k = { cpu = { family = "m68k"; }; };
isS390 = { cpu = { family = "s390"; }; };
isS390x = { cpu = { family = "s390"; bits = 64; }; };
isJavaScript = { cpu = cpuTypes.js; };
isJavaScript = { cpu = cpuTypes.javascript; };
is32bit = { cpu = { bits = 32; }; };
is64bit = { cpu = { bits = 64; }; };


@ -131,7 +131,7 @@ rec {
or1k = { bits = 32; significantByte = bigEndian; family = "or1k"; };
js = { bits = 32; significantByte = littleEndian; family = "js"; };
javascript = { bits = 32; significantByte = littleEndian; family = "javascript"; };
};
# GNU build systems assume that older NetBSD architectures are using a.out.
@ -182,23 +182,12 @@ rec {
(b == armv7l && isCompatible a armv7a)
(b == armv7l && isCompatible a armv7r)
(b == armv7l && isCompatible a armv7m)
(b == armv7a && isCompatible a armv8a)
(b == armv7r && isCompatible a armv8a)
(b == armv7m && isCompatible a armv8a)
(b == armv7a && isCompatible a armv8r)
(b == armv7r && isCompatible a armv8r)
(b == armv7m && isCompatible a armv8r)
(b == armv7a && isCompatible a armv8m)
(b == armv7r && isCompatible a armv8m)
(b == armv7m && isCompatible a armv8m)
# ARMv8
(b == armv8r && isCompatible a armv8a)
(b == armv8m && isCompatible a armv8a)
# XXX: not always true! Some arm64 cpus dont support arm32 mode.
(b == aarch64 && a == armv8a)
(b == armv8a && isCompatible a aarch64)
(b == armv8r && isCompatible a armv8a)
(b == armv8m && isCompatible a armv8a)
# PowerPC
(b == powerpc && isCompatible a powerpc64)


@ -347,6 +347,15 @@ runTests {
expected = "Hello\\x20World";
};
testEscapeURL = testAllTrue [
("" == strings.escapeURL "")
("Hello" == strings.escapeURL "Hello")
("Hello%20World" == strings.escapeURL "Hello World")
("Hello%2FWorld" == strings.escapeURL "Hello/World")
("42%25" == strings.escapeURL "42%")
("%20%3F%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%09%3A%2F%40%24%27%28%29%2A%2C%3B" == strings.escapeURL " ?&=#+%!<>#\"{}|\\^[]`\t:/@$'()*,;")
];
testToInt = testAllTrue [
# Naive
(123 == toInt "123")
@ -524,6 +533,37 @@ runTests {
};
};
# code from example
testFoldlAttrs = {
expr = {
example = foldlAttrs
(acc: name: value: {
sum = acc.sum + value;
names = acc.names ++ [ name ];
})
{ sum = 0; names = [ ]; }
{
foo = 1;
bar = 10;
};
# should just return the initial value
emptySet = foldlAttrs (throw "function not needed") 123 { };
# should just evaluate to the last value
accNotNeeded = foldlAttrs (_acc: _name: v: v) (throw "accumulator not needed") { z = 3; a = 2; };
# the accumulator doesn't have to be an attrset; it can be as trivial as a number or string
trivialAcc = foldlAttrs (acc: _name: v: acc * 10 + v) 1 { z = 1; a = 2; };
};
expected = {
example = {
sum = 11;
names = [ "bar" "foo" ];
};
emptySet = 123;
accNotNeeded = 3;
trivialAcc = 121;
};
};
# code from the example
testRecursiveUpdateUntil = {
expr = recursiveUpdateUntil (path: l: r: path == ["foo"]) {


@ -141,6 +141,14 @@ checkConfigError "The option .*enable.* does not exist. Definition values:\n\s*-
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-define-enable.nix ./disable-declare-enable.nix
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-enable-modules.nix
checkConfigOutput '^true$' 'config.positive.enable' ./disable-module-with-key.nix
checkConfigOutput '^false$' 'config.negative.enable' ./disable-module-with-key.nix
checkConfigError 'Module ..*disable-module-bad-key.nix. contains a disabledModules item that is an attribute set, presumably a module, that does not have a .key. attribute. .*' 'config.enable' ./disable-module-bad-key.nix
# Not sure if we want to keep supporting module keys that aren't strings, paths or v?key, but we shouldn't remove support accidentally.
checkConfigOutput '^true$' 'config.positive.enable' ./disable-module-with-toString-key.nix
checkConfigOutput '^false$' 'config.negative.enable' ./disable-module-with-toString-key.nix
# Check _module.args.
set -- config.enable ./declare-enable.nix ./define-enable-with-custom-arg.nix
checkConfigError 'while evaluating the module argument .*custom.* in .*define-enable-with-custom-arg.nix.*:' "$@"
@ -358,6 +366,10 @@ checkConfigOutput '^"The option `a\.b. defined in `.*/doRename-warnings\.nix. ha
config.result \
./doRename-warnings.nix
# Anonymous modules get deduplicated by key
checkConfigOutput '^"pear"$' config.once.raw ./merge-module-with-key.nix
checkConfigOutput '^"pear\\npear"$' config.twice.raw ./merge-module-with-key.nix
cat <<EOF
====== module tests ======
$pass Pass


@ -0,0 +1,16 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
moduleWithKey = { config, ... }: {
config = {
enable = true;
};
};
in
{
imports = [
./declare-enable.nix
];
disabledModules = [ { } ];
}


@ -0,0 +1,34 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
moduleWithKey = {
key = "disable-module-with-key.nix#moduleWithKey";
config = {
enable = true;
};
};
in
{
options = {
positive = mkOption {
type = types.submodule {
imports = [
./declare-enable.nix
moduleWithKey
];
};
default = {};
};
negative = mkOption {
type = types.submodule {
imports = [
./declare-enable.nix
moduleWithKey
];
disabledModules = [ moduleWithKey ];
};
default = {};
};
};
}


@ -0,0 +1,34 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
moduleWithKey = {
key = 123;
config = {
enable = true;
};
};
in
{
options = {
positive = mkOption {
type = types.submodule {
imports = [
./declare-enable.nix
moduleWithKey
];
};
default = {};
};
negative = mkOption {
type = types.submodule {
imports = [
./declare-enable.nix
moduleWithKey
];
disabledModules = [ 123 ];
};
default = {};
};
};
}


@ -0,0 +1,49 @@
{ lib, ... }:
let
inherit (lib) mkOption types;
moduleWithoutKey = {
config = {
raw = "pear";
};
};
moduleWithKey = {
key = __curPos.file + "#moduleWithKey";
config = {
raw = "pear";
};
};
decl = {
options = {
raw = mkOption {
type = types.lines;
};
};
};
in
{
options = {
once = mkOption {
type = types.submodule {
imports = [
decl
moduleWithKey
moduleWithKey
];
};
default = {};
};
twice = mkOption {
type = types.submodule {
imports = [
decl
moduleWithoutKey
moduleWithoutKey
];
};
default = {};
};
};
}


@ -206,6 +206,12 @@
githubId = 22131756;
name = "Aaqa Ishtyaq";
};
aaronarinder = {
email = "aaronarinder@gmail.com";
github = "aaronArinder";
githubId = 26738844;
name = "Aaron Arinder";
};
aaronjanse = {
email = "aaron@ajanse.me";
matrix = "@aaronjanse:matrix.org";
@ -572,6 +578,12 @@
githubId = 43479487;
name = "Titouan Biteau";
};
aleksana = {
email = "me@aleksana.moe";
github = "Aleksanaa";
githubId = 42209822;
name = "Aleksana QwQ";
};
alekseysidorov = {
email = "sauron1987@gmail.com";
github = "alekseysidorov";
@ -672,6 +684,12 @@
githubId = 36147;
name = "Alireza Meskin";
};
alizter = {
email = "alizter@gmail.com";
github = "Alizter";
githubId = 8614547;
name = "Ali Caglayan";
};
alkasm = {
email = "alexreynolds00@gmail.com";
github = "alkasm";
@ -1090,6 +1108,12 @@
githubId = 1078530;
name = "Alexandre Peyroux";
};
apfelkuchen6 = {
email = "apfelkuchen6@hrnz.li";
github = "apfelkuchen6";
githubId = 73002165;
name = "apfelkuchen6";
};
applePrincess = {
email = "appleprincess@appleprincess.io";
github = "applePrincess";
@ -1820,6 +1844,12 @@
githubId = 442623;
name = "Ben Pye";
};
benwbooth = {
email = "benwboooth@gmail.com";
github = "benwbooth";
githubId = 75972;
name = "Ben Booth";
};
berberman = {
email = "berberman@yandex.com";
matrix = "@berberman:mozilla.org";
@ -2279,6 +2309,12 @@
githubId = 15320726;
name = "Car Cdr";
};
caarlos0 = {
name = "Carlos A Becker";
email = "carlos@becker.software";
github = "caarlos0";
githubId = 245435;
};
cab404 = {
email = "cab404@mailbox.org";
github = "cab404";
@ -2986,6 +3022,13 @@
githubId = 40290417;
name = "Seb Blair";
};
connorbaker = {
email = "connor.baker@tweag.io";
matrix = "@connorbaker:matrix.org";
github = "connorbaker";
name = "Connor Baker";
githubId = 3880346;
};
considerate = {
email = "viktor.kronvall@gmail.com";
github = "considerate";
@ -3601,6 +3644,13 @@
githubId = 62989;
name = "Demyan Rogozhin";
};
dennajort = {
email = "gosselinjb@gmail.com";
matrix = "@dennajort:matrix.org";
github = "dennajort";
githubId = 1536838;
name = "Jean-Baptiste Gosselin";
};
derchris = {
email = "derchris@me.com";
github = "derchrisuk";
@ -4857,7 +4907,7 @@
email = "nixpkgs@felipeqq2.rocks";
github = "felipeqq2";
githubId = 71830138;
keys = [{ fingerprint = "F5F0 2BCE 3580 BF2B 707A AA8C 2FD3 4A9E 2671 91B8"; }];
keys = [{ fingerprint = "7391 BF2D A2C3 B2C9 BE25 ACA9 C7A7 4616 F302 5DF4"; }];
matrix = "@felipeqq2:pub.solar";
};
felixscheinost = {
@ -5211,6 +5261,12 @@
githubId = 3036816;
name = "Edgar Aroutiounian";
};
fxttr = {
name = "Florian Büstgens";
email = "fb@fx-ttr.de";
github = "fxttr";
githubId = 16306293;
};
fzakaria = {
name = "Farid Zakaria";
email = "farid.m.zakaria@gmail.com";
@ -5224,6 +5280,12 @@
githubId = 606000;
name = "Gabriel Adomnicai";
};
GabrielDougherty = {
email = "contact@gabrieldougherty.com";
github = "GabrielDougherty";
githubId = 10541219;
name = "Gabriel Dougherty";
};
garaiza-93 = {
email = "araizagustavo93@gmail.com";
github = "garaiza-93";
@ -7911,6 +7973,12 @@
githubId = 804677;
name = "Kirill Kazakov";
};
kirillrdy = {
email = "kirillrdy@gmail.com";
github = "kirillrdy";
githubId = 12160;
name = "Kirill Radzikhovskyy";
};
kisonecat = {
email = "kisonecat@gmail.com";
github = "kisonecat";
@ -8036,6 +8104,13 @@
githubId = 15692230;
name = "Muhammad Herdiansyah";
};
konradmalik = {
email = "konrad.malik@gmail.com";
matrix = "@konradmalik:matrix.org";
name = "Konrad Malik";
github = "konradmalik";
githubId = 13033392;
};
koozz = {
email = "koozz@linux.com";
github = "koozz";
@ -8609,6 +8684,12 @@
fingerprint = "74F5 E5CC 19D3 B5CB 608F 6124 68FF 81E6 A785 0F49";
}];
};
longer = {
email = "michal@mieszczak.com.pl";
name = "Michał Mieszczak";
github = "LongerHV";
githubId = 46924944;
};
lourkeur = {
name = "Louis Bettens";
email = "louis@bettens.info";
@ -8630,6 +8711,12 @@
githubId = 542154;
name = "Lorenz Leutgeb";
};
lugarun = {
email = "lfschmidt.me@gmail.com";
github = "lugarun";
githubId = 5767106;
name = "Lukas Schmidt";
};
luis = {
email = "luis.nixos@gmail.com";
github = "Luis-Hebendanz";
@ -8897,7 +8984,8 @@
githubId = 13547699;
name = "Corin Hoad";
keys = [{
fingerprint = "BA3A 5886 AE6D 526E 20B4 57D6 6A37 DF94 8318 8492";
# fingerprint = "BA3A 5886 AE6D 526E 20B4 57D6 6A37 DF94 8318 8492"; # old key, superseded
fingerprint = "6E69 6A19 4BD8 BFAE 7362 ACDB 6437 4619 95CA 7F16";
}];
};
lux = {
@ -8949,6 +9037,9 @@
github = "Ma27";
githubId = 6025220;
name = "Maximilian Bosch";
keys = [{
fingerprint = "62B9 9C26 F046 721E 26B0 04F6 D006 A998 C6AB FDF1";
}];
};
ma9e = {
email = "sean@lfo.team";
@ -9112,6 +9203,12 @@
githubId = 50230945;
name = "Marcus Boyd";
};
marcusramberg = {
email = "marcus@means.no";
github = "marcusramberg";
githubId = 5526;
name = "Marcus Ramberg";
};
marenz = {
email = "marenz@arkom.men";
github = "marenz2569";
@ -9216,6 +9313,12 @@
githubId = 854770;
name = "Matej Cotman";
};
mateodd25 = {
email = "mateodd@icloud.com";
github = "mateodd25";
githubId = 854770;
name = "Mateo Diaz";
};
mathnerd314 = {
email = "mathnerd314.gph+hs@gmail.com";
github = "Mathnerd314";
@ -9807,6 +9910,12 @@
githubId = 5378535;
name = "Milo Gertjejansen";
};
milran = {
email = "milranmike@protonmail.com";
github = "milran";
githubId = 93639059;
name = "Milran Mike";
};
mimame = {
email = "miguel.madrid.mencia@gmail.com";
github = "mimame";
@ -10662,12 +10771,6 @@
fingerprint = "7BC1 77D9 C222 B1DC FB2F 0484 C061 089E FEBF 7A35";
}];
};
nichtsfrei = {
email = "philipp.eder@posteo.net";
github = "nichtsfrei";
githubId = 1665818;
name = "Philipp Eder";
};
nickcao = {
name = "Nick Cao";
email = "nickcao@nichi.co";
@ -11194,6 +11297,15 @@
githubId = 111265;
name = "Ozan Sener";
};
ostrolucky = {
email = "gabriel.ostrolucky@gmail.com";
github = "ostrolucky";
githubId = 496233;
name = "Gabriel Ostrolucký";
keys = [{
fingerprint = "6611 22A7 B778 6E4A E99A 9D6E C79A D015 19EF B134";
}];
};
otavio = {
email = "otavio.salvador@ossystems.com.br";
github = "otavio";
@ -11476,6 +11588,12 @@
githubId = 1368952;
name = "Pedro Lara Campos";
};
penalty1083 = {
email = "penalty1083@outlook.com";
github = "penalty1083";
githubId = 121009904;
name = "penalty1083";
};
penguwin = {
email = "penguwin@penguwin.eu";
github = "penguwin";
@ -11677,6 +11795,12 @@
fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E";
}];
};
pinkcreeper100 = {
email = "benmoreosm@gmail.com";
github = "pinkcreeper100";
githubId = 35699052;
name = "Oliver Samuel Morris";
};
pinpox = {
email = "mail@pablo.tools";
github = "pinpox";
@ -11932,6 +12056,12 @@
githubId = 146413;
name = "Tobias Poschwatta";
};
PowerUser64 = {
email = "blakelysnorth@gmail.com";
github = "PowerUser64";
githubId = 24578572;
name = "Blake North";
};
ppenguin = {
name = "Jeroen Versteeg";
email = "hieronymusv@gmail.com";
@ -12046,6 +12176,15 @@
githubId = 4633847;
name = "Ben Hamlin";
};
prrlvr = {
email = "po@prrlvr.fr";
github = "prrlvr";
githubId = 33699501;
name = "Pierre-Olivier Rey";
keys = [{
fingerprint = "40A0 78FD 297B 0AC1 E6D8 A119 4D38 49D9 9555 1307";
}];
};
prusnak = {
email = "pavol@rusnak.io";
github = "prusnak";
@ -12280,6 +12419,12 @@
githubId = 314564;
name = "Ryan Lahfa";
};
ralismark = {
email = "nixpkgs@ralismark.xyz";
github = "ralismark";
githubId = 13449732;
name = "Temmie";
};
raphaelr = {
email = "raphael-git@tapesoftware.net";
matrix = "@raphi:tapesoftware.net";
@ -12877,6 +13022,7 @@
email = "rrbutani+nix@gmail.com";
github = "rrbutani";
githubId = 7833358;
matrix = "@rbutani:matrix.org";
keys = [{
fingerprint = "7DCA 5615 8AB2 621F 2F32 9FF4 1C7C E491 479F A273";
}];
@ -12912,12 +13058,23 @@
githubId = 61306;
name = "Rene Treffer";
};
ruby0b = {
github = "ruby0b";
githubId = 106119328;
name = "ruby0b";
};
rubyowo = {
name = "Rei Star";
email = "perhaps-you-know@what-is.ml";
github = "rubyowo";
githubId = 105302757;
};
Ruixi-rebirth = {
name = "Ruixi-rebirth";
email = "ruixirebirth@gmail.com";
github = "Ruixi-rebirth";
githubId = 75824585;
};
rumpelsepp = {
name = "Stefan Tatschner";
email = "stefan@rumpelsepp.org";
@ -12972,6 +13129,12 @@
githubId = 12877905;
name = "Roman Volosatovs";
};
rxiao = {
email = "ben.xiao@me.com";
github = "benxiao";
githubId = 10908495;
name = "Ran Xiao";
};
ryanartecona = {
email = "ryanartecona@gmail.com";
github = "ryanartecona";
@ -13182,6 +13345,12 @@
githubId = 8534888;
name = "Savanni D'Gerinel";
};
savyajha = {
email = "savya.jha@hawkradius.com";
github = "savyajha";
githubId = 3996019;
name = "Savyasachee Jha";
};
sayanarijit = {
email = "sayanarijit@gmail.com";
github = "sayanarijit";
@ -14762,6 +14931,15 @@
fingerprint = "38A0 29B0 4A7E 4C13 A4BB 86C8 7D51 0786 6B1C 6752";
}];
};
thekostins = {
name = "Konstantin";
email = "anisimovkosta19@gmail.com";
github = "TheKostins";
githubId = 39405421;
keys = [{
fingerprint = "B216 7B33 E248 097F D82A 991D C94D 589A 4D0D CDD2";
}];
};
thelegy = {
email = "mail+nixos@0jb.de";
github = "thelegy";
@ -14896,6 +15074,12 @@
fingerprint = "7F3E EEAA EE66 93CC 8782 042A 7550 7BE2 56F4 0CED";
}];
};
Tungsten842 = {
name = "Tungsten842";
email = "886724vf@anonaddy.me";
github = "Tungsten842";
githubId = 24614168;
};
tiagolobocastro = {
email = "tiagolobocastro@gmail.com";
github = "tiagolobocastro";
@ -15964,6 +16148,15 @@
fingerprint = "DA03 D6C6 3F58 E796 AD26 E99B 366A 2940 479A 06FC";
}];
};
williamvds = {
email = "nixpkgs@williamvds.me";
github = "williamvds";
githubId = 26379999;
name = "William Vigolo";
keys = [{
fingerprint = "9848 B216 BCBE 29BB 1C6A E0D5 7A4D F5A8 CDBD 49C7";
}];
};
willibutz = {
email = "willibutz@posteo.de";
github = "WilliButz";
@ -16088,6 +16281,12 @@
github = "wr0belj";
githubId = 40501814;
};
wraithm = {
name = "Matthew Wraith";
email = "wraithm@gmail.com";
github = "wraithm";
githubId = 1512913;
};
wrmilling = {
name = "Winston R. Milling";
email = "Winston@Milli.ng";


@ -26,6 +26,7 @@ Because step 1) is quite expensive and takes roughly 5 minutes, the result is cached
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE DataKinds #-}
import Control.Monad (forM_, (<=<))
import Control.Monad.Trans (MonadIO (liftIO))
@ -54,17 +55,22 @@ import Data.Time (defaultTimeLocale, formatTime, getCurrentTime)
import Data.Time.Clock (UTCTime)
import GHC.Generics (Generic)
import Network.HTTP.Req (
GET (GET),
NoReqBody (NoReqBody),
defaultHttpConfig,
header,
https,
jsonResponse,
req,
responseBody,
responseTimeout,
runReq,
(/:),
GET (GET),
HttpResponse (HttpResponseBody),
NoReqBody (NoReqBody),
Option,
Req,
Scheme (Https),
bsResponse,
defaultHttpConfig,
header,
https,
jsonResponse,
req,
responseBody,
responseTimeout,
runReq,
(/:),
)
import System.Directory (XdgDirectory (XdgCache), getXdgDirectory)
import System.Environment (getArgs)
@ -76,6 +82,10 @@ import Control.Exception (evaluate)
import qualified Data.IntMap.Strict as IntMap
import qualified Data.IntSet as IntSet
import Data.Bifunctor (second)
import Data.Data (Proxy)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as ByteString
import Distribution.Simple.Utils (safeLast, fromUTF8BS)
newtype JobsetEvals = JobsetEvals
{ evals :: Seq Eval
@ -123,17 +133,31 @@ showT = Text.pack . show
getBuildReports :: IO ()
getBuildReports = runReq defaultHttpConfig do
evalMay <- Seq.lookup 0 . evals <$> myReq (https "hydra.nixos.org" /: "jobset" /: "nixpkgs" /: "haskell-updates" /: "evals") mempty
evalMay <- Seq.lookup 0 . evals <$> hydraJSONQuery mempty ["jobset", "nixpkgs", "haskell-updates", "evals"]
eval@Eval{id} <- maybe (liftIO $ fail "No Evaluation found") pure evalMay
liftIO . putStrLn $ "Fetching evaluation " <> show id <> " from Hydra. This might take a few minutes..."
buildReports :: Seq Build <- myReq (https "hydra.nixos.org" /: "eval" /: showT id /: "builds") (responseTimeout 600000000)
buildReports :: Seq Build <- hydraJSONQuery (responseTimeout 600000000) ["eval", showT id, "builds"]
liftIO do
fileName <- reportFileName
putStrLn $ "Finished fetching all builds from Hydra, saving report as " <> fileName
now <- getCurrentTime
encodeFile fileName (eval, now, buildReports)
where
myReq query option = responseBody <$> req GET query NoReqBody jsonResponse (header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)
hydraQuery :: HttpResponse a => Proxy a -> Option 'Https -> [Text] -> Req (HttpResponseBody a)
hydraQuery responseType option query =
responseBody
<$> req
GET
(foldl' (/:) (https "hydra.nixos.org") query)
NoReqBody
responseType
(header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)
hydraJSONQuery :: FromJSON a => Option 'Https -> [Text] -> Req a
hydraJSONQuery = hydraQuery jsonResponse
hydraPlainQuery :: [Text] -> Req ByteString
hydraPlainQuery = hydraQuery bsResponse mempty
hydraEvalCommand :: FilePath
hydraEvalCommand = "hydra-eval-jobs"
@ -304,6 +328,7 @@ platformIcon (Platform x) = case x of
"x86_64-linux" -> ":penguin:"
"aarch64-linux" -> ":iphone:"
"x86_64-darwin" -> ":apple:"
"aarch64-darwin" -> ":green_apple:"
_ -> x
data BuildResult = BuildResult {state :: BuildState, id :: Int} deriving (Show, Eq, Ord)
@ -326,23 +351,24 @@ instance Functor (Table row col) where
instance Foldable (Table row col) where
foldMap f (Table a) = foldMap f a
getBuildState :: Build -> BuildState
getBuildState Build{finished, buildstatus} = case (finished, buildstatus) of
(0, _) -> Unfinished
(_, Just 0) -> Success
(_, Just 1) -> Failed
(_, Just 2) -> DependencyFailed
(_, Just 3) -> HydraFailure
(_, Just 4) -> Canceled
(_, Just 7) -> TimedOut
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i
buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary
buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary
where
unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru)
toSummary Build{finished, buildstatus, job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult state id))) maintainers reverseDeps unbrokenReverseDeps)
toSummary build@Build{job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult (getBuildState build) id))) maintainers reverseDeps unbrokenReverseDeps)
where
state :: BuildState
state = case (finished, buildstatus) of
(0, _) -> Unfinished
(_, Just 0) -> Success
(_, Just 1) -> Failed
(_, Just 2) -> DependencyFailed
(_, Just 3) -> HydraFailure
(_, Just 4) -> Canceled
(_, Just 7) -> TimedOut
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i
packageName = fromMaybe job (Text.stripSuffix ("." <> system) job)
splitted = nonEmpty $ Text.splitOn "." packageName
name = maybe packageName NonEmpty.last splitted
@ -463,7 +489,8 @@ printBuildSummary eval@Eval{id} fetchTime summary topBrokenRdeps =
if' (isNothing maintainedJob) "No `maintained` job found." <>
if' (Unfinished > maybe Success worstState mergeableJob) "`mergeable` jobset failed." <>
if' (outstandingJobs (Platform "x86_64-linux") > 100) "Too many outstanding jobs on x86_64-linux." <>
if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux."
if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux." <>
if' (outstandingJobs (Platform "aarch64-darwin") > 100) "Too many outstanding jobs on aarch64-darwin."
if' p e = if p then [e] else mempty
outstandingJobs platform | Table m <- numSummary = Map.findWithDefault 0 (platform, Unfinished) m
maintainedJob = Map.lookup "maintained" summary
@ -486,8 +513,23 @@ printMaintainerPing = do
printMarkBrokenList :: IO ()
printMarkBrokenList = do
(_, _, buildReport) <- readBuildReports
forM_ buildReport \Build{buildstatus, job} ->
case (buildstatus, Text.splitOn "." job) of
(Just 1, ["haskellPackages", name, "x86_64-linux"]) -> putStrLn $ " - " <> Text.unpack name
(_, fetchTime, buildReport) <- readBuildReports
runReq defaultHttpConfig $ forM_ buildReport \build@Build{job, id} ->
case (getBuildState build, Text.splitOn "." job) of
(Failed, ["haskellPackages", name, "x86_64-linux"]) -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]
-- We use the last probable error cause found in the build log file.
let error_message = fromMaybe " failure " $ safeLast $ mapMaybe probableErrorCause build_log
liftIO $ putStrLn $ " - " <> Text.unpack name <> " # " <> error_message <> " in job https://hydra.nixos.org/build/" <> show id <> " at " <> formatTime defaultTimeLocale "%Y-%m-%d" fetchTime
_ -> pure ()
{- | This function receives a line from a Nix Haskell builder build log and returns a possible error cause.
We might need to add other causes in the future if errors happen in unusual parts of the builder.
-}
probableErrorCause :: ByteString -> Maybe String
probableErrorCause "Setup: Encountered missing or private dependencies:" = Just "dependency missing"
probableErrorCause "running tests" = Just "test failure"
probableErrorCause build_line | ByteString.isPrefixOf "Building" build_line = Just ("failure building " <> fromUTF8BS (fst $ ByteString.breakSubstring " for" $ ByteString.drop 9 build_line))
probableErrorCause build_line | ByteString.isSuffixOf "Phase" build_line = Just ("failure in " <> fromUTF8BS build_line)
probableErrorCause _ = Nothing


@ -11,6 +11,9 @@
# Related scripts are update-hackage.sh, for updating the snapshot of the
# Hackage database used by hackage2nix, and update-cabal2nix-unstable.sh,
# for updating the version of hackage2nix used to perform this task.
#
# Note that this script doesn't gcroot anything, so it may be broken by an
# unfortunately timed nix-store --gc.
set -euo pipefail
@ -20,15 +23,21 @@ HACKAGE2NIX="${HACKAGE2NIX:-hackage2nix}"
# See: https://github.com/NixOS/nixpkgs/pull/122023
export LC_ALL=C.UTF-8
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix
echo "Obtaining Hackage data"
extraction_derivation='with import ./. {}; runCommandLocal "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"'
unpacked_hackage="$(nix-build -E "$extraction_derivation" --no-out-link)"
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix
echo "Generating compiler configuration"
compiler_config="$(nix-build -A haskellPackages.cabal2nix-unstable.compilerConfig --no-out-link)"
echo "Starting hackage2nix to regenerate pkgs/development/haskell-modules/hackage-packages.nix ..."
"$HACKAGE2NIX" \
--hackage "$unpacked_hackage" \
--preferred-versions <(for n in "$unpacked_hackage"/*/preferred-versions; do cat "$n"; echo; done) \
--nixpkgs "$PWD" \
--config "$compiler_config" \
--config "$config_dir/main.yaml" \
--config "$config_dir/stackage.yaml" \
--config "$config_dir/broken.yaml" \


@ -19,6 +19,7 @@ fennel,,,,,,misterio77
fifo,,,,,,
fluent,,,,,,alerque
gitsigns.nvim,https://github.com/lewis6991/gitsigns.nvim.git,,,,5.1,
haskell-tools.nvim,,,,,,
http,,,,0.3-0,,vcunat
inspect,,,,,,
jsregexp,,,,,,
@ -102,6 +103,8 @@ std._debug,https://github.com/lua-stdlib/_debug.git,,,,,
std.normalize,https://github.com/lua-stdlib/normalize.git,,,,,
stdlib,,,,41.2.2,,vyp
teal-language-server,,,http://luarocks.org/dev,,,
telescope.nvim,,,,,5.1,
telescope-manix,,,,,,
tl,,,,,,mephistophiles
vstruct,https://github.com/ToxicFrog/vstruct.git,,,,,
vusted,,,,,,figsoda



@ -81,6 +81,7 @@ with lib.maintainers; {
# Verify additions to this team with at least one already existing member of the team.
members = [
cdepillabout
wraithm
];
scope = "Group registration for packages maintained by Bitnomial.";
shortName = "Bitnomial employees";
@ -141,6 +142,7 @@ with lib.maintainers; {
# gares has no entry in the maintainers list
siraben
vbgl
alizter
];
scope = "Maintain the Coq theorem prover and related packages.";
shortName = "Coq";


@ -135,28 +135,32 @@ let
}
'';
prepareManualFromMD = ''
cp -r --no-preserve=all $inputs/* .
substituteInPlace ./manual.md \
--replace '@NIXOS_VERSION@' "${version}"
substituteInPlace ./configuration/configuration.md \
--replace \
'@MODULE_CHAPTERS@' \
${lib.escapeShellArg (lib.concatMapStringsSep "\n" (p: "${p.value}") config.meta.doc)}
substituteInPlace ./nixos-options.md \
--replace \
'@NIXOS_OPTIONS_JSON@' \
${optionsDoc.optionsJSON}/share/doc/nixos/options.json
substituteInPlace ./development/writing-nixos-tests.section.md \
--replace \
'@NIXOS_TEST_OPTIONS_JSON@' \
${testOptionsDoc.optionsJSON}/share/doc/nixos/options.json
'';
manual-combined = runCommand "nixos-manual-combined"
{ inputs = lib.sourceFilesBySuffices ./. [ ".xml" ".md" ];
nativeBuildInputs = [ pkgs.nixos-render-docs pkgs.libxml2.bin pkgs.libxslt.bin ];
meta.description = "The NixOS manual as plain docbook XML";
}
''
cp -r --no-preserve=all $inputs/* .
substituteInPlace ./manual.md \
--replace '@NIXOS_VERSION@' "${version}"
substituteInPlace ./configuration/configuration.md \
--replace \
'@MODULE_CHAPTERS@' \
${lib.escapeShellArg (lib.concatMapStringsSep "\n" (p: "${p.value}") config.meta.doc)}
substituteInPlace ./nixos-options.md \
--replace \
'@NIXOS_OPTIONS_JSON@' \
${optionsDoc.optionsJSON}/share/doc/nixos/options.json
substituteInPlace ./development/writing-nixos-tests.section.md \
--replace \
'@NIXOS_TEST_OPTIONS_JSON@' \
${testOptionsDoc.optionsJSON}/share/doc/nixos/options.json
${prepareManualFromMD}
nixos-render-docs -j $NIX_BUILD_CORES manual docbook \
--manpage-urls ${manpageUrls} \
@ -193,7 +197,14 @@ in rec {
# Generate the NixOS manual.
manualHTML = runCommand "nixos-manual-html"
{ nativeBuildInputs = [ buildPackages.libxml2.bin buildPackages.libxslt.bin ];
{ nativeBuildInputs =
if allowDocBook then [
buildPackages.libxml2.bin
buildPackages.libxslt.bin
] else [
buildPackages.nixos-render-docs
];
inputs = lib.optionals (! allowDocBook) (lib.sourceFilesBySuffices ./. [ ".md" ]);
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
}
@ -201,23 +212,44 @@ in rec {
# Generate the HTML manual.
dst=$out/share/doc/nixos
mkdir -p $dst
xsltproc \
${manualXsltprocOptions} \
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out
mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
cp ${../../../doc/style.css} $dst/style.css
cp ${../../../doc/overrides.css} $dst/overrides.css
cp -r ${pkgs.documentation-highlighter} $dst/highlightjs
${if allowDocBook then ''
xsltproc \
${manualXsltprocOptions} \
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out
mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
'' else ''
${prepareManualFromMD}
# TODO generator is set like this because the docbook/md manual compare workflow will
# trigger if it's different
nixos-render-docs -j $NIX_BUILD_CORES manual html \
--manpage-urls ${manpageUrls} \
--revision ${lib.escapeShellArg revision} \
--generator "DocBook XSL Stylesheets V${docbook_xsl_ns.version}" \
--stylesheet style.css \
--stylesheet overrides.css \
--stylesheet highlightjs/mono-blue.css \
--script ./highlightjs/highlight.pack.js \
--script ./highlightjs/loader.js \
--toc-depth 1 \
--chunk-toc-depth 1 \
./manual.md \
$dst/index.html
''}
mkdir -p $out/nix-support
echo "nix-build out $out" >> $out/nix-support/hydra-build-products
echo "doc manual $dst" >> $out/nix-support/hydra-build-products

View File

@ -8,8 +8,15 @@ the system on a stable release.
`disabledModules` is a top level attribute like `imports`, `options` and
`config`. It contains a list of modules that will be disabled. This can
either be the full path to the module or a string with the filename
relative to the modules path (e.g. \<nixpkgs/nixos/modules> for NixOS).
either be:
- the full path to the module,
- or a string with the filename relative to the modules path (e.g. \<nixpkgs/nixos/modules> for NixOS),
- or an attribute set containing a specific `key` attribute.
The latter allows some modules to be disabled even though they are distributed
via attributes instead of file paths. The `key` should be globally unique, so
it is recommended to include a file path in it, or to rely on a framework to do
that for you.
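For instance, a minimal sketch showing both forms before the fuller example below (the module filename and `key` value here are hypothetical):

```nix
{
  disabledModules = [
    # by filename relative to the modules path
    "services/misc/example.nix"
    # by the `key` of a module distributed via an attribute
    { key = "/my-flake/modules/example.nix"; }
  ];
}
```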
This example will replace the existing postgresql module with the
version defined in the nixos-unstable channel while keeping the rest of

View File

@ -47,7 +47,10 @@ development/development.md
contributing-to-this-manual.chapter.md
```
```{=include=} appendix
```{=include=} appendix html:into-file=//options.html
nixos-options.md
```
```{=include=} appendix html:into-file=//release-notes.html
release-notes/release-notes.md
```

View File

@ -8,6 +8,10 @@ In addition to numerous new and upgraded packages, this release has the followin
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- Core version changes:
- default linux: 5.15 -\> 6.1, all supported kernels available
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what has changed.
- KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what has changed.
@ -29,6 +33,9 @@ In addition to numerous new and upgraded packages, this release has the followin
- [Cloudlog](https://www.magicbug.co.uk/cloudlog/), a web-based Amateur Radio logging application. Available as [services.cloudlog](#opt-services.cloudlog.enable).
- [fzf](https://github.com/junegunn/fzf), a command-line fuzzy finder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
- [readarr](https://github.com/Readarr/Readarr), Book Manager and Automation (Sonarr for Ebooks). Available as [services.readarr](options.html#opt-services.readarr.enable).
- [gemstash](https://github.com/rubygems/gemstash), a RubyGems.org cache and private gem server. Available as [services.gemstash](#opt-services.gemstash.enable).
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
@ -54,12 +61,16 @@ In addition to numerous new and upgraded packages, this release has the followin
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
- [jellyseerr](https://github.com/Fallenbagel/jellyseerr), a web-based requests manager for Jellyfin, forked from Overseerr. Available as [services.jellyseerr](#opt-services.jellyseerr.enable).
- [photoprism](https://photoprism.app/), an AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
- [autosuspend](https://github.com/languitar/autosuspend), a Python daemon that suspends a system when certain conditions are met (or not met).
- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files with iOS and Android devices without the need for an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
- [nimdow](https://github.com/avahe-kellenberger/nimdow), a window manager written in Nim, inspired by dwm.
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -105,12 +116,14 @@ In addition to numerous new and upgraded packages, this release has the followin
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- `teleport` has been upgraded to major version 11. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and [release notes](https://goteleport.com/docs/changelog/#1100).
- `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
- Calling `makeSetupHook` without passing a `name` argument is deprecated.
- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.
- The `cosmoc` package has been removed. The upstream scripts in `cosmocc` should be used instead.
- Qt 5.12 and 5.14 have been removed, as the corresponding branches have been EOL upstream for a long time. This affected under 10 packages in nixpkgs, largely unmaintained upstream as well, however, out-of-tree package expressions may need to be updated manually.
@ -136,12 +149,18 @@ In addition to numerous new and upgraded packages, this release has the followin
[upstream's release notes](https://github.com/iputils/iputils/releases/tag/20221126)
for more details and available replacements.
- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use.
- conntrack helper autodetection has been removed upstream from kernels 6.0 and up, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly, or use an older LTS kernel branch as a temporary workaround.
## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- Pantheon now defaults to Mutter 42 and GNOME settings daemon 42, all Pantheon packages are now tracking elementary OS 7 updates.
- The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules)
- The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd` you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package)
@ -166,6 +185,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- NixOS now defaults to using nsncd (a non-caching reimplementation in Rust) as NSS lookup dispatcher, instead of the buggy and deprecated glibc-provided nscd. If you need to switch back, set `services.nscd.enableNsncd = false`, but please open an issue in nixpkgs so the problem can be fixed.
- `services.borgmatic` now allows multiple configurations, placed in `/etc/borgmatic.d/`; define them with `services.borgmatic.configurations`.
- The `dnsmasq` service now takes configuration via the
`services.dnsmasq.settings` attribute set. The option
`services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches

View File

@ -154,6 +154,9 @@ To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$imag
, # Shell code executed after the VM has finished.
postVM ? ""
, # Guest memory size
memSize ? 1024
, # Copy the contents of the Nix store to the root of the image and
# skip further setup. Incompatible with `contents`,
# `installBootLoader` and `configFile`.
@ -525,7 +528,7 @@ let format' = format; in let
"-drive if=pflash,format=raw,unit=1,file=$efiVars"
]
);
memSize = 1024;
inherit memSize;
} ''
export PATH=${binPath}:$PATH

View File

@ -73,6 +73,9 @@
, # Shell code executed after the VM has finished.
postVM ? ""
, # Guest memory size
memSize ? 1024
, name ? "nixos-disk-image"
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
@ -242,6 +245,7 @@ let
{
QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report"
+ " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
inherit memSize;
preVM = ''
PATH=$PATH:${pkgs.qemu_kvm}/bin
mkdir $out

View File

@ -61,9 +61,9 @@ with lib;
pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
qrencode = super.qrencode.overrideAttrs (_: { doCheck = false; });
qt5 = super.qt5.overrideScope' (self': super': {
qt5 = super.qt5.overrideScope (const (super': {
qtbase = super'.qtbase.override { withGtk3 = false; };
});
}));
stoken = super.stoken.override { withGTK3 = false; };
# translateManpages -> perlPackages.po4a -> texlive-combined-basic -> texlive-core-big -> libX11
util-linux = super.util-linux.override { translateManpages = false; };

View File

@ -127,9 +127,6 @@ if (-e "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors") {
push @kernelModules, "kvm-intel" if hasCPUFeature "vmx";
push @kernelModules, "kvm-amd" if hasCPUFeature "svm";
push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
# Look at the PCI devices and add necessary modules. Note that most
# modules are auto-detected so we don't need to list them here.
@ -324,11 +321,15 @@ if ($virt eq "systemd-nspawn") {
}
# Provide firmware for devices that are not detected by this script,
# unless we're in a VM/container.
push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")"
if $virt eq "none";
# Check if we're on bare metal, not in a VM/container.
if ($virt eq "none") {
# Provide firmware for devices that are not detected by this script.
push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")";
# Update the microcode.
push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
}
# For a device name like /dev/sda1, find a more stable path like
# /dev/disk/by-uuid/X or /dev/disk/by-label/Y.

View File

@ -180,7 +180,7 @@ in
# extraGroups = [ "wheel" ]; # Enable sudo for the user.
# packages = with pkgs; [
# firefox
# thunderbird
# tree
# ];
# };

View File

@ -205,6 +205,7 @@
./programs/nbd.nix
./programs/neovim.nix
./programs/nethoscope.nix
./programs/nexttrace.nix
./programs/nix-index.nix
./programs/nix-ld.nix
./programs/nm-applet.nix
@ -439,6 +440,7 @@
./services/development/blackfire.nix
./services/development/bloop.nix
./services/development/distccd.nix
./services/development/gemstash.nix
./services/development/hoogle.nix
./services/development/jupyter/default.nix
./services/development/jupyterhub/default.nix
@ -623,6 +625,7 @@
./services/misc/irkerd.nix
./services/misc/jackett.nix
./services/misc/jellyfin.nix
./services/misc/jellyseerr.nix
./services/misc/klipper.nix
./services/misc/languagetool.nix
./services/misc/leaps.nix
@ -662,6 +665,7 @@
./services/misc/prowlarr.nix
./services/misc/pykms.nix
./services/misc/radarr.nix
./services/misc/readarr.nix
./services/misc/redmine.nix
./services/misc/ripple-data-api.nix
./services/misc/rippled.nix
@ -800,6 +804,7 @@
./services/networking/bitlbee.nix
./services/networking/blockbook-frontend.nix
./services/networking/blocky.nix
./services/networking/cgit.nix
./services/networking/charybdis.nix
./services/networking/chisel-server.nix
./services/networking/cjdns.nix
@ -1128,6 +1133,7 @@
./services/web-apps/baget.nix
./services/web-apps/bookstack.nix
./services/web-apps/calibre-web.nix
./services/web-apps/coder.nix
./services/web-apps/changedetection-io.nix
./services/web-apps/cloudlog.nix
./services/web-apps/code-server.nix
@ -1267,6 +1273,7 @@
./services/x11/window-managers/bspwm.nix
./services/x11/window-managers/katriawm.nix
./services/x11/window-managers/metacity.nix
./services/x11/window-managers/nimdow.nix
./services/x11/window-managers/none.nix
./services/x11/window-managers/twm.nix
./services/x11/window-managers/windowlab.nix

View File

@ -142,6 +142,7 @@ in
# convert any remaining logs before starting
atop.serviceConfig.ExecStartPre = pkgs.writeShellScript "atop-update-log-format" ''
set -e -u
shopt -s nullglob
for logfile in "$LOGPATH"/atop_*
do
${atop}/bin/atopconvert "$logfile" "$logfile".new
@ -150,6 +151,8 @@ in
if ! ${pkgs.diffutils}/bin/cmp -s "$logfile" "$logfile".new
then
${pkgs.coreutils}/bin/mv -v -f "$logfile".new "$logfile"
else
${pkgs.coreutils}/bin/rm -f "$logfile".new
fi
done
'';

View File

@ -8,7 +8,6 @@ with lib;
let
cfg = config.programs.java;
in
{
options = {
@ -40,12 +39,35 @@ in
type = types.package;
};
binfmt = mkEnableOption (lib.mdDoc "binfmt to execute java jar's and classes");
};
};
config = mkIf cfg.enable {
boot.binfmt.registrations = mkIf cfg.binfmt {
java-class = {
recognitionType = "extension";
magicOrExtension = "class";
interpreter = pkgs.writeShellScript "java-class-wrapper" ''
test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
classpath=$(dirname "$1")
class=$(basename "''${1%%.class}")
$JAVA_HOME/bin/java -classpath "$classpath" "$class" "''${@:2}"
'';
};
java-jar = {
recognitionType = "extension";
magicOrExtension = "jar";
interpreter = pkgs.writeShellScript "java-jar-wrapper" ''
test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
$JAVA_HOME/bin/java -jar "$@"
'';
};
};
environment.systemPackages = [ cfg.package ];
environment.shellInit = ''

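With the registrations above in place, enabling `binfmt` makes class files and jars directly executable; a minimal configuration sketch:

```nix
{
  programs.java = {
    enable = true;
    binfmt = true; # run `./Main.class` or `./app.jar` directly
  };
}
```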
View File

@ -0,0 +1,25 @@
{ config, lib, pkgs, ... }:
let
cfg = config.programs.nexttrace;
in
{
options = {
programs.nexttrace = {
enable = lib.mkEnableOption (lib.mdDoc "Nexttrace to the global environment and configure a setcap wrapper for it");
package = lib.mkPackageOptionMD pkgs "nexttrace" { };
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
security.wrappers.nexttrace = {
owner = "root";
group = "root";
capabilities = "cap_net_raw,cap_net_admin+eip";
source = "${cfg.package}/bin/nexttrace";
};
};
}

View File

@ -2,17 +2,22 @@
with lib;
let
cfg = config.programs.waybar;
in
{
options.programs.waybar = {
enable = mkEnableOption (lib.mdDoc "waybar");
package = mkPackageOptionMD pkgs "waybar" { };
};
config = mkIf config.programs.waybar.enable {
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
systemd.user.services.waybar = {
description = "Waybar as systemd service";
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
script = "${pkgs.waybar}/bin/waybar";
script = "${cfg.package}/bin/waybar";
};
};

View File

@ -5,44 +5,58 @@ with lib;
let
cfg = config.services.borgmatic;
settingsFormat = pkgs.formats.yaml { };
cfgType = with types; submodule {
freeformType = settingsFormat.type;
options.location = {
source_directories = mkOption {
type = listOf str;
description = mdDoc ''
List of source directories to back up (required). Globs and
tildes are expanded.
'';
example = [ "/home" "/etc" "/var/log/syslog*" ];
};
repositories = mkOption {
type = listOf str;
description = mdDoc ''
Paths to local or remote repositories (required). Tildes are
expanded. Multiple repositories are backed up to in
sequence. Borg placeholders can be used. See the output of
"borg help placeholders" for details. See ssh_command for
SSH options like identity file or port. If systemd service
is used, then add local repository paths in the systemd
service file to the ReadWritePaths list.
'';
example = [
"ssh://user@backupserver/./sourcehostname.borg"
"ssh://user@backupserver/./{fqdn}"
"/var/local/backups/local.borg"
];
};
};
};
cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
in {
in
{
options.services.borgmatic = {
enable = mkEnableOption (lib.mdDoc "borgmatic");
enable = mkEnableOption (mdDoc "borgmatic");
settings = mkOption {
description = lib.mdDoc ''
description = mdDoc ''
See https://torsion.org/borgmatic/docs/reference/configuration/
'';
type = types.submodule {
freeformType = settingsFormat.type;
options.location = {
source_directories = mkOption {
type = types.listOf types.str;
description = lib.mdDoc ''
List of source directories to back up (required). Globs and
tildes are expanded.
'';
example = [ "/home" "/etc" "/var/log/syslog*" ];
};
repositories = mkOption {
type = types.listOf types.str;
description = lib.mdDoc ''
Paths to local or remote repositories (required). Tildes are
expanded. Multiple repositories are backed up to in
sequence. Borg placeholders can be used. See the output of
"borg help placeholders" for details. See ssh_command for
SSH options like identity file or port. If systemd service
is used, then add local repository paths in the systemd
service file to the ReadWritePaths list.
'';
example = [
"user@backupserver:sourcehostname.borg"
"user@backupserver:{fqdn}"
];
};
};
};
default = null;
type = types.nullOr cfgType;
};
configurations = mkOption {
description = mdDoc ''
Set of borgmatic configurations, see https://torsion.org/borgmatic/docs/reference/configuration/
'';
default = { };
type = types.attrsOf cfgType;
};
};
@ -50,9 +64,13 @@ in {
environment.systemPackages = [ pkgs.borgmatic ];
environment.etc."borgmatic/config.yaml".source = cfgfile;
environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //
mapAttrs'
(name: value: nameValuePair
"borgmatic.d/${name}.yaml"
{ source = settingsFormat.generate "${name}.yaml" value; })
cfg.configurations;
systemd.packages = [ pkgs.borgmatic ];
};
}

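Based on the options above, a sketch of two named configurations (paths are illustrative); each one is rendered to `/etc/borgmatic.d/<name>.yaml`:

```nix
{
  services.borgmatic = {
    enable = true;
    configurations = {
      home.location = {
        source_directories = [ "/home" ];
        repositories = [ "/var/local/backups/home.borg" ];
      };
      etc.location = {
        source_directories = [ "/etc" ];
        repositories = [ "/var/local/backups/etc.borg" ];
      };
    };
  };
}
```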
View File

@ -47,7 +47,12 @@ let
then [ "${name} ${value}" ]
else concatLists (mapAttrsToList (genSection name) value);
addDefaults = settings: { backend = "btrfs-progs-sudo"; } // settings;
sudo_doas =
if config.security.sudo.enable then "sudo"
else if config.security.doas.enable then "doas"
else throw "The btrbk nixos module needs either sudo or doas enabled in the configuration";
addDefaults = settings: { backend = "btrfs-progs-${sudo_doas}"; } // settings;
mkConfigFile = name: settings: pkgs.writeTextFile {
name = "btrbk-${name}.conf";
@ -152,20 +157,41 @@ in
};
config = mkIf (sshEnabled || serviceEnabled) {
environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
security.sudo.extraRules = [
{
users = [ "btrbk" ];
commands = [
{ command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
# for ssh, these are not the same as the ones hard-coded in ${pkgs.btrbk}
{ command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
security.sudo = mkIf (sudo_doas == "sudo") {
extraRules = [
{
users = [ "btrbk" ];
commands = [
{ command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
# for ssh, these are not the same as the ones hard-coded in ${pkgs.btrbk}
{ command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
];
}
];
};
security.doas = mkIf (sudo_doas == "doas") {
extraRules = let
doasCmdNoPass = cmd: { users = [ "btrbk" ]; cmd = cmd; noPass = true; };
in
[
(doasCmdNoPass "${pkgs.btrfs-progs}/bin/btrfs")
(doasCmdNoPass "${pkgs.coreutils}/bin/mkdir")
(doasCmdNoPass "${pkgs.coreutils}/bin/readlink")
# for ssh, these are not the same as the ones hard-coded in ${pkgs.btrbk}
(doasCmdNoPass "/run/current-system/bin/btrfs")
(doasCmdNoPass "/run/current-system/sw/bin/mkdir")
(doasCmdNoPass "/run/current-system/sw/bin/readlink")
# doas matches command, not binary
(doasCmdNoPass "btrfs")
(doasCmdNoPass "mkdir")
(doasCmdNoPass "readlink")
];
}
];
};
users.users.btrbk = {
isSystemUser = true;
# ssh needs a home directory
@ -183,8 +209,9 @@ in
"best-effort" = 2;
"realtime" = 1;
}.${cfg.ioSchedulingClass};
sudo_doas_flag = "--${sudo_doas}";
in
''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh --sudo ${options}" ${v.key}''
''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh ${sudo_doas_flag} ${options}" ${v.key}''
)
cfg.sshAccess;
};

View File

@ -149,7 +149,7 @@ in
else
args+=(--token "$token")
fi
${cfg.package}/bin/config.sh "''${args[@]}"
${cfg.package}/bin/Runner.Listener configure "''${args[@]}"
# Move the automatically created _diag dir to the logs dir
mkdir -p "$STATE_DIRECTORY/_diag"
cp -r "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"

View File

@ -398,7 +398,7 @@ in
systemd.services.hydra-evaluator =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = with pkgs; [ hydra-package nettools jq ];
restartTriggers = [ hydraConf ];
environment = env // {

View File

@ -0,0 +1,103 @@
{ lib, pkgs, config, ... }:
with lib;
let
settingsFormat = pkgs.formats.yaml { };
# gemstash uses a yaml config where the keys are ruby symbols,
# which means they start with ':'. This would be annoying to use
# on the nix side, so we rewrite plain names instead.
prefixColon = s: listToAttrs (map
(attrName: {
name = ":${attrName}";
value =
if isAttrs s.${attrName}
then prefixColon s."${attrName}"
else s."${attrName}";
})
(attrNames s));
# parse the port number out of the tcp://ip:port bind setting string
parseBindPort = bind: strings.toInt (last (strings.splitString ":" bind));
cfg = config.services.gemstash;
in
{
options.services.gemstash = {
enable = mkEnableOption (lib.mdDoc "gemstash service");
openFirewall = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to open the firewall for the port in {option}`services.gemstash.settings.bind`.
'';
};
settings = mkOption {
default = {};
description = lib.mdDoc ''
Configuration for Gemstash. Details can be found in the
[gemstash documentation](https://github.com/rubygems/gemstash/blob/master/man/gemstash-configuration.5.md).
Each key set here is automatically prefixed with ":" to match the gemstash expectations.
'';
type = types.submodule {
freeformType = settingsFormat.type;
options = {
base_path = mkOption {
type = types.path;
default = "/var/lib/gemstash";
description = lib.mdDoc "Path to store the gem files and the sqlite database. If left unchanged, the directory will be created.";
};
bind = mkOption {
type = types.str;
default = "tcp://0.0.0.0:9292";
description = lib.mdDoc "Host and port combination for the server to listen on.";
};
db_adapter = mkOption {
type = types.nullOr (types.enum [ "sqlite3" "postgres" "mysql" "mysql2" ]);
default = null;
description = lib.mdDoc "Which database type to use. For choices other than sqlite3, the dbUrl has to be specified as well.";
};
db_url = mkOption {
type = types.nullOr types.str;
default = null;
description = lib.mdDoc "The database to connect to when using postgres, mysql, or mysql2.";
};
};
};
};
};
config =
mkIf cfg.enable {
users = {
users.gemstash = {
group = "gemstash";
isSystemUser = true;
};
groups.gemstash = { };
};
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ (parseBindPort cfg.settings.bind) ];
systemd.services.gemstash = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = mkMerge [
{
ExecStart = "${pkgs.gemstash}/bin/gemstash start --no-daemonize --config-file ${settingsFormat.generate "gemstash.yaml" (prefixColon cfg.settings)}";
NoNewPrivileges = true;
User = "gemstash";
Group = "gemstash";
PrivateTmp = true;
RestrictSUIDSGID = true;
LockPersonality = true;
}
(mkIf (cfg.settings.base_path == "/var/lib/gemstash") {
StateDirectory = "gemstash";
})
];
};
};
}

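A configuration sketch using the options above (the database URL is illustrative); freeform keys are prefixed with `:` automatically when the YAML file is generated:

```nix
{
  services.gemstash = {
    enable = true;
    openFirewall = true; # opens 9292, parsed from the bind string
    settings = {
      bind = "tcp://0.0.0.0:9292";
      db_adapter = "postgres";
      db_url = "postgres://gemstash@localhost/gemstash";
    };
  };
}
```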
View File

@ -18,6 +18,12 @@ let
fwupd = cfg.daemonSettings;
};
};
"fwupd/uefi_capsule.conf" = {
source = format.generate "uefi_capsule.conf" {
uefi_capsule = cfg.uefiCapsuleSettings;
};
};
};
originalEtc =
@ -138,6 +144,16 @@ in {
Configurations for the fwupd daemon.
'';
};
uefiCapsuleSettings = mkOption {
type = types.submodule {
freeformType = format.type.nestedTypes.elemType;
};
default = {};
description = lib.mdDoc ''
UEFI capsule configurations for the fwupd daemon.
'';
};
};
};

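A sketch of the new freeform option; `DisableCapsuleUpdateOnDisk` is assumed here as an example key from fwupd's `uefi_capsule.conf`:

```nix
{
  services.fwupd = {
    enable = true;
    # rendered into the uefi_capsule section of /etc/fwupd/uefi_capsule.conf
    uefiCapsuleSettings.DisableCapsuleUpdateOnDisk = true;
  };
}
```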
View File

@ -288,11 +288,11 @@ in
LimitNOFILE = 65535;
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
LoadCredential = cfg.loadCredential;
ExecStartPre = ''
ExecStartPre = [''
${pkgs.envsubst}/bin/envsubst \
-i ${configurationYaml} \
-o /run/dendrite/dendrite.yaml
'';
''];
ExecStart = lib.strings.concatStringsSep " " ([
"${pkgs.dendrite}/bin/dendrite-monolith-server"
"--config /run/dendrite/dendrite.yaml"

View File

@ -5,7 +5,7 @@ with lib;
let
cfg = config.services.gitea;
opt = options.services.gitea;
gitea = cfg.package;
exe = lib.getExe cfg.package;
pg = config.services.postgresql;
useMysql = cfg.database.type == "mysql";
usePostgresql = cfg.database.type == "postgres";
@ -248,7 +248,7 @@ in
staticRootPath = mkOption {
type = types.either types.str types.path;
default = gitea.data;
default = cfg.package.data;
defaultText = literalExpression "package.data";
example = "/var/lib/gitea/data";
description = lib.mdDoc "Upper level of template and static files path.";
@ -481,14 +481,14 @@ in
# If we have a folder or symlink with gitea locales, remove it
# And symlink the current gitea locales in place
"L+ '${cfg.stateDir}/conf/locale' - - - - ${gitea.out}/locale"
"L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
];
systemd.services.gitea = {
description = "gitea";
after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
wantedBy = [ "multi-user.target" ];
path = [ gitea pkgs.git pkgs.gnupg ];
path = [ cfg.package pkgs.git pkgs.gnupg ];
# In older versions the secret naming for JWT was kind of confusing.
# The file jwt_secret hold the value for LFS_JWT_SECRET and JWT_SECRET
@ -512,7 +512,7 @@ in
cp -f ${configFile} ${runConfig}
if [ ! -s ${secretKey} ]; then
${gitea}/bin/gitea generate secret SECRET_KEY > ${secretKey}
${exe} generate secret SECRET_KEY > ${secretKey}
fi
# Migrate LFS_JWT_SECRET filename
@ -521,15 +521,15 @@ in
fi
if [ ! -s ${oauth2JwtSecret} ]; then
${gitea}/bin/gitea generate secret JWT_SECRET > ${oauth2JwtSecret}
${exe} generate secret JWT_SECRET > ${oauth2JwtSecret}
fi
if [ ! -s ${lfsJwtSecret} ]; then
${gitea}/bin/gitea generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
${exe} generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
fi
if [ ! -s ${internalToken} ]; then
${gitea}/bin/gitea generate secret INTERNAL_TOKEN > ${internalToken}
${exe} generate secret INTERNAL_TOKEN > ${internalToken}
fi
chmod u+w '${runConfig}'
@ -548,15 +548,15 @@ in
''}
# run migrations/init the database
${gitea}/bin/gitea migrate
${exe} migrate
# update all hooks' binary paths
${gitea}/bin/gitea admin regenerate hooks
${exe} admin regenerate hooks
# update command option in authorized_keys
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
then
${gitea}/bin/gitea admin regenerate keys
${exe} admin regenerate keys
fi
'';
@ -565,7 +565,7 @@ in
User = cfg.user;
Group = "gitea";
WorkingDirectory = cfg.stateDir;
ExecStart = "${gitea}/bin/gitea web --pid /run/gitea/gitea.pid";
ExecStart = "${exe} web --pid /run/gitea/gitea.pid";
Restart = "always";
# Runtime directory and mode
RuntimeDirectory = "gitea";
@ -597,7 +597,7 @@ in
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @reboot @setuid @swap";
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
};
environment = {
@ -635,7 +635,7 @@ in
systemd.services.gitea-dump = mkIf cfg.dump.enable {
description = "gitea dump";
after = [ "gitea.service" ];
path = [ gitea ];
path = [ cfg.package ];
environment = {
USER = cfg.user;
@ -646,7 +646,7 @@ in
serviceConfig = {
Type = "oneshot";
User = cfg.user;
ExecStart = "${gitea}/bin/gitea dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
WorkingDirectory = cfg.dump.backupDir;
};
};
@ -658,5 +658,5 @@ in
timerConfig.OnCalendar = cfg.dump.interval;
};
};
meta.maintainers = with lib.maintainers; [ srhb ma27 ];
meta.maintainers = with lib.maintainers; [ srhb ma27 thehedgeh0g ];
}

View File

@ -89,11 +89,6 @@ let
};
};
pagesArgs = [
"-pages-domain" gitlabConfig.production.pages.host
"-pages-root" "${gitlabConfig.production.shared.path}/pages"
] ++ cfg.pagesExtraArgs;
gitlabConfig = {
# These are the default settings from config/gitlab.example.yml
production = flip recursiveUpdate cfg.extraConfig {
@ -161,6 +156,12 @@ let
};
extra = {};
uploads.storage_path = cfg.statePath;
pages = {
enabled = cfg.pages.enable;
port = 8090;
host = cfg.pages.settings.pages-domain;
secret_file = cfg.pages.settings.api-secret-key;
};
};
};
@ -246,6 +247,7 @@ in {
(mkRenamedOptionModule [ "services" "gitlab" "backupPath" ] [ "services" "gitlab" "backup" "path" ])
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
(mkRemovedOptionModule [ "services" "gitlab" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.gitlab directly instead")
(mkRemovedOptionModule [ "services" "gitlab" "pagesExtraArgs" ] "Use services.gitlab.pages.settings instead")
];
options = {
@ -667,10 +669,127 @@ in {
};
};
pagesExtraArgs = mkOption {
type = types.listOf types.str;
default = [ "-listen-proxy" "127.0.0.1:8090" ];
description = lib.mdDoc "Arguments to pass to the gitlab-pages daemon";
pages.enable = mkEnableOption (lib.mdDoc "the GitLab Pages service");
pages.settings = mkOption {
example = literalExpression ''
{
pages-domain = "example.com";
auth-client-id = "generated-id-xxxxxxx";
auth-client-secret = { _secret = "/var/keys/auth-client-secret"; };
auth-redirect-uri = "https://projects.example.com/auth";
auth-secret = { _secret = "/var/keys/auth-secret"; };
auth-server = "https://gitlab.example.com";
}
'';
description = lib.mdDoc ''
Configuration options to set in the GitLab Pages config
file.
Options containing secret data should be set to an attribute
set containing the attribute `_secret` - a string pointing
to a file containing the value the option should be set
to. See the example to get a better picture of this: in the
resulting configuration file, the `auth-client-secret` and
`auth-secret` keys will be set to the contents of the
{file}`/var/keys/auth-client-secret` and
{file}`/var/keys/auth-secret` files respectively.
'';
type = types.submodule {
freeformType = with types; attrsOf (nullOr (oneOf [ str int bool attrs ]));
options = {
listen-http = mkOption {
type = with types; listOf str;
apply = x: if x == [] then null else lib.concatStringsSep "," x;
default = [];
description = lib.mdDoc ''
The address(es) to listen on for HTTP requests.
'';
};
listen-https = mkOption {
type = with types; listOf str;
apply = x: if x == [] then null else lib.concatStringsSep "," x;
default = [];
description = lib.mdDoc ''
The address(es) to listen on for HTTPS requests.
'';
};
listen-proxy = mkOption {
type = with types; listOf str;
apply = x: if x == [] then null else lib.concatStringsSep "," x;
default = [ "127.0.0.1:8090" ];
description = lib.mdDoc ''
The address(es) to listen on for proxy requests.
'';
};
artifacts-server = mkOption {
type = with types; nullOr str;
default = "http${optionalString cfg.https "s"}://${cfg.host}/api/v4";
defaultText = "http(s)://<services.gitlab.host>/api/v4";
example = "https://gitlab.example.com/api/v4";
description = lib.mdDoc ''
API URL to proxy artifact requests to.
'';
};
gitlab-server = mkOption {
type = with types; nullOr str;
default = "http${optionalString cfg.https "s"}://${cfg.host}";
defaultText = "http(s)://<services.gitlab.host>";
example = "https://gitlab.example.com";
description = lib.mdDoc ''
Public GitLab server URL.
'';
};
internal-gitlab-server = mkOption {
type = with types; nullOr str;
default = null;
defaultText = "http(s)://<services.gitlab.host>";
example = "https://gitlab.example.internal";
description = lib.mdDoc ''
Internal GitLab server used for API requests, useful
if you want to send that traffic over an internal load
balancer. By default, the value of
`services.gitlab.pages.settings.gitlab-server` is
used.
'';
};
api-secret-key = mkOption {
type = with types; nullOr str;
default = "${cfg.statePath}/gitlab_pages_secret";
internal = true;
description = lib.mdDoc ''
File with secret key used to authenticate with the
GitLab API.
'';
};
pages-domain = mkOption {
type = with types; nullOr str;
example = "example.com";
description = lib.mdDoc ''
The domain to serve static pages on.
'';
};
pages-root = mkOption {
type = types.str;
default = "${gitlabConfig.production.shared.path}/pages";
defaultText = literalExpression ''config.${opt.extraConfig}.production.shared.path + "/pages"'';
description = lib.mdDoc ''
The directory where pages are stored.
'';
};
};
};
};
secrets.secretFile = mkOption {
@ -1210,6 +1329,9 @@ in {
umask u=rwx,g=,o=
openssl rand -hex 32 > ${cfg.statePath}/gitlab_shell_secret
${optionalString cfg.pages.enable ''
openssl rand -base64 32 > ${cfg.pages.settings.api-secret-key}
''}
rm -f '${cfg.statePath}/config/database.yml'
@ -1359,28 +1481,66 @@ in {
};
};
systemd.services.gitlab-pages = mkIf (gitlabConfig.production.pages.enabled or false) {
description = "GitLab static pages daemon";
after = [ "network.target" "gitlab-config.service" ];
bindsTo = [ "gitlab-config.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = [ pkgs.unzip ];
serviceConfig = {
Type = "simple";
TimeoutSec = "infinity";
Restart = "on-failure";
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.packages.pages}/bin/gitlab-pages ${escapeShellArgs pagesArgs}";
WorkingDirectory = gitlabEnv.HOME;
};
services.gitlab.pages.settings = {
api-secret-key = "${cfg.statePath}/gitlab_pages_secret";
};
systemd.services.gitlab-pages =
let
filteredConfig = filterAttrs (_: v: v != null) cfg.pages.settings;
isSecret = v: isAttrs v && v ? _secret && isString v._secret;
mkPagesKeyValue = lib.generators.toKeyValue {
mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" rec {
mkValueString = v:
if isInt v then toString v
else if isString v then v
else if true == v then "true"
else if false == v then "false"
else if isSecret v then builtins.hashString "sha256" v._secret
else throw "unsupported type ${builtins.typeOf v}: ${(lib.generators.toPretty {}) v}";
};
};
secretPaths = lib.catAttrs "_secret" (lib.collect isSecret filteredConfig);
mkSecretReplacement = file: ''
replace-secret ${lib.escapeShellArgs [ (builtins.hashString "sha256" file) file "/run/gitlab-pages/gitlab-pages.conf" ]}
'';
secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
configFile = pkgs.writeText "gitlab-pages.conf" (mkPagesKeyValue filteredConfig);
in
mkIf cfg.pages.enable {
description = "GitLab static pages daemon";
after = [ "network.target" "gitlab-config.service" "gitlab.service" ];
bindsTo = [ "gitlab-config.service" "gitlab.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = with pkgs; [
unzip
replace-secret
];
serviceConfig = {
Type = "simple";
TimeoutSec = "infinity";
Restart = "on-failure";
User = cfg.user;
Group = cfg.group;
ExecStartPre = pkgs.writeShellScript "gitlab-pages-pre-start" ''
set -o errexit -o pipefail -o nounset
shopt -s dotglob nullglob inherit_errexit
install -m u=rw ${configFile} /run/gitlab-pages/gitlab-pages.conf
${secretReplacements}
'';
ExecStart = "${cfg.packages.pages}/bin/gitlab-pages -config=/run/gitlab-pages/gitlab-pages.conf";
WorkingDirectory = gitlabEnv.HOME;
RuntimeDirectory = "gitlab-pages";
RuntimeDirectoryMode = "0700";
};
};
systemd.services.gitlab-workhorse = {
after = [ "network.target" ];
wantedBy = [ "gitlab.target" ];

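A sketch of enabling Pages through the new settings interface (domain and secret path are illustrative); `_secret` values are substituted into the generated config at service start:

```nix
{
  services.gitlab.pages = {
    enable = true;
    settings = {
      pages-domain = "pages.example.com";
      # replaced with the file contents at start-up, never stored in the store
      auth-secret = { _secret = "/var/keys/auth-secret"; };
    };
  };
}
```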
View File

@ -0,0 +1,62 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.jellyseerr;
in
{
meta.maintainers = [ maintainers.camillemndn ];
options.services.jellyseerr = {
enable = mkEnableOption (mdDoc ''Jellyseerr, a requests manager for Jellyfin'');
openFirewall = mkOption {
type = types.bool;
default = false;
description = mdDoc ''Open the port in the firewall for the Jellyseerr web interface.'';
};
port = mkOption {
type = types.port;
default = 5055;
description = mdDoc ''The port on which the Jellyseerr web UI should listen.'';
};
};
config = mkIf cfg.enable {
systemd.services.jellyseerr = {
description = "Jellyseerr, a requests manager for Jellyfin";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment.PORT = toString cfg.port;
serviceConfig = {
Type = "exec";
StateDirectory = "jellyseerr";
WorkingDirectory = "${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr";
DynamicUser = true;
ExecStart = "${pkgs.jellyseerr}/bin/jellyseerr";
BindPaths = [ "/var/lib/jellyseerr/:${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr/config/" ];
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
NoNewPrivileges = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
};
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.port ];
};
};
}

View File

@ -6,6 +6,7 @@ let
pkg = cfg.package;
defaultUser = "paperless";
nltkDir = "/var/cache/paperless/nltk";
# Don't start a redis instance if the user sets a custom redis connection
enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
@ -15,6 +16,7 @@ let
PAPERLESS_DATA_DIR = cfg.dataDir;
PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
PAPERLESS_NLTK_DIR = nltkDir;
GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
} // optionalAttrs (config.time.timeZone != null) {
PAPERLESS_TIME_ZONE = config.time.timeZone;
@ -24,12 +26,14 @@ let
lib.mapAttrs (_: toString) cfg.extraConfig
);
manage = let
setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env);
in pkgs.writeShellScript "manage" ''
${setupEnv}
exec ${pkg}/bin/paperless-ngx "$@"
'';
manage =
let
setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env);
in
pkgs.writeShellScript "manage" ''
${setupEnv}
exec ${pkg}/bin/paperless-ngx "$@"
'';
# Secure the services
defaultServiceConfig = {
@ -47,6 +51,7 @@ let
cfg.dataDir
cfg.mediaDir
];
CacheDirectory = "paperless";
CapabilityBoundingSet = "";
# ProtectClock adds DeviceAllow=char-rtc r
DeviceAllow = "";
@ -170,7 +175,7 @@ in
extraConfig = mkOption {
type = types.attrs;
default = {};
default = { };
description = lib.mdDoc ''
Extra paperless config options.
@ -291,6 +296,33 @@ in
};
};
# Download NLTK corpus data
systemd.services.paperless-download-nltk-data = {
wantedBy = [ "paperless-scheduler.service" ];
before = [ "paperless-scheduler.service" ];
after = [ "network-online.target" ];
serviceConfig = defaultServiceConfig // {
User = cfg.user;
Type = "oneshot";
# Enable internet access
PrivateNetwork = false;
# Restrict write access
BindPaths = [];
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/ssl/certs"
"-/etc/static/ssl/certs"
"-/etc/hosts"
"-/etc/localtime"
];
ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in ''
${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords
'';
};
};
systemd.services.paperless-consumer = {
description = "Paperless document consumer";
# Bind to `paperless-scheduler` so that the consumer never runs

View File

@ -0,0 +1,88 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.readarr;
in
{
options = {
services.readarr = {
enable = mkEnableOption (lib.mdDoc "Readarr");
dataDir = mkOption {
type = types.str;
default = "/var/lib/readarr/";
description = lib.mdDoc "The directory where Readarr stores its data files.";
};
package = mkOption {
type = types.package;
default = pkgs.readarr;
defaultText = literalExpression "pkgs.readarr";
description = lib.mdDoc "The Readarr package to use";
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Open ports in the firewall for Readarr
'';
};
user = mkOption {
type = types.str;
default = "readarr";
description = lib.mdDoc ''
User account under which Readarr runs.
'';
};
group = mkOption {
type = types.str;
default = "readarr";
description = lib.mdDoc ''
Group under which Readarr runs.
'';
};
};
};
config = mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
];
systemd.services.readarr = {
description = "Readarr";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.package}/bin/Readarr -nobrowser -data='${cfg.dataDir}'";
Restart = "on-failure";
};
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ 8787 ];
};
users.users = mkIf (cfg.user == "readarr") {
readarr = {
description = "Readarr service";
home = cfg.dataDir;
group = cfg.group;
isSystemUser = true;
};
};
users.groups = mkIf (cfg.group == "readarr") {
readarr = { };
};
};
}

View File

@ -1408,7 +1408,7 @@ let
'';
action =
mkDefOpt (types.enum [ "replace" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
mkDefOpt (types.enum [ "replace" "lowercase" "uppercase" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
Action to perform based on regex matching.
'';
};

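With the two new enum values, a relabeling rule can normalize label case; a sketch (job and label names are illustrative):

```nix
{
  services.prometheus.scrapeConfigs = [{
    job_name = "node";
    static_configs = [{ targets = [ "localhost:9100" ]; }];
    relabel_configs = [{
      source_labels = [ "instance" ];
      target_label = "instance";
      action = "lowercase"; # newly accepted by the enum above
    }];
  }];
}
```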
View File

@ -0,0 +1,203 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfgs = config.services.cgit;
settingType = with types; oneOf [ bool int str ];
genAttrs' = names: f: listToAttrs (map f names);
regexEscape =
let
# taken from https://github.com/python/cpython/blob/05cb728d68a278d11466f9a6c8258d914135c96c/Lib/re.py#L251-L266
special = [
"(" ")" "[" "]" "{" "}" "?" "*" "+" "-" "|" "^" "$" "\\" "." "&" "~"
"#" " " "\t" "\n" "\r" "\v" "\f"
];
in
replaceStrings special (map (c: "\\${c}") special);
stripLocation = cfg: removeSuffix "/" cfg.nginx.location;
regexLocation = cfg: regexEscape (stripLocation cfg);
mkFastcgiPass = cfg: ''
${if cfg.nginx.location == "/" then ''
fastcgi_param PATH_INFO $uri;
'' else ''
fastcgi_split_path_info ^(${regexLocation cfg})(/.+)$;
fastcgi_param PATH_INFO $fastcgi_path_info;
''
}fastcgi_pass unix:${config.services.fcgiwrap.socketAddress};
'';
cgitrcLine = name: value: "${name}=${
if value == true then
"1"
else if value == false then
"0"
else
toString value
}";
mkCgitrc = cfg: pkgs.writeText "cgitrc" ''
# global settings
${concatStringsSep "\n" (
mapAttrsToList
cgitrcLine
({ virtual-root = cfg.nginx.location; } // cfg.settings)
)
}
${optionalString (cfg.scanPath != null) (cgitrcLine "scan-path" cfg.scanPath)}
# repository settings
${concatStrings (
mapAttrsToList
(url: settings: ''
${cgitrcLine "repo.url" url}
${concatStringsSep "\n" (
mapAttrsToList (name: cgitrcLine "repo.${name}") settings
)
}
'')
cfg.repos
)
}
# extra config
${cfg.extraConfig}
'';
mkCgitReposDir = cfg:
if cfg.scanPath != null then
cfg.scanPath
else
pkgs.runCommand "cgit-repos" {
preferLocalBuild = true;
allowSubstitutes = false;
} ''
mkdir -p "$out"
${
concatStrings (
mapAttrsToList
(name: value: ''
ln -s ${escapeShellArg value.path} "$out"/${escapeShellArg name}
'')
cfg.repos
)
}
'';
in
{
options = {
services.cgit = mkOption {
description = mdDoc "Configure cgit instances.";
default = {};
type = types.attrsOf (types.submodule ({ config, ... }: {
options = {
enable = mkEnableOption (mdDoc "cgit");
package = mkPackageOptionMD pkgs "cgit" {};
nginx.virtualHost = mkOption {
description = mdDoc "VirtualHost to serve cgit on, defaults to the attribute name.";
type = types.str;
default = config._module.args.name;
example = "git.example.com";
};
nginx.location = mkOption {
description = mdDoc "Location to serve cgit under.";
type = types.str;
default = "/";
example = "/git/";
};
repos = mkOption {
description = mdDoc "cgit repository settings, see cgitrc(5)";
type = with types; attrsOf (attrsOf settingType);
default = {};
example = {
blah = {
path = "/var/lib/git/example";
desc = "An example repository";
};
};
};
scanPath = mkOption {
description = mdDoc "A path which will be scanned for repositories.";
type = types.nullOr types.path;
default = null;
example = "/var/lib/git";
};
settings = mkOption {
description = mdDoc "cgit configuration, see cgitrc(5)";
type = types.attrsOf settingType;
default = {};
example = literalExpression ''
{
enable-follow-links = true;
source-filter = "''${pkgs.cgit}/lib/cgit/filters/syntax-highlighting.py";
}
'';
};
extraConfig = mkOption {
description = mdDoc "These lines go to the end of cgitrc verbatim.";
type = types.lines;
default = "";
};
};
}));
};
};
config = mkIf (any (cfg: cfg.enable) (attrValues cfgs)) {
assertions = mapAttrsToList (vhost: cfg: {
assertion = !cfg.enable || (cfg.scanPath == null) != (cfg.repos == {});
message = "Exactly one of services.cgit.${vhost}.scanPath or services.cgit.${vhost}.repos must be set.";
}) cfgs;
services.fcgiwrap.enable = true;
services.nginx.enable = true;
services.nginx.virtualHosts = mkMerge (mapAttrsToList (_: cfg: {
${cfg.nginx.virtualHost} = {
locations = (
genAttrs'
[ "cgit.css" "cgit.png" "favicon.ico" "robots.txt" ]
(name: nameValuePair "= ${stripLocation cfg}/${name}" {
extraConfig = ''
alias ${cfg.package}/cgit/${name};
'';
})
) // {
"~ ${regexLocation cfg}/.+/(info/refs|git-upload-pack)" = {
fastcgiParams = rec {
SCRIPT_FILENAME = "${pkgs.git}/libexec/git-core/git-http-backend";
GIT_HTTP_EXPORT_ALL = "1";
GIT_PROJECT_ROOT = mkCgitReposDir cfg;
HOME = GIT_PROJECT_ROOT;
};
extraConfig = mkFastcgiPass cfg;
};
"${stripLocation cfg}/" = {
fastcgiParams = {
SCRIPT_FILENAME = "${cfg.package}/cgit/cgit.cgi";
QUERY_STRING = "$args";
HTTP_HOST = "$server_name";
CGIT_CONFIG = mkCgitrc cfg;
};
extraConfig = mkFastcgiPass cfg;
};
};
};
}) cfgs);
};
}

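A sketch of one cgit instance built from the module above; per the assertion, exactly one of `scanPath` or `repos` must be set:

```nix
{
  services.cgit."git.example.com" = {
    enable = true;
    scanPath = "/var/lib/git";
    settings = {
      root-title = "Example repositories";
      enable-follow-links = true;
    };
  };
}
```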
View File

@ -269,6 +269,10 @@ in
assertion = cfg.filterForward -> config.networking.nftables.enable;
message = "filterForward only works with the nftables based firewall";
}
{
assertion = cfg.autoLoadConntrackHelpers -> lib.versionOlder config.boot.kernelPackages.kernel.version "6";
message = "conntrack helper autoloading has been removed from kernel 6.0 and newer";
}
];
networking.firewall.trustedInterfaces = [ "lo" ];

View File

@ -42,6 +42,8 @@ let
${if cfg.sslKey == "" then "" else "sslKey="+cfg.sslKey}
${if cfg.sslCa == "" then "" else "sslCA="+cfg.sslCa}
${lib.optionalString (cfg.dbus != null) "dbus=${cfg.dbus}"}
${cfg.extraConfig}
'';
in
@ -282,6 +284,12 @@ in
`murmur` is running.
'';
};
dbus = mkOption {
type = types.enum [ null "session" "system" ];
default = null;
description = lib.mdDoc "Enable D-Bus remote control. Set to the bus you want Murmur to connect to.";
};
};
};
@ -325,5 +333,27 @@ in
Group = "murmur";
};
};
# currently not included in upstream package, addition requested at
# https://github.com/mumble-voip/mumble/issues/6078
services.dbus.packages = mkIf (cfg.dbus == "system") [(pkgs.writeTextFile {
name = "murmur-dbus-policy";
text = ''
<!DOCTYPE busconfig PUBLIC
"-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
<busconfig>
<policy user="murmur">
<allow own="net.sourceforge.mumble.murmur"/>
</policy>
<policy context="default">
<allow send_destination="net.sourceforge.mumble.murmur"/>
<allow receive_sender="net.sourceforge.mumble.murmur"/>
</policy>
</busconfig>
'';
destination = "/share/dbus-1/system.d/murmur.conf";
})];
};
}

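A sketch of the new option; setting it to `"system"` also installs the D-Bus policy defined above:

```nix
{
  services.murmur = {
    enable = true;
    dbus = "system"; # or "session"; null (the default) disables remote control
  };
}
```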
View File

@ -3,8 +3,11 @@
with lib;
let
cfg = config.services.networkd-dispatcher;
in {
options = {
services.networkd-dispatcher = {
@ -14,14 +17,49 @@ in {
for usage.
'');
scriptDir = mkOption {
type = types.path;
default = "/var/lib/networkd-dispatcher";
description = mdDoc ''
This directory is used for keeping various scripts read and run by
networkd-dispatcher. See [upstream instructions](https://gitlab.com/craftyguy/networkd-dispatcher)
for directory structure and script usage.
rules = mkOption {
default = {};
example = lib.literalExpression ''
{ "restart-tor" = {
onState = ["routable" "off"];
script = '''
#!''${pkgs.runtimeShell}
if [[ $IFACE == "wlan0" && $AdministrativeState == "configured" ]]; then
echo "Restarting Tor ..."
systemctl restart tor
fi
exit 0
''';
};
};
'';
description = lib.mdDoc ''
Declarative configuration of networkd-dispatcher rules. See
[upstream instructions](https://gitlab.com/craftyguy/networkd-dispatcher)
for an introduction and example scripts.
'';
type = types.attrsOf (types.submodule {
options = {
onState = mkOption {
type = types.listOf (types.enum [
"routable" "dormant" "no-carrier" "off" "carrier" "degraded"
"configuring" "configured"
]);
default = null;
description = lib.mdDoc ''
List of names of the systemd-networkd operational states which
should trigger the script. See <https://www.freedesktop.org/software/systemd/man/networkctl.html>
for a description of each state type.
'';
};
script = mkOption {
type = types.lines;
description = lib.mdDoc ''
Shell commands executed on specified operational states.
'';
};
};
});
};
};
@ -30,34 +68,31 @@ in {
config = mkIf cfg.enable {
systemd = {
packages = [ pkgs.networkd-dispatcher ];
services.networkd-dispatcher = {
wantedBy = [ "multi-user.target" ];
# Override existing ExecStart definition
serviceConfig.ExecStart = [
serviceConfig.ExecStart = let
scriptDir = pkgs.symlinkJoin {
name = "networkd-dispatcher-script-dir";
paths = lib.mapAttrsToList (name: cfg:
(map(state:
pkgs.writeTextFile {
inherit name;
text = cfg.script;
destination = "/${state}.d/${name}";
executable = true;
}
) cfg.onState)
) cfg.rules;
};
in [
""
"${pkgs.networkd-dispatcher}/bin/networkd-dispatcher -v --script-dir ${cfg.scriptDir} $networkd_dispatcher_args"
"${pkgs.networkd-dispatcher}/bin/networkd-dispatcher -v --script-dir ${scriptDir} $networkd_dispatcher_args"
];
};
# Directory structure required according to upstream instructions
# https://gitlab.com/craftyguy/networkd-dispatcher
tmpfiles.rules = [
"d '${cfg.scriptDir}' 0750 root root - -"
"d '${cfg.scriptDir}/routable.d' 0750 root root - -"
"d '${cfg.scriptDir}/dormant.d' 0750 root root - -"
"d '${cfg.scriptDir}/no-carrier.d' 0750 root root - -"
"d '${cfg.scriptDir}/off.d' 0750 root root - -"
"d '${cfg.scriptDir}/carrier.d' 0750 root root - -"
"d '${cfg.scriptDir}/degraded.d' 0750 root root - -"
"d '${cfg.scriptDir}/configuring.d' 0750 root root - -"
"d '${cfg.scriptDir}/configured.d' 0750 root root - -"
];
};
};
}

View File

@ -28,6 +28,32 @@ in
<https://wiki.nftables.org/wiki-nftables/index.php/Troubleshooting#Question_4._How_do_nftables_and_iptables_interact_when_used_on_the_same_system.3F>.
'';
};
networking.nftables.checkRuleset = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Run `nft --check` on the ruleset to spot syntax errors at build time.
Because the check runs in a sandbox, it may fail if the ruleset requires
access to any environmental factors or paths outside the Nix store.
To work around this, the ruleset file can be rewritten with the
preCheckRuleset option so that the check passes inside the sandbox.
'';
};
networking.nftables.preCheckRuleset = mkOption {
type = types.lines;
default = "";
example = lib.literalExpression ''
sed 's/skgid meadow/skgid nogroup/g' -i ruleset.conf
'';
description = lib.mdDoc ''
This script is run before the ruleset is checked. It can be used to
create additional files needed for the check to work, or to modify the
ruleset for cases the build environment cannot cover.
'';
};
networking.nftables.ruleset = mkOption {
type = types.lines;
default = "";
@ -105,13 +131,24 @@ in
wantedBy = [ "multi-user.target" ];
reloadIfChanged = true;
serviceConfig = let
rulesScript = pkgs.writeScript "nftables-rules" ''
#! ${pkgs.nftables}/bin/nft -f
flush ruleset
${if cfg.rulesetFile != null then ''
include "${cfg.rulesetFile}"
'' else cfg.ruleset}
'';
rulesScript = pkgs.writeTextFile {
name = "nftables-rules";
executable = true;
text = ''
#! ${pkgs.nftables}/bin/nft -f
flush ruleset
${if cfg.rulesetFile != null then ''
include "${cfg.rulesetFile}"
'' else cfg.ruleset}
'';
checkPhase = lib.optionalString cfg.checkRuleset ''
cp $out ruleset.conf
${cfg.preCheckRuleset}
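# libredirect maps /etc/protocols and /etc/services lookups to the
# iana-etc package; liblkl-hijack supplies a Linux Kernel Library
# network stack so the check does not need the host kernel.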
export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
'';
};
in {
Type = "oneshot";
RemainAfterExit = true;


@ -203,7 +203,7 @@ in
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "chown" ] ++ lib.optional pkgs.stdenv.hostPlatform.isAarch64 "fchownat";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "@chown" ];
};
};
};


@ -11,6 +11,14 @@ in
services.teleport = with lib.types; {
enable = mkEnableOption (lib.mdDoc "the Teleport service");
package = mkOption {
type = types.package;
default = pkgs.teleport;
defaultText = lib.literalMD "pkgs.teleport";
example = lib.literalMD "pkgs.teleport_11";
description = lib.mdDoc "The teleport package to use";
};
settings = mkOption {
type = settingsYaml.type;
default = { };
@ -74,14 +82,14 @@ in
};
config = mkIf config.services.teleport.enable {
environment.systemPackages = [ pkgs.teleport ];
environment.systemPackages = [ cfg.package ];
systemd.services.teleport = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = ''
${pkgs.teleport}/bin/teleport start \
${cfg.package}/bin/teleport start \
${optionalString cfg.insecure.enable "--insecure"} \
${optionalString cfg.diag.enable "--diag-addr=${cfg.diag.addr}:${toString cfg.diag.port}"} \
${optionalString (cfg.settings != { }) "--config=${settingsYaml.generate "teleport.yaml" cfg.settings}"}
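With the new option, pinning a different release becomes a one-line change; a sketch using the example package from above:

  services.teleport = {
    enable = true;
    package = pkgs.teleport_11;
  };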


@ -461,7 +461,7 @@ let
${ipPreMove} link add dev "${name}" type wireguard
${optionalString (values.interfaceNamespace != null && values.interfaceNamespace != values.socketNamespace) ''${ipPreMove} link set "${name}" netns "${ns}"''}
${optionalString (values.mtu != null) ''${ipPreMove} link set "${name}" mtu ${toString values.mtu}''}
${optionalString (values.mtu != null) ''${ipPostMove} link set "${name}" mtu ${toString values.mtu}''}
${concatMapStringsSep "\n" (ip:
''${ipPostMove} address add "${ip}" dev "${name}"''


@ -318,8 +318,8 @@ to make packages available in the chroot.
{option}`services.systemd.akkoma.serviceConfig.BindPaths` and
{option}`services.systemd.akkoma.serviceConfig.BindReadOnlyPaths` permit access to outside paths
through bind mounts. Refer to
[{manpage}`systemd.exec(5)`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
for details.
[`BindPaths=`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
of {manpage}`systemd.exec(5)` for details.
### Distributed deployment {#modules-services-akkoma-distributed-deployment}


@ -0,0 +1,217 @@
{ config, lib, options, pkgs, ... }:
with lib;
let
cfg = config.services.coder;
name = "coder";
in {
options = {
services.coder = {
enable = mkEnableOption (lib.mdDoc "Coder service");
user = mkOption {
type = types.str;
default = "coder";
description = lib.mdDoc ''
User under which the coder service runs.
::: {.note}
If left as the default value this user will automatically be created
on system activation, otherwise it needs to be configured manually.
:::
'';
};
group = mkOption {
type = types.str;
default = "coder";
description = lib.mdDoc ''
Group under which the coder service runs.
::: {.note}
If left as the default value this group will automatically be created
on system activation, otherwise it needs to be configured manually.
:::
'';
};
package = mkOption {
type = types.package;
default = pkgs.coder;
description = lib.mdDoc ''
Package to use for the service.
'';
defaultText = literalExpression "pkgs.coder";
};
homeDir = mkOption {
type = types.str;
description = lib.mdDoc ''
Home directory for the coder user.
'';
default = "/var/lib/coder";
};
listenAddress = mkOption {
type = types.str;
description = lib.mdDoc ''
Listen address.
'';
default = "127.0.0.1:3000";
};
accessUrl = mkOption {
type = types.nullOr types.str;
description = lib.mdDoc ''
The access URL should be an external IP address or a domain with DNS records pointing to Coder.
'';
default = null;
example = "https://coder.example.com";
};
wildcardAccessUrl = mkOption {
type = types.nullOr types.str;
description = lib.mdDoc ''
Wildcard access URL, used for forwarding workspace applications. If you are providing TLS certificates directly to the Coder server, you must use a single certificate covering both the root and the wildcard domains.
'';
default = null;
example = "*.coder.example.com";
};
database = {
createLocally = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Create the database and database user locally.
'';
};
host = mkOption {
type = types.str;
default = "/run/postgresql";
description = lib.mdDoc ''
Host of the database, either a hostname or a socket directory.
'';
};
database = mkOption {
type = types.str;
default = "coder";
description = lib.mdDoc ''
Name of the database.
'';
};
username = mkOption {
type = types.str;
default = "coder";
description = lib.mdDoc ''
Username for accessing the database.
'';
};
password = mkOption {
type = types.nullOr types.str;
default = null;
description = lib.mdDoc ''
Password for accessing the database.
'';
};
sslmode = mkOption {
type = types.nullOr types.str;
default = "disable";
description = lib.mdDoc ''
SSL mode to use when connecting to the database.
'';
};
};
tlsCert = mkOption {
type = types.nullOr types.path;
description = lib.mdDoc ''
The path to the TLS certificate.
'';
default = null;
};
tlsKey = mkOption {
type = types.nullOr types.path;
description = lib.mdDoc ''
The path to the TLS key.
'';
default = null;
};
};
};
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.database.createLocally -> cfg.database.username == name;
message = "services.coder.database.username must be set to ${user} if services.coder.database.createLocally is set true";
}
];
systemd.services.coder = {
description = "Coder - Self-hosted developer workspaces on your infra";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
CODER_ACCESS_URL = cfg.accessUrl;
CODER_WILDCARD_ACCESS_URL = cfg.wildcardAccessUrl;
CODER_PG_CONNECTION_URL = "user=${cfg.database.username} ${optionalString (cfg.database.password != null) "password=${cfg.database.password}"} database=${cfg.database.database} host=${cfg.database.host} ${optionalString (cfg.database.sslmode != null) "sslmode=${cfg.database.sslmode}"}";
CODER_ADDRESS = cfg.listenAddress;
CODER_TLS_ENABLE = optionalString (cfg.tlsCert != null) "1";
CODER_TLS_CERT_FILE = cfg.tlsCert;
CODER_TLS_KEY_FILE = cfg.tlsKey;
};
serviceConfig = {
ProtectSystem = "full";
PrivateTmp = "yes";
PrivateDevices = "yes";
SecureBits = "keep-caps";
AmbientCapabilities = "CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
CacheDirectory = "coder";
CapabilityBoundingSet = "CAP_SYSLOG CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
KillSignal = "SIGINT";
KillMode = "mixed";
NoNewPrivileges = "yes";
Restart = "on-failure";
ExecStart = "${cfg.package}/bin/coder server";
User = cfg.user;
Group = cfg.group;
};
};
services.postgresql = lib.mkIf cfg.database.createLocally {
enable = true;
ensureDatabases = [
cfg.database.database
];
ensureUsers = [{
name = cfg.database.username;
ensurePermissions = {
"DATABASE \"${cfg.database.database}\"" = "ALL PRIVILEGES";
};
}
];
};
users.groups = optionalAttrs (cfg.group == name) {
"${cfg.group}" = {};
};
users.users = optionalAttrs (cfg.user == name) {
${name} = {
description = "Coder service user";
group = cfg.group;
home = cfg.homeDir;
createHome = true;
isSystemUser = true;
};
};
};
}
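A minimal sketch of enabling the service; the domain is illustrative, and the local PostgreSQL database is provisioned automatically because database.createLocally defaults to true:

  services.coder = {
    enable = true;
    accessUrl = "https://coder.example.com";
  };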


@ -5,7 +5,7 @@ let
package = pkgs.dolibarr.override { inherit (cfg) stateDir; };
cfg = config.services.dolibarr;
vhostCfg = lib.optionalAttr (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
vhostCfg = lib.optionalAttrs (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
mkConfigFile = filename: settings:
let


@ -112,10 +112,8 @@ let
''));
commonHttpConfig = ''
# The mime type definitions included with nginx are very incomplete, so
# we use a list of mime types from the mailcap package, which is also
# used by most other Linux distributions by default.
include ${pkgs.mailcap}/etc/nginx/mime.types;
# Load mime types.
include ${cfg.defaultMimeTypes};
# When recommendedOptimisation is disabled nginx fails to start because the mailmap mime.types database
# contains 1026 entries and the default is only 1024. Setting to a higher number to remove the need to
# overwrite it because nginx does not allow duplicated settings.
@ -529,6 +527,18 @@ in
'';
};
defaultMimeTypes = mkOption {
type = types.path;
default = "${pkgs.mailcap}/etc/nginx/mime.types";
defaultText = literalExpression "$''{pkgs.mailcap}/etc/nginx/mime.types";
example = literalExpression "$''{pkgs.nginx}/conf/mime.types";
description = lib.mdDoc ''
Default MIME types for NGINX. The MIME type definitions shipped with
NGINX are very incomplete, so by default we use the ones bundled in the
mailcap package, which most other Linux distributions also use.
'';
};
package = mkOption {
default = pkgs.nginxStable;
defaultText = literalExpression "pkgs.nginxStable";
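For instance, the leaner definitions shipped with NGINX itself can be swapped in, as the option's example above shows:

  services.nginx.defaultMimeTypes = "${pkgs.nginx}/conf/mime.types";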


@ -169,6 +169,9 @@ in
};
services.udev.packages = [
pkgs.pantheon.gnome-settings-daemon
# Force enable KMS modifiers for devices that require them.
# https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1443
pkgs.pantheon.mutter
];
systemd.packages = [
pkgs.pantheon.gnome-settings-daemon


@ -173,7 +173,7 @@ in
systemd.services.phosh = {
wantedBy = [ "graphical.target" ];
serviceConfig = {
ExecStart = "${cfg.package}/bin/phosh";
ExecStart = "${cfg.package}/bin/phosh-session";
User = cfg.user;
Group = cfg.group;
PAMName = "login";


@ -448,6 +448,7 @@ in
kio-extras
];
optionalPackages = [
ark
elisa
gwenview
okular


@ -0,0 +1,23 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.windowManager.nimdow;
in
{
options = {
services.xserver.windowManager.nimdow.enable = mkEnableOption (lib.mdDoc "nimdow");
};
config = mkIf cfg.enable {
services.xserver.windowManager.session = singleton {
name = "nimdow";
start = ''
${pkgs.nimdow}/bin/nimdow &
waitPID=$!
'';
};
environment.systemPackages = [ pkgs.nimdow ];
};
}
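Enabling the new window manager is then a one-liner; a display manager is assumed to be configured separately:

  services.xserver = {
    enable = true;
    windowManager.nimdow.enable = true;
  };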


@ -256,7 +256,7 @@ in
videoDrivers = mkOption {
type = types.listOf types.str;
default = [ "amdgpu" "radeon" "nouveau" "modesetting" "fbdev" ];
default = [ "modesetting" "fbdev" ];
example = [
"nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
"amdgpu-pro"


@ -450,8 +450,9 @@ sub addEntry {
# Include second initrd with secrets
if (-e -x "$path/append-initrd-secrets") {
my $initrdName = basename($initrd);
my $initrdSecretsPath = "$bootPath/kernels/$initrdName-secrets";
# Name the initrd secrets after the system from which they're derived.
my $systemName = basename(Cwd::abs_path("$path"));
my $initrdSecretsPath = "$bootPath/kernels/$systemName-secrets";
mkpath(dirname($initrdSecretsPath), 0, 0755);
my $oldUmask = umask;
@ -470,7 +471,7 @@ sub addEntry {
if (-e $initrdSecretsPathTemp && ! -z _) {
rename $initrdSecretsPathTemp, $initrdSecretsPath or die "failed to move initrd secrets into place: $!\n";
$copied{$initrdSecretsPath} = 1;
$initrd .= " " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$initrdName-secrets";
$initrd .= " " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$systemName-secrets";
} else {
unlink $initrdSecretsPathTemp;
rmdir dirname($initrdSecretsPathTemp);


@ -85,18 +85,18 @@ def copy_from_profile(profile: Optional[str], generation: int, specialisation: O
return efi_file_path
def describe_generation(generation_dir: str) -> str:
def describe_generation(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str:
try:
with open("%s/nixos-version" % generation_dir) as f:
with open(profile_path(profile, generation, specialisation, "nixos-version")) as f:
nixos_version = f.read()
except IOError:
nixos_version = "Unknown"
kernel_dir = os.path.dirname(os.path.realpath("%s/kernel" % generation_dir))
kernel_dir = os.path.dirname(profile_path(profile, generation, specialisation, "kernel"))
module_dir = glob.glob("%s/lib/modules/*" % kernel_dir)[0]
kernel_version = os.path.basename(module_dir)
build_time = int(os.path.getctime(generation_dir))
build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
description = "@distroName@ {}, Linux Kernel {}, Built on {}".format(
@ -131,11 +131,10 @@ def write_entry(profile: Optional[str], generation: int, specialisation: Optiona
"or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
generation_conf_filename(profile, generation, specialisation))
generation_dir = os.readlink(system_dir(profile, generation, specialisation))
tmp_path = "%s.tmp" % (entry_file)
kernel_params = "init=%s/init " % generation_dir
kernel_params = "init=%s " % profile_path(profile, generation, specialisation, "init")
with open("%s/kernel-params" % (generation_dir)) as params_file:
with open(profile_path(profile, generation, specialisation, "kernel-params")) as params_file:
kernel_params = kernel_params + params_file.read()
with open(tmp_path, 'w') as f:
f.write(BOOT_ENTRY.format(title=title,
@ -143,7 +142,7 @@ def write_entry(profile: Optional[str], generation: int, specialisation: Optiona
kernel=kernel,
initrd=initrd,
kernel_params=kernel_params,
description=describe_generation(generation_dir)))
description=describe_generation(profile, generation, specialisation)))
if machine_id is not None:
f.write("machine-id %s\n" % machine_id)
os.rename(tmp_path, entry_file)
@ -296,7 +295,7 @@ def main() -> None:
remove_old_entries(gens)
for gen in gens:
try:
is_default = os.readlink(system_dir(*gen)) == args.default_config
is_default = os.path.dirname(profile_path(*gen, "init")) == args.default_config
write_entry(*gen, machine_id, current=is_default)
for specialisation in get_specialisations(*gen):
write_entry(*specialisation, machine_id, current=is_default)


@ -1948,7 +1948,7 @@ in
Extra command-line arguments to pass to systemd-networkd-wait-online.
These also affect per-interface `systemd-network-wait-online@` services.
See [{manpage}`systemd-networkd-wait-online.service(8)`](https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online.service.html) for all available options.
See {manpage}`systemd-networkd-wait-online.service(8)` for all available options.
'';
type = with types; listOf str;
default = [];


@ -614,7 +614,7 @@ in
# Avoid potentially degraded system state due to
# "Userspace Out-Of-Memory (OOM) Killer was skipped because of a failed condition check (ConditionControlGroupController=v2)."
systemd.services.systemd-oomd.enable = mkIf (!cfg.enableUnifiedCgroupHierarchy) false;
systemd.oomd.enable = mkIf (!cfg.enableUnifiedCgroupHierarchy) false;
services.logrotate.settings = {
"/var/log/btmp" = mapAttrs (_: mkDefault) {


@ -100,7 +100,7 @@ in
logDriver =
mkOption {
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"];
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs" "local"];
default = "journald";
description =
lib.mdDoc ''
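A sketch of opting into the newly allowed value:

  virtualisation.docker = {
    enable = true;
    logDriver = "local";
  };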


@ -108,9 +108,9 @@ let
set -e
NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}")
NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${toString config.virtualisation.diskImage}}") || test -z "$NIX_DISK_IMAGE"
if ! test -e "$NIX_DISK_IMAGE"; then
if test -n "$NIX_DISK_IMAGE" && ! test -e "$NIX_DISK_IMAGE"; then
${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
${toString config.virtualisation.diskSize}M
fi
@ -152,9 +152,11 @@ let
${lib.optionalString cfg.useBootLoader
''
# Create a writable copy/snapshot of the boot disk.
# A writable boot disk can be booted from automatically.
${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${bootDisk}/disk.img "$TMPDIR/disk.img"
if ${if !cfg.persistBootDevice then "true" else "! test -e $TMPDIR/disk.img"}; then
# Create a writable copy/snapshot of the boot disk.
# A writable boot disk can be booted from automatically.
${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${bootDisk}/disk.img "$TMPDIR/disk.img"
fi
NIX_EFI_VARS=$(readlink -f "''${NIX_EFI_VARS:-${cfg.efiVars}}")
@ -346,7 +348,7 @@ in
virtualisation.diskImage =
mkOption {
type = types.str;
type = types.nullOr types.str;
default = "./${config.system.name}.qcow2";
defaultText = literalExpression ''"./''${config.system.name}.qcow2"'';
description =
@ -354,6 +356,9 @@ in
Path to the disk image containing the root filesystem.
The image will be created on startup if it does not
exist.
If null, a tmpfs will be used as the root filesystem and
the VM's state will not be persistent.
'';
};
@ -367,6 +372,17 @@ in
'';
};
virtualisation.persistBootDevice =
mkOption {
type = types.bool;
default = false;
description =
lib.mdDoc ''
If useBootLoader is enabled, whether to recreate the boot device
on each instantiation or allow it to persist.
'';
};
virtualisation.emptyDiskImages =
mkOption {
type = types.listOf types.ints.positive;
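A sketch combining the two additions, giving a stateless root filesystem while reusing the generated boot disk; only meaningful together with useBootLoader:

  virtualisation = {
    useBootLoader = true;
    diskImage = null;          # root on tmpfs, no persistent state
    persistBootDevice = true;  # keep the boot disk across runs
  };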
@ -850,6 +866,8 @@ in
# * The disks are attached in `virtualisation.qemu.drives`.
# Their order makes them appear as devices `a`, `b`, etc.
# * `fileSystems."/boot"` is adjusted to be on device `b`.
# * The disk.img is recreated each time the VM is booted unless
# virtualisation.persistBootDevice is set.
# If `useBootLoader`, GRUB goes to the second disk, see
# note [Disk layout with `useBootLoader`].
@ -892,7 +910,7 @@ in
${optionalString cfg.writableStore ''
echo "mounting overlay filesystem on /nix/store..."
mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
mkdir -p -m 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
mount -t overlay overlay $targetRoot/nix/store \
-o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
''}
@ -990,12 +1008,12 @@ in
];
virtualisation.qemu.drives = mkMerge [
[{
(mkIf (cfg.diskImage != null) [{
name = "root";
file = ''"$NIX_DISK_IMAGE"'';
driveExtraOpts.cache = "writeback";
driveExtraOpts.werror = "report";
}]
}])
(mkIf cfg.useNixStoreImage [{
name = "nix-store";
file = ''"$TMPDIR"/store.img'';
@ -1018,20 +1036,21 @@ in
}) cfg.emptyDiskImages)
];
fileSystems = mkVMOverride cfg.fileSystems;
# Mount the host filesystem via 9P, and bind-mount the Nix store
# of the host into our own filesystem. We use mkVMOverride to
# allow this module to be applied to "normal" NixOS system
# configuration, where the regular value for the `fileSystems'
# attribute should be disregarded for the purpose of building a VM
# test image (since those filesystems don't exist in the VM).
fileSystems =
let
virtualisation.fileSystems = let
mkSharedDir = tag: share:
{
name =
if tag == "nix-store" && cfg.writableStore
then "/nix/.ro-store"
else share.target;
then "/nix/.ro-store"
else share.target;
value.device = tag;
value.fsType = "9p";
value.neededForBoot = true;
@ -1039,44 +1058,42 @@ in
[ "trans=virtio" "version=9p2000.L" "msize=${toString cfg.msize}" ]
++ lib.optional (tag == "nix-store") "cache=loose";
};
in
mkVMOverride (cfg.fileSystems //
optionalAttrs cfg.useDefaultFilesystems {
"/".device = cfg.bootDevice;
"/".fsType = "ext4";
"/".autoFormat = true;
} //
optionalAttrs config.boot.tmpOnTmpfs {
"/tmp" = {
in lib.mkMerge [
(lib.mapAttrs' mkSharedDir cfg.sharedDirectories)
{
"/" = lib.mkIf cfg.useDefaultFilesystems (if cfg.diskImage == null then {
device = "tmpfs";
fsType = "tmpfs";
} else {
device = cfg.bootDevice;
fsType = "ext4";
autoFormat = true;
});
"/tmp" = lib.mkIf config.boot.tmpOnTmpfs {
device = "tmpfs";
fsType = "tmpfs";
neededForBoot = true;
# Sync with systemd's tmp.mount;
options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmpOnTmpfsSize}" ];
};
} //
optionalAttrs cfg.useNixStoreImage {
"/nix/${if cfg.writableStore then ".ro-store" else "store"}" = {
"/nix/${if cfg.writableStore then ".ro-store" else "store"}" = lib.mkIf cfg.useNixStoreImage {
device = "${lookupDriveDeviceName "nix-store" cfg.qemu.drives}";
neededForBoot = true;
options = [ "ro" ];
};
} //
optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs) {
"/nix/.rw-store" = {
"/nix/.rw-store" = lib.mkIf (cfg.writableStore && cfg.writableStoreUseTmpfs) {
fsType = "tmpfs";
options = [ "mode=0755" ];
neededForBoot = true;
};
} //
optionalAttrs cfg.useBootLoader {
# see note [Disk layout with `useBootLoader`]
"/boot" = {
"/boot" = lib.mkIf cfg.useBootLoader {
device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
fsType = "vfat";
noCheck = true; # fsck fails on a r/o filesystem
};
} // lib.mapAttrs' mkSharedDir cfg.sharedDirectories);
}
];
boot.initrd.systemd = lib.mkIf (config.boot.initrd.systemd.enable && cfg.writableStore) {
mounts = [{
@ -1095,7 +1112,7 @@ in
unitConfig.DefaultDependencies = false;
serviceConfig = {
Type = "oneshot";
ExecStart = "/bin/mkdir -p 0755 /sysroot/nix/.rw-store/store /sysroot/nix/.rw-store/work /sysroot/nix/store";
ExecStart = "/bin/mkdir -p -m 0755 /sysroot/nix/.rw-store/store /sysroot/nix/.rw-store/work /sysroot/nix/store";
};
};
};


@ -81,7 +81,7 @@ in {
extraDisk = mkOption {
description = lib.mdDoc ''
Optional extra disk/hdd configuration.
The disk will be an 'ext4' partition on a separate VMDK file.
The disk will be an 'ext4' partition on a separate file.
'';
default = null;
example = {
@ -183,8 +183,8 @@ in {
export HOME=$PWD
export PATH=${pkgs.virtualbox}/bin:$PATH
echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
echo "converting image to VirtualBox format..."
VBoxManage convertfromraw $diskImage disk.vdi
${optionalString (cfg.extraDisk != null) ''
echo "creating extra disk: data-disk.raw"
@ -196,8 +196,8 @@ in {
mkpart primary ext4 1MiB -1
eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
echo "creating extra disk: data-disk.vmdk"
VBoxManage internalcommands createrawvmdk -filename data-disk.vmdk -rawdisk $dataDiskImage
echo "creating extra disk: data-disk.vdi"
VBoxManage convertfromraw $dataDiskImage data-disk.vdi
''}
echo "creating VirtualBox VM..."
@ -209,10 +209,10 @@ in {
${lib.cli.toGNUCommandLineShell { } cfg.params}
VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
--medium disk.vmdk
--medium disk.vdi
${optionalString (cfg.extraDisk != null) ''
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
--medium data-disk.vmdk
--medium data-disk.vdi
''}
echo "exporting VirtualBox VM..."


@ -100,7 +100,6 @@ in rec {
(onFullSupported "nixos.tests.login")
(onFullSupported "nixos.tests.misc")
(onFullSupported "nixos.tests.mutableUsers")
(onFullSupported "nixos.tests.nat.firewall-conntrack")
(onFullSupported "nixos.tests.nat.firewall")
(onFullSupported "nixos.tests.nat.standalone")
(onFullSupported "nixos.tests.networking.scripted.bond")


@ -118,7 +118,6 @@ in rec {
"nixos.tests.ipv6"
"nixos.tests.login"
"nixos.tests.misc"
"nixos.tests.nat.firewall-conntrack"
"nixos.tests.nat.firewall"
"nixos.tests.nat.standalone"
"nixos.tests.nfs3.simple"


@ -108,6 +108,7 @@ in {
breitbandmessung = handleTest ./breitbandmessung.nix {};
brscan5 = handleTest ./brscan5.nix {};
btrbk = handleTest ./btrbk.nix {};
btrbk-doas = handleTest ./btrbk-doas.nix {};
btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
btrbk-section-order = handleTest ./btrbk-section-order.nix {};
buildbot = handleTest ./buildbot.nix {};
@ -125,6 +126,7 @@ in {
ceph-single-node-bluestore = handleTestOn ["x86_64-linux"] ./ceph-single-node-bluestore.nix {};
certmgr = handleTest ./certmgr.nix {};
cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
cgit = handleTest ./cgit.nix {};
charliecloud = handleTest ./charliecloud.nix {};
chromium = (handleTestOn ["aarch64-linux" "x86_64-linux"] ./chromium.nix {}).stable or {};
chrony-ptp = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony-ptp.nix {};
@ -137,6 +139,7 @@ in {
cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
cockpit = handleTest ./cockpit.nix {};
cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
coder = handleTest ./coder.nix {};
collectd = handleTest ./collectd.nix {};
connman = handleTest ./connman.nix {};
consul = handleTest ./consul.nix {};
@ -238,11 +241,13 @@ in {
ft2-clone = handleTest ./ft2-clone.nix {};
mimir = handleTest ./mimir.nix {};
garage = handleTest ./garage {};
gemstash = handleTest ./gemstash.nix {};
gerrit = handleTest ./gerrit.nix {};
geth = handleTest ./geth.nix {};
ghostunnel = handleTest ./ghostunnel.nix {};
gitdaemon = handleTest ./gitdaemon.nix {};
gitea = handleTest ./gitea.nix { giteaPackage = pkgs.gitea; };
github-runner = handleTest ./github-runner.nix {};
gitlab = handleTest ./gitlab.nix {};
gitolite = handleTest ./gitolite.nix {};
gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
@ -307,6 +312,7 @@ in {
initrd-network-ssh = handleTest ./initrd-network-ssh {};
initrdNetwork = handleTest ./initrd-network.nix {};
initrd-secrets = handleTest ./initrd-secrets.nix {};
initrd-secrets-changing = handleTest ./initrd-secrets-changing.nix {};
input-remapper = handleTest ./input-remapper.nix {};
inspircd = handleTest ./inspircd.nix {};
installer = handleTest ./installer.nix {};
@ -433,10 +439,8 @@ in {
nagios = handleTest ./nagios.nix {};
nar-serve = handleTest ./nar-serve.nix {};
nat.firewall = handleTest ./nat.nix { withFirewall = true; };
nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
nat.standalone = handleTest ./nat.nix { withFirewall = false; };
nat.nftables.firewall = handleTest ./nat.nix { withFirewall = true; nftables = true; };
nat.nftables.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; nftables = true; };
nat.nftables.standalone = handleTest ./nat.nix { withFirewall = false; nftables = true; };
nats = handleTest ./nats.nix {};
navidrome = handleTest ./navidrome.nix {};
@ -581,6 +585,7 @@ in {
radarr = handleTest ./radarr.nix {};
radicale = handleTest ./radicale.nix {};
rasdaemon = handleTest ./rasdaemon.nix {};
readarr = handleTest ./readarr.nix {};
redis = handleTest ./redis.nix {};
redmine = handleTest ./redmine.nix {};
restartByActivationScript = handleTest ./restart-by-activation-script.nix {};
@ -688,6 +693,7 @@ in {
terminal-emulators = handleTest ./terminal-emulators.nix {};
tiddlywiki = handleTest ./tiddlywiki.nix {};
tigervnc = handleTest ./tigervnc.nix {};
timescaledb = handleTest ./timescaledb.nix {};
timezone = handleTest ./timezone.nix {};
tinc = handleTest ./tinc {};
tinydns = handleTest ./tinydns.nix {};


@ -54,7 +54,7 @@ with lib;
client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml")
# log in to atuin server on client node
client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k {key}")
client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k \"{key}\"")
# pull records from atuin server
client.succeed("${atuin}/bin/atuin sync -f")

nixos/tests/btrbk-doas.nix Normal file

@ -0,0 +1,114 @@
import ./make-test-python.nix ({ pkgs, ... }:
let
privateKey = ''
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
-----END OPENSSH PRIVATE KEY-----
'';
publicKey = ''
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
'';
in
{
name = "btrbk-doas";
meta = with pkgs.lib; {
maintainers = with maintainers; [ symphorien tu-maurice ];
};
nodes = {
archive = { ... }: {
security.sudo.enable = false;
security.doas.enable = true;
environment.systemPackages = with pkgs; [ btrfs-progs ];
# note: this makes the privateKey world readable.
# don't do it with real ssh keys.
environment.etc."btrbk_key".text = privateKey;
services.btrbk = {
extraPackages = [ pkgs.lz4 ];
instances = {
remote = {
onCalendar = "minutely";
settings = {
ssh_identity = "/etc/btrbk_key";
ssh_user = "btrbk";
stream_compress = "lz4";
volume = {
"ssh://main/mnt" = {
target = "/mnt";
snapshot_dir = "btrbk/remote";
subvolume = "to_backup";
};
};
};
};
};
};
};
main = { ... }: {
security.sudo.enable = false;
security.doas.enable = true;
environment.systemPackages = with pkgs; [ btrfs-progs ];
services.openssh = {
enable = true;
passwordAuthentication = false;
kbdInteractiveAuthentication = false;
};
services.btrbk = {
extraPackages = [ pkgs.lz4 ];
sshAccess = [
{
key = publicKey;
roles = [ "source" "send" "info" "delete" ];
}
];
instances = {
local = {
onCalendar = "minutely";
settings = {
volume = {
"/mnt" = {
snapshot_dir = "btrbk/local";
subvolume = "to_backup";
};
};
};
};
};
};
};
};
testScript = ''
start_all()
# create btrfs partition at /mnt
for machine in (archive, main):
machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
machine.succeed("mkfs.btrfs /data_fs")
machine.succeed("mkdir /mnt")
machine.succeed("mount /data_fs /mnt")
# what to backup and where
main.succeed("btrfs subvolume create /mnt/to_backup")
main.succeed("mkdir -p /mnt/btrbk/{local,remote}")
# check that local snapshots work
with subtest("local"):
main.succeed("echo foo > /mnt/to_backup/bar")
main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
main.succeed("echo bar > /mnt/to_backup/bar")
main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")
# check that btrfs send/receive works and ssh access works
with subtest("remote"):
archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
main.succeed("echo baz > /mnt/to_backup/bar")
archive.succeed("cat /mnt/*/bar | grep bar")
'';
})

nixos/tests/cgit.nix Normal file

@ -0,0 +1,73 @@
import ./make-test-python.nix ({ pkgs, ... }:
let
robotsTxt = pkgs.writeText "cgit-robots.txt" ''
User-agent: *
Disallow: /
'';
in {
name = "cgit";
meta = with pkgs.lib.maintainers; {
maintainers = [ schnusch ];
};
nodes = {
server = { ... }: {
services.cgit."localhost" = {
enable = true;
package = pkgs.cgit.overrideAttrs ({ postInstall, ... }: {
postInstall = ''
${postInstall}
cp ${robotsTxt} "$out/cgit/robots.txt"
'';
});
nginx.location = "/(c)git/";
repos = {
some-repo = {
path = "/srv/git/some-repo";
desc = "some-repo description";
};
};
};
environment.systemPackages = [ pkgs.git ];
};
};
testScript = { nodes, ... }: ''
start_all()
server.wait_for_unit("nginx.service")
server.wait_for_unit("network.target")
server.wait_for_open_port(80)
server.succeed("curl -fsS http://localhost/%28c%29git/cgit.css")
server.succeed("curl -fsS http://localhost/%28c%29git/robots.txt | diff -u - ${robotsTxt}")
server.succeed(
"curl -fsS http://localhost/%28c%29git/ | grep -F 'some-repo description'"
)
server.fail("curl -fsS http://localhost/robots.txt")
server.succeed("${pkgs.writeShellScript "setup-cgit-test-repo" ''
set -e
git init --bare -b master /srv/git/some-repo
git init -b master reference
cd reference
git remote add origin /srv/git/some-repo
date > date.txt
git add date.txt
git -c user.name=test -c user.email=test@localhost commit -m 'add date'
git push -u origin master
''}")
server.succeed(
"curl -fsS 'http://localhost/%28c%29git/some-repo/plain/date.txt?id=master' | diff -u reference/date.txt -"
)
server.succeed(
"git clone http://localhost/%28c%29git/some-repo && diff -u reference/date.txt some-repo/date.txt"
)
'';
})


@ -1,6 +1,6 @@
import ./make-test-python.nix ({ pkgs, ... }: {
name = "clickhouse";
meta.maintainers = with pkgs.lib.maintainers; [ ma27 ];
meta.maintainers = with pkgs.lib.maintainers; [ ];
nodes.machine = {
services.clickhouse.enable = true;

nixos/tests/coder.nix Normal file

@ -0,0 +1,24 @@
import ./make-test-python.nix ({ pkgs, ... }: {
name = "coder";
meta = with pkgs.lib.maintainers; {
maintainers = [ shyim ghuntley ];
};
nodes.machine =
{ pkgs, ... }:
{
services.coder = {
enable = true;
accessUrl = "http://localhost:3000";
};
};
testScript = ''
machine.start()
machine.wait_for_unit("postgresql.service")
machine.wait_for_unit("coder.service")
machine.wait_for_open_port(3000)
machine.succeed("curl --fail http://localhost:3000")
'';
})


@ -1,6 +1,52 @@
# this test creates a simple GNU image with docker tools and sees if it executes
import ./make-test-python.nix ({ pkgs, ... }: {
import ./make-test-python.nix ({ pkgs, ... }:
let
# nixpkgs#214434: dockerTools.buildImage fails to unpack base images
# containing duplicate layers when those duplicate tarballs
# appear under the manifest's 'Layers'. Docker can generate images
# like this even though dockerTools does not.
repeatedLayerTestImage =
let
# Rootfs diffs for layers 1 and 2 are identical (and empty)
layer1 = pkgs.dockerTools.buildImage { name = "empty"; };
layer2 = layer1.overrideAttrs (_: { fromImage = layer1; });
repeatedRootfsDiffs = pkgs.runCommandNoCC "image-with-links.tar" {
nativeBuildInputs = [pkgs.jq];
} ''
mkdir contents
tar -xf "${layer2}" -C contents
cd contents
first_rootfs=$(jq -r '.[0].Layers[0]' manifest.json)
second_rootfs=$(jq -r '.[0].Layers[1]' manifest.json)
target_rootfs=$(sha256sum "$first_rootfs" | cut -d' ' -f 1).tar
# Replace duplicated rootfs diffs with symlinks to one tarball
chmod -R ug+w .
mv "$first_rootfs" "$target_rootfs"
rm "$second_rootfs"
ln -s "../$target_rootfs" "$first_rootfs"
ln -s "../$target_rootfs" "$second_rootfs"
# Update manifest's layers to use the symlinks' target
cat manifest.json | \
jq ".[0].Layers[0] = \"$target_rootfs\"" |
jq ".[0].Layers[1] = \"$target_rootfs\"" > manifest.json.new
mv manifest.json.new manifest.json
tar --sort=name --hard-dereference -cf $out .
'';
in pkgs.dockerTools.buildImage {
fromImage = repeatedRootfsDiffs;
name = "repeated-layer-test";
tag = "latest";
copyToRoot = pkgs.bash;
# A runAsRoot script is required to force previous layers to be unpacked
runAsRoot = ''
echo 'runAsRoot has run.'
'';
};
in {
name = "docker-tools";
meta = with pkgs.lib.maintainers; {
maintainers = [ lnl7 roberth ];
@ -221,6 +267,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
"docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
)
with subtest("Ensure repeated base layers handled by buildImage"):
docker.succeed(
"docker load --input='${repeatedLayerTestImage}'",
"docker run --rm ${repeatedLayerTestImage.imageName} /bin/bash -c 'exit 0'"
)
with subtest("Ensure environment variables are correctly inherited"):
docker.succeed(
"docker load --input='${examples.environmentVariables}'"


@ -49,5 +49,5 @@ in
})
{}
[
"0_8_0"
"0_8"
]

nixos/tests/gemstash.nix Normal file

@ -0,0 +1,51 @@
{ system ? builtins.currentSystem, config ? { }
, pkgs ? import ../.. { inherit system config; } }:
with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;
let common_meta = { maintainers = [ maintainers.viraptor ]; };
in
{
gemstash_works = makeTest {
name = "gemstash-works";
meta = common_meta;
nodes.machine = { config, pkgs, ... }: {
services.gemstash = {
enable = true;
};
};
# gemstash responds to http requests
testScript = ''
machine.wait_for_unit("gemstash.service")
machine.wait_for_file("/var/lib/gemstash")
machine.wait_for_open_port(9292)
machine.succeed("curl http://localhost:9292")
'';
};
gemstash_custom_port = makeTest {
name = "gemstash-custom-port";
meta = common_meta;
nodes.machine = { config, pkgs, ... }: {
services.gemstash = {
enable = true;
openFirewall = true;
settings = {
bind = "tcp://0.0.0.0:12345";
};
};
};
# gemstash responds to http requests
testScript = ''
machine.wait_for_unit("gemstash.service")
machine.wait_for_file("/var/lib/gemstash")
machine.wait_for_open_port(12345)
machine.succeed("curl http://localhost:12345")
'';
};
}


@ -1,6 +1,6 @@
{ system ? builtins.currentSystem,
config ? {},
giteaPackage,
giteaPackage ? pkgs.gitea,
pkgs ? import ../.. { inherit system config; }
}:
@ -8,6 +8,21 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;
let
## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
signingPrivateKey = ''
-----BEGIN PGP PRIVATE KEY BLOCK-----
lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
GCqGCRf9O/hzBA==
=9Uy3
-----END PGP PRIVATE KEY BLOCK-----
'';
signingPrivateKeyId = "4D642DE8B678C79D";
supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
makeGiteaTest = type: nameValuePair type (makeTest {
name = "${giteaPackage.pname}-${type}";
@ -21,8 +36,9 @@ let
database = { inherit type; };
package = giteaPackage;
settings.service.DISABLE_REGISTRATION = true;
settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
};
environment.systemPackages = [ giteaPackage pkgs.jq ];
environment.systemPackages = [ giteaPackage pkgs.gnupg pkgs.jq ];
services.openssh.enable = true;
};
client1 = { config, pkgs, ... }: {
@ -58,6 +74,13 @@ let
server.wait_for_open_port(3000)
server.succeed("curl --fail http://localhost:3000/")
server.succeed(
"su -l gitea -c 'gpg --homedir /var/lib/gitea/data/home/.gnupg "
+ "--import ${toString (pkgs.writeText "gitea.key" signingPrivateKey)}'"
)
assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
server.succeed(
"curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+ "Please contact your site administrator.'"


@ -0,0 +1,37 @@
import ./make-test-python.nix ({ pkgs, ... }:
{
name = "github-runner";
meta = with pkgs.lib.maintainers; {
maintainers = [ veehaitch ];
};
nodes.machine = { pkgs, ... }: {
services.github-runners.test = {
enable = true;
url = "https://github.com/yaxitech";
tokenFile = builtins.toFile "github-runner.token" "not-so-secret";
};
systemd.services.dummy-github-com = {
wantedBy = [ "multi-user.target" ];
before = [ "github-runner-test.service" ];
script = "${pkgs.netcat}/bin/nc -Fl 443 | true && touch /tmp/registration-connect";
};
networking.hosts."127.0.0.1" = [ "api.github.com" ];
};
testScript = ''
start_all()
machine.wait_for_unit("dummy-github-com")
try:
machine.wait_for_unit("github-runner-test")
except Exception:
pass
out = machine.succeed("journalctl -u github-runner-test")
assert "Self-hosted runner registration" in out, "did not read runner registration header"
machine.wait_until_succeeds("test -f /tmp/registration-connect")
'';
})
