Merge remote-tracking branch 'origin/master' into staging

Conflicts:
	pkgs/tools/networking/ofono/default.nix
Dmitry Kalinkin 2023-12-08 12:27:28 -05:00
commit 025a278148
3146 changed files with 53922 additions and 31261 deletions

View File

@ -60,7 +60,7 @@ jobs:
Check that all providers build with:
```
@ofborg build terraform.full
@ofborg build opentofu.full
```
If there are more than ten commits in the PR, `ofborg` won't build it automatically and you will need to use the above command.
branch: terraform-providers-update

View File

@ -12,3 +12,5 @@ Sandro Jäckel <sandro.jaeckel@gmail.com> <sandro.jaeckel@sap.com>
superherointj <5861043+superherointj@users.noreply.github.com>
Vladimír Čunát <v@cunat.cz> <vcunat@gmail.com>
Vladimír Čunát <v@cunat.cz> <vladimir.cunat@nic.cz>
Yifei Sun <ysun@hey.com> StepBroBD <Hi@StepBroBD.com>
Yifei Sun <ysun@hey.com> <ysun+git@stepbrobd.com>

View File

@ -26,7 +26,7 @@ This file contains general contributing information, but individual parts also h
This section describes in some detail how changes can be made and proposed with pull requests.
> **Note**
> [!Note]
> Be aware that contributing implies licensing those contributions under the terms of [COPYING](./COPYING), an MIT-like license.
0. Set up a local version of Nixpkgs to work with using GitHub and Git
@ -273,7 +273,7 @@ Once a pull request has been merged into `master`, a backport pull request to th
### Automatically backporting changes
> **Note**
> [!Note]
> You have to be a [Nixpkgs maintainer](./maintainers) to automatically create a backport pull request.
Add the [`backport release-YY.MM` label](https://github.com/NixOS/nixpkgs/labels?q=backport) to the pull request on the `master` branch.
@ -285,16 +285,17 @@ This can be done on both open or already merged pull requests.
To manually create a backport pull request, follow [the standard pull request process][pr-create], with these notable differences:
- Use `release-YY.MM` for the base branch, both for the local branch and the pull request.
> **Warning**
> Do not use the `nixos-YY.MM` branch; that is a branch pointing to the tested release channel commit
> [!Warning]
> Do not use the `nixos-YY.MM` branch; that is a branch pointing to the tested release channel commit
- Instead of manually making and committing the changes, use [`git cherry-pick -x`](https://git-scm.com/docs/git-cherry-pick) for each commit from the pull request you'd like to backport.
  Use `git cherry-pick -x <commit>` when the reason for the backport is obvious (such as minor versions, fixes, etc.); otherwise use `git cherry-pick -xe <commit>` to add a reason for the backport to the commit message.
Here is [an example](https://github.com/nixos/nixpkgs/commit/5688c39af5a6c5f3d646343443683da880eaefb8) of this.
> **Warning**
> Ensure the commits exist on the master branch.
> In the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
> [!Warning]
> Ensure the commits exist on the master branch.
> In the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
- In the pull request description, link to the original pull request to `master`.
The pull request title should include `[YY.MM]` matching the release you're backporting to.
@ -305,7 +306,7 @@ To manually create a backport pull request, follow [the standard pull request pr
## How to review pull requests
[pr-review]: #how-to-review-pull-requests
> **Warning**
> [!Warning]
> The following section is a draft, and the policy for reviewing is still being discussed in issues such as [#11166](https://github.com/NixOS/nixpkgs/issues/11166) and [#20836](https://github.com/NixOS/nixpkgs/issues/20836).
The Nixpkgs project receives a fairly high number of contributions via GitHub pull requests. Reviewing and approving these is an important task and a way to contribute to the project.
@ -384,7 +385,7 @@ By keeping the `staging-next` branch separate from `staging`, this batching does
In order for the `staging` and `staging-next` branches to be up-to-date with the latest commits on `master`, there are regular _automated_ merges from `master` into `staging-next` and `staging`.
This is implemented using GitHub workflows [here](.github/workflows/periodic-merge-6h.yml) and [here](.github/workflows/periodic-merge-24h.yml).
> **Note**
> [!Note]
> Changes must be sufficiently tested before being merged into any branch.
> Hydra builds should not be used as a testing platform.

View File

@ -48,7 +48,7 @@ It uses the widely compatible [header attributes](https://github.com/jgm/commonm
## Syntax {#sec-contributing-markup}
```
> **Note**
> [!Note]
> NixOS option documentation does not support headings in general.
#### Inline Anchors

View File

@ -68,16 +68,45 @@ All new projects should use the CUDA redistributables available in [`cudaPackage
### Updating CUDA redistributables {#updating-cuda-redistributables}
1. Go to NVIDIA's index of CUDA redistributables: <https://developer.download.nvidia.com/compute/cuda/redist/>
2. Copy the `redistrib_*.json` corresponding to the release to `pkgs/development/compilers/cudatoolkit/redist/manifests`.
3. Generate the `redistrib_features_*.json` file by running:
2. Make a note of the new version of CUDA available.
3. Run
```bash
nix run github:ConnorBaker/cuda-redist-find-features -- <path to manifest>
```
```bash
nix run github:connorbaker/cuda-redist-find-features -- \
download-manifests \
--log-level DEBUG \
--version <newest CUDA version> \
https://developer.download.nvidia.com/compute/cuda/redist \
./pkgs/development/cuda-modules/cuda/manifests
```
That command will generate the `redistrib_features_*.json` file in the same directory as the manifest.
This will download a copy of the manifest for the new version of CUDA.
4. Run
4. Include the path to the new manifest in `pkgs/development/compilers/cudatoolkit/redist/extension.nix`.
```bash
nix run github:connorbaker/cuda-redist-find-features -- \
process-manifests \
--log-level DEBUG \
--version <newest CUDA version> \
https://developer.download.nvidia.com/compute/cuda/redist \
./pkgs/development/cuda-modules/cuda/manifests
```
This will generate a `redistrib_features_<newest CUDA version>.json` file in the same directory as the manifest.
5. Update the `cudaVersionMap` attribute set in `pkgs/development/cuda-modules/cuda/extension.nix`.
### Updating cuTensor {#updating-cutensor}
1. Repeat the steps present in [Updating CUDA redistributables](#updating-cuda-redistributables) with the following changes:
- Use the index of cuTensor redistributables: <https://developer.download.nvidia.com/compute/cutensor/redist>
- Use the newest version of cuTensor available instead of the newest version of CUDA.
- Use `pkgs/development/cuda-modules/cutensor/manifests` instead of `pkgs/development/cuda-modules/cuda/manifests`.
- Skip the step of updating `cudaVersionMap` in `pkgs/development/cuda-modules/cuda/extension.nix`.
### Updating supported compilers and GPUs {#updating-supported-compilers-and-gpus}
1. Update `nvcc-compatibilities.nix` in `pkgs/development/cuda-modules/` to include the newest release of NVCC, as well as any newly supported host compilers.
2. Update `gpus.nix` in `pkgs/development/cuda-modules/` to include any new GPUs supported by the new release of CUDA.
### Updating the CUDA Toolkit runfile installer {#updating-the-cuda-toolkit}
@ -99,7 +128,7 @@ All new projects should use the CUDA redistributables available in [`cudaPackage
nix store prefetch-file --hash-type sha256 <link>
```
4. Update `pkgs/development/compilers/cudatoolkit/versions.toml` to include the release.
4. Update `pkgs/development/cuda-modules/cudatoolkit/releases.nix` to include the release.
### Updating the CUDA package set {#updating-the-cuda-package-set}
@ -107,7 +136,7 @@ All new projects should use the CUDA redistributables available in [`cudaPackage
- NOTE: Changing the default CUDA package set should occur in a separate PR, allowing time for additional testing.
2. Successfully build the closure of the new package set, updating `pkgs/development/compilers/cudatoolkit/redist/overrides.nix` as needed. Below are some common failures:
2. Successfully build the closure of the new package set, updating `pkgs/development/cuda-modules/cuda/overrides.nix` as needed. Below are some common failures:
| Unable to ... | During ... | Reason | Solution | Note |
| --- | --- | --- | --- | --- |

View File

@ -165,3 +165,10 @@ team after giving the existing members a few days to respond.
*Important:* If a team says it is a closed group, do not merge additions
to the team without an approval by at least one existing member.
# Maintainer scripts
Various utility scripts, which are mainly useful for nixpkgs maintainers,
are available under `./scripts/`. See its [README](./scripts/README.md)
for further information.

View File

@ -26,8 +26,10 @@
- `githubId` is your GitHub user ID, which can be found at `https://api.github.com/users/<userhandle>`,
- `keys` is a list of your PGP/GPG key fingerprints.
Specifying a GitHub account ensures that you automatically get a review request on
pull requests that modify a package for which you are a maintainer.
Specifying a GitHub account ensures that you automatically:
- get invited to the @NixOS/nixpkgs-maintainers team;
- once you are part of the @NixOS org, OfBorg will request your review on
  pull requests that modify a package for which you are a maintainer.
`handle == github` is strongly preferred whenever `github` is an acceptable attribute name and is short and convenient.
@ -2266,6 +2268,15 @@
githubId = 16821405;
name = "Ben Kuhn";
};
benlemasurier = {
email = "ben@crypt.ly";
github = "benlemasurier";
githubId = 47993;
name = "Ben LeMasurier";
keys = [{
fingerprint = "0FD4 7407 EFD4 8FD8 8BF5 87B3 248D 430A E8E7 4189";
}];
};
benley = {
email = "benley@gmail.com";
github = "benley";
@ -4131,7 +4142,7 @@
email = "davidlewis@mac.com";
github = "oceanlewis";
githubId = 6754950;
name = "David Armstrong Lewis";
name = "Ocean Armstrong Lewis";
};
davidcromp = {
email = "davidcrompton1192@gmail.com";
@ -5163,6 +5174,12 @@
name = "Edvin Källström";
githubId = 84442052;
};
ekimber = {
email = "ekimber@protonmail.com";
github = "ekimber";
name = "Edward Kimber";
githubId = 99987;
};
ekleog = {
email = "leo@gaspard.io";
matrix = "@leo:gaspard.ninja";
@ -6828,6 +6845,12 @@
githubId = 6893840;
name = "Yacine Hmito";
};
gracicot = {
email = "gracicot42@gmail.com";
github = "gracicot";
githubId = 2906673;
name = "Guillaume Racicot";
};
graham33 = {
email = "graham@grahambennett.org";
github = "graham33";
@ -6933,6 +6956,11 @@
githubId = 21156405;
name = "GuangTao Zhang";
};
guekka = {
github = "Guekka";
githubId = 39066502;
name = "Guekka";
};
guibert = {
email = "david.guibert@gmail.com";
github = "dguibert";
@ -10793,6 +10821,12 @@
fingerprint = "97A0 AE5E 03F3 499B 7D7A 65C6 76A4 1432 37EF 5817";
}];
};
lukas-heiligenbrunner = {
email = "lukas.heiligenbrunner@gmail.com";
github = "lukas-heiligenbrunner";
githubId = 30468956;
name = "Lukas Heiligenbrunner";
};
lukaswrz = {
email = "lukas@wrz.one";
github = "lukaswrz";
@ -11974,7 +12008,7 @@
};
milran = {
email = "milranmike@protonmail.com";
github = "milran";
github = "wattmto";
githubId = 93639059;
name = "Milran Mike";
};
@ -11990,6 +12024,12 @@
githubId = 9799623;
name = "Rick van Schijndel";
};
mindstorms6 = {
email = "breland@bdawg.org";
github = "mindstorms6";
githubId = 92937;
name = "Breland Miley";
};
minijackson = {
email = "minijackson@riseup.net";
github = "minijackson";
@ -15347,7 +15387,7 @@
revol-xut = {
email = "revol-xut@protonmail.com";
name = "Tassilo Tanneberger";
github = "revol-xut";
github = "tanneberger";
githubId = 32239737;
keys = [{
fingerprint = "91EB E870 1639 1323 642A 6803 B966 009D 57E6 9CC6";
@ -16051,8 +16091,8 @@
};
SamirTalwar = {
email = "lazy.git@functional.computer";
github = "abstracte";
githubId = 47852;
github = "SamirTalwar";
githubId = 47582;
name = "Samir Talwar";
};
samlich = {
@ -17343,13 +17383,12 @@
name = "Stel Abrego";
};
stepbrobd = {
name = "StepBroBD";
github = "StepBroBD";
name = "Yifei Sun";
email = "ysun@hey.com";
github = "stepbrobd";
githubId = 81826728;
email = "Hi@StepBroBD.com";
matrix = "@stepbrobd:matrix.org";
keys = [{
fingerprint = "5D8B FA8B 286A C2EF 6EE4 8598 F742 B72C 8926 1A51";
fingerprint = "AC7C 52E6 BA2F E8DE 8F0F 5D78 D973 170F 9B86 DB70";
}];
};
stephank = {
@ -19228,6 +19267,12 @@
githubId = 118959;
name = "VinyMeuh";
};
viperML = {
email = "ayatsfer@gmail.com";
github = "viperML";
githubId = 11395853;
name = "Fernando Ayats";
};
viraptor = {
email = "nix@viraptor.info";
github = "viraptor";

View File

@ -0,0 +1,58 @@
# Maintainer scripts
This folder contains various executable scripts for nixpkgs maintainers,
and supporting data or nixlang files as needed.
These scripts generally aren't a stable interface and may change or be removed.
What follows is a (very incomplete) overview of available scripts.
## Metadata
### `get-maintainer.sh`
`get-maintainer.sh [selector] value` returns a JSON object describing
a given nixpkgs maintainer, equivalent to `lib.maintainers.${x} // { handle = x; }`.
This allows looking up a maintainer's attrset (including GitHub and Matrix
handles, email address etc.) based on any of their handles, more correctly and
robustly than text search through `maintainer-list.nix`.
```
./get-maintainer.sh nicoo
{
"email": "nicoo@debian.org",
"github": "nbraud",
"githubId": 1155801,
"keys": [
{
"fingerprint": "E44E 9EA5 4B8E 256A FB73 49D3 EC9D 3708 72BC 7A8C"
}
],
"name": "nicoo",
"handle": "nicoo"
}
./get-maintainer.sh name 'Silvan Mosberger'
{
"email": "contact@infinisil.com",
"github": "infinisil",
"githubId": 20525370,
"keys": [
{
"fingerprint": "6C2B 55D4 4E04 8266 6B7D DA1A 422E 9EDA E015 7170"
}
],
"matrix": "@infinisil:matrix.org",
"name": "Silvan Mosberger",
"handle": "infinisil"
}
```
The maintainer is designated by a `selector` which must be one of:
- `handle` (default): the maintainer's attribute name in `lib.maintainers`;
- `email`, `github`, `githubId`, `matrix`, `name`:
attributes of the maintainer's object, matched exactly;
see [`maintainer-list.nix`] for the fields' definition.
[`maintainer-list.nix`]: ../maintainer-list.nix

View File

@ -0,0 +1,73 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p jq ncurses
# shellcheck shell=bash
# Get a nixpkgs maintainer's metadata as a JSON object
# see HELP_MESSAGE just below, or README.md.
set -euo pipefail
declare -A SELECTORS=( [handle]= [email]= [github]= [githubId]= [matrix]= [name]= )
HELP_MESSAGE="usage: '$0' [selector] value
examples:
get-maintainer.sh nicoo
get-maintainer.sh githubId 1155801
\`selector\` defaults to 'handle', can be one of:
${!SELECTORS[*]}
"
MAINTAINERS_DIR="$(dirname "$0")/.."
die() {
tput setaf 1 # red
echo "'$0': $*"
tput setaf 0 # back to black
exit 1
}
listAsJSON() {
nix-instantiate --eval --strict --json "${MAINTAINERS_DIR}/maintainer-list.nix"
}
parseArgs() {
[ $# -gt 0 -a $# -lt 3 ] || {
echo "$HELP_MESSAGE"
die "invalid number of arguments (must be 1 or 2)"
}
if [ $# -eq 1 ]; then
selector=handle
else
selector="$1"
shift
fi
[ -z "${SELECTORS[$selector]-n}" ] || {
echo "Valid selectors are:" "${!SELECTORS[@]}" >&2
die "invalid selector '$selector'"
}
value="$1"
shift
}
query() {
# explode { a: A, b: B, ... } into A + {handle: a}, B + {handle: b}, ...
local explode="to_entries[] | .value + { \"handle\": .key }"
# select matching items from the list
# TODO(nicoo): Support approximate matching for `name` ?
local select
case "$selector" in
githubId)
select="select(.${selector} == $value)"
;;
*)
select="select(.${selector} == \"$value\")"
esac
echo "$explode | $select"
}
parseArgs "$@"
listAsJSON | jq -e "$(query)"

View File

@ -562,6 +562,18 @@ with lib.maintainers; {
enableFeatureFreezePing = true;
};
lxc = {
members = [
aanderse
adamcstephens
jnsgruk
megheaiulian
mkg20001
];
scope = "All things linuxcontainers. LXC, Incus, LXD and related packages.";
shortName = "lxc";
};
lxqt = {
members = [
romildo

View File

@ -27,6 +27,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
- `mkosi` was updated to v19. Parts of the user interface have changed. Consult the
[release notes](https://github.com/systemd/mkosi/releases/tag/v19) for a list of changes.

View File

@ -27,31 +27,37 @@ var ${bucket:=nixos-amis}
var ${service_role_name:=vmimport}
# Output of the command:
# > aws ec2 describe-regions --all-regions --query "Regions[].{Name:RegionName}" --output text | sort
# $ nix-shell -I nixpkgs=. -p awscli --run 'aws ec2 describe-regions --region us-east-1 --all-regions --query "Regions[].{Name:RegionName}" --output text | sort | sed -e s/^/\ \ /'
var ${regions:=
af-south-1
ap-east-1
ap-northeast-1
ap-northeast-2
ap-northeast-3
ap-south-1
ap-southeast-1
ap-southeast-2
ap-southeast-3
ca-central-1
eu-central-1
eu-north-1
eu-south-1
eu-west-1
eu-west-2
eu-west-3
me-south-1
sa-east-1
us-east-1
us-east-2
us-west-1
us-west-2
}
af-south-1
ap-east-1
ap-northeast-1
ap-northeast-2
ap-northeast-3
ap-south-1
ap-south-2
ap-southeast-1
ap-southeast-2
ap-southeast-3
ap-southeast-4
ca-central-1
eu-central-1
eu-central-2
eu-north-1
eu-south-1
eu-south-2
eu-west-1
eu-west-2
eu-west-3
il-central-1
me-central-1
me-south-1
sa-east-1
us-east-1
us-east-2
us-west-1
us-west-2
}
regions=($regions)

View File

@ -1334,6 +1334,7 @@
./services/web-apps/vikunja.nix
./services/web-apps/whitebophir.nix
./services/web-apps/wiki-js.nix
./services/web-apps/windmill.nix
./services/web-apps/wordpress.nix
./services/web-apps/writefreely.nix
./services/web-apps/youtrack.nix
@ -1359,6 +1360,7 @@
./services/web-servers/molly-brown.nix
./services/web-servers/nginx/default.nix
./services/web-servers/nginx/gitweb.nix
./services/web-servers/nginx/tailscale-auth.nix
./services/web-servers/phpfpm/default.nix
./services/web-servers/pomerium.nix
./services/web-servers/rustus.nix

View File

@ -121,6 +121,7 @@ in
in mkIf (cfg.servers != {}) {
systemd.services = mapAttrs' (server: options:
nameValuePair "wyoming-faster-whisper-${server}" {
inherit (options) enable;
description = "Wyoming faster-whisper server instance ${server}";
after = [
"network-online.target"

View File

@ -116,6 +116,7 @@ in
in mkIf (cfg.servers != {}) {
systemd.services = mapAttrs' (server: options:
nameValuePair "wyoming-piper-${server}" {
inherit (options) enable;
description = "Wyoming Piper server instance ${server}";
after = [
"network-online.target"

View File

@ -602,53 +602,56 @@ in {
};
extraArgs = mkOption {
type = types.str;
type = with types; coercedTo (listOf str) escapeShellArgs str;
description = lib.mdDoc ''
Additional arguments for all {command}`borg` calls the
service has. Handle with care.
'';
default = "";
example = "--remote-path=/path/to/borg";
default = [ ];
example = [ "--remote-path=/path/to/borg" ];
};
extraInitArgs = mkOption {
type = types.str;
type = with types; coercedTo (listOf str) escapeShellArgs str;
description = lib.mdDoc ''
Additional arguments for {command}`borg init`.
Can also be set at runtime using `$extraInitArgs`.
'';
default = "";
example = "--append-only";
default = [ ];
example = [ "--append-only" ];
};
extraCreateArgs = mkOption {
type = types.str;
type = with types; coercedTo (listOf str) escapeShellArgs str;
description = lib.mdDoc ''
Additional arguments for {command}`borg create`.
Can also be set at runtime using `$extraCreateArgs`.
'';
default = "";
example = "--stats --checkpoint-interval 600";
default = [ ];
example = [
"--stats"
"--checkpoint-interval 600"
];
};
extraPruneArgs = mkOption {
type = types.str;
type = with types; coercedTo (listOf str) escapeShellArgs str;
description = lib.mdDoc ''
Additional arguments for {command}`borg prune`.
Can also be set at runtime using `$extraPruneArgs`.
'';
default = "";
example = "--save-space";
default = [ ];
example = [ "--save-space" ];
};
extraCompactArgs = mkOption {
type = types.str;
type = with types; coercedTo (listOf str) escapeShellArgs str;
description = lib.mdDoc ''
Additional arguments for {command}`borg compact`.
Can also be set at runtime using `$extraCompactArgs`.
'';
default = "";
example = "--cleanup-commits";
default = [ ];
example = [ "--cleanup-commits" ];
};
};
}
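
With the coercion in place, both the old string form and the new list form are accepted. A sketch of a job using the list form (job name, repository and paths are placeholders):

```nix
services.borgbackup.jobs.home = {
  paths = [ "/home" ];
  repo = "/var/backup/home";
  encryption.mode = "none";
  startAt = "daily";
  # Lists are escaped with escapeShellArgs; plain strings are still accepted as-is.
  extraArgs = [ "--remote-path=/path/to/borg" ];
  extraCreateArgs = [ "--stats" ];
};
```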

View File

@ -69,8 +69,8 @@ with lib;
confDir = mkOption {
type = types.path;
description = lib.mdDoc "Spark configuration directory. Spark will use the configuration files (spark-defaults.conf, spark-env.sh, log4j.properties, etc) from this directory.";
default = "${cfg.package}/lib/${cfg.package.untarDir}/conf";
defaultText = literalExpression ''"''${package}/lib/''${package.untarDir}/conf"'';
default = "${cfg.package}/conf";
defaultText = literalExpression ''"''${package}/conf"'';
};
logDir = mkOption {
type = types.path;
@ -111,9 +111,9 @@ with lib;
Type = "forking";
User = "spark";
Group = "spark";
WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-master.sh";
ExecStop = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-master.sh";
WorkingDirectory = "${cfg.package}/";
ExecStart = "${cfg.package}/sbin/start-master.sh";
ExecStop = "${cfg.package}/sbin/stop-master.sh";
TimeoutSec = 300;
StartLimitBurst=10;
Restart = "always";
@ -134,9 +134,9 @@ with lib;
serviceConfig = {
Type = "forking";
User = "spark";
WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-worker.sh spark://${cfg.worker.master}";
ExecStop = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-worker.sh";
WorkingDirectory = "${cfg.package}/";
ExecStart = "${cfg.package}/sbin/start-worker.sh spark://${cfg.worker.master}";
ExecStop = "${cfg.package}/sbin/stop-worker.sh";
TimeoutSec = 300;
StartLimitBurst=10;
Restart = "always";

View File

@ -4,7 +4,7 @@ with lib;
let
cfg = config.services.greetd;
tty = "tty${toString cfg.vt}";
settingsFormat = pkgs.formats.toml {};
settingsFormat = pkgs.formats.toml { };
in
{
options.services.greetd = {
@ -27,7 +27,7 @@ in
'';
};
vt = mkOption {
vt = mkOption {
type = types.int;
default = 1;
description = lib.mdDoc ''
@ -97,12 +97,18 @@ in
systemd.defaultUnit = "graphical.target";
# Create directories potentially required by supported greeters
# See https://github.com/NixOS/nixpkgs/issues/248323
systemd.tmpfiles.rules = [
"d '/var/cache/tuigreet' - greeter greeter - -"
];
users.users.greeter = {
isSystemUser = true;
group = "greeter";
};
users.groups.greeter = {};
users.groups.greeter = { };
};
meta.maintainers = with maintainers; [ queezle ];

View File

@ -100,7 +100,7 @@ in
serviceConfig = {
DynamicUser = true;
ExecStart = "${pkgs.teeworlds}/bin/teeworlds_srv -f ${teeworldsConf}";
ExecStart = "${pkgs.teeworlds-server}/bin/teeworlds_srv -f ${teeworldsConf}";
# Hardening
CapabilityBoundingSet = false;

View File

@ -11,14 +11,12 @@ let
# options shown in settings.
# We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
# https://www.home-assistant.io/docs/configuration/secrets/
filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) (lib.recursiveUpdate customLovelaceModulesResources (cfg.config or {}));
configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
cp ${format.generate "configuration.yaml" filteredConfig} $out
sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
'';
lovelaceConfig = if (cfg.lovelaceConfig == null) then {}
else (lib.recursiveUpdate customLovelaceModulesResources cfg.lovelaceConfig);
lovelaceConfigFile = format.generate "ui-lovelace.yaml" lovelaceConfig;
lovelaceConfigFile = format.generate "ui-lovelace.yaml" cfg.lovelaceConfig;
# Components advertised by the home-assistant package
availableComponents = cfg.package.availableComponents;
@ -77,7 +75,7 @@ let
# Create parts of the lovelace config that reference lovelace modules as resources
customLovelaceModulesResources = {
lovelace.resources = map (card: {
url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname}.js?${card.version}";
url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname + ".js"}?${card.version}";
type = "module";
}) cfg.customLovelaceModules;
};
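
With this change the generated `lovelace.resources` entries are merged into the main configuration rather than into `lovelaceConfig`. A usage sketch (the custom card package is an assumption about `pkgs.home-assistant-custom-lovelace-modules`):

```nix
services.home-assistant = {
  enable = true;
  customLovelaceModules = [
    pkgs.home-assistant-custom-lovelace-modules.mini-graph-card
  ];
  config = {
    default_config = { };
    # The resource entries pointing at /local/nixos-lovelace-modules/ are
    # now injected into this config automatically.
  };
};
```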

View File

@ -51,13 +51,17 @@ in
{
ExecStart = "${getExe cfg.package} --config ${validateConfig conf}";
DynamicUser = true;
Restart = "no";
Restart = "always";
StateDirectory = "vector";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
# This group is required for accessing journald.
SupplementaryGroups = mkIf cfg.journaldAccess "systemd-journal";
};
unitConfig = {
StartLimitIntervalSec = 10;
StartLimitBurst = 5;
};
};
};
}

View File

@ -42,7 +42,7 @@ let
database = lib.last (lib.splitString "/" noSchema);
};
postgresDBs = [
postgresDBs = builtins.filter isPostgresql [
cfg.settings.database
cfg.settings.crypto_database
cfg.settings.plugin_databases.postgres

View File

@ -265,7 +265,7 @@ in
linkProfileToPath = acc: profile: location: let
guixProfile = "${cfg.stateDir}/guix/profiles/per-user/\${USER}/${profile}";
in acc + ''
[ -d "${guixProfile}" ] && ln -sf "${guixProfile}" "${location}"
[ -d "${guixProfile}" ] && [ -L "${location}" ] || ln -sf "${guixProfile}" "${location}"
'';
activationScript = lib.foldlAttrs linkProfileToPath "" guixUserProfiles;

View File

@ -6,9 +6,83 @@ with lib;
let
cfg = config.power.ups;
in
defaultPort = 3493;
nutFormat = {
type = with lib.types; let
singleAtom = nullOr (oneOf [
bool
int
float
str
]) // {
description = "atom (null, bool, int, float or string)";
};
in attrsOf (oneOf [
singleAtom
(listOf (nonEmptyListOf singleAtom))
]);
generate = name: value:
let
normalizedValue =
lib.mapAttrs (key: val:
if lib.isList val
then forEach val (elem: if lib.isList elem then elem else [elem])
else
if val == null
then []
else [[val]]
) value;
mkValueString = concatMapStringsSep " " (v:
let str = generators.mkValueStringDefault {} v;
in
# Quote the value if it has spaces and isn't already quoted.
if (hasInfix " " str) && !(hasPrefix "\"" str && hasSuffix "\"" str)
then "\"${str}\""
else str
);
in pkgs.writeText name (lib.generators.toKeyValue {
mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
listsAsDuplicateKeys = true;
} normalizedValue);
};
installSecrets = source: target: secrets:
pkgs.writeShellScript "installSecrets.sh" ''
install -m0600 -D ${source} "${target}"
${concatLines (forEach secrets (name: ''
${pkgs.replace-secret}/bin/replace-secret \
'@${name}@' \
"$CREDENTIALS_DIRECTORY/${name}" \
"${target}"
''))}
chmod u-w "${target}"
'';
upsmonConf = nutFormat.generate "upsmon.conf" cfg.upsmon.settings;
upsdUsers = pkgs.writeText "upsd.users" (let
# This looks like INI, but it's not quite because the
# 'upsmon' option lacks a '='. See: man upsd.users
userConfig = name: user: concatStringsSep "\n " (concatLists [
[
"[${name}]"
"password = \"@upsdusers_password_${name}@\""
]
(optional (user.upsmon != null) "upsmon ${user.upsmon}")
(forEach user.actions (action: "actions = ${action}"))
(forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
]);
in concatStringsSep "\n\n" (mapAttrsToList userConfig cfg.users));
let
upsOptions = {name, config, ...}:
{
options = {
@ -95,6 +169,213 @@ let
};
};
listenOptions = {
options = {
address = mkOption {
type = types.str;
description = lib.mdDoc ''
Address of the interface for `upsd` to listen on.
See `man upsd.conf` for details.
'';
};
port = mkOption {
type = types.port;
default = defaultPort;
description = lib.mdDoc ''
TCP port for `upsd` to listen on.
See `man upsd.conf` for details.
'';
};
};
};
upsdOptions = {
options = {
enable = mkOption {
type = types.bool;
defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`";
description = mdDoc "Whether to enable `upsd`.";
};
listen = mkOption {
type = with types; listOf (submodule listenOptions);
default = [];
example = [
{
address = "192.168.50.1";
}
{
address = "::1";
port = 5923;
}
];
description = lib.mdDoc ''
Address of the interface for `upsd` to listen on.
See `man upsd` for details.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = lib.mdDoc ''
Additional lines to add to `upsd.conf`.
'';
};
};
config = {
enable = mkDefault (elem cfg.mode [ "standalone" "netserver" ]);
};
};
monitorOptions = { name, config, ... }: {
options = {
system = mkOption {
type = types.str;
default = name;
description = lib.mdDoc ''
Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`
See `upsmon.conf` for details.
'';
};
powerValue = mkOption {
type = types.int;
default = 1;
description = lib.mdDoc ''
Number of power supplies that the UPS feeds on this system.
See `upsmon.conf` for details.
'';
};
user = mkOption {
type = types.str;
description = lib.mdDoc ''
Username from `upsd.users` for accessing this UPS.
See `upsmon.conf` for details.
'';
};
passwordFile = mkOption {
type = types.str;
defaultText = literalMD "power.ups.users.\${user}.passwordFile";
description = lib.mdDoc ''
The full path to a file containing the password from
`upsd.users` for accessing this UPS. The password file
is read on service start.
See `upsmon.conf` for details.
'';
};
type = mkOption {
type = types.str;
default = "master";
description = lib.mdDoc ''
The relationship with `upsd`.
See `upsmon.conf` for details.
'';
};
};
config = {
passwordFile = mkDefault cfg.users.${config.user}.passwordFile;
};
};
upsmonOptions = {
options = {
enable = mkOption {
type = types.bool;
defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`, `netclient`";
description = mdDoc "Whether to enable `upsmon`.";
};
monitor = mkOption {
type = with types; attrsOf (submodule monitorOptions);
default = {};
description = lib.mdDoc ''
Set of UPS to monitor. See `man upsmon.conf` for details.
'';
};
settings = mkOption {
type = nutFormat.type;
default = {};
defaultText = literalMD ''
{
MINSUPPLIES = 1;
RUN_AS_USER = "root";
NOTIFYCMD = "''${pkgs.nut}/bin/upssched";
SHUTDOWNCMD = "''${pkgs.systemd}/bin/shutdown now";
}
'';
description = mdDoc "Additional settings to add to `upsmon.conf`.";
example = literalMD ''
{
MINSUPPLIES = 2;
NOTIFYFLAG = [
[ "ONLINE" "SYSLOG+EXEC" ]
[ "ONBATT" "SYSLOG+EXEC" ]
];
}
'';
};
};
config = {
enable = mkDefault (elem cfg.mode [ "standalone" "netserver" "netclient" ]);
settings = {
RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
MINSUPPLIES = mkDefault 1;
NOTIFYCMD = mkDefault "${pkgs.nut}/bin/upssched";
SHUTDOWNCMD = mkDefault "${pkgs.systemd}/bin/shutdown now";
MONITOR = flip mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
};
};
};
userOptions = {
options = {
passwordFile = mkOption {
type = types.str;
description = lib.mdDoc ''
The full path to a file that contains the user's (clear text)
password. The password file is read on service start.
'';
};
actions = mkOption {
type = with types; listOf str;
default = [];
description = lib.mdDoc ''
Allow the user to do certain things with upsd.
See `man upsd.users` for details.
'';
};
instcmds = mkOption {
type = with types; listOf str;
default = [];
description = lib.mdDoc ''
Let the user initiate specific instant commands. Use "ALL" to grant all commands automatically. For the full list of what your UPS supports, use "upscmd -l".
See `man upsd.users` for details.
'';
};
upsmon = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc ''
Add the necessary actions for an upsmon process to work.
See `man upsd.users` for details.
'';
};
};
};
in
@ -103,19 +384,14 @@ in
# powerManagement.powerDownCommands
power.ups = {
enable = mkOption {
default = false;
type = with types; bool;
description = lib.mdDoc ''
Enables support for Power Devices, such as Uninterruptible Power
Supplies, Power Distribution Units and Solar Controllers.
'';
};
enable = mkEnableOption (lib.mdDoc ''
support for Power Devices, such as Uninterruptible Power
Supplies, Power Distribution Units and Solar Controllers.
'');
# This option is not used yet.
mode = mkOption {
default = "standalone";
type = types.str;
type = types.enum [ "none" "standalone" "netserver" "netclient" ];
description = lib.mdDoc ''
The MODE determines which part of the NUT is to be started, and
which configuration files must be modified.
@ -148,6 +424,13 @@ in
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Open ports in the firewall for `upsd`.
'';
};
maxStartDelay = mkOption {
default = 45;
@ -161,6 +444,22 @@ in
'';
};
upsmon = mkOption {
default = {};
description = lib.mdDoc ''
Options for the `upsmon.conf` configuration file.
'';
type = types.submodule upsmonOptions;
};
upsd = mkOption {
default = {};
description = lib.mdDoc ''
Options for the `upsd.conf` configuration file.
'';
type = types.submodule upsdOptions;
};
ups = mkOption {
default = {};
# see nut/etc/ups.conf.sample
@ -172,46 +471,95 @@ in
type = with types; attrsOf (submodule upsOptions);
};
users = mkOption {
default = {};
description = lib.mdDoc ''
Users that can access upsd. See `man upsd.users`.
'';
type = with types; attrsOf (submodule userOptions);
};
};
};
config = mkIf cfg.enable {
assertions = [
(let
totalPowerValue = foldl' add 0 (map (monitor: monitor.powerValue) (attrValues cfg.upsmon.monitor));
minSupplies = cfg.upsmon.settings.MINSUPPLIES;
in mkIf cfg.upsmon.enable {
assertion = totalPowerValue >= minSupplies;
message = ''
`power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
'';
})
];
environment.systemPackages = [ pkgs.nut ];
systemd.services.upsmon = {
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts =
if cfg.upsd.listen == []
then [ defaultPort ]
else unique (forEach cfg.upsd.listen (listen: listen.port));
};
systemd.services.upsmon = let
secrets = mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
in {
enable = cfg.upsmon.enable;
description = "Uninterruptible Power Supplies (Monitor)";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "forking";
script = "${pkgs.nut}/sbin/upsmon";
environment.NUT_CONFPATH = "/etc/nut/";
environment.NUT_STATEPATH = "/var/lib/nut/";
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsmonConf}";
ExecStart = "${pkgs.nut}/sbin/upsmon";
ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
LoadCredential = mapAttrsToList (name: monitor: "upsmon_password_${name}:${monitor.passwordFile}") cfg.upsmon.monitor;
};
environment.NUT_CONFPATH = "/etc/nut";
environment.NUT_STATEPATH = "/var/lib/nut";
};
systemd.services.upsd = {
systemd.services.upsd = let
secrets = mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
in {
enable = cfg.upsd.enable;
description = "Uninterruptible Power Supplies (Daemon)";
after = [ "network.target" "upsmon.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "forking";
# TODO: replace 'root' by another username.
script = "${pkgs.nut}/sbin/upsd -u root";
environment.NUT_CONFPATH = "/etc/nut/";
environment.NUT_STATEPATH = "/var/lib/nut/";
serviceConfig = {
Type = "forking";
ExecStartPre = "${createUpsdUsers}";
# TODO: replace 'root' by another username.
ExecStart = "${pkgs.nut}/sbin/upsd -u root";
ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
LoadCredential = mapAttrsToList (name: user: "upsdusers_password_${name}:${user.passwordFile}") cfg.users;
};
environment.NUT_CONFPATH = "/etc/nut";
environment.NUT_STATEPATH = "/var/lib/nut";
restartTriggers = [
config.environment.etc."nut/upsd.conf".source
];
};
systemd.services.upsdrv = {
enable = cfg.upsd.enable;
description = "Uninterruptible Power Supplies (Register all UPS)";
after = [ "upsd.service" ];
wantedBy = [ "multi-user.target" ];
# TODO: replace 'root' by another username.
script = "${pkgs.nut}/bin/upsdrvctl -u root start";
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
# TODO: replace 'root' by another username.
ExecStart = "${pkgs.nut}/bin/upsdrvctl -u root start";
};
environment.NUT_CONFPATH = "/etc/nut/";
environment.NUT_STATEPATH = "/var/lib/nut/";
environment.NUT_CONFPATH = "/etc/nut";
environment.NUT_STATEPATH = "/var/lib/nut";
};
environment.etc = {
@ -223,24 +571,23 @@ in
''
maxstartdelay = ${toString cfg.maxStartDelay}
${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
"}
${concatStringsSep "\n\n" (forEach (attrValues cfg.ups) (ups: ups.summary))}
'';
"nut/upsd.conf".source = pkgs.writeText "upsd.conf"
''
${concatStringsSep "\n" (forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}"))}
${cfg.upsd.extraConfig}
'';
"nut/upssched.conf".source = cfg.schedulerRules;
# These file are containing private information and thus should not
# be stored inside the Nix store.
/*
"nut/upsd.conf".source = "";
"nut/upsd.users".source = "";
"nut/upsmon.conf".source = "";
*/
"nut/upsd.users".source = "/run/nut/upsd.users";
"nut/upsmon.conf".source = "/run/nut/upsmon.conf";
};
power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
systemd.tmpfiles.rules = [
"d /var/state/ups -"
"d /var/lib/nut 700"
];
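
Putting the new `upsd`, `upsmon` and `users` options together, a standalone configuration might look roughly like the following sketch (the `driver`, `port` and `description` fields come from the pre-existing `ups` submodule, which this hunk does not show; names and paths are placeholders):

```nix
power.ups = {
  enable = true;
  mode = "standalone";

  ups.apc = {
    driver = "usbhid-ups";
    port = "auto";
    description = "Workstation UPS";
  };

  users.upsmon = {
    passwordFile = "/run/keys/upsmon-password";
    upsmon = "master";
  };

  upsd.listen = [ { address = "127.0.0.1"; } ];

  # `system` defaults to the attribute name ("apc") and `passwordFile`
  # defaults to the one configured for the referenced user.
  upsmon.monitor.apc.user = "upsmon";
};
```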

View File

@ -120,6 +120,12 @@ in {
if [ -z "$(ls -A '${cfg.spoolDir}')" ]; then
touch "${cfg.spoolDir}/.firstRun"
fi
if ! test -e ${cfg.spoolDir}/.erlang.cookie; then
touch ${cfg.spoolDir}/.erlang.cookie
chmod 600 ${cfg.spoolDir}/.erlang.cookie
dd if=/dev/random bs=16 count=1 | base64 > ${cfg.spoolDir}/.erlang.cookie
fi
'';
postStart = ''

View File

@ -29,6 +29,11 @@ in
config = lib.mkIf cfg.enable {
nix.settings.extra-allowed-users = [ "harmonia" ];
users.users.harmonia = {
isSystemUser = true;
group = "harmonia";
};
users.groups.harmonia = { };
systemd.services.harmonia = {
description = "harmonia binary cache service";
@ -50,7 +55,6 @@ in
ExecStart = lib.getExe cfg.package;
User = "harmonia";
Group = "harmonia";
DynamicUser = true;
PrivateUsers = true;
DeviceAllow = [ "" ];
UMask = "0066";

View File

@ -64,8 +64,10 @@ in
};
systemd.services.iwd = {
path = [ config.networking.resolvconf.package ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ configFile ];
serviceConfig.ReadWritePaths = "-/etc/resolv.conf";
};
};

View File

@ -50,7 +50,7 @@ in
};
defaultVoicePort = mkOption {
type = types.int;
type = types.port;
default = 9987;
description = lib.mdDoc ''
Default UDP port for clients to connect to virtual servers - used for first virtual server, subsequent ones will open on incrementing port numbers by default.
@ -67,7 +67,7 @@ in
};
fileTransferPort = mkOption {
type = types.int;
type = types.port;
default = 30033;
description = lib.mdDoc ''
TCP port opened for file transfers.
@ -84,10 +84,26 @@ in
};
queryPort = mkOption {
type = types.int;
type = types.port;
default = 10011;
description = lib.mdDoc ''
TCP port opened for ServerQuery connections.
TCP port opened for ServerQuery connections using the raw telnet protocol.
'';
};
querySshPort = mkOption {
type = types.port;
default = 10022;
description = lib.mdDoc ''
TCP port opened for ServerQuery connections using the SSH protocol.
'';
};
queryHttpPort = mkOption {
type = types.port;
default = 10080;
description = lib.mdDoc ''
TCP port opened for ServerQuery connections using the HTTP protocol.
'';
};
@ -128,7 +144,9 @@ in
];
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.fileTransferPort ] ++ optionals (cfg.openFirewallServerQuery) [ cfg.queryPort (cfg.queryPort + 11) ];
allowedTCPPorts = [ cfg.fileTransferPort ] ++ (map (port:
mkIf cfg.openFirewallServerQuery port
) [cfg.queryPort cfg.querySshPort cfg.queryHttpPort]);
# subsequent vServers will use the incremented voice port, let's just open the next 10
allowedUDPPortRanges = [ { from = cfg.defaultVoicePort; to = cfg.defaultVoicePort + 10; } ];
};
@ -141,13 +159,19 @@ in
serviceConfig = {
ExecStart = ''
${ts3}/bin/ts3server \
dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
dbsqlpath=${ts3}/lib/teamspeak/sql/ \
logpath=${cfg.logPath} \
license_accepted=1 \
default_voice_port=${toString cfg.defaultVoicePort} \
${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
filetransfer_port=${toString cfg.fileTransferPort} \
query_port=${toString cfg.queryPort} \
query_ssh_port=${toString cfg.querySshPort} \
query_http_port=${toString cfg.queryHttpPort} \
${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
query_port=${toString cfg.queryPort} license_accepted=1
${optionalString (cfg.queryIP != null) "query_ssh_ip=${cfg.queryIP}"} \
${optionalString (cfg.queryIP != null) "query_http_ip=${cfg.queryIP}"} \
'';
WorkingDirectory = cfg.dataDir;
User = user;
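
A sketch of the new ServerQuery port options (the `services.teamspeak3` option path and `enable` are assumed from context; the ports shown are the defaults):

```nix
services.teamspeak3 = {
  enable = true;
  openFirewall = true;
  openFirewallServerQuery = true;
  queryPort = 10011;      # raw telnet
  querySshPort = 10022;   # new: SSH
  queryHttpPort = 10080;  # new: HTTP
};
```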

View File

@ -3,7 +3,6 @@ with lib;
let
clamavUser = "clamav";
stateDir = "/var/lib/clamav";
runDir = "/run/clamav";
clamavGroup = clamavUser;
cfg = config.services.clamav;
pkg = pkgs.clamav;
@ -99,6 +98,29 @@ in
'';
};
};
scanner = {
enable = mkEnableOption (lib.mdDoc "ClamAV scanner");
interval = mkOption {
type = types.str;
default = "*-*-* 04:00:00";
description = lib.mdDoc ''
How often clamdscan is invoked. See systemd.time(7) for more
information about the format.
By default this runs using 10 cores at most; be sure to run it at a time of low traffic.
'';
};
scanDirectories = mkOption {
type = with types; listOf str;
default = [ "/home" "/var/lib" "/tmp" "/etc" "/var/tmp" ];
description = lib.mdDoc ''
List of directories to scan.
The default includes everything I could think of that is valid for nixos. Feel free to contribute a PR to add to the default if you see something missing.
'';
};
};
};
};
@ -117,9 +139,8 @@ in
services.clamav.daemon.settings = {
DatabaseDirectory = stateDir;
LocalSocket = "${runDir}/clamd.ctl";
PidFile = "${runDir}/clamd.pid";
TemporaryDirectory = "/tmp";
LocalSocket = "/run/clamav/clamd.ctl";
PidFile = "/run/clamav/clamd.pid";
User = "clamav";
Foreground = true;
};
@ -182,7 +203,6 @@ in
ExecStart = "${pkg}/bin/freshclam";
SuccessExitStatus = "1"; # if databases are up to date
StateDirectory = "clamav";
RuntimeDirectory = "clamav";
User = clamavUser;
Group = clamavGroup;
PrivateTmp = "yes";
@ -204,7 +224,6 @@ in
serviceConfig = {
Type = "oneshot";
StateDirectory = "clamav";
RuntimeDirectory = "clamav";
User = clamavUser;
Group = clamavGroup;
PrivateTmp = "yes";
@ -230,12 +249,31 @@ in
Type = "oneshot";
ExecStart = "${pkgs.fangfrisch}/bin/fangfrisch --conf ${fangfrischConfigFile} refresh";
StateDirectory = "clamav";
RuntimeDirectory = "clamav";
User = clamavUser;
Group = clamavGroup;
PrivateTmp = "yes";
PrivateDevices = "yes";
};
};
systemd.timers.clamdscan = mkIf cfg.scanner.enable {
description = "Timer for ClamAV virus scanner";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.scanner.interval;
Unit = "clamdscan.service";
};
};
systemd.services.clamdscan = mkIf cfg.scanner.enable {
description = "ClamAV virus scanner";
after = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
wants = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkg}/bin/clamdscan --multiscan --fdpass --infected --allmatch ${lib.concatStringsSep " " cfg.scanner.scanDirectories}";
};
};
};
}
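
Enabling the new scheduled scan could look like the sketch below; `daemon.enable` and `updater.enable` are pre-existing options of this module, and the `scanner` values shown are the new defaults:

```nix
services.clamav = {
  daemon.enable = true;   # clamdscan talks to the running clamd
  updater.enable = true;  # keep signatures fresh; the scan job is ordered after freshclam
  scanner = {
    enable = true;
    interval = "*-*-* 04:00:00";
    scanDirectories = [ "/home" "/var/lib" "/tmp" "/etc" "/var/tmp" ];
  };
};
```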

View File

@ -333,10 +333,10 @@ in
cfg.settings.watch-dir;
StateDirectory = [
"transmission"
"transmission/.config/transmission-daemon"
"transmission/.incomplete"
"transmission/Downloads"
"transmission/watch-dir"
"transmission/${settingsDir}"
"transmission/${incompleteDir}"
"transmission/${downloadsDir}"
"transmission/${watchDir}"
];
StateDirectoryMode = mkDefault 750;
# The following options are only for optimizing:

View File

@ -25,7 +25,6 @@ let
maintainers
catAttrs
collect
splitString
hasPrefix
;
@ -329,7 +328,8 @@ in
};
hostname = mkOption {
type = str;
type = nullOr str;
default = null;
example = "keycloak.example.com";
description = lib.mdDoc ''
The hostname part of the public URL used as base for
@ -451,7 +451,7 @@ in
keycloakConfig = lib.generators.toKeyValue {
mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
mkValueString = v: with builtins;
mkValueString = v:
if isInt v then toString v
else if isString v then v
else if true == v then "true"
@ -480,6 +480,14 @@ in
assertion = createLocalPostgreSQL -> config.services.postgresql.settings.standard_conforming_strings or true;
message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
}
{
assertion = cfg.settings.hostname != null || cfg.settings.hostname-url or null != null;
message = "Setting the Keycloak hostname is required, see `services.keycloak.settings.hostname`";
}
{
assertion = !(cfg.settings.hostname != null && cfg.settings.hostname-url or null != null);
message = "`services.keycloak.settings.hostname` and `services.keycloak.settings.hostname-url` are mutually exclusive";
}
];
environment.systemPackages = [ keycloakBuild ];
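
Given the new assertions, every deployment now has to set exactly one of the two hostname options; a minimal sketch (the hostname is a placeholder):

```nix
services.keycloak = {
  enable = true;
  settings = {
    hostname = "keycloak.example.com";
    # Leave hostname-url unset; the two options are mutually exclusive.
  };
};
```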

View File

@ -0,0 +1,177 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.windmill;
in
{
options.services.windmill = {
enable = lib.mkEnableOption (lib.mdDoc "windmill service");
serverPort = lib.mkOption {
type = lib.types.port;
default = 8001;
description = lib.mdDoc "Port the windmill server listens on.";
};
lspPort = lib.mkOption {
type = lib.types.port;
default = 3001;
description = lib.mdDoc "Port the windmill lsp listens on.";
};
database = {
name = lib.mkOption {
type = lib.types.str;
# the simplest database setup is to have the database named like the user.
default = "windmill";
description = lib.mdDoc "Database name.";
};
user = lib.mkOption {
type = lib.types.str;
# the simplest database setup is to have the database user like the name.
default = "windmill";
description = lib.mdDoc "Database user.";
};
urlPath = lib.mkOption {
type = lib.types.path;
description = lib.mdDoc ''
Path to the file containing the database URL windmill should connect to. This is not deduced from the database user and name, as it might contain a secret.
'';
example = "config.age.secrets.DATABASE_URL_FILE.path";
};
createLocally = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc "Whether to create a local database automatically.";
};
};
baseUrl = lib.mkOption {
type = lib.types.str;
description = lib.mdDoc ''
The base url that windmill will be served on.
'';
example = "https://windmill.example.com";
};
logLevel = lib.mkOption {
type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
default = "info";
description = lib.mdDoc "Log level";
};
};
config = lib.mkIf cfg.enable {
services.postgresql = lib.optionalAttrs (cfg.database.createLocally) {
enable = lib.mkDefault true;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
ensureDBOwnership = true;
}
];
};
systemd.services =
let
serviceConfig = {
DynamicUser = true;
# using the same user to simplify db connection
User = cfg.database.user;
ExecStart = "${pkgs.windmill}/bin/windmill";
Restart = "always";
LoadCredential = [
"DATABASE_URL_FILE:${cfg.database.urlPath}"
];
};
in
{
# coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
# modified to not grant privileges on all tables
# create role windmill_user and windmill_admin only if they don't exist
postgresql.postStart = lib.mkIf cfg.database.createLocally (lib.mkAfter ''
$PSQL -tA <<"EOF"
DO $$
BEGIN
IF NOT EXISTS (
SELECT FROM pg_catalog.pg_roles
WHERE rolname = 'windmill_user'
) THEN
CREATE ROLE windmill_user;
GRANT ALL PRIVILEGES ON DATABASE ${cfg.database.name} TO windmill_user;
ELSE
RAISE NOTICE 'Role "windmill_user" already exists. Skipping.';
END IF;
IF NOT EXISTS (
SELECT FROM pg_catalog.pg_roles
WHERE rolname = 'windmill_admin'
) THEN
CREATE ROLE windmill_admin WITH BYPASSRLS;
GRANT windmill_user TO windmill_admin;
ELSE
RAISE NOTICE 'Role "windmill_admin" already exists. Skipping.';
END IF;
GRANT windmill_admin TO windmill;
END
$$;
EOF
'');
windmill-server = {
description = "Windmill server";
after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
wantedBy = [ "multi-user.target" ];
serviceConfig = serviceConfig // { StateDirectory = "windmill";};
environment = {
DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
PORT = builtins.toString cfg.serverPort;
WM_BASE_URL = cfg.baseUrl;
RUST_LOG = cfg.logLevel;
MODE = "server";
};
};
windmill-worker = {
description = "Windmill worker";
after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
wantedBy = [ "multi-user.target" ];
serviceConfig = serviceConfig // { StateDirectory = "windmill-worker";};
environment = {
DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
WM_BASE_URL = cfg.baseUrl;
RUST_LOG = cfg.logLevel;
MODE = "worker";
WORKER_GROUP = "default";
KEEP_JOB_DIR = "false";
};
};
windmill-worker-native = {
description = "Windmill worker native";
after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
wantedBy = [ "multi-user.target" ];
serviceConfig = serviceConfig // { StateDirectory = "windmill-worker-native";};
environment = {
DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
WM_BASE_URL = cfg.baseUrl;
RUST_LOG = cfg.logLevel;
MODE = "worker";
WORKER_GROUP = "native";
};
};
};
};
}
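
For reference, a minimal configuration of this new module might look like the sketch below (the secret path for `database.urlPath` is a placeholder):

```nix
services.windmill = {
  enable = true;
  baseUrl = "https://windmill.example.com";
  # Not derived from database.name/user because it may contain a secret.
  database.urlPath = "/run/secrets/windmill-database-url";
  # serverPort (8001), lspPort (3001), database.name and database.user
  # ("windmill"), database.createLocally (true) and logLevel ("info")
  # keep their defaults.
};
```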

View File

@ -34,7 +34,7 @@ let
# copy additional plugin(s), theme(s) and language(s)
${concatStringsSep "\n" (mapAttrsToList (name: theme: "cp -r ${theme} $out/share/wordpress/wp-content/themes/${name}") cfg.themes)}
${concatStringsSep "\n" (mapAttrsToList (name: plugin: "cp -r ${plugin} $out/share/wordpress/wp-content/plugins/${name}") cfg.plugins)}
${concatMapStringsSep "\n" (language: "cp -r ${language}/* $out/share/wordpress/wp-content/languages/") cfg.languages}
${concatMapStringsSep "\n" (language: "cp -r ${language} $out/share/wordpress/wp-content/languages/") cfg.languages}
'';
};

View File

@ -147,7 +147,7 @@ in
default = configFile;
defaultText = "A Caddyfile automatically generated by values from services.caddy.*";
example = literalExpression ''
pkgs.writeTextDir "Caddyfile" '''
pkgs.writeText "Caddyfile" '''
example.com
root * /var/www/wordpress
@ -164,9 +164,9 @@ in
};
adapter = mkOption {
default = if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null;
default = if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null;
defaultText = literalExpression ''
if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null
if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null
'';
example = literalExpression "nginx";
type = with types; nullOr str;

View File

@ -0,0 +1,158 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.nginx.tailscaleAuth;
in
{
options.services.nginx.tailscaleAuth = {
enable = mkEnableOption (lib.mdDoc "tailscale.nginx-auth, to authenticate nginx users via tailscale");
package = lib.mkPackageOptionMD pkgs "tailscale-nginx-auth" {};
user = mkOption {
type = types.str;
default = "tailscale-nginx-auth";
description = lib.mdDoc "User which runs tailscale-nginx-auth";
};
group = mkOption {
type = types.str;
default = "tailscale-nginx-auth";
description = lib.mdDoc "Group which runs tailscale-nginx-auth";
};
expectedTailnet = mkOption {
default = "";
type = types.nullOr types.str;
example = "tailnet012345.ts.net";
description = lib.mdDoc ''
If you want to prevent node sharing from allowing users to access services
across tailnets, declare your expected tailnet's domain here.
'';
};
socketPath = mkOption {
default = "/run/tailscale-nginx-auth/tailscale-nginx-auth.sock";
type = types.path;
description = lib.mdDoc ''
Path of the socket listening to nginx authorization requests.
'';
};
virtualHosts = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc ''
A list of nginx virtual hosts to put behind tailscale.nginx-auth
'';
};
};
config = mkIf cfg.enable {
services.tailscale.enable = true;
services.nginx.enable = true;
users.users.${cfg.user} = {
isSystemUser = true;
inherit (cfg) group;
};
users.groups.${cfg.group} = { };
users.users.${config.services.nginx.user}.extraGroups = [ cfg.group ];
systemd.sockets.tailscale-nginx-auth = {
description = "Tailscale NGINX Authentication socket";
partOf = [ "tailscale-nginx-auth.service" ];
wantedBy = [ "sockets.target" ];
listenStreams = [ cfg.socketPath ];
socketConfig = {
SocketMode = "0660";
SocketUser = cfg.user;
SocketGroup = cfg.group;
};
};
systemd.services.tailscale-nginx-auth = {
description = "Tailscale NGINX Authentication service";
after = [ "nginx.service" ];
wants = [ "nginx.service" ];
requires = [ "tailscale-nginx-auth.socket" ];
serviceConfig = {
ExecStart = "${lib.getExe cfg.package}";
RuntimeDirectory = "tailscale-nginx-auth";
User = cfg.user;
Group = cfg.group;
BindPaths = [ "/run/tailscale/tailscaled.sock" ];
CapabilityBoundingSet = "";
DeviceAllow = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictNamespaces = true;
RestrictAddressFamilies = [ "AF_UNIX" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallErrorNumber = "EPERM";
SystemCallFilter = [
"@system-service"
"~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
];
};
};
services.nginx.virtualHosts = genAttrs
cfg.virtualHosts
(vhost: {
locations."/auth" = {
extraConfig = ''
internal;
proxy_pass http://unix:${cfg.socketPath};
proxy_pass_request_body off;
# Upstream uses $http_host here, but we are using gixy to check nginx configurations
# gixy wants us to use $host: https://github.com/yandex/gixy/blob/master/docs/en/plugins/hostspoofing.md
proxy_set_header Host $host;
proxy_set_header Remote-Addr $remote_addr;
proxy_set_header Remote-Port $remote_port;
proxy_set_header Original-URI $request_uri;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
'';
};
locations."/".extraConfig = ''
auth_request /auth;
auth_request_set $auth_user $upstream_http_tailscale_user;
auth_request_set $auth_name $upstream_http_tailscale_name;
auth_request_set $auth_login $upstream_http_tailscale_login;
auth_request_set $auth_tailnet $upstream_http_tailscale_tailnet;
auth_request_set $auth_profile_picture $upstream_http_tailscale_profile_picture;
proxy_set_header X-Webauth-User "$auth_user";
proxy_set_header X-Webauth-Name "$auth_name";
proxy_set_header X-Webauth-Login "$auth_login";
proxy_set_header X-Webauth-Tailnet "$auth_tailnet";
proxy_set_header X-Webauth-Profile-Picture "$auth_profile_picture";
${lib.optionalString (cfg.expectedTailnet != "") ''proxy_set_header Expected-Tailnet "${cfg.expectedTailnet}";''}
'';
});
};
meta.maintainers = with maintainers; [ phaer ];
}
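
As a usage sketch, an existing virtual host can be put behind the new auth helper like this (host name and upstream are placeholders; `proxyPass` is the standard nginx vhost option):

```nix
services.nginx.tailscaleAuth = {
  enable = true;
  expectedTailnet = "tailnet012345.ts.net";
  virtualHosts = [ "internal.example.com" ];
};

# The protected vhost itself; requests reach the upstream only after the
# auth_request subrequest to tailscale.nginx-auth succeeds.
services.nginx.virtualHosts."internal.example.com" = {
  locations."/".proxyPass = "http://127.0.0.1:3000";
};
```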

View File

@ -1612,7 +1612,7 @@ let
description = lib.mdDoc ''
Each attribute in this set specifies an option in the
`[WireGuardPeer]` section of the unit. See
{manpage}`systemd.network(5)` for details.
{manpage}`systemd.netdev(5)` for details.
'';
};
};

View File

@ -5,7 +5,9 @@ let
preseedFormat = pkgs.formats.yaml { };
in
{
meta.maintainers = [ lib.maintainers.adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
options = {
virtualisation.incus = {

View File

@ -1,7 +1,9 @@
{ lib, config, pkgs, ... }:
{
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
imports = [
./lxc-instance-common.nix

View File

@ -2,21 +2,19 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.virtualisation.lxc;
in
{
###### interface
meta = {
maintainers = lib.teams.lxc.members;
};
options.virtualisation.lxc = {
enable =
mkOption {
type = types.bool;
lib.mkOption {
type = lib.types.bool;
default = false;
description =
lib.mdDoc ''
@ -27,8 +25,8 @@ in
};
systemConfig =
mkOption {
type = types.lines;
lib.mkOption {
type = lib.types.lines;
default = "";
description =
lib.mdDoc ''
@ -38,8 +36,8 @@ in
};
defaultConfig =
mkOption {
type = types.lines;
lib.mkOption {
type = lib.types.lines;
default = "";
description =
lib.mdDoc ''
@ -49,8 +47,8 @@ in
};
usernetConfig =
mkOption {
type = types.lines;
lib.mkOption {
type = lib.types.lines;
default = "";
description =
lib.mdDoc ''
@ -62,7 +60,7 @@ in
###### implementation
config = mkIf cfg.enable {
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.lxc ];
environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;

View File

@ -2,18 +2,18 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.virtualisation.lxc.lxcfs;
in {
meta.maintainers = [ maintainers.mic92 ];
meta = {
maintainers = lib.teams.lxc.members;
};
###### interface
options.virtualisation.lxc.lxcfs = {
enable =
mkOption {
type = types.bool;
lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
This enables LXCFS, a FUSE filesystem for LXC.
@ -27,7 +27,7 @@ in {
};
###### implementation
config = mkIf cfg.enable {
config = lib.mkIf cfg.enable {
systemd.services.lxcfs = {
description = "FUSE filesystem for LXC";
wantedBy = [ "multi-user.target" ];

View File

@ -45,7 +45,9 @@ let
chown -R root:root "$PREFIX"
'';
in {
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
options = {
virtualisation.lxd.agent.enable = lib.mkEnableOption (lib.mdDoc "Enable LXD agent");

View File

@ -6,6 +6,10 @@ let
then "ttyS0"
else "ttyAMA0"; # aarch64
in {
meta = {
maintainers = lib.teams.lxc.members;
};
imports = [
./lxc-instance-common.nix

View File

@ -6,12 +6,14 @@ let
cfg = config.virtualisation.lxd;
preseedFormat = pkgs.formats.yaml {};
in {
meta = {
maintainers = lib.teams.lxc.members;
};
imports = [
(lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
];
###### interface
options = {
virtualisation.lxd = {
enable = lib.mkOption {

View File

@ -163,7 +163,7 @@ in {
btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
btrbk-section-order = handleTest ./btrbk-section-order.nix {};
budgie = handleTest ./budgie.nix {};
buildbot = handleTest ./buildbot.nix {};
buildbot = handleTestOn [ "x86_64-linux" ] ./buildbot.nix {};
buildkite-agents = handleTest ./buildkite-agents.nix {};
c2fmzq = handleTest ./c2fmzq.nix {};
caddy = handleTest ./caddy.nix {};

View File

@ -48,11 +48,19 @@ import ./make-test-python.nix ({ pkgs, ... }: {
};
};
};
specialisation.explicit-config-file.configuration = {
services.caddy.configFile = pkgs.writeText "Caddyfile" ''
localhost:80
respond "hello world"
'';
};
};
};
testScript = { nodes, ... }:
let
explicitConfigFile = "${nodes.webserver.system.build.toplevel}/specialisation/explicit-config-file";
justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
@ -84,5 +92,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
)
webserver.wait_for_open_port(80)
webserver.succeed("curl http://localhost | grep hello")
with subtest("explicit configFile"):
webserver.succeed(
"${explicitConfigFile}/bin/switch-to-configuration test >&2"
)
webserver.wait_for_open_port(80)
webserver.succeed("curl http://localhost | grep hello")
'';
})

View File

@ -13,6 +13,9 @@
networking.firewall.allowedTCPPorts = [ 5000 ];
system.extraDependencies = [ pkgs.emptyFile ];
# check that extra-allowed-users is effective for harmonia
nix.settings.allowed-users = [];
};
client01 = {

View File

@ -43,7 +43,7 @@ in {
# test loading custom components
customComponents = with pkgs.home-assistant-custom-components; [
prometheus-sensor
prometheus_sensor
];
# test loading lovelace modules

View File

@ -14,7 +14,9 @@ in
{
name = "incus-container";
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { ... }: {
virtualisation = {

View File

@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
{
name = "incus-preseed";
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {
virtualisation = {

View File

@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
{
name = "incus-socket-activated";
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {
virtualisation = {

View File

@ -19,7 +19,9 @@ in
{
name = "incus-virtual-machine";
meta.maintainers = with lib.maintainers; [ adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = {...}: {
virtualisation = {

View File

@ -18,8 +18,8 @@ let
in {
name = "lxd-container";
meta = with pkgs.lib.maintainers; {
maintainers = [ patryk27 adamcstephens ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {

View File

@ -5,11 +5,11 @@
# iptables to nftables requires a full reboot, which is a bit hard inside NixOS
# tests.
import ../make-test-python.nix ({ pkgs, ...} : {
import ../make-test-python.nix ({ pkgs, lib, ...} : {
name = "lxd-nftables";
meta = with pkgs.lib.maintainers; {
maintainers = [ patryk27 ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {

View File

@ -4,7 +4,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
name = "lxd-preseed";
meta = {
maintainers = with lib.maintainers; [ adamcstephens ];
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {

View File

@ -1,8 +1,8 @@
import ../make-test-python.nix ({ pkgs, lib, ... }: {
name = "lxd-ui";
meta = with pkgs.lib.maintainers; {
maintainers = [ jnsgruk ];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = { lib, ... }: {

View File

@ -18,8 +18,8 @@ let
in {
name = "lxd-virtual-machine";
meta = with pkgs.lib.maintainers; {
maintainers = [adamcstephens];
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine = {lib, ...}: {

View File

@ -1,28 +1,48 @@
import ../make-test-python.nix ({...}: {
name = "spark";
{ pkgs, ... }:
nodes = {
worker = { nodes, pkgs, ... }: {
services.spark.worker = {
enable = true;
master = "master:7077";
};
virtualisation.memorySize = 2048;
};
master = { config, pkgs, ... }: {
services.spark.master = {
enable = true;
bind = "0.0.0.0";
};
networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
};
let
inherit (pkgs) lib;
tests = {
default = testsForPackage { sparkPackage = pkgs.spark; };
};
testScript = ''
master.wait_for_unit("spark-master.service")
worker.wait_for_unit("spark-worker.service")
worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
'';
})
testsForPackage = args: lib.recurseIntoAttrs {
sparkCluster = testSparkCluster args;
passthru.override = args': testsForPackage (args // args');
};
testSparkCluster = { sparkPackage, ... }: pkgs.nixosTest ({
name = "spark";
nodes = {
worker = { nodes, pkgs, ... }: {
services.spark = {
package = sparkPackage;
worker = {
enable = true;
master = "master:7077";
};
};
virtualisation.memorySize = 2048;
};
master = { config, pkgs, ... }: {
services.spark = {
package = sparkPackage;
master = {
enable = true;
bind = "0.0.0.0";
};
};
networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
};
};
testScript = ''
master.wait_for_unit("spark-master.service")
worker.wait_for_unit("spark-worker.service")
worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
worker.succeed("spark-submit --version | systemd-cat")
worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
'';
});
in tests
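
The rewritten test exposes `passthru.override`, so the same cluster scenario can be re-instantiated for another Spark package. A hedged usage sketch, assuming this file is reachable as `nixosTests.spark` and that an alternative attribute such as `pkgs.spark_3_4` exists:

```nix
# builds the sparkCluster test against a different Spark package
nixosTests.spark.default.passthru.override { sparkPackage = pkgs.spark_3_4; }
```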

View File

@ -470,7 +470,7 @@ Preferred source hash type is sha256. There are several ways to get it.
in the package expression, attempt build and extract correct hash from error messages.
> **Warning**
> [!Warning]
> You must use one of these four fake hashes and not some arbitrarily-chosen hash.
> See [here][secure-hashes]
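
The workflow the warning refers to is to put a well-known fake hash into the fetcher, let the build fail, and copy the real hash from the error message. A minimal sketch with placeholder coordinates:

```nix
{
  src = fetchFromGitHub {
    owner = "some-owner";   # placeholders, for illustration only
    repo = "some-repo";
    rev = "v1.2.3";
    hash = lib.fakeHash;    # build once, then replace with the hash Nix reports
  };
}
```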

View File

@ -2,12 +2,12 @@
stdenv.mkDerivation rec {
pname = "airwindows-lv2";
version = "22.0";
version = "26.0";
src = fetchFromSourcehut {
owner = "~hannes";
repo = pname;
rev = "v${version}";
sha256 = "sha256-u62wLRrJ45ap981Q8JmMnanc8AWQb1MJHK32PEr10I4=";
sha256 = "sha256-CmNe70ii3WfQ6GGHVqTEyQ2HVubzoeoeN3JsCZSbsPM=";
};
nativeBuildInputs = [ meson ninja pkg-config ];

View File

@ -36,6 +36,11 @@ stdenv.mkDerivation rec {
mkdir -p $out/share/cheesecutter/example_tunes
cp -r tunes/* $out/share/cheesecutter/example_tunes
install -Dm444 arch/fd/ccutter.desktop -t $out/share/applications
for res in $(ls icons | sed -e 's/cc//g' -e 's/.png//g'); do
install -Dm444 icons/cc$res.png $out/share/icons/hicolor/''${res}x''${res}/apps/cheesecutter.png
done
'';
postFixup =

View File

@ -36,6 +36,17 @@ mkDerivation rec {
PREFIXSHORTCUT=$out"
'';
postInstall = ''
mkdir -p $out/share/applications
ln -s $out/fmit.desktop $out/share/applications/fmit.desktop
mkdir -p $out/share/icons/hicolor/128x128/apps
ln -s $out/fmit.png $out/share/icons/hicolor/128x128/apps/fmit.png
mkdir -p $out/share/icons/hicolor/scalable/apps
ln -s $out/fmit.svg $out/share/icons/hicolor/scalable/apps/fmit.svg
'';
meta = with lib; {
description = "Free Musical Instrument Tuner";
longDescription = ''

View File

@ -55,6 +55,11 @@ python3.pkgs.buildPythonApplication rec {
librsvg
];
postInstall = ''
install -Dm444 gspeech.desktop -t $out/share/applications
install -Dm444 icons/*.svg -t $out/share/icons/hicolor/scalable/apps
'';
postFixup = ''
wrapProgram $out/bin/gspeech --prefix PATH : ${lib.makeBinPath [ picotts sox ]}
wrapProgram $out/bin/gspeech-cli --prefix PATH : ${lib.makeBinPath [ picotts sox ]}

View File

@ -56,6 +56,8 @@ stdenv.mkDerivation rec {
installPhase = ''
mkdir -p $out/bin
cp in-formant $out/bin
install -Dm444 $src/dist-res/in-formant.desktop -t $out/share/applications
install -Dm444 $src/dist-res/in-formant.png -t $out/share/icons/hicolor/512x512/apps
'';
meta = with lib; {

View File

@ -16,7 +16,7 @@ python3.pkgs.buildPythonPackage rec {
postPatch = ''
substituteInPlace setup.py \
--replace "'rpi-ws281x>=4.3.0; platform_system == \"Linux\"'," "" \
--replace '"sentry-sdk==1.14.0",' "sentry-sdk" \
--replace "sentry-sdk==1.14.0" "sentry-sdk" \
--replace "~=" ">="
'';

View File

@ -11,13 +11,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "praat";
version = "6.3.20";
version = "6.4";
src = fetchFromGitHub {
owner = "praat";
repo = "praat";
rev = "v${finalAttrs.version}";
hash = "sha256-hVQPLRyDXrqpheAqzC/hQ/ZaFxP1c7ClAJQs3wlEcGc=";
hash = "sha256-S05A8e3CFzQA7NtZlt85OfkS3cF05QSMWLcuR4UMCV8=";
};
nativeBuildInputs = [
@ -47,6 +47,11 @@ stdenv.mkDerivation (finalAttrs: {
runHook preInstall
install -Dt $out/bin praat
install -Dm444 main/praat.desktop -t $out/share/applications
install -Dm444 main/praat-32.ico $out/share/icons/hicolor/32x32/apps/praat.ico
install -Dm444 main/praat-256.ico $out/share/icons/hicolor/256x256/apps/praat.ico
install -Dm444 main/praat-480.png $out/share/icons/hicolor/480x480/apps/praat.png
install -Dm444 main/praat-480.svg $out/share/icons/hicolor/scalable/apps/praat.svg
runHook postInstall
'';

View File

@ -20,6 +20,13 @@ stdenv.mkDerivation rec {
nativeBuildInputs = [ cmake ];
buildInputs = [ SDL2 ] ++ lib.optional stdenv.isLinux alsa-lib;
postInstall = ''
install -Dm444 "$src/release/other/Freedesktop.org Resources/ProTracker 2 clone.desktop" \
-t $out/share/applications
install -Dm444 "$src/release/other/Freedesktop.org Resources/ProTracker 2 clone.png" \
-t $out/share/icons/hicolor/512x512/apps
'';
passthru.tests = {
pt2-clone-opens = nixosTests.pt2-clone;
};

View File

@ -13,14 +13,14 @@
stdenv.mkDerivation (finalAttrs: {
pname = "qpwgraph";
version = "0.6.0";
version = "0.6.1";
src = fetchFromGitLab {
domain = "gitlab.freedesktop.org";
owner = "rncbc";
repo = "qpwgraph";
rev = "v${finalAttrs.version}";
sha256 = "sha256-wJ+vUw16yBBFjMdJogF1nkLnAh3o2ndN9+0png8ZVJ4=";
sha256 = "sha256-oB8/q0igSZoaDzKzgmGAECU0qJwO67t9qWw+fB2vfxg=";
};
nativeBuildInputs = [ cmake pkg-config wrapQtAppsHook ];

View File

@ -17,7 +17,9 @@
, libxdg_basedir
, wxGTK
# GStreamer
, glib-networking
, gst_all_1
, libsoup_3
# User-agent info
, lsb-release
# rt2rtng
@ -58,6 +60,8 @@ stdenv.mkDerivation rec {
libxdg_basedir
lsb-release
wxGTK
# for https gstreamer / libsoup
glib-networking
] ++ gstInputs
++ pythonInputs;
@ -89,6 +93,8 @@ stdenv.mkDerivation rec {
preFixup = ''
gappsWrapperArgs+=(--suffix PATH : ${lib.makeBinPath [ dbus ]})
wrapProgram $out/bin/rt2rtng --prefix PYTHONPATH : $PYTHONPATH
# for GStreamer
gappsWrapperArgs+=(--prefix LD_LIBRARY_PATH : "${lib.getLib libsoup_3}/lib")
'';
meta = with lib; {

View File

@ -28,13 +28,13 @@ let
in
stdenv.mkDerivation rec {
pname = "reaper";
version = "7.05";
version = "7.06";
src = fetchurl {
url = url_for_platform version stdenv.hostPlatform.qemuArch;
hash = if stdenv.isDarwin then "sha256-jaT+3cIFVfBopgeeTkpNs9rFX50unlPJogdhkI9bsWU=" else {
x86_64-linux = "sha256-P/PnbJPr4ErDz5ho1/dLERhqkKjdetHzKpCpfVZAYb0=";
aarch64-linux = "sha256-PdnBVlHwoEEv2SPq/p5oyiOlduCEqL35gAY+QIJU1Ys=";
hash = if stdenv.isDarwin then "sha256-4ANi5KhNbJvDCO2iPX/oayGf/ZeIMfkhp0FQRrBYowo=" else {
x86_64-linux = "sha256-tq0K2HSDTZg7iw6ypS5oUuQi3HIYzbl9DWo2SOKGDVY=";
aarch64-linux = "sha256-MGpfdSQsMykp6QNq1JqxIsFqdhNyefPnEIyC4t1S6Vs=";
}.${stdenv.hostPlatform.system};
};

View File

@ -24,6 +24,10 @@ stdenv.mkDerivation rec {
ruby
];
postInstall = ''
cp -r share $out/
'';
postFixup = ''
wrapProgram $out/bin/rrip_cli \
--prefix PATH : ${lib.makeBinPath [ cddiscid cdparanoia ruby ]}

View File

@ -17,13 +17,13 @@
stdenv.mkDerivation rec {
pname = "timeshift";
version = "23.07.1";
version = "23.12.1";
src = fetchFromGitHub {
owner = "linuxmint";
repo = "timeshift";
rev = version;
sha256 = "RnArZTzvH+mdT7zAHTRem8+Z8CFjWVvd3p/HwZC/v+U=";
sha256 = "uesedEXPfvI/mRs8BiNkv8B2vVxmtTSaIvlQIsajkVg=";
};
patches = [

View File

@ -25,11 +25,11 @@
stdenv.mkDerivation rec {
pname = if withGui then "bitcoin-knots" else "bitcoind-knots";
version = "23.0.knots20220529";
version = "25.1.knots20231115";
src = fetchurl {
url = "https://bitcoinknots.org/files/23.x/${version}/bitcoin-${version}.tar.gz";
sha256 = "0c6l4bvj4ck8gp5vm4dla3l32swsp6ijk12fyf330wgry4mhqxyi";
url = "https://bitcoinknots.org/files/25.x/${version}/bitcoin-${version}.tar.gz";
sha256 = "b6251beee95cf6701c6ebc443b47fb0e99884880f2661397f964a8828add4002";
};
nativeBuildInputs =

View File

@ -72,6 +72,10 @@ stdenv.mkDerivation rec {
install -Dm644 share/pixmaps/bitcoin256.png $out/share/pixmaps/bitcoin.png
'';
preConfigure = lib.optionalString stdenv.isDarwin ''
export MACOSX_DEPLOYMENT_TARGET=10.13
'';
configureFlags = [
"--with-boost-libdir=${boost.out}/lib"
"--disable-bench"

View File

@ -27,13 +27,13 @@
stdenv.mkDerivation rec {
pname = "exodus";
version = "23.10.24";
version = "23.11.6";
src = fetchurl {
name = "exodus-linux-x64-${version}.zip";
url = "https://downloads.exodus.com/releases/${pname}-linux-x64-${version}.zip";
curlOptsList = [ "--user-agent" "Mozilla/5.0" ];
sha256 = "sha256-g28jSQaqjnM34sCpyYLSipUoU3pqAcXQIyWhlrR4xz4=";
sha256 = "sha256-s7LPOUDDQIgASMr3EmEUgtwWHl6mdDez4H3L+Mj3LQA=";
};
nativeBuildInputs = [ unzip ];

View File

@ -2,11 +2,11 @@
let
pname = "ledger-live-desktop";
version = "2.71.0";
version = "2.71.1";
src = fetchurl {
url = "https://download.live.ledger.com/${pname}-${version}-linux-x86_64.AppImage";
hash = "sha256-boZ28o8bg2TXZcc1mx4ZlPIPRFK9wy4+MTbYLT5XCQU=";
hash = "sha256-+1i4ycURuT0xSF2yLQM5uyDFzeeGQ8H4On2Pb3oIRYc=";
};
appimageContents = appimageTools.extractType2 {

View File

@ -21,11 +21,11 @@
let
pname = "sparrow";
version = "1.7.9";
version = "1.8.1";
src = fetchurl {
url = "https://github.com/sparrowwallet/${pname}/releases/download/${version}/${pname}-${version}-x86_64.tar.gz";
sha256 = "0bz8mx6mszqadx7nlb4ini45r2r57grdgmrq6k9lxgrgcpd8gasy";
sha256 = "sha256-dpYGMclYMjxjUbIcSZ7V54I1LTVfHxAKH9+7CaprD4U=";
};
launcher = writeScript "sparrow" ''

View File

@ -9,17 +9,18 @@
, openssl
, readline
, zlib
, nix-update-script
}:
stdenv.mkDerivation rec {
pname = "ton";
version = "2023.06";
version = "2023.10";
src = fetchFromGitHub {
owner = "ton-blockchain";
repo = "ton";
rev = "v${version}";
sha256 = "sha256-mDYuOokCGS1sDP6fHDXhGboDjn4JeyA5ea4/6RRt9x4=";
sha256 = "sha256-K1RhhW7EvwYV7/ng3NPjSGdHEQvJZ7K97YXd7s5wghc=";
fetchSubmodules = true;
};
@ -39,6 +40,8 @@ stdenv.mkDerivation rec {
zlib
];
passthru.updateScript = nix-update-script { };
meta = with lib; {
description = "A fully decentralized layer-1 blockchain designed by Telegram";
homepage = "https://ton.org/";

View File

@ -14,14 +14,14 @@ let
in
stdenv.mkDerivation rec {
pname = "aseprite";
version = "1.2.40";
version = "1.3";
src = fetchFromGitHub {
owner = "aseprite";
repo = "aseprite";
rev = "v${version}";
fetchSubmodules = true;
hash = "sha256-KUdJA6HTAKrLT8xrwFikVDbc5RODysclcsEyQekMRZo=";
hash = "sha256-MSLStUmKAbGKFOQmUcRVrkjZCDglSjTmC6MGWJOCjKU=";
};
nativeBuildInputs = [

View File

@ -10,13 +10,13 @@
stdenv.mkDerivation rec {
pname = "emacspeak";
version = "56.0";
version = "58.0";
src = fetchFromGitHub {
owner = "tvraman";
repo = pname;
rev = version;
hash= "sha256-juy+nQ7DrG818/uTH6Dv/lrrzu8qzPWwi0sX7JrhHK8=";
hash= "sha256-5pWC17nvy3ZuG0bR//LqDVpKsH5hFSFf63Q33a1BfBk=";
};
nativeBuildInputs = [

View File

@ -5626,6 +5626,18 @@ final: prev:
meta.homepage = "https://github.com/mawkler/modicator.nvim/";
};
modus-themes-nvim = buildVimPlugin {
pname = "modus-themes.nvim";
version = "2023-11-07";
src = fetchFromGitHub {
owner = "miikanissi";
repo = "modus-themes.nvim";
rev = "bd5c541f13ee77c6df5d6a5d5c321ab907aa5e11";
sha256 = "1xm691bghn9618czifsrymcxmqjhamk8vj8g790r2bm42lgwcs84";
};
meta.homepage = "https://github.com/miikanissi/modus-themes.nvim/";
};
molokai = buildVimPlugin {
pname = "molokai";
version = "2015-11-11";

View File

@ -470,6 +470,7 @@ https://github.com/jghauser/mkdir.nvim/,main,
https://github.com/jakewvincent/mkdnflow.nvim/,HEAD,
https://github.com/SidOfc/mkdx/,,
https://github.com/mawkler/modicator.nvim/,HEAD,
https://github.com/miikanissi/modus-themes.nvim/,HEAD,
https://github.com/tomasr/molokai/,,
https://github.com/benlubas/molten-nvim/,HEAD,
https://github.com/loctvl842/monokai-pro.nvim/,HEAD,

View File

@ -2123,19 +2123,19 @@ let
kddejong.vscode-cfn-lint =
let
inherit (python3Packages) cfn-lint;
inherit (python3Packages) cfn-lint pydot;
in
buildVscodeMarketplaceExtension {
mktplcRef = {
name = "vscode-cfn-lint";
publisher = "kddejong";
version = "0.21.0";
version = "0.25.1";
sha256 = "sha256-IueXiN+077tiecAsVCzgYksWYTs00mZv6XJVMtRJ/PQ=";
};
nativeBuildInputs = [ jq moreutils ];
buildInputs = [ cfn-lint ];
buildInputs = [ cfn-lint pydot ];
postInstall = ''
cd "$out/$installPrefix"
@ -3136,15 +3136,17 @@ let
mktplcRef = {
publisher = "shd101wyy";
name = "markdown-preview-enhanced";
version = "0.6.10";
sha256 = "sha256-nCsl7ZYwuTvNZSTUMR6jEywClmcPm8xW6ABu9220wJI=";
version = "0.8.10";
sha256 = "sha256-BjTV2uH9QqCS1VJ94XXgzNMJb4FB4Ee+t/5uAQfJCuM=";
};
meta = {
description = "Provides a live preview of markdown using either markdown-it or pandoc";
longDescription = ''
Markdown Preview Enhanced provides you with many useful functionalities
such as automatic scroll sync, math typesetting, mermaid, PlantUML,
pandoc, PDF export, code chunk, presentation writer, etc.
Markdown Preview Enhanced is an extension that provides you with
many useful functionalities such as automatic scroll sync, math
typesetting, mermaid, PlantUML, pandoc, PDF export, code chunk,
presentation writer, etc. A lot of its ideas are inspired by
Markdown Preview Plus and RStudio Markdown.
'';
homepage = "https://github.com/shd101wyy/vscode-markdown-preview-enhanced";
license = lib.licenses.ncsa;

View File

@ -28,6 +28,9 @@ vscode-utils.buildVscodeMarketplaceExtension {
EOF
}
jq "$(print_jq_query)" ./package.json | sponge ./package.json
# Add a link from temp to /tmp so that the extension gets a writable
# directory to write to.
ln -s /tmp temp
'';
meta = {

View File

@ -247,7 +247,11 @@ in
);
postFixup = lib.optionalString stdenv.isLinux ''
patchelf --add-needed ${libglvnd}/lib/libGLESv2.so.2 $out/lib/vscode/${executableName}
patchelf \
--add-needed ${libglvnd}/lib/libGLESv2.so.2 \
--add-needed ${libglvnd}/lib/libGL.so.1 \
--add-needed ${libglvnd}/lib/libEGL.so.1 \
$out/lib/vscode/${executableName}
'';
inherit meta;

View File

@ -1,40 +0,0 @@
{ lib, stdenv
, fetchurl
, ncurses
, readline
}:
stdenv.mkDerivation rec {
pname = "ytree";
version = "2.05";
src = fetchurl {
url = "https://han.de/~werner/${pname}-${version}.tar.gz";
sha256 = "sha256-jPixUeSRO1t/epHf/VxzBhBqQkd+xE5x1ix19mq2Glc=";
};
buildInputs = [
ncurses
readline
];
# don't save timestamp, in order to improve reproducibility
postPatch = ''
substituteInPlace Makefile --replace 'gzip' 'gzip -n'
'';
installFlags = [ "DESTDIR=${placeholder "out"}" ];
preInstall = ''
mkdir -p $out/bin $out/share/man/man1
'';
meta = with lib; {
description = "A curses-based file manager similar to DOS Xtree(TM)";
homepage = "https://www.han.de/~werner/ytree.html";
license = licenses.gpl2Plus;
maintainers = with maintainers; [ AndersonTorres ];
platforms = with platforms; unix;
};
}
# TODO: X11 support

View File

@ -95,6 +95,15 @@ mkDerivation rec {
dontWrapGApps = true;
postInstall = ''
install -Dm444 $src/snap/gui/{ccViewer,cloudcompare}.png -t $out/share/icons/hicolor/256x256/apps
install -Dm444 $src/snap/gui/{ccViewer,cloudcompare}.desktop -t $out/share/applications
substituteInPlace $out/share/applications/{ccViewer,cloudcompare}.desktop \
--replace 'Exec=cloudcompare.' 'Exec=' \
--replace 'Icon=''${SNAP}/meta/gui/' 'Icon=' \
--replace '.png' ""
'';
# fix file dialogs crashing on non-NixOS (and avoid double wrapping)
preFixup = ''
qtWrapperArgs+=("''${gappsWrapperArgs[@]}")

View File

@ -1,93 +0,0 @@
diff --git a/plug-ins/pdf/pdf-import.cpp b/plug-ins/pdf/pdf-import.cpp
index 189737908..a2a479693 100644
--- a/plug-ins/pdf/pdf-import.cpp
+++ b/plug-ins/pdf/pdf-import.cpp
@@ -152,12 +152,12 @@ public :
void
updateLineDash (GfxState *state)
{
- double *dashPattern;
- int dashLength;
- double dashStart;
-
- state->getLineDash (&dashPattern, &dashLength, &dashStart);
- this->dash_length = dashLength ? dashPattern[0] * scale : 1.0;
+ const double *dashPattern=NULL;
+ int dashLength=0;
+ double dashStart=0;
+ const std::vector<double> &dash = state->getLineDash(&dashStart); // > Poppler 22.09 ...
+ dashPattern = dash.data();
+ dashLength = dash.size();
if (dashLength == 0)
this->line_style = DIA_LINE_STYLE_SOLID;
@@ -318,10 +318,11 @@ public :
//FIXME: Dia is really unhappy about zero size fonts
if (!(state->getFontSize() > 0.0))
return;
- GfxFont *f = state->getFont();
+ const std::shared_ptr<GfxFont> f = state->getFont(); // poppler 22.05 ... header changed
+ gconstpointer f1 = &f; // GLib typedef const void * gconstpointer;
// instead of building the same font over and over again
- if (g_hash_table_lookup (this->font_map, f)) {
+ if (g_hash_table_lookup (this->font_map, f1)) {
++font_map_hits;
return;
}
@@ -333,8 +334,9 @@ public :
gchar *family = g_strdup (f->getFamily() ? f->getFamily()->c_str() : "sans");
// we are (not anymore) building the same font over and over again
+ f1 = &f;
g_print ("Font 0x%x: '%s' size=%g (* %g)\n",
- GPOINTER_TO_INT (f), family, state->getTransformedFontSize(), scale);
+ GPOINTER_TO_INT (f1), family, state->getTransformedFontSize(), scale);
// now try to make a fontname Dia/Pango can cope with
// strip style postfix - we already have extracted the style bits above
@@ -354,7 +356,9 @@ public :
fsize *= fabs(fm[3] / fm[0]);
font = dia_font_new (family, style, fsize * scale / 0.8);
- g_hash_table_insert (this->font_map, f, font);
+ f1 = &f;
+ gpointer f2 = (gpointer)f1; // GLib typedef void* gpointer;
+ g_hash_table_insert (this->font_map, f2, font);
g_free (family);
}
void updateTextShift(GfxState *state, double shift)
@@ -721,11 +725,12 @@ DiaOutputDev::drawString(GfxState *state, GooString *s)
return;
if (!(state->getFontSize() > 0.0))
return;
- font = (DiaFont *)g_hash_table_lookup (this->font_map, state->getFont());
+ gconstpointer f_1 = &state->getFont();
+ font = (DiaFont *)g_hash_table_lookup (this->font_map, f_1);
// we have to decode the string data first
{
- GfxFont *f = state->getFont();
+ const std::shared_ptr<GfxFont> f = state->getFont();
const char *p = s->c_str();
CharCode code;
int j = 0, m, n;
@@ -870,8 +875,8 @@ import_pdf(const gchar *filename, DiagramData *dia, DiaContext *ctx, void* user_
std::unique_ptr<PDFDoc> doc;
GooString *fileName = new GooString(filename);
// no passwords yet
- GooString *ownerPW = NULL;
- GooString *userPW = NULL;
+ const std::optional<GooString> ownerPW;
+ const std::optional<GooString> userPW;
gboolean ret = FALSE;
// without this we will get strange crashes (at least with /O2 build)
@@ -899,6 +904,7 @@ import_pdf(const gchar *filename, DiagramData *dia, DiaContext *ctx, void* user_
delete diaOut;
ret = TRUE;
}
+ doc.reset();
delete fileName;
return ret;

View File

@ -41,6 +41,12 @@ rustPlatform.buildRustPackage rec {
AppKit
];
postInstall = ''
install -Dm444 assets/epick.desktop -t $out/share/applications
install -Dm444 assets/icon.svg $out/share/icons/hicolor/scalable/apps/epick.svg
install -Dm444 assets/icon.png $out/share/icons/hicolor/48x48/apps/epick.png
'';
postFixup = lib.optionalString stdenv.isLinux ''
patchelf $out/bin/epick --add-rpath ${lib.makeLibraryPath [ libGL ]}
'';

View File

@ -1,200 +0,0 @@
diff --git focusblur-3.2.6/src/aaa.h focusblur-3.2.6/src/aaa.h
index 4a6d90b..c74cab2 100644
--- focusblur-3.2.6/src/aaa.h
+++ focusblur-3.2.6/src/aaa.h
@@ -19,8 +19,7 @@
#ifndef __AAA_H__
#define __AAA_H__
-#include <glib/gmacros.h>
-#include <glib/gtypes.h>
+#include <glib.h>
G_BEGIN_DECLS
diff --git focusblur-3.2.6/src/brush.h focusblur-3.2.6/src/brush.h
index 685b253..8778fec 100644
--- focusblur-3.2.6/src/brush.h
+++ focusblur-3.2.6/src/brush.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_BRUSH_H__
#define __FOCUSBLUR_BRUSH_H__
-#include <glib/gtypes.h>
+#include <glib.h>
#include "focusblurtypes.h"
G_BEGIN_DECLS
diff --git focusblur-3.2.6/src/depthmap.h focusblur-3.2.6/src/depthmap.h
index 78f5e99..baee540 100644
--- focusblur-3.2.6/src/depthmap.h
+++ focusblur-3.2.6/src/depthmap.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_DEPTHMAP_H__
#define __FOCUSBLUR_DEPTHMAP_H__
-#include <glib/gtypes.h>
+#include <glib.h>
#include "focusblurtypes.h"
#include "focusblurenums.h"
diff --git focusblur-3.2.6/src/diffusion.h focusblur-3.2.6/src/diffusion.h
index 07ffe4b..3c1e4b9 100644
--- focusblur-3.2.6/src/diffusion.h
+++ focusblur-3.2.6/src/diffusion.h
@@ -23,7 +23,7 @@
#define __FOCUSBLUR_DIFFUSION_H__
-#include <glib/gtypes.h>
+#include <glib.h>
#include "focusblur.h"
#include "focusblurtypes.h"
diff --git focusblur-3.2.6/src/fftblur.h focusblur-3.2.6/src/fftblur.h
index 124bcba..cd809fa 100644
--- focusblur-3.2.6/src/fftblur.h
+++ focusblur-3.2.6/src/fftblur.h
@@ -23,8 +23,7 @@
#define __FOCUSBLUR_FFTBLUR_H__
-#include <glib/gmacros.h>
-#include <glib/gtypes.h>
+#include <glib.h>
#include <libgimpwidgets/gimpwidgetstypes.h>
#include "focusblurparam.h"
diff --git focusblur-3.2.6/src/fftblurbuffer.h focusblur-3.2.6/src/fftblurbuffer.h
index b34d682..42e6380 100644
--- focusblur-3.2.6/src/fftblurbuffer.h
+++ focusblur-3.2.6/src/fftblurbuffer.h
@@ -28,8 +28,7 @@
#endif
#include <fftw3.h>
-#include <glib/gmacros.h>
-#include <glib/gtypes.h>
+#include <glib.h>
#include <gtk/gtkstyle.h>
#include <libgimp/gimptypes.h>
#include <libgimpwidgets/gimpwidgetstypes.h>
diff --git focusblur-3.2.6/src/fftblurproc.h focusblur-3.2.6/src/fftblurproc.h
index 495572d..10a34f4 100644
--- focusblur-3.2.6/src/fftblurproc.h
+++ focusblur-3.2.6/src/fftblurproc.h
@@ -23,8 +23,7 @@
#define __FOCUSBLUR_FFTBLUR_PROC_H__
-#include <glib/gmacros.h>
-#include <glib/gtypes.h>
+#include <glib.h>
#include "focusblurtypes.h"
diff --git focusblur-3.2.6/src/focusblur.h focusblur-3.2.6/src/focusblur.h
index 54ca40a..d7e13a6 100644
--- focusblur-3.2.6/src/focusblur.h
+++ focusblur-3.2.6/src/focusblur.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_H__
#define __FOCUSBLUR_H__
-#include <glib/gmacros.h>
+#include <glib.h>
G_BEGIN_DECLS
diff --git focusblur-3.2.6/src/focusblurparam.h focusblur-3.2.6/src/focusblurparam.h
index 64c887b..32865b4 100644
--- focusblur-3.2.6/src/focusblurparam.h
+++ focusblur-3.2.6/src/focusblurparam.h
@@ -22,8 +22,7 @@
#ifndef __FOCUSBLUR_PARAM_H__
#define __FOCUSBLUR_PARAM_H__
-#include <glib/gmacros.h>
-#include <glib/gtypes.h>
+#include <glib.h>
#include <gtk/gtkstyle.h>
#include <libgimp/gimptypes.h>
diff --git focusblur-3.2.6/src/focusblurstock.h focusblur-3.2.6/src/focusblurstock.h
index 15f3603..cfc0567 100644
--- focusblur-3.2.6/src/focusblurstock.h
+++ focusblur-3.2.6/src/focusblurstock.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_STOCK_H__
#define __FOCUSBLUR_STOCK_H__
-#include <glib/gtypes.h>
+#include <glib.h>
G_BEGIN_DECLS
diff --git focusblur-3.2.6/src/focusblurtypes.h focusblur-3.2.6/src/focusblurtypes.h
index 0954c60..1531c84 100644
--- focusblur-3.2.6/src/focusblurtypes.h
+++ focusblur-3.2.6/src/focusblurtypes.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_TYPES_H__
#define __FOCUSBLUR_TYPES_H__
-#include <glib/gmacros.h>
+#include <glib.h>
G_BEGIN_DECLS
diff --git focusblur-3.2.6/src/interface.h focusblur-3.2.6/src/interface.h
index 6defd27..e819c60 100644
--- focusblur-3.2.6/src/interface.h
+++ focusblur-3.2.6/src/interface.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_INTERFACE_H__
#define __FOCUSBLUR_INTERFACE_H__
-#include <glib/gtypes.h>
+#include <glib.h>
#include "focusblurtypes.h"
diff --git focusblur-3.2.6/src/render.h focusblur-3.2.6/src/render.h
index febbd24..a501f1e 100644
--- focusblur-3.2.6/src/render.h
+++ focusblur-3.2.6/src/render.h
@@ -24,7 +24,7 @@
#include "config.h"
-#include <glib/gtypes.h>
+#include <glib.h>
//#include <libgimp/gimp.h>
#include <libgimp/gimpui.h>
diff --git focusblur-3.2.6/src/shine.h focusblur-3.2.6/src/shine.h
index c5a3621..86b4c09 100644
--- focusblur-3.2.6/src/shine.h
+++ focusblur-3.2.6/src/shine.h
@@ -22,7 +22,7 @@
#ifndef __FOCUSBLUR_SHINE_H__
#define __FOCUSBLUR_SHINE_H__
-#include <glib/gtypes.h>
+#include <glib.h>
#include <libgimp/gimptypes.h>
#include "focusblurtypes.h"
diff --git focusblur-3.2.6/src/source.h focusblur-3.2.6/src/source.h
index 50d34ca..8eec35c 100644
--- focusblur-3.2.6/src/source.h
+++ focusblur-3.2.6/src/source.h
@@ -24,7 +24,7 @@
#include "config.h"
-#include <glib/gtypes.h>
+#include <glib.h>
#include <libgimp/gimptypes.h>
#include "focusblurtypes.h"

View File

@ -21,6 +21,14 @@ stdenv.mkDerivation rec {
installPhase = ''
install -D ./goxel $out/bin/goxel
for res in $(ls data/icons | sed -e 's/icon//g' -e 's/.png//g'); do
install -Dm444 data/icons/icon$res.png $out/share/icons/hicolor/''${res}x''${res}/apps/goxel.png
done
install -Dm444 snap/gui/goxel.desktop -t $out/share/applications
substituteInPlace $out/share/applications/goxel.desktop \
--replace 'Icon=''${SNAP}/icon.png' 'Icon=goxel'
'';
meta = with lib; {

View File

@ -19,7 +19,7 @@
python3.pkgs.buildPythonApplication rec {
pname = "komikku";
version = "1.29.0";
version = "1.31.0";
format = "other";
@ -27,7 +27,7 @@ python3.pkgs.buildPythonApplication rec {
owner = "valos";
repo = "Komikku";
rev = "v${version}";
hash = "sha256-efKYmsDbdDxgOHkv05zwlq88NzW7pYOQOYcJqPeKXkY=";
hash = "sha256-7u7F2Z1fYr3S1Sx9FAVmimQbT0o6tb96jXG0o9+4/rc=";
};
nativeBuildInputs = [

View File

@ -66,6 +66,11 @@ rustPlatform.buildRustPackage rec {
"--skip=bench"
];
postInstall = ''
install -Dm444 $src/res/oculante.png -t $out/share/icons/hicolor/128x128/apps/
install -Dm444 $src/res/oculante.desktop -t $out/share/applications
'';
postFixup = lib.optionalString stdenv.isLinux ''
patchelf $out/bin/oculante --add-rpath ${lib.makeLibraryPath [ libxkbcommon libX11 ]}
'';

View File

@ -30,6 +30,15 @@ rustPlatform.buildRustPackage rec {
buildInputs = [ gtk3-x11 atk glib librsvg ];
postInstall = ''
install -Dm444 res/icons/tk.categulario.pizarra.svg $out/share/icons/hicolor/scalable/apps/pizarra.svg
install -Dm444 res/pizarra.desktop -t $out/share/applications
substituteInPlace $out/share/applications/pizarra.desktop \
--replace "TryExec=/usr/bin/" "TryExec=" \
--replace "Exec=/usr/bin/" "Exec=" \
--replace "Icon=/usr/share/icons/hicolor/scalable/apps/pizarra.svg" "Icon=pizarra"
'';
meta = with lib; {
description = "A simple blackboard written in GTK";
longDescription = ''

View File

@ -29,6 +29,10 @@ mkDerivation rec {
pkg-config
];
cmakeFlags = [
"-DVIDEO_SUPPORT=ON"
];
buildInputs = [
exiv2
mpv

File diff suppressed because it is too large

View File

@ -26,13 +26,13 @@
stdenv.mkDerivation rec {
pname = "rnote";
version = "0.9.2";
version = "0.9.3";
src = fetchFromGitHub {
owner = "flxzt";
repo = "rnote";
rev = "v${version}";
hash = "sha256-LLJurn5KJBlTtFrQXcc7HZqtIATOLgiwJqUsZe4cRIo=";
hash = "sha256-TeOBLPQc4y1lstqZUBDS3vUPama80UieifmxL2Qswvw=";
};
cargoDeps = rustPlatform.importCargoLock {

View File

@ -24,7 +24,7 @@
, srcs
# provided as callPackage input to enable easier overrides through overlays
, cargoSha256 ? "sha256-YR7d8F1LWDHY+h2ZQe52u3KWIeEMTnrbU4DO+hpIOec="
, cargoSha256 ? "sha256-EXsAvI8dKgCGmLbGr9fdk/F9UwtSfd/aIyqAy5tvFSI="
}:
mkDerivation rec {

View File

@ -1 +1 @@
WGET_ARGS=( https://download.kde.org/stable/release-service/23.08.3/src -A '*.tar.xz' )
WGET_ARGS=( https://download.kde.org/stable/release-service/23.08.4/src -A '*.tar.xz' )

File diff suppressed because it is too large

View File

@ -9,7 +9,7 @@
let
pname = "1password";
version = if channel == "stable" then "8.10.20" else "8.10.20-1.BETA";
version = if channel == "stable" then "8.10.20" else "8.10.22-21.BETA";
sources = {
stable = {
@ -33,19 +33,19 @@ let
beta = {
x86_64-linux = {
url = "https://downloads.1password.com/linux/tar/beta/x86_64/1password-${version}.x64.tar.gz";
hash = "sha256-+wHxtlE0zeVEObzdpcIP75LKbbjsG8LMqdIPFkY0BoU=";
hash = "sha256-R4jj5U2a8AoAs1qVIjMQx6odK0Ks4WeqRURf3pOOduo=";
};
aarch64-linux = {
url = "https://downloads.1password.com/linux/tar/beta/aarch64/1password-${version}.arm64.tar.gz";
hash = "sha256-BRsp/hhBwgQFU+5Tt1M9V5Lx8oRLN3uaqLrzrPo/xpo=";
hash = "sha256-1opo/RZ0aTZn3Jo9XIw/g8WYK2xgRiaRKgd7RstGJ5g=";
};
x86_64-darwin = {
url = "https://downloads.1password.com/mac/1Password-${version}-x86_64.zip";
hash = "sha256-WVP5a007cU1GR/lnL7C6QiJpTTsjzaiS69H2LJzYm70=";
hash = "sha256-jlQgXlLLUF78g2B7KYgTSQZAEe57TRw4vN7MPn3IwwI=";
};
aarch64-darwin = {
url = "https://downloads.1password.com/mac/1Password-${version}-aarch64.zip";
hash = "sha256-BBSUSSnot1ktC0ik7yMhqsgLdkeQBrJUpHBvwu0w9m0=";
hash = "sha256-nzKESK3QKsi0Xzm3ytXWIH08LV2F6jLKvCLDHzVR9xQ=";
};
};
};

Some files were not shown because too many files have changed in this diff