Merge branch 'master' of https://github.com/nixos/nixpkgs into tarball-closureinfo

This commit is contained in:
Ding Xiang Fei 2018-11-26 12:04:07 +08:00
commit b011049cf6
1356 changed files with 96697 additions and 86247 deletions

6
.github/CODEOWNERS vendored
View File

@ -120,3 +120,9 @@
# Dhall # Dhall
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch /pkgs/development/dhall-modules @Gabriel439 @Profpatsch
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch /pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch
# Idris
/pkgs/development/idris-modules @Infinisil
# Bazel
/pkgs/development/tools/build-managers/bazel @mboes @Profpatsch

View File

@ -14,6 +14,7 @@
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"` - [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nox --run "nox-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`) - [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after) - [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
- [ ] Assured whether relevant documentation is up to date
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md). - [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).
--- ---

View File

@ -20,7 +20,7 @@ release and `nixos-unstable` for the latest successful build of master:
% git rebase channels/nixos-18.09 % git rebase channels/nixos-18.09
``` ```
For pull-requests, please rebase onto nixpkgs `master`. For pull requests, please rebase onto nixpkgs `master`.
[NixOS](https://nixos.org/nixos/) Linux distribution source code is located inside [NixOS](https://nixos.org/nixos/) Linux distribution source code is located inside
`nixos/` folder. `nixos/` folder.

View File

@ -191,6 +191,23 @@ args.stdenv.mkDerivation (args // {
<section xml:id="sec-package-naming"> <section xml:id="sec-package-naming">
<title>Package naming</title> <title>Package naming</title>
<para>
The key words
<emphasis>must</emphasis>,
<emphasis>must not</emphasis>,
<emphasis>required</emphasis>,
<emphasis>shall</emphasis>,
<emphasis>shall not</emphasis>,
<emphasis>should</emphasis>,
<emphasis>should not</emphasis>,
<emphasis>recommended</emphasis>,
<emphasis>may</emphasis>,
and <emphasis>optional</emphasis> in this section
are to be interpreted as described in
<link xlink:href="https://tools.ietf.org/html/rfc2119">RFC 2119</link>.
Only <emphasis>emphasized</emphasis> words are to be interpreted in this way.
</para>
<para> <para>
In Nixpkgs, there are generally three different names associated with a In Nixpkgs, there are generally three different names associated with a
package: package:
@ -231,14 +248,15 @@ args.stdenv.mkDerivation (args // {
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Generally, try to stick to the upstream package name. The <literal>name</literal> attribute <emphasis>should</emphasis>
be identical to the upstream package name.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Dont use uppercase letters in the <literal>name</literal> attribute The <literal>name</literal> attribute <emphasis>must not</emphasis>
— e.g., <literal>"mplayer-1.0rc2"</literal> instead of contain uppercase letters — e.g., <literal>"mplayer-1.0rc2"</literal>
<literal>"MPlayer-1.0rc2"</literal>. instead of <literal>"MPlayer-1.0rc2"</literal>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -252,14 +270,14 @@ args.stdenv.mkDerivation (args // {
<para> <para>
If a package is not a release but a commit from a repository, then the If a package is not a release but a commit from a repository, then the
version part of the name <emphasis>must</emphasis> be the date of that version part of the name <emphasis>must</emphasis> be the date of that
(fetched) commit. The date must be in <literal>"YYYY-MM-DD"</literal> (fetched) commit. The date <emphasis>must</emphasis> be in <literal>"YYYY-MM-DD"</literal>
format. Also append <literal>"unstable"</literal> to the name - e.g., format. Also append <literal>"unstable"</literal> to the name - e.g.,
<literal>"pkgname-unstable-2014-09-23"</literal>. <literal>"pkgname-unstable-2014-09-23"</literal>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
Dashes in the package name should be preserved in new variable names, Dashes in the package name <emphasis>should</emphasis> be preserved in new variable names,
rather than converted to underscores or camel cased — e.g., rather than converted to underscores or camel cased — e.g.,
<varname>http-parser</varname> instead of <varname>http_parser</varname> <varname>http-parser</varname> instead of <varname>http_parser</varname>
or <varname>httpParser</varname>. The hyphenated style is preferred in or <varname>httpParser</varname>. The hyphenated style is preferred in
@ -268,7 +286,7 @@ args.stdenv.mkDerivation (args // {
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
If there are multiple versions of a package, this should be reflected in If there are multiple versions of a package, this <emphasis>should</emphasis> be reflected in
the variable names in <filename>all-packages.nix</filename>, e.g. the variable names in <filename>all-packages.nix</filename>, e.g.
<varname>json-c-0-9</varname> and <varname>json-c-0-11</varname>. If <varname>json-c-0-9</varname> and <varname>json-c-0-11</varname>. If
there is an obvious “default” version, make an attribute like there is an obvious “default” version, make an attribute like

View File

@ -132,7 +132,7 @@
</itemizedlist> </itemizedlist>
<para> <para>
The difference between an a package being unsupported on some system and The difference between a package being unsupported on some system and
being broken is admittedly a bit fuzzy. If a program being broken is admittedly a bit fuzzy. If a program
<emphasis>ought</emphasis> to work on a certain platform, but doesn't, the <emphasis>ought</emphasis> to work on a certain platform, but doesn't, the
platform should be included in <literal>meta.platforms</literal>, but marked platform should be included in <literal>meta.platforms</literal>, but marked
@ -175,11 +175,12 @@
</programlisting> </programlisting>
</para> </para>
<para> <para>
A more useful example, the following configuration allows only allows For a more useful example, try the following. This configuration
flash player and visual studio code: only allows unfree packages named flash player and visual studio
code:
<programlisting> <programlisting>
{ {
allowUnfreePredicate = (pkg: elem (builtins.parseDrvName pkg.name).name [ "flashplayer" "vscode" ]); allowUnfreePredicate = (pkg: builtins.elem (builtins.parseDrvName pkg.name).name [ "flashplayer" "vscode" ]);
} }
</programlisting> </programlisting>
</para> </para>
@ -286,8 +287,8 @@
<para> <para>
You can define a function called <varname>packageOverrides</varname> in your You can define a function called <varname>packageOverrides</varname> in your
local <filename>~/.config/nixpkgs/config.nix</filename> to override nix local <filename>~/.config/nixpkgs/config.nix</filename> to override Nix
packages. It must be a function that takes pkgs as an argument and return packages. It must be a function that takes pkgs as an argument and returns a
modified set of packages. modified set of packages.
<programlisting> <programlisting>
{ {

View File

@ -6,17 +6,17 @@
<title>Introduction</title> <title>Introduction</title>
<para> <para>
"Cross-compilation" means compiling a program on one machine for another "Cross-compilation" means compiling a program on one machine for another type
type of machine. For example, a typical use of cross compilation is to of machine. For example, a typical use of cross-compilation is to compile
compile programs for embedded devices. These devices often don't have the programs for embedded devices. These devices often don't have the computing
computing power and memory to compile their own programs. One might think power and memory to compile their own programs. One might think that
that cross-compilation is a fairly niche concern, but there are advantages cross-compilation is a fairly niche concern. However, there are significant
to being rigorous about distinguishing build-time vs run-time environments advantages to rigorously distinguishing between build-time and run-time
even when one is developing and deploying on the same machine. Nixpkgs is environments! This applies even when one is developing and deploying on the
increasingly adopting the opinion that packages should be written with same machine. Nixpkgs is increasingly adopting the opinion that packages
cross-compilation in mind, and nixpkgs should evaluate in a similar way (by should be written with cross-compilation in mind, and nixpkgs should evaluate
minimizing cross-compilation-specific special cases) whether or not one is in a similar way (by minimizing cross-compilation-specific special cases)
cross-compiling. whether or not one is cross-compiling.
</para> </para>
<para> <para>
@ -34,15 +34,15 @@
<title>Platform parameters</title> <title>Platform parameters</title>
<para> <para>
Nixpkgs follows the Nixpkgs follows the <link
<link xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">common xlink:href="https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html">conventions
historical convention of GNU autoconf</link> of distinguishing between 3 of GNU autoconf</link>. We distinguish between 3 types of platforms when
types of platform: <wordasword>build</wordasword>, building a derivation: <wordasword>build</wordasword>,
<wordasword>host</wordasword>, and <wordasword>target</wordasword>. In <wordasword>host</wordasword>, and <wordasword>target</wordasword>. In
summary, <wordasword>build</wordasword> is the platform on which a package summary, <wordasword>build</wordasword> is the platform on which a package
is being built, <wordasword>host</wordasword> is the platform on which it is being built, <wordasword>host</wordasword> is the platform on which it
is to run. The third attribute, <wordasword>target</wordasword>, is will run. The third attribute, <wordasword>target</wordasword>, is relevant
relevant only for certain specific compilers and build tools. only for certain specific compilers and build tools.
</para> </para>
<para> <para>
@ -64,7 +64,7 @@
<para> <para>
The "build platform" is the platform on which a package is built. Once The "build platform" is the platform on which a package is built. Once
someone has a built package, or pre-built binary package, the build someone has a built package, or pre-built binary package, the build
platform should not matter and be safe to ignore. platform should not matter and can be ignored.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -94,11 +94,11 @@
<para> <para>
The build process of certain compilers is written in such a way that the The build process of certain compilers is written in such a way that the
compiler resulting from a single build can itself only produce binaries compiler resulting from a single build can itself only produce binaries
for a single platform. The task specifying this single "target platform" for a single platform. The task of specifying this single "target
is thus pushed to build time of the compiler. The root cause of this platform" is thus pushed to build time of the compiler. The root cause of
mistake is often that the compiler (which will be run on the host) and this that the compiler (which will be run on the host) and the standard
the the standard library/runtime (which will be run on the target) are library/runtime (which will be run on the target) are built by a single
built by a single build process. build process.
</para> </para>
<para> <para>
There is no fundamental need to think about a single target ahead of There is no fundamental need to think about a single target ahead of
@ -135,8 +135,10 @@
<para> <para>
This is a two-component shorthand for the platform. Examples of this This is a two-component shorthand for the platform. Examples of this
would be "x86_64-darwin" and "i686-linux"; see would be "x86_64-darwin" and "i686-linux"; see
<literal>lib.systems.doubles</literal> for more. This format isn't very <literal>lib.systems.doubles</literal> for more. The first component
standard, but has built-in support in Nix, such as the corresponds to the CPU architecture of the platform and the second to the
operating system of the platform (<literal>[cpu]-[os]</literal>). This
format has built-in support in Nix, such as the
<varname>builtins.currentSystem</varname> impure string. <varname>builtins.currentSystem</varname> impure string.
</para> </para>
</listitem> </listitem>
@ -147,12 +149,13 @@
</term> </term>
<listitem> <listitem>
<para> <para>
This is a 3- or 4- component shorthand for the platform. Examples of This is a 3- or 4- component shorthand for the platform. Examples of this
this would be "x86_64-unknown-linux-gnu" and "aarch64-apple-darwin14". would be <literal>x86_64-unknown-linux-gnu</literal> and
This is a standard format called the "LLVM target triple", as they are <literal>aarch64-apple-darwin14</literal>. This is a standard format
pioneered by LLVM and traditionally just used for the called the "LLVM target triple", as they are pioneered by LLVM. In the
<varname>targetPlatform</varname>. This format is strictly more 4-part form, this corresponds to
informative than the "Nix host double", as the previous format could <literal>[cpu]-[vendor]-[os]-[abi]</literal>. This format is strictly
more informative than the "Nix host double", as the previous format could
analogously be termed. This needs a better name than analogously be termed. This needs a better name than
<varname>config</varname>! <varname>config</varname>!
</para> </para>
@ -164,12 +167,11 @@
</term> </term>
<listitem> <listitem>
<para> <para>
This is a nix representation of a parsed LLVM target triple with This is a Nix representation of a parsed LLVM target triple
white-listed components. This can be specified directly, or actually with white-listed components. This can be specified directly,
parsed from the <varname>config</varname>. [Technically, only one need or actually parsed from the <varname>config</varname>. See
be specified and the others can be inferred, though the precision of <literal>lib.systems.parse</literal> for the exact
inference may not be very good.] See representation.
<literal>lib.systems.parse</literal> for the exact representation.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -193,7 +195,7 @@
<listitem> <listitem>
<para> <para>
These predicates are defined in <literal>lib.systems.inspect</literal>, These predicates are defined in <literal>lib.systems.inspect</literal>,
and slapped on every platform. They are superior to the ones in and slapped onto every platform. They are superior to the ones in
<varname>stdenv</varname> as they force the user to be explicit about <varname>stdenv</varname> as they force the user to be explicit about
which platform they are inspecting. Please use these instead of those. which platform they are inspecting. Please use these instead of those.
</para> </para>
@ -221,7 +223,7 @@
<para> <para>
In this section we explore the relationship between both runtime and In this section we explore the relationship between both runtime and
buildtime dependencies and the 3 Autoconf platforms. build-time dependencies and the 3 Autoconf platforms.
</para> </para>
<para> <para>
@ -249,17 +251,17 @@
</para> </para>
<para> <para>
Some examples will probably make this clearer. If a package is being built Some examples will make this clearer. If a package is being built with a
with a <literal>(build, host, target)</literal> platform triple of <literal>(build, host, target)</literal> platform triple of <literal>(foo,
<literal>(foo, bar, bar)</literal>, then its build-time dependencies would bar, bar)</literal>, then its build-time dependencies would have a triple of
have a triple of <literal>(foo, foo, bar)</literal>, and <emphasis>those <literal>(foo, foo, bar)</literal>, and <emphasis>those packages'</emphasis>
packages'</emphasis> build-time dependencies would have triple of build-time dependencies would have a triple of <literal>(foo, foo,
<literal>(foo, foo, foo)</literal>. In other words, it should take two foo)</literal>. In other words, it should take two "rounds" of following
"rounds" of following build-time dependency edges before one reaches a build-time dependency edges before one reaches a fixed point where, by the
fixed point where, by the sliding window principle, the platform triple no sliding window principle, the platform triple no longer changes. Indeed,
longer changes. Indeed, this happens with cross compilation, where only this happens with cross-compilation, where only rounds of native
rounds of native dependencies starting with the second necessarily coincide dependencies starting with the second necessarily coincide with native
with native packages. packages.
</para> </para>
<note> <note>
@ -271,23 +273,23 @@
</note> </note>
<para> <para>
How does this work in practice? Nixpkgs is now structured so that How does this work in practice? Nixpkgs is now structured so that build-time
build-time dependencies are taken from <varname>buildPackages</varname>, dependencies are taken from <varname>buildPackages</varname>, whereas
whereas run-time dependencies are taken from the top level attribute set. run-time dependencies are taken from the top level attribute set. For
For example, <varname>buildPackages.gcc</varname> should be used at build example, <varname>buildPackages.gcc</varname> should be used at build-time,
time, while <varname>gcc</varname> should be used at run time. Now, for while <varname>gcc</varname> should be used at run-time. Now, for most of
most of Nixpkgs's history, there was no <varname>buildPackages</varname>, Nixpkgs's history, there was no <varname>buildPackages</varname>, and most
and most packages have not been refactored to use it explicitly. Instead, packages have not been refactored to use it explicitly. Instead, one can use
one can use the six (<emphasis>gasp</emphasis>) attributes used for the six (<emphasis>gasp</emphasis>) attributes used for specifying
specifying dependencies as documented in dependencies as documented in <xref linkend="ssec-stdenv-dependencies"/>. We
<xref linkend="ssec-stdenv-dependencies"/>. We "splice" together the "splice" together the run-time and build-time package sets with
run-time and build-time package sets with <varname>callPackage</varname>, <varname>callPackage</varname>, and then <varname>mkDerivation</varname> for
and then <varname>mkDerivation</varname> for each of four attributes pulls each of four attributes pulls the right derivation out. This splicing can be
the right derivation out. This splicing can be skipped when not cross skipped when not cross-compiling as the package sets are the same, but is a
compiling as the package sets are the same, but is a bit slow for cross bit slow for cross-compiling. Because of this, a best-of-both-worlds
compiling. Because of this, a best-of-both-worlds solution is in the works solution is in the works with no splicing or explicit access of
with no splicing or explicit access of <varname>buildPackages</varname> <varname>buildPackages</varname> needed. For now, feel free to use either
needed. For now, feel free to use either method. method.
</para> </para>
<note> <note>
@ -305,11 +307,11 @@
<title>Cross packaging cookbook</title> <title>Cross packaging cookbook</title>
<para> <para>
Some frequently problems when packaging for cross compilation are good to Some frequently encountered problems when packaging for cross-compilation
just spell and answer. Ideally the information above is exhaustive, so this should be answered here. Ideally, the information above is exhaustive, so
section cannot provide any new information, but its ludicrous and cruel to this section cannot provide any new information, but it is ludicrous and
expect everyone to spend effort working through the interaction of many cruel to expect everyone to spend effort working through the interaction of
features just to figure out the same answer to the same common problem. many features just to figure out the same answer to the same common problem.
Feel free to add to this list! Feel free to add to this list!
</para> </para>
@ -364,17 +366,9 @@
<section xml:id="sec-cross-usage"> <section xml:id="sec-cross-usage">
<title>Cross-building packages</title> <title>Cross-building packages</title>
<note>
<para>
More information needs to moved from the old wiki, especially
<link xlink:href="https://nixos.org/wiki/CrossCompiling" />, for this
section.
</para>
</note>
<para> <para>
Nixpkgs can be instantiated with <varname>localSystem</varname> alone, in Nixpkgs can be instantiated with <varname>localSystem</varname> alone, in
which case there is no cross compiling and everything is built by and for which case there is no cross-compiling and everything is built by and for
that system, or also with <varname>crossSystem</varname>, in which case that system, or also with <varname>crossSystem</varname>, in which case
packages run on the latter, but all building happens on the former. Both packages run on the latter, but all building happens on the former. Both
parameters take the same schema as the 3 (build, host, and target) platforms parameters take the same schema as the 3 (build, host, and target) platforms
@ -440,15 +434,14 @@ nix-build &lt;nixpkgs&gt; --arg crossSystem.config '&lt;arch&gt;-&lt;os&gt;-&lt;
build plan or package set. A simple "build vs deploy" dichotomy is adequate: build plan or package set. A simple "build vs deploy" dichotomy is adequate:
the sliding window principle described in the previous section shows how to the sliding window principle described in the previous section shows how to
interpolate between the these two "end points" to get the 3 platform triple interpolate between the these two "end points" to get the 3 platform triple
for each bootstrapping stage. That means for any package a given package for each bootstrapping stage. That means for any package a given package set,
set, even those not bound on the top level but only reachable via even those not bound on the top level but only reachable via dependencies or
dependencies or <varname>buildPackages</varname>, the three platforms will <varname>buildPackages</varname>, the three platforms will be defined as one
be defined as one of <varname>localSystem</varname> or of <varname>localSystem</varname> or <varname>crossSystem</varname>, with the
<varname>crossSystem</varname>, with the former replacing the latter as one former replacing the latter as one traverses build-time dependencies. A last
traverses build-time dependencies. A last simple difference then is simple difference is that <varname>crossSystem</varname> should be null when
<varname>crossSystem</varname> should be null when one doesn't want to one doesn't want to cross-compile, while the <varname>*Platform</varname>s
cross-compile, while the <varname>*Platform</varname>s are always non-null. are always non-null. <varname>localSystem</varname> is always non-null.
<varname>localSystem</varname> is always non-null.
</para> </para>
</section> </section>
<!--============================================================--> <!--============================================================-->
@ -461,14 +454,14 @@ nix-build &lt;nixpkgs&gt; --arg crossSystem.config '&lt;arch&gt;-&lt;os&gt;-&lt;
<note> <note>
<para> <para>
If one explores nixpkgs, they will see derivations with names like If one explores Nixpkgs, they will see derivations with names like
<literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is <literal>gccCross</literal>. Such <literal>*Cross</literal> derivations is a
a holdover from before we properly distinguished between the host and holdover from before we properly distinguished between the host and target
target platforms —the derivation with "Cross" in the name covered the platforms—the derivation with "Cross" in the name covered the <literal>build
<literal>build = host != target</literal> case, while the other covered the = host != target</literal> case, while the other covered the <literal>host =
<literal>host = target</literal>, with build platform the same or not based target</literal>, with build platform the same or not based on whether one
on whether one was using its <literal>.nativeDrv</literal> or was using its <literal>.nativeDrv</literal> or <literal>.crossDrv</literal>.
<literal>.crossDrv</literal>. This ugliness will disappear soon. This ugliness will disappear soon.
</para> </para>
</note> </note>
</section> </section>

View File

@ -1,39 +1,115 @@
Idris packages # Idris packages
==============
This directory contains build rules for idris packages. In addition, ## Installing Idris
it contains several functions to build and compose those packages.
Everything is exposed to the user via the `idrisPackages` attribute.
callPackage The easiest way to get a working idris version is to install the `idris` attribute:
------------
This is like the normal nixpkgs callPackage function, specialized to ```
idris packages. $ # On NixOS
$ nix-env -i nixos.idris
$ # On non-NixOS
$ nix-env -i nixpkgs.idris
```
builtins This however only provides the `prelude` and `base` libraries. To install additional libraries:
---------
This is a list of all of the libraries that come packaged with Idris ```
itself. $ nix-env -iE 'pkgs: pkgs.idrisPackages.with-packages (with pkgs.idrisPackages; [ contrib pruviloj ])'
```
build-idris-package To see all available Idris packages:
-------------------- ```
$ # On NixOS
$ nix-env -qaPA nixos.idrisPackages
$ # On non-NixOS
$ nix-env -qaPA nixpkgs.idrisPackages
```
A function to build an idris package. Its sole argument is a set like Similarly, entering a `nix-shell`:
you might pass to `stdenv.mkDerivation`, except `build-idris-package` ```
sets several attributes for you. See `build-idris-package.nix` for $ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
details. ```
build-builtin-package ## Starting Idris with library support
----------------------
A version of `build-idris-package` specialized to builtin libraries. To have access to these libraries in idris, call it with an argument `-p <library name>` for each library:
Mostly for internal use.
with-packages ```
------------- $ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
[nix-shell:~]$ idris -p contrib -p pruviloj
```
Bundle idris together with a list of packages. Because idris currently A listing of all available packages the Idris binary has access to is available via `--listlibs`:
only supports a single directory in its library path, you must include
all desired libraries here, including `prelude` and `base`. ```
$ idris --listlibs
00prelude-idx.ibc
pruviloj
base
contrib
prelude
00pruviloj-idx.ibc
00base-idx.ibc
00contrib-idx.ibc
```
## Building an Idris project with Nix
As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:
```nix
{ build-idris-package
, fetchFromGitHub
, contrib
, lightyear
, lib
}:
build-idris-package {
name = "yaml";
version = "2018-01-25";
# This is the .ipkg file that should be built, defaults to the package name
# In this case it should build `Yaml.ipkg` instead of `yaml.ipkg`
# This is only necessary because the yaml packages ipkg file is
# different from its package name here.
ipkgName = "Yaml";
# Idris dependencies to provide for the build
idrisDeps = [ contrib lightyear ];
src = fetchFromGitHub {
owner = "Heather";
repo = "Idris.Yaml";
rev = "5afa51ffc839844862b8316faba3bafa15656db4";
sha256 = "1g4pi0swmg214kndj85hj50ccmckni7piprsxfdzdfhg87s0avw7";
};
meta = {
description = "Idris YAML lib";
homepage = https://github.com/Heather/Idris.Yaml;
license = lib.licenses.mit;
maintainers = [ lib.maintainers.brainrape ];
};
}
```
Assuming this file is saved as `yaml.nix`, it's buildable using
```
$ nix-build -E '(import <nixpkgs> {}).idrisPackages.callPackage ./yaml.nix {}'
```
Or it's possible to use
```nix
with import <nixpkgs> {};
{
yaml = idrisPackages.callPackage ./yaml.nix {};
}
```
in another file (say `default.nix`) to be able to build it with
```
$ nix-build -A yaml
```

View File

@ -14,7 +14,7 @@ project.
The package set also provides support for multiple Node.js versions. The policy The package set also provides support for multiple Node.js versions. The policy
is that a new package should be added to the collection for the latest stable LTS is that a new package should be added to the collection for the latest stable LTS
release (which is currently 8.x), unless there is an explicit reason to support release (which is currently 10.x), unless there is an explicit reason to support
a different release. a different release.
If your package uses native addons, you need to examine what kind of native If your package uses native addons, you need to examine what kind of native
@ -26,7 +26,7 @@ build system it uses. Here are some examples:
After you have identified the correct system, you need to override your package After you have identified the correct system, you need to override your package
expression while adding in build system as a build input. For example, `dat` expression while adding in build system as a build input. For example, `dat`
requires `node-gyp-build`, so we override its expression in `default-v8.nix`: requires `node-gyp-build`, so we override its expression in `default-v10.nix`:
```nix ```nix
dat = nodePackages.dat.override (oldAttrs: { dat = nodePackages.dat.override (oldAttrs: {
@ -36,9 +36,9 @@ dat = nodePackages.dat.override (oldAttrs: {
To add a package from NPM to nixpkgs: To add a package from NPM to nixpkgs:
1. Modify `pkgs/development/node-packages/node-packages-v8.json` to add, update 1. Modify `pkgs/development/node-packages/node-packages-v10.json` to add, update
or remove package entries. (Or `pkgs/development/node-packages/node-packages-v10.json` or remove package entries. (Or `pkgs/development/node-packages/node-packages-v8.json`
for packages depending on Node.js 10.x) for packages depending on Node.js 8.x)
2. Run the script: `(cd pkgs/development/node-packages && ./generate.sh)`. 2. Run the script: `(cd pkgs/development/node-packages && ./generate.sh)`.
3. Build your new package to test your changes: 3. Build your new package to test your changes:
`cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`. `cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`.

View File

@ -486,7 +486,7 @@ and in this case the `python35` interpreter is automatically used.
Versions 2.7, 3.5, 3.6 and 3.7 of the CPython interpreter are available as Versions 2.7, 3.5, 3.6 and 3.7 of the CPython interpreter are available as
respectively `python27`, `python35`, `python36`, and `python37`. The PyPy respectively `python27`, `python35`, `python36`, and `python37`. The PyPy
interpreter is available as `pypy`. The aliases `python2` and `python3` interpreter is available as `pypy`. The aliases `python2` and `python3`
correspond to respectively `python27` and `python36`. The default interpreter, correspond to respectively `python27` and `python37`. The default interpreter,
`python`, maps to `python2`. The Nix expressions for the interpreters can be `python`, maps to `python2`. The Nix expressions for the interpreters can be
found in `pkgs/development/interpreters/python`. found in `pkgs/development/interpreters/python`.
@ -537,7 +537,7 @@ sets are
and the aliases and the aliases
* `pkgs.python2Packages` pointing to `pkgs.python27Packages` * `pkgs.python2Packages` pointing to `pkgs.python27Packages`
* `pkgs.python3Packages` pointing to `pkgs.python36Packages` * `pkgs.python3Packages` pointing to `pkgs.python37Packages`
* `pkgs.pythonPackages` pointing to `pkgs.python2Packages` * `pkgs.pythonPackages` pointing to `pkgs.python2Packages`
#### `buildPythonPackage` function #### `buildPythonPackage` function
@ -1078,8 +1078,7 @@ To modify only a Python package set instead of a whole Python derivation, use th
Use the following overlay template: Use the following overlay template:
```nix ```nix
self: super: self: super: {
{
python = super.python.override { python = super.python.override {
packageOverrides = python-self: python-super: { packageOverrides = python-self: python-super: {
zerobin = python-super.zerobin.overrideAttrs (oldAttrs: { zerobin = python-super.zerobin.overrideAttrs (oldAttrs: {
@ -1094,6 +1093,25 @@ self: super:
} }
``` ```
### How to use Intel's MKL with numpy and scipy?
A `site.cfg` is created that configures BLAS based on the `blas` parameter
of the `numpy` derivation. By passing in `mkl`, `numpy` and packages depending
on `numpy` will be built with `mkl`.
The following is an overlay that configures `numpy` to use `mkl`:
```nix
self: super: {
python36 = super.python36.override {
packageOverrides = python-self: python-super: {
numpy = python-super.numpy.override {
blas = super.pkgs.mkl;
};
};
};
}
```
## Contributing ## Contributing
### Contributing guidelines ### Contributing guidelines

View File

@ -12,7 +12,7 @@
<para> <para>
The Nix language allows a derivation to produce multiple outputs, which is The Nix language allows a derivation to produce multiple outputs, which is
similar to what is utilized by other Linux distribution packaging systems. similar to what is utilized by other Linux distribution packaging systems.
The outputs reside in separate nix store paths, so they can be mostly The outputs reside in separate Nix store paths, so they can be mostly
handled independently of each other, including passing to build inputs, handled independently of each other, including passing to build inputs,
garbage collection or binary substitution. The exception is that building garbage collection or binary substitution. The exception is that building
from source always produces all the outputs. from source always produces all the outputs.

View File

@ -3,9 +3,9 @@
xml:id="chap-overlays"> xml:id="chap-overlays">
<title>Overlays</title> <title>Overlays</title>
<para> <para>
This chapter describes how to extend and change Nixpkgs packages using This chapter describes how to extend and change Nixpkgs using overlays.
overlays. Overlays are used to add layers in the fix-point used by Nixpkgs to Overlays are used to add layers in the fixed-point used by Nixpkgs to compose
compose the set of all packages. the set of all packages.
</para> </para>
<para> <para>
Nixpkgs can be configured with a list of overlays, which are applied in Nixpkgs can be configured with a list of overlays, which are applied in
@ -60,7 +60,7 @@
<para> <para>
First, if an First, if an
<link xlink:href="#sec-overlays-argument"><varname>overlays</varname> <link xlink:href="#sec-overlays-argument"><varname>overlays</varname>
argument</link> to the nixpkgs function itself is given, then that is argument</link> to the Nixpkgs function itself is given, then that is
used and no path lookup will be performed. used and no path lookup will be performed.
</para> </para>
</listitem> </listitem>

View File

@ -205,7 +205,7 @@ $ cat $(PRINT_PATH=1 nix-prefetch-url $i | tail -n 1) \
<para> <para>
Nixpkgs provides a number of packages that will install Eclipse in its Nixpkgs provides a number of packages that will install Eclipse in its
various forms, these range from the bare-bones Eclipse Platform to the more various forms. These range from the bare-bones Eclipse Platform to the more
fully featured Eclipse SDK or Scala-IDE packages and multiple versions are fully featured Eclipse SDK or Scala-IDE packages and multiple versions are
often available. It is possible to list available Eclipse packages by often available. It is possible to list available Eclipse packages by
issuing the command: issuing the command:

View File

@ -6,13 +6,13 @@
<title>Darwin (macOS)</title> <title>Darwin (macOS)</title>
<para> <para>
Some common issues when packaging software for darwin: Some common issues when packaging software for Darwin:
</para> </para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
The darwin <literal>stdenv</literal> uses clang instead of gcc. When The Darwin <literal>stdenv</literal> uses clang instead of gcc. When
referring to the compiler <varname>$CC</varname> or <command>cc</command> referring to the compiler <varname>$CC</varname> or <command>cc</command>
will work in both cases. Some builds hardcode gcc/g++ in their build will work in both cases. Some builds hardcode gcc/g++ in their build
scripts, that can usually be fixed with using something like scripts, that can usually be fixed with using something like
@ -31,7 +31,7 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
On darwin libraries are linked using absolute paths, libraries are On Darwin, libraries are linked using absolute paths, libraries are
resolved by their <literal>install_name</literal> at link time. Sometimes resolved by their <literal>install_name</literal> at link time. Sometimes
packages won't set this correctly causing the library lookups to fail at packages won't set this correctly causing the library lookups to fail at
runtime. This can be fixed by adding extra linker flags or by running runtime. This can be fixed by adding extra linker flags or by running
@ -96,8 +96,8 @@
</programlisting> </programlisting>
<para> <para>
The package <literal>xcbuild</literal> can be used to build projects that The package <literal>xcbuild</literal> can be used to build projects that
really depend on Xcode, however projects that build some kind of graphical really depend on Xcode. However, this replacement is not 100%
interface won't work without using Xcode in an impure way. compatible with Xcode and can occasionally cause issues.
</para> </para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>

View File

@ -17,22 +17,20 @@
</para> </para>
</warning> </warning>
<para> <para>
The nixpkgs project receives a fairly high number of contributions via GitHub The Nixpkgs project receives a fairly high number of contributions via GitHub
pull-requests. Reviewing and approving these is an important task and a way pull requests. Reviewing and approving these is an important task and a way
to contribute to the project. to contribute to the project.
</para> </para>
<para> <para>
The high change rate of nixpkgs makes any pull request that remains open for The high change rate of Nixpkgs makes any pull request that remains open for
too long subject to conflicts that will require extra work from the submitter too long subject to conflicts that will require extra work from the submitter
or the merger. Reviewing pull requests in a timely manner and being or the merger. Reviewing pull requests in a timely manner and being responsive
responsive to the comments is the key to avoid these. GitHub provides sort to the comments is the key to avoid this issue. GitHub provides sort filters
filters that can be used to see the that can be used to see the <link
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc">most
recently</link> and the recently</link> and the <link
<link
xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc">least
recently</link> updated pull-requests. We highly encourage looking at recently</link> updated pull requests. We highly encourage looking at
<link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone"> <link xlink:href="https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone">
this list of ready to merge, unreviewed pull requests</link>. this list of ready to merge, unreviewed pull requests</link>.
</para> </para>
@ -43,12 +41,12 @@
</para> </para>
<para> <para>
GitHub provides reactions as a simple and quick way to provide feedback to GitHub provides reactions as a simple and quick way to provide feedback to
pull-requests or any comments. The thumb-down reaction should be used with pull requests or any comments. The thumb-down reaction should be used with
care and if possible accompanied with some explanation so the submitter has care and if possible accompanied with some explanation so the submitter has
directions to improve their contribution. directions to improve their contribution.
</para> </para>
<para> <para>
Pull-request reviews should include a list of what has been reviewed in a Pull request reviews should include a list of what has been reviewed in a
comment, so other reviewers and mergers can know the state of the review. comment, so other reviewers and mergers can know the state of the review.
</para> </para>
<para> <para>
@ -60,8 +58,8 @@
<title>Package updates</title> <title>Package updates</title>
<para> <para>
A package update is the most trivial and common type of pull-request. These A package update is the most trivial and common type of pull request. These
pull-requests mainly consist of updating the version part of the package pull requests mainly consist of updating the version part of the package
name and the source hash. name and the source hash.
</para> </para>
@ -77,7 +75,7 @@
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Add labels to the pull-request. (Requires commit rights) Add labels to the pull request. (Requires commit rights)
</para> </para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
@ -144,8 +142,8 @@
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Pull-requests are often targeted to the master or staging branch, and Pull requests are often targeted to the master or staging branch, and
building the pull-request locally when it is submitted can trigger many building the pull request locally when it is submitted can trigger many
source builds. source builds.
</para> </para>
<para> <para>
@ -174,14 +172,14 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
</callout> </callout>
<callout arearefs='reviewing-rebase-3'> <callout arearefs='reviewing-rebase-3'>
<para> <para>
Fetching the pull-request changes, <varname>PRNUMBER</varname> is the Fetching the pull request changes, <varname>PRNUMBER</varname> is the
number at the end of the pull-request title and number at the end of the pull request title and
<varname>BASEBRANCH</varname> the base branch of the pull-request. <varname>BASEBRANCH</varname> the base branch of the pull request.
</para> </para>
</callout> </callout>
<callout arearefs='reviewing-rebase-4'> <callout arearefs='reviewing-rebase-4'>
<para> <para>
Rebasing the pull-request changes to the nixos-unstable branch. Rebasing the pull request changes to the nixos-unstable branch.
</para> </para>
</callout> </callout>
</calloutlist> </calloutlist>
@ -190,10 +188,10 @@ $ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD <co
<listitem> <listitem>
<para> <para>
The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can The <link xlink:href="https://github.com/madjar/nox">nox</link> tool can
be used to review a pull-request content in a single command. It doesn't be used to review a pull request content in a single command. It doesn't
rebase on a channel branch so it might trigger multiple source builds. rebase on a channel branch so it might trigger multiple source builds.
<varname>PRNUMBER</varname> should be replaced by the number at the end <varname>PRNUMBER</varname> should be replaced by the number at the end
of the pull-request title. of the pull request title.
</para> </para>
<screen> <screen>
$ nix-shell -p nox --run "nox-review -k pr PRNUMBER" $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
@ -230,7 +228,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
<title>New packages</title> <title>New packages</title>
<para> <para>
New packages are a common type of pull-requests. These pull requests New packages are a common type of pull requests. These pull requests
consist of adding a new nix-expression for a package. consist of adding a new nix-expression for a package.
</para> </para>
@ -241,7 +239,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Add labels to the pull-request. (Requires commit rights) Add labels to the pull request. (Requires commit rights)
</para> </para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
@ -279,7 +277,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
A maintainer must be set, this can be the package submitter or a A maintainer must be set. This can be the package submitter or a
community member that accepts to take maintainership of the package. community member that accepts to take maintainership of the package.
</para> </para>
</listitem> </listitem>
@ -361,7 +359,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Add labels to the pull-request. (Requires commit rights) Add labels to the pull request. (Requires commit rights)
</para> </para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
@ -474,7 +472,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
Add labels to the pull-request. (Requires commit rights) Add labels to the pull request. (Requires commit rights)
</para> </para>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
@ -576,7 +574,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
like to be a long-term reviewer for related submissions, please contact the like to be a long-term reviewer for related submissions, please contact the
current reviewers for that topic. They will give you information about the current reviewers for that topic. They will give you information about the
reviewing process. The main reviewers for a topic can be hard to find as reviewing process. The main reviewers for a topic can be hard to find as
there is no list, but checking past pull-requests to see who reviewed or there is no list, but checking past pull requests to see who reviewed or
git-blaming the code to see who committed to that topic can give some hints. git-blaming the code to see who committed to that topic can give some hints.
</para> </para>
@ -586,7 +584,7 @@ $ nix-shell -p nox --run "nox-review -k pr PRNUMBER"
</para> </para>
</section> </section>
<section xml:id="reviewing-contributions--merging-pull-requests"> <section xml:id="reviewing-contributions--merging-pull-requests">
<title>Merging pull-requests</title> <title>Merging pull requests</title>
<para> <para>
It is possible for community members that have enough knowledge and It is possible for community members that have enough knowledge and

View File

@ -228,18 +228,19 @@ genericBuild
</para> </para>
<para> <para>
The extension of <envar>PATH</envar> with dependencies, alluded to above, The extension of <envar>PATH</envar> with dependencies, alluded to
proceeds according to the relative platforms alone. The process is carried above, proceeds according to the relative platforms alone. The
out only for dependencies whose host platform matches the new derivation's process is carried out only for dependencies whose host platform
build platformi.e. which run on the platform where the new derivation matches the new derivation's build platform i.e. dependencies which
will be built. run on the platform where the new derivation will be built.
<footnote xml:id="footnote-stdenv-native-dependencies-in-path"> <footnote xml:id="footnote-stdenv-native-dependencies-in-path">
<para> <para>
Currently, that means for native builds all dependencies are put on the Currently, this means for native builds all dependencies are put
<envar>PATH</envar>. But in the future that may not be the case for sake on the <envar>PATH</envar>. But in the future that may not be the
of matching cross: the platforms would be assumed to be unique for native case for sake of matching cross: the platforms would be assumed
and cross builds alike, so only the <varname>depsBuild*</varname> and to be unique for native and cross builds alike, so only the
<varname>nativeBuildDependencies</varname> dependencies would affect the <varname>depsBuild*</varname> and
<varname>nativeBuildInputs</varname> would be added to the
<envar>PATH</envar>. <envar>PATH</envar>.
</para> </para>
</footnote> </footnote>
@ -251,28 +252,27 @@ genericBuild
<para> <para>
The dependency is propagated when it forces some of its other-transitive The dependency is propagated when it forces some of its other-transitive
(non-immediate) downstream dependencies to also take it on as an immediate (non-immediate) downstream dependencies to also take it on as an immediate
dependency. Nix itself already takes a package's transitive dependencies dependency. Nix itself already takes a package's transitive dependencies into
into account, but this propagation ensures nixpkgs-specific infrastructure account, but this propagation ensures nixpkgs-specific infrastructure like
like setup hooks (mentioned above) also are run as if the propagated setup hooks (mentioned above) also are run as if the propagated dependency.
dependency.
</para> </para>
<para> <para>
It is important to note dependencies are not necessary propagated as the It is important to note that dependencies are not necessarily propagated as
same sort of dependency that they were before, but rather as the the same sort of dependency that they were before, but rather as the
corresponding sort so that the platform rules still line up. The exact rules corresponding sort so that the platform rules still line up. The exact rules
for dependency propagation can be given by assigning each sort of dependency for dependency propagation can be given by assigning to each dependency two
two integers based on how it's host and target platforms are offset from integers based on how its host and target platforms are offset from the
the depending derivation's platforms. Those offsets are given are given depending derivation's platforms. Those offsets are given below in the
below in the descriptions of each dependency list attribute. descriptions of each dependency list attribute. Algorithmically, we traverse
Algorithmically, we traverse propagated inputs, accumulating every propagated inputs, accumulating every propagated dependency's propagated
propagated dep's propagated deps and adjusting them to account for the dependencies and adjusting them to account for the "shift in perspective"
"shift in perspective" described by the current dep's platform offsets. This described by the current dependency's platform offsets. This results in sort
results in a sort of transitive closure of the dependency relation, with the described by the current dependency's platform offsets. This results in a sort
offsets being approximately summed when two dependency links are combined. of transitive closure of the dependency relation, with the offsets being
We also prune transitive deps whose combined offsets go out-of-bounds, which transitive dependencies whose combined offsets go out-of-bounds, which can be
can be viewed as a filter over that transitive closure removing dependencies viewed as a filter over that transitive closure removing dependencies that
that are blatantly absurd. are blatantly absurd.
</para> </para>
<para> <para>
@ -288,7 +288,7 @@ genericBuild
</para> </para>
</footnote> </footnote>
They're confusing in very different ways so... hopefully if something doesn't They're confusing in very different ways so... hopefully if something doesn't
make sense in one presentation, it does in the other! make sense in one presentation, it will in the other!
<programlisting> <programlisting>
let mapOffset(h, t, i) = i + (if i &lt;= 0 then h else t - 1) let mapOffset(h, t, i) = i + (if i &lt;= 0 then h else t - 1)
@ -307,13 +307,13 @@ dep(h0, _, A, B)
propagated-dep(h1, t1, B, C) propagated-dep(h1, t1, B, C)
h0 + h1 in {-1, 0, 1} h0 + h1 in {-1, 0, 1}
h0 + t1 in {-1, 0, 1} h0 + t1 in {-1, 0, 1}
-------------------------------------- Take immediate deps' propagated deps ----------------------------- Take immediate dependencies' propagated dependencies
propagated-dep(mapOffset(h0, t0, h1), propagated-dep(mapOffset(h0, t0, h1),
mapOffset(h0, t0, t1), mapOffset(h0, t0, t1),
A, C)</programlisting> A, C)</programlisting>
<programlisting> <programlisting>
propagated-dep(h, t, A, B) propagated-dep(h, t, A, B)
-------------------------------------- Propagated deps count as deps ----------------------------- Propagated dependencies count as dependencies
dep(h, t, A, B)</programlisting> dep(h, t, A, B)</programlisting>
Some explanation of this monstrosity is in order. In the common case, the Some explanation of this monstrosity is in order. In the common case, the
target offset of a dependency is the successor to the target offset: target offset of a dependency is the successor to the target offset:
@ -324,31 +324,31 @@ let f(h, h + 1, i) = i + (if i &lt;= 0 then h else (h + 1) - 1)
let f(h, h + 1, i) = i + (if i &lt;= 0 then h else h) let f(h, h + 1, i) = i + (if i &lt;= 0 then h else h)
let f(h, h + 1, i) = i + h let f(h, h + 1, i) = i + h
</programlisting> </programlisting>
This is where the "sum-like" comes from above: We can just sum all the host This is where "sum-like" comes in from above: We can just sum all of the host
offset to get the host offset of the transitive dependency. The target offsets to get the host offset of the transitive dependency. The target
offset of the transitive dep is simply the host offset + 1, just as it was offset of the transitive dependency is simply the host offset + 1, just as it
with the dependencies composed to make this transitive one; it can be was with the dependencies composed to make this transitive one; it can be
ignored as it doesn't add any new information. ignored as it doesn't add any new information.
</para> </para>
<para> <para>
Because of the bounds checks, the uncommon cases are <literal>h = Because of the bounds checks, the uncommon cases are <literal>h = t</literal>
t</literal> and <literal>h + 2 = t</literal>. In the former case, the and <literal>h + 2 = t</literal>. In the former case, the motivation for
motivation for <function>mapOffset</function> is that since its host and <function>mapOffset</function> is that since its host and target platforms
target platforms are the same, no transitive dep of it should be able to are the same, no transitive dependency of it should be able to "discover" an
"discover" an offset greater than its reduced target offsets. offset greater than its reduced target offsets.
<function>mapOffset</function> effectively "squashes" all its transitive <function>mapOffset</function> effectively "squashes" all its transitive
dependencies' offsets so that none will ever be greater than the target dependencies' offsets so that none will ever be greater than the target
offset of the original <literal>h = t</literal> package. In the other case, offset of the original <literal>h = t</literal> package. In the other case,
<literal>h + 1</literal> is skipped over between the host and target <literal>h + 1</literal> is skipped over between the host and target offsets.
offsets. Instead of squashing the offsets, we need to "rip" them apart so no Instead of squashing the offsets, we need to "rip" them apart so no
transitive dependencies' offset is that one. transitive dependencies' offset is that one.
</para> </para>
<para> <para>
Overall, the unifying theme here is that propagation shouldn't be Overall, the unifying theme here is that propagation shouldn't be introducing
introducing transitive dependencies involving platforms the needing package transitive dependencies involving platforms the depending package is unaware
is unaware of. The offset bounds checking and definition of of. The offset bounds checking and definition of
<function>mapOffset</function> together ensure that this is the case. <function>mapOffset</function> together ensure that this is the case.
Discovering a new offset is discovering a new platform, and since those Discovering a new offset is discovering a new platform, and since those
platforms weren't in the derivation "spec" of the needing package, they platforms weren't in the derivation "spec" of the needing package, they
@ -369,20 +369,20 @@ let f(h, h + 1, i) = i + h
A list of dependencies whose host and target platforms are the new A list of dependencies whose host and target platforms are the new
derivation's build platform. This means a <literal>-1</literal> host and derivation's build platform. This means a <literal>-1</literal> host and
<literal>-1</literal> target offset from the new derivation's platforms. <literal>-1</literal> target offset from the new derivation's platforms.
They are programs/libraries used at build time that furthermore produce These are programs and libraries used at build time that produce programs
programs/libraries also used at build time. If the dependency doesn't and libraries also used at build time. If the dependency doesn't care
care about the target platform (i.e. isn't a compiler or similar tool), about the target platform (i.e. isn't a compiler or similar tool), put it
put it in <varname>nativeBuildInputs</varname> instead. The most common in <varname>nativeBuildInputs</varname> instead. The most common use of
use for this is <literal>buildPackages.stdenv.cc</literal>, the default C this is <literal>buildPackages.stdenv.cc</literal>, the default C compiler
compiler for this role. That example crops up more than one might think for this role. That example crops up more than one might think in old
in old commonly used C libraries. commonly used C libraries.
</para> </para>
<para> <para>
Since these packages are able to be run at build time, that are always Since these packages are able to be run at build-time, they are always
added to the <envar>PATH</envar>, as described above. But since these added to the <envar>PATH</envar>, as described above. But since these
packages are only guaranteed to be able to run then, they shouldn't packages are only guaranteed to be able to run then, they shouldn't
persist as run-time dependencies. This isn't currently enforced, but persist as run-time dependencies. This isn't currently enforced, but could
could be in the future. be in the future.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -395,21 +395,20 @@ let f(h, h + 1, i) = i + h
A list of dependencies whose host platform is the new derivation's build A list of dependencies whose host platform is the new derivation's build
platform, and target platform is the new derivation's host platform. This platform, and target platform is the new derivation's host platform. This
means a <literal>-1</literal> host offset and <literal>0</literal> target means a <literal>-1</literal> host offset and <literal>0</literal> target
offset from the new derivation's platforms. They are programs/libraries offset from the new derivation's platforms. These are programs and
used at build time that, if they are a compiler or similar tool, produce libraries used at build-time that, if they are a compiler or similar tool,
code to run at run time—i.e. tools used to build the new derivation. If produce code to run at run-time—i.e. tools used to build the new
the dependency doesn't care about the target platform (i.e. isn't a derivation. If the dependency doesn't care about the target platform (i.e.
compiler or similar tool), put it here, rather than in isn't a compiler or similar tool), put it here, rather than in
<varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>. <varname>depsBuildBuild</varname> or <varname>depsBuildTarget</varname>.
This would be called <varname>depsBuildHost</varname> but for historical This could be called <varname>depsBuildHost</varname> but
continuity. <varname>nativeBuildInputs</varname> is used for historical continuity.
</para> </para>
<para> <para>
Since these packages are able to be run at build time, that are added to Since these packages are able to be run at build-time, they are added to
the <envar>PATH</envar>, as described above. But since these packages the <envar>PATH</envar>, as described above. But since these packages are
only are guaranteed to be able to run then, they shouldn't persist as only guaranteed to be able to run then, they shouldn't persist as run-time
run-time dependencies. This isn't currently enforced, but could be in the dependencies. This isn't currently enforced, but could be in the future.
future.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -422,34 +421,33 @@ let f(h, h + 1, i) = i + h
A list of dependencies whose host platform is the new derivation's build A list of dependencies whose host platform is the new derivation's build
platform, and target platform is the new derivation's target platform. platform, and target platform is the new derivation's target platform.
This means a <literal>-1</literal> host offset and <literal>1</literal> This means a <literal>-1</literal> host offset and <literal>1</literal>
target offset from the new derivation's platforms. They are programs used target offset from the new derivation's platforms. These are programs used
at build time that produce code to run at run with code produced by the at build time that produce code to run with code produced by the depending
depending package. Most commonly, these would tools used to build the package. Most commonly, these are tools used to build the runtime or
runtime or standard library the currently-being-built compiler will standard library that the currently-being-built compiler will inject into
inject into any code it compiles. In many cases, the currently-being any code it compiles. In many cases, the currently-being-built-compiler is
built compiler is itself employed for that task, but when that compiler itself employed for that task, but when that compiler won't run (i.e. its
won't run (i.e. its build and host platform differ) this is not possible. build and host platform differ) this is not possible. Other times, the
Other times, the compiler relies on some other tool, like binutils, that compiler relies on some other tool, like binutils, that is always built
is always built separately so the dependency is unconditional. separately so that the dependency is unconditional.
</para> </para>
<para> <para>
This is a somewhat confusing dependency to wrap ones head around, and for This is a somewhat confusing concept to wrap ones head around, and for
good reason. As the only one where the platform offsets are not adjacent good reason. As the only dependency type where the platform offsets are
integers, it requires thinking of a bootstrapping stage not adjacent integers, it requires thinking of a bootstrapping stage
<emphasis>two</emphasis> away from the current one. It and it's use-case <emphasis>two</emphasis> away from the current one. It and its use-case go
go hand in hand and are both considered poor form: try not to need this hand in hand and are both considered poor form: try to not need this sort
sort dependency, and try not avoid building standard libraries / runtimes of dependency, and try to avoid building standard libraries and runtimes
in the same derivation as the compiler produces code using them. Instead in the same derivation as the compiler produces code using them. Instead
strive to build those like a normal library, using the newly-built strive to build those like a normal library, using the newly-built
compiler just as a normal library would. In short, do not use this compiler just as a normal library would. In short, do not use this
attribute unless you are packaging a compiler and are sure it is needed. attribute unless you are packaging a compiler and are sure it is needed.
</para> </para>
<para> <para>
Since these packages are able to be run at build time, that are added to Since these packages are able to run at build time, they are added to the
the <envar>PATH</envar>, as described above. But since these packages <envar>PATH</envar>, as described above. But since these packages are only
only are guaranteed to be able to run then, they shouldn't persist as guaranteed to be able to run then, they shouldn't persist as run-time
run-time dependencies. This isn't currently enforced, but could be in the dependencies. This isn't currently enforced, but could be in the future.
future.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -460,15 +458,15 @@ let f(h, h + 1, i) = i + h
<listitem> <listitem>
<para> <para>
A list of dependencies whose host and target platforms match the new A list of dependencies whose host and target platforms match the new
derivation's host platform. This means a both <literal>0</literal> host derivation's host platform. This means a <literal>0</literal> host offset
offset and <literal>0</literal> target offset from the new derivation's and <literal>0</literal> target offset from the new derivation's host
host platform. These are packages used at run-time to generate code also platform. These are packages used at run-time to generate code also used
used at run-time. In practice, that would usually be tools used by at run-time. In practice, this would usually be tools used by compilers
compilers for metaprogramming/macro systems, or libraries used by the for macros or a metaprogramming system, or libraries used by the macros or
macros/metaprogramming code itself. It's always preferable to use a metaprogramming code itself. It's always preferable to use a
<varname>depsBuildBuild</varname> dependency in the derivation being <varname>depsBuildBuild</varname> dependency in the derivation being built
built than a <varname>depsHostHost</varname> on the tool doing the over a <varname>depsHostHost</varname> on the tool doing the building for
building for this purpose. this purpose.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -479,20 +477,20 @@ let f(h, h + 1, i) = i + h
<listitem> <listitem>
<para> <para>
A list of dependencies whose host platform and target platform match the A list of dependencies whose host platform and target platform match the
new derivation's. This means a <literal>0</literal> host offset and new derivation's. This means a <literal>0</literal> host offset and a
<literal>1</literal> target offset from the new derivation's host <literal>1</literal> target offset from the new derivation's host
platform. This would be called <varname>depsHostTarget</varname> but for platform. This would be called <varname>depsHostTarget</varname> but for
historical continuity. If the dependency doesn't care about the target historical continuity. If the dependency doesn't care about the target
platform (i.e. isn't a compiler or similar tool), put it here, rather platform (i.e. isn't a compiler or similar tool), put it here, rather than
than in <varname>depsBuildBuild</varname>. in <varname>depsBuildBuild</varname>.
</para> </para>
<para> <para>
These often are programs/libraries used by the new derivation at These are often programs and libraries used by the new derivation at
<emphasis>run</emphasis>-time, but that isn't always the case. For <emphasis>run</emphasis>-time, but that isn't always the case. For
example, the machine code in a statically linked library is only used at example, the machine code in a statically-linked library is only used at
run time, but the derivation containing the library is only needed at run-time, but the derivation containing the library is only needed at
build time. Even in the dynamic case, the library may also be needed at build-time. Even in the dynamic case, the library may also be needed at
build time to appease the linker. build-time to appease the linker.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -581,7 +579,7 @@ let f(h, h + 1, i) = i + h
</varlistentry> </varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<varname>depsTargetTarget</varname> <varname>depsTargetTargetPropagated</varname>
</term> </term>
<listitem> <listitem>
<para> <para>
@ -604,10 +602,10 @@ let f(h, h + 1, i) = i + h
<listitem> <listitem>
<para> <para>
A natural number indicating how much information to log. If set to 1 or A natural number indicating how much information to log. If set to 1 or
higher, <literal>stdenv</literal> will print moderate debug information higher, <literal>stdenv</literal> will print moderate debugging
during the build. In particular, the <command>gcc</command> and information during the build. In particular, the <command>gcc</command>
<command>ld</command> wrapper scripts will print out the complete command and <command>ld</command> wrapper scripts will print out the complete
line passed to the wrapped tools. If set to 6 or higher, the command line passed to the wrapped tools. If set to 6 or higher, the
<literal>stdenv</literal> setup script will be run with <literal>set <literal>stdenv</literal> setup script will be run with <literal>set
-x</literal> tracing. If set to 7 or higher, the <command>gcc</command> -x</literal> tracing. If set to 7 or higher, the <command>gcc</command>
and <command>ld</command> wrapper scripts will also be run with and <command>ld</command> wrapper scripts will also be run with
@ -666,11 +664,10 @@ passthru = {
<literal>hello.baz.value1</literal>. We don't specify any usage or schema <literal>hello.baz.value1</literal>. We don't specify any usage or schema
of <literal>passthru</literal> - it is meant for values that would be of <literal>passthru</literal> - it is meant for values that would be
useful outside the derivation in other parts of a Nix expression (e.g. in useful outside the derivation in other parts of a Nix expression (e.g. in
other derivations). An example would be to convey some specific other derivations). An example would be to convey some specific dependency
dependency of your derivation which contains a program with plugins of your derivation which contains a program with plugins support. Later,
support. Later, others who make derivations with plugins can use others who make derivations with plugins can use passed-through dependency
passed-through dependency to ensure that their plugin would be to ensure that their plugin would be binary-compatible with built program.
binary-compatible with built program.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -836,7 +833,7 @@ passthru = {
<para> <para>
Zip files are unpacked using <command>unzip</command>. However, Zip files are unpacked using <command>unzip</command>. However,
<command>unzip</command> is not in the standard environment, so you <command>unzip</command> is not in the standard environment, so you
should add it to <varname>buildInputs</varname> yourself. should add it to <varname>nativeBuildInputs</varname> yourself.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -1076,6 +1073,17 @@ passthru = {
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<varname>prefixKey</varname>
</term>
<listitem>
<para>
The key to use when specifying the prefix. By default, this is set to
<option>--prefix=</option> as that is used by the majority of packages.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<varname>dontAddDisableDepTrack</varname> <varname>dontAddDisableDepTrack</varname>
@ -1133,12 +1141,11 @@ passthru = {
By default, when cross compiling, the configure script has By default, when cross compiling, the configure script has
<option>--build=...</option> and <option>--host=...</option> passed. <option>--build=...</option> and <option>--host=...</option> passed.
Packages can instead pass <literal>[ "build" "host" "target" ]</literal> Packages can instead pass <literal>[ "build" "host" "target" ]</literal>
or a subset to control exactly which platform flags are passed. or a subset to control exactly which platform flags are passed. Compilers
Compilers and other tools should use this to also pass the target and other tools can use this to also pass the target platform.
platform, for example.
<footnote xml:id="footnote-stdenv-build-time-guessing-impurity"> <footnote xml:id="footnote-stdenv-build-time-guessing-impurity">
<para> <para>
Eventually these will be passed when in native builds too, to improve Eventually these will be passed building natively as well, to improve
determinism: build-time guessing, as is done today, is a risk of determinism: build-time guessing, as is done today, is a risk of
impurity. impurity.
</para> </para>
@ -1203,17 +1210,6 @@ passthru = {
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<varname>checkInputs</varname>
</term>
<listitem>
<para>
A list of dependencies used by the phase. This gets included in
<varname>buildInputs</varname> when <varname>doCheck</varname> is set.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<varname>makeFlags</varname> <varname>makeFlags</varname>
@ -1363,6 +1359,18 @@ makeFlagsArray=(CFLAGS="-O0 -g" LDFLAGS="-lfoo -lbar")
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<varname>checkInputs</varname>
</term>
<listitem>
<para>
A list of dependencies used by the phase. This gets included in
<varname>nativeBuildInputs</varname> when <varname>doCheck</varname> is
set.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<varname>preCheck</varname> <varname>preCheck</varname>
@ -1635,12 +1643,10 @@ installTargets = "install-bin install-doc";</programlisting>
</term> </term>
<listitem> <listitem>
<para> <para>
A package can export a <link A package can export a <link linkend="ssec-setup-hooks">setup hook</link>
linkend="ssec-setup-hooks">setup by setting this variable. The setup hook, if defined, is copied to
hook</link> by setting this variable. The setup hook, if defined, is <filename>$out/nix-support/setup-hook</filename>. Environment variables
copied to <filename>$out/nix-support/setup-hook</filename>. Environment are then substituted in it using <function
variables are then substituted in it using
<function
linkend="fun-substituteAll">substituteAll</function>. linkend="fun-substituteAll">substituteAll</function>.
</para> </para>
</listitem> </listitem>
@ -2074,12 +2080,12 @@ someVar=$(stripHash $name)
<title>Package setup hooks</title> <title>Package setup hooks</title>
<para> <para>
Nix itself considers a build-time dependency merely something that should Nix itself considers a build-time dependency as merely something that should
previously be built and accessible at build time—packages themselves are previously be built and accessible at build time—packages themselves are
on their own to perform any additional setup. In most cases, that is fine, on their own to perform any additional setup. In most cases, that is fine,
and the downstream derivation can deal with it's own dependencies. But for a and the downstream derivation can deal with its own dependencies. But for a
few common tasks, that would result in almost every package doing the same few common tasks, that would result in almost every package doing the same
sort of setup work---depending not on the package itself, but entirely on sort of setup workdepending not on the package itself, but entirely on
which dependencies were used. which dependencies were used.
</para> </para>
@ -2094,20 +2100,19 @@ someVar=$(stripHash $name)
</para> </para>
<para> <para>
The Setup hook mechanism is a bit of a sledgehammer though: a powerful The setup hook mechanism is a bit of a sledgehammer though: a powerful
feature with a broad and indiscriminate area of effect. The combination of feature with a broad and indiscriminate area of effect. The combination of
its power and implicit use may be expedient, but isn't without costs. Nix its power and implicit use may be expedient, but isn't without costs. Nix
itself is unchanged, but the spirit of adding dependencies being effect-free itself is unchanged, but the spirit of added dependencies being effect-free
is violated even if the letter isn't. For example, if a derivation path is is violated even if the letter isn't. For example, if a derivation path is
mentioned more than once, Nix itself doesn't care and simply makes sure the mentioned more than once, Nix itself doesn't care and simply makes sure the
dependency derivation is already built just the same—depending is just dependency derivation is already built just the same—depending is just
needing something to exist, and needing is idempotent. However, a dependency needing something to exist, and needing is idempotent. However, a dependency
specified twice will have its setup hook run twice, and that could easily specified twice will have its setup hook run twice, and that could easily
change the build environment (though a well-written setup hook will change the build environment (though a well-written setup hook will therefore
therefore strive to be idempotent so this is in fact not observable). More strive to be idempotent so this is in fact not observable). More broadly,
broadly, setup hooks are anti-modular in that multiple dependencies, whether setup hooks are anti-modular in that multiple dependencies, whether the same
the same or different, should not interfere and yet their setup hooks may or different, should not interfere and yet their setup hooks may well do so.
well do so.
</para> </para>
<para> <para>
@ -2126,15 +2131,14 @@ someVar=$(stripHash $name)
<para> <para>
Packages adding a hook should not hard code a specific hook, but rather Packages adding a hook should not hard code a specific hook, but rather
choose a variable <emphasis>relative</emphasis> to how they are included. choose a variable <emphasis>relative</emphasis> to how they are included.
Returning to the C compiler wrapper example, if it itself is an Returning to the C compiler wrapper example, if the wrapper itself is an
<literal>n</literal> dependency, then it only wants to accumulate flags from <literal>n</literal> dependency, then it only wants to accumulate flags from
<literal>n + 1</literal> dependencies, as only those ones match the <literal>n + 1</literal> dependencies, as only those ones match the
compiler's target platform. The <envar>hostOffset</envar> variable is compiler's target platform. The <envar>hostOffset</envar> variable is defined
defined with the current dependency's host offset with the current dependency's host offset <envar>targetOffset</envar> with
<envar>targetOffset</envar> with its target offset, before it's setup hook its target offset, before its setup hook is sourced. Additionally, since most
is sourced. Additionally, since most environment hooks don't care about the environment hooks don't care about the target platform, that means the setup
target platform, That means the setup hook can append to the right bash hook can append to the right bash array by doing something like
array by doing something like
<programlisting language="bash"> <programlisting language="bash">
addEnvHooks "$hostOffset" myBashFunction addEnvHooks "$hostOffset" myBashFunction
</programlisting> </programlisting>
@ -2142,7 +2146,7 @@ addEnvHooks "$hostOffset" myBashFunction
<para> <para>
The <emphasis>existence</emphasis> of setups hooks has long been documented The <emphasis>existence</emphasis> of setups hooks has long been documented
and packages inside Nixpkgs are free to use these mechanism. Other packages, and packages inside Nixpkgs are free to use this mechanism. Other packages,
however, should not rely on these mechanisms not changing between Nixpkgs however, should not rely on these mechanisms not changing between Nixpkgs
versions. Because of the existing issues with this system, there's little versions. Because of the existing issues with this system, there's little
benefit from mandating it be stable for any period of time. benefit from mandating it be stable for any period of time.
@ -2159,19 +2163,19 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
Bintools Wrapper wraps the binary utilities for a bunch of miscellaneous The Bintools Wrapper wraps the binary utilities for a bunch of
purposes. These are GNU Binutils when targeting Linux, and a mix of miscellaneous purposes. These are GNU Binutils when targeting Linux, and
cctools and GNU binutils for Darwin. [The "Bintools" name is supposed to a mix of cctools and GNU binutils for Darwin. [The "Bintools" name is
be a compromise between "Binutils" and "cctools" not denoting any supposed to be a compromise between "Binutils" and "cctools" not denoting
specific implementation.] Specifically, the underlying bintools package, any specific implementation.] Specifically, the underlying bintools
and a C standard library (glibc or Darwin's libSystem, just for the package, and a C standard library (glibc or Darwin's libSystem, just for
dynamic loader) are all fed in, and dependency finding, hardening (see the dynamic loader) are all fed in, and dependency finding, hardening
below), and purity checks for each are handled by Bintools Wrapper. (see below), and purity checks for each are handled by the Bintools
Packages typically depend on CC Wrapper, which in turn (at run time) Wrapper. Packages typically depend on CC Wrapper, which in turn (at run
depends on Bintools Wrapper. time) depends on the Bintools Wrapper.
</para> </para>
<para> <para>
Bintools Wrapper was only just recently split off from CC Wrapper, so The Bintools Wrapper was only just recently split off from CC Wrapper, so
the division of labor is still being worked out. For example, it the division of labor is still being worked out. For example, it
shouldn't care about the C standard library, but just take a shouldn't care about the C standard library, but just take a
derivation with the dynamic loader (which happens to be the glibc on derivation with the dynamic loader (which happens to be the glibc on
@ -2179,24 +2183,24 @@ addEnvHooks "$hostOffset" myBashFunction
to need to share, and probably the most important to understand. It is to need to share, and probably the most important to understand. It is
currently accomplished by collecting directories of host-platform currently accomplished by collecting directories of host-platform
dependencies (i.e. <varname>buildInputs</varname> and dependencies (i.e. <varname>buildInputs</varname> and
<varname>nativeBuildInputs</varname>) in environment variables. Bintools <varname>nativeBuildInputs</varname>) in environment variables. The
Wrapper's setup hook causes any <filename>lib</filename> and Bintools Wrapper's setup hook causes any <filename>lib</filename> and
<filename>lib64</filename> subdirectories to be added to <filename>lib64</filename> subdirectories to be added to
<envar>NIX_LDFLAGS</envar>. Since CC Wrapper and Bintools Wrapper use <envar>NIX_LDFLAGS</envar>. Since the CC Wrapper and the Bintools Wrapper
the same strategy, most of the Bintools Wrapper code is sparsely use the same strategy, most of the Bintools Wrapper code is sparsely
commented and refers to CC Wrapper. But CC Wrapper's code, by contrast, commented and refers to the CC Wrapper. But the CC Wrapper's code, by
has quite lengthy comments. Bintools Wrapper merely cites those, rather contrast, has quite lengthy comments. The Bintools Wrapper merely cites
than repeating them, to avoid falling out of sync. those, rather than repeating them, to avoid falling out of sync.
</para> </para>
<para> <para>
A final task of the setup hook is defining a number of standard A final task of the setup hook is defining a number of standard
environment variables to tell build systems which executables full-fill environment variables to tell build systems which executables fulfill
which purpose. They are defined to just be the base name of the tools, which purpose. They are defined to just be the base name of the tools,
under the assumption that Bintools Wrapper's binaries will be on the under the assumption that the Bintools Wrapper's binaries will be on the
path. Firstly, this helps poorly-written packages, e.g. ones that look path. Firstly, this helps poorly-written packages, e.g. ones that look
for just <command>gcc</command> when <envar>CC</envar> isn't defined yet for just <command>gcc</command> when <envar>CC</envar> isn't defined yet
<command>clang</command> is to be used. Secondly, this helps packages <command>clang</command> is to be used. Secondly, this helps packages not
not get confused when cross-compiling, in which case multiple Bintools get confused when cross-compiling, in which case multiple Bintools
Wrappers may simultaneously be in use. Wrappers may simultaneously be in use.
<footnote xml:id="footnote-stdenv-per-platform-wrapper"> <footnote xml:id="footnote-stdenv-per-platform-wrapper">
<para> <para>
@ -2208,20 +2212,20 @@ addEnvHooks "$hostOffset" myBashFunction
</para> </para>
</footnote> </footnote>
<envar>BUILD_</envar>- and <envar>TARGET_</envar>-prefixed versions of <envar>BUILD_</envar>- and <envar>TARGET_</envar>-prefixed versions of
the normal environment variable are defined for the additional Bintools the normal environment variable are defined for additional Bintools
Wrappers, properly disambiguating them. Wrappers, properly disambiguating them.
</para> </para>
<para> <para>
A problem with this final task is that Bintools Wrapper is honest and A problem with this final task is that the Bintools Wrapper is honest and
defines <envar>LD</envar> as <command>ld</command>. Most packages, defines <envar>LD</envar> as <command>ld</command>. Most packages,
however, firstly use the C compiler for linking, secondly use however, firstly use the C compiler for linking, secondly use
<envar>LD</envar> anyways, defining it as the C compiler, and thirdly, <envar>LD</envar> anyways, defining it as the C compiler, and thirdly,
only so define <envar>LD</envar> when it is undefined as a fallback. only so define <envar>LD</envar> when it is undefined as a fallback. This
This triple-threat means Bintools Wrapper will break those packages, as triple-threat means Bintools Wrapper will break those packages, as LD is
LD is already defined as the actual linker which the package won't already defined as the actual linker which the package won't override yet
override yet doesn't want to use. The workaround is to define, just for doesn't want to use. The workaround is to define, just for the
the problematic package, <envar>LD</envar> as the C compiler. A good way problematic package, <envar>LD</envar> as the C compiler. A good way to
to do this would be <command>preConfigure = "LD=$CC"</command>. do this would be <command>preConfigure = "LD=$CC"</command>.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2231,30 +2235,31 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes. The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes.
Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C
standard library (glibc or Darwin's libSystem, just for the dynamic standard library (glibc or Darwin's libSystem, just for the dynamic
loader) are all fed in, and dependency finding, hardening (see below), loader) are all fed in, and dependency finding, hardening (see below),
and purity checks for each are handled by CC Wrapper. Packages typically and purity checks for each are handled by the CC Wrapper. Packages
depend on CC Wrapper, which in turn (at run time) depends on Bintools typically depend on the CC Wrapper, which in turn (at run-time) depends
Wrapper. on the Bintools Wrapper.
</para> </para>
<para> <para>
Dependency finding is undoubtedly the main task of CC Wrapper. This Dependency finding is undoubtedly the main task of the CC Wrapper. This
works just like Bintools Wrapper, except that any works just like the Bintools Wrapper, except that any
<filename>include</filename> subdirectory of any relevant dependency is <filename>include</filename> subdirectory of any relevant dependency is
added to <envar>NIX_CFLAGS_COMPILE</envar>. The setup hook itself added to <envar>NIX_CFLAGS_COMPILE</envar>. The setup hook itself
contains some lengthy comments describing the exact convoluted mechanism contains some lengthy comments describing the exact convoluted mechanism
by which this is accomplished. by which this is accomplished.
</para> </para>
<para> <para>
CC Wrapper also like Bintools Wrapper defines standard environment Similarly, the CC Wrapper follows the Bintools Wrapper in defining
variables with the names of the tools it wraps, for the same reasons standard environment variables with the names of the tools it wraps, for
described above. Importantly, while it includes a <command>cc</command> the same reasons described above. Importantly, while it includes a
symlink to the c compiler for portability, the <envar>CC</envar> will be <command>cc</command> symlink to the c compiler for portability, the
defined using the compiler's "real name" (i.e. <command>gcc</command> or <envar>CC</envar> will be defined using the compiler's "real name" (i.e.
<command>clang</command>). This helps lousy build systems that inspect <command>gcc</command> or <command>clang</command>). This helps lousy
on the name of the compiler rather than run it. build systems that inspect on the name of the compiler rather than run
it.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2314,9 +2319,11 @@ addEnvHooks "$hostOffset" myBashFunction
<listitem> <listitem>
<para> <para>
The <varname>autoreconfHook</varname> derivation adds The <varname>autoreconfHook</varname> derivation adds
<varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize <varname>autoreconfPhase</varname>, which runs autoreconf, libtoolize and
and automake, essentially preparing the configure script in automake, essentially preparing the configure script in autotools-based
autotools-based builds. builds. Most autotools-based packages come with the configure script
pre-generated, but this hook is necessary for a few packages and when you
need to patch the package's configure scripts.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2360,9 +2367,9 @@ addEnvHooks "$hostOffset" myBashFunction
</term> </term>
<listitem> <listitem>
<para> <para>
Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable the Exports <envar>GDK_PIXBUF_MODULE_FILE</envar> environment variable to the
the builder. Add librsvg package to <varname>buildInputs</varname> to builder. Add librsvg package to <varname>buildInputs</varname> to get svg
get svg support. support.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -2399,7 +2406,7 @@ addEnvHooks "$hostOffset" myBashFunction
PaX flags on Linux (where it is available by default; on all other PaX flags on Linux (where it is available by default; on all other
platforms, <varname>paxmark</varname> is a no-op). For example, to platforms, <varname>paxmark</varname> is a no-op). For example, to
disable secure memory protections on the executable disable secure memory protections on the executable
<replaceable>foo</replaceable>: <replaceable>foo</replaceable>
<programlisting> <programlisting>
postFixup = '' postFixup = ''
paxmark m $out/bin/<replaceable>foo</replaceable> paxmark m $out/bin/<replaceable>foo</replaceable>
@ -2452,6 +2459,103 @@ addEnvHooks "$hostOffset" myBashFunction
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
cmake
</term>
<listitem>
<para>
Overrides the default configure phase to run the CMake command. By
default, we use the Make generator of CMake. In
addition, dependencies are added automatically to CMAKE_PREFIX_PATH so
that packages are correctly detected by CMake. Some additional flags
are passed in to give similar behavior to configure-based packages. You
can disable this hook's behavior by setting configurePhase to a custom
value, or by setting dontUseCmakeConfigure. cmakeFlags controls flags
passed only to CMake. By default, parallel building is enabled as CMake
supports parallel building almost everywhere. When Ninja is also in
use, CMake will detect that and use the ninja generator.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
xcbuildHook
</term>
<listitem>
<para>
Overrides the build and install phases to run the “xcbuild” command.
This hook is needed when a project only comes with build files for the
Xcode build system. You can disable this behavior by setting buildPhase
and configurePhase to a custom value. xcbuildFlags controls flags
passed only to xcbuild.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
meson
</term>
<listitem>
<para>
Overrides the configure phase to run meson to generate Ninja files. You
can disable this behavior by setting configurePhase to a custom value,
or by setting dontUseMesonConfigure. To run these files, you should
accompany meson with ninja. mesonFlags controls only the flags passed
to meson. By default, parallel building is enabled as Meson supports
parallel building almost everywhere.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
ninja
</term>
<listitem>
<para>
Overrides the build, install, and check phase to run ninja instead of
make. You can disable this behavior with the dontUseNinjaBuild,
dontUseNinjaInstall, and dontUseNinjaCheck, respectively. Parallel
building is enabled by default in Ninja.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
unzip
</term>
<listitem>
<para>
This setup hook will allow you to unzip .zip files specified in $src.
There are many similar packages like unrar, undmg, etc.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
wafHook
</term>
<listitem>
<para>
Overrides the configure, build, and install phases. This will run the
"waf" script used by many projects. If waf doesnt exist, it will copy
the version of waf available in Nixpkgs wafFlags can be used to pass
flags to the waf script.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
scons
</term>
<listitem>
<para>
Overrides the build, install, and check phases. This uses the scons
build system as a replacement for make. scons does not provide a
configure phase, so everything is managed at build and install time.
</para>
</listitem>
</varlistentry>
</variablelist> </variablelist>
</para> </para>
</section> </section>

View File

@ -41,6 +41,18 @@ rec {
# think of it as an infix operator `g extends f` that mimics the syntax from # think of it as an infix operator `g extends f` that mimics the syntax from
# Java. It may seem counter-intuitive to have the "base class" as the second # Java. It may seem counter-intuitive to have the "base class" as the second
# argument, but it's nice this way if several uses of `extends` are cascaded. # argument, but it's nice this way if several uses of `extends` are cascaded.
#
# To get a better understanding how `extends` turns a function with a fix
# point (the package set we start with) into a new function with a different fix
# point (the desired packages set) lets just see, how `extends g f`
# unfolds with `g` and `f` defined above:
#
# extends g f = self: let super = f self; in super // g self super;
# = self: let super = { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }; in super // g self super
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // g self { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; }
# = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } // { foo = "foo" + " + "; }
# = self: { foo = "foo + "; bar = "bar"; foobar = self.foo + self.bar; }
#
extends = f: rattrs: self: let super = rattrs self; in super // f self super; extends = f: rattrs: self: let super = rattrs self; in super // f self super;
# Compose two extending functions of the type expected by 'extends' # Compose two extending functions of the type expected by 'extends'

View File

@ -47,6 +47,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
amd = { amd = {
fullName = "AMD License Agreement"; fullName = "AMD License Agreement";
url = http://developer.amd.com/amd-license-agreement/; url = http://developer.amd.com/amd-license-agreement/;
free = false;
}; };
apsl20 = spdx { apsl20 = spdx {
@ -104,14 +105,10 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = ''BSD 4-clause "Original" or "Old" License''; fullName = ''BSD 4-clause "Original" or "Old" License'';
}; };
bsl10 = {
fullName = "Business Source License 1.0";
url = https://mariadb.com/bsl10;
};
bsl11 = { bsl11 = {
fullName = "Business Source License 1.1"; fullName = "Business Source License 1.1";
url = https://mariadb.com/bsl11; url = https://mariadb.com/bsl11;
free = false;
}; };
clArtistic = spdx { clArtistic = spdx {

View File

@ -82,6 +82,9 @@ rec {
aarch64 = { bits = 64; significantByte = littleEndian; family = "arm"; version = "8"; }; aarch64 = { bits = 64; significantByte = littleEndian; family = "arm"; version = "8"; };
aarch64_be = { bits = 64; significantByte = bigEndian; family = "arm"; version = "8"; }; aarch64_be = { bits = 64; significantByte = bigEndian; family = "arm"; version = "8"; };
i386 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i486 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i586 = { bits = 32; significantByte = littleEndian; family = "x86"; };
i686 = { bits = 32; significantByte = littleEndian; family = "x86"; }; i686 = { bits = 32; significantByte = littleEndian; family = "x86"; };
x86_64 = { bits = 64; significantByte = littleEndian; family = "x86"; }; x86_64 = { bits = 64; significantByte = littleEndian; family = "x86"; };

View File

@ -129,6 +129,13 @@ rec {
/* Returns the current nixpkgs release number as string. */ /* Returns the current nixpkgs release number as string. */
release = lib.strings.fileContents ../.version; release = lib.strings.fileContents ../.version;
/* Returns the current nixpkgs release code name.
On each release the first letter is bumped and a new animal is chosen
starting with that new letter.
*/
codeName = "Koi";
/* Returns the current nixpkgs version suffix as string. */ /* Returns the current nixpkgs version suffix as string. */
versionSuffix = versionSuffix =
let suffixFile = ../.version-suffix; let suffixFile = ../.version-suffix;

View File

@ -73,6 +73,11 @@
github = "acowley"; github = "acowley";
name = "Anthony Cowley"; name = "Anthony Cowley";
}; };
adamt = {
email = "mail@adamtulinius.dk";
github = "adamtulinius";
name = "Adam Tulinius";
};
adelbertc = { adelbertc = {
email = "adelbertc@gmail.com"; email = "adelbertc@gmail.com";
github = "adelbertc"; github = "adelbertc";
@ -688,6 +693,11 @@
github = "campadrenalin"; github = "campadrenalin";
name = "Philip Horger"; name = "Philip Horger";
}; };
candeira = {
email = "javier@candeira.com";
github = "candeira";
name = "Javier Candeira";
};
canndrew = { canndrew = {
email = "shum@canndrew.org"; email = "shum@canndrew.org";
github = "canndrew"; github = "canndrew";
@ -762,6 +772,11 @@
github = "ChengCat"; github = "ChengCat";
name = "Yucheng Zhang"; name = "Yucheng Zhang";
}; };
chessai = {
email = "chessai1996@gmail.com";
github = "chessai";
name = "Daniel Cartwright";
};
chiiruno = { chiiruno = {
email = "okinan@protonmail.com"; email = "okinan@protonmail.com";
github = "chiiruno"; github = "chiiruno";
@ -1304,6 +1319,11 @@
github = "ellis"; github = "ellis";
name = "Ellis Whitehead"; name = "Ellis Whitehead";
}; };
elseym = {
email = "elseym@me.com";
github = "elseym";
name = "Simon Waibl";
};
elvishjerricco = { elvishjerricco = {
email = "elvishjerricco@gmail.com"; email = "elvishjerricco@gmail.com";
github = "ElvishJerricco"; github = "ElvishJerricco";
@ -2012,6 +2032,11 @@
github = "jhhuh"; github = "jhhuh";
name = "Ji-Haeng Huh"; name = "Ji-Haeng Huh";
}; };
jhillyerd = {
email = "james+nixos@hillyerd.com";
github = "jhillyerd";
name = "James Hillyerd";
};
jirkamarsik = { jirkamarsik = {
email = "jiri.marsik89@gmail.com"; email = "jiri.marsik89@gmail.com";
github = "jirkamarsik"; github = "jirkamarsik";
@ -3826,6 +3851,11 @@
github = "sauyon"; github = "sauyon";
name = "Sauyon Lee"; name = "Sauyon Lee";
}; };
sboosali = {
email = "SamBoosalis@gmail.com";
github = "sboosali";
name = "Sam Boosalis";
};
schmitthenner = { schmitthenner = {
email = "development@schmitthenner.eu"; email = "development@schmitthenner.eu";
github = "fkz"; github = "fkz";
@ -4838,6 +4868,11 @@
github = "umazalakain"; github = "umazalakain";
name = "Unai Zalakain"; name = "Unai Zalakain";
}; };
zaninime = {
email = "francesco@zanini.me";
github = "zaninime";
name = "Francesco Zanini";
};
zarelit = { zarelit = {
email = "david@zarel.net"; email = "david@zarel.net";
github = "zarelit"; github = "zarelit";
@ -4892,4 +4927,9 @@
github = "zzamboni"; github = "zzamboni";
name = "Diego Zamboni"; name = "Diego Zamboni";
}; };
mredaelli = {
email = "massimo@typish.io";
github = "mredaelli";
name = "Massimo Redaelli";
};
} }

View File

@ -22,5 +22,6 @@
<xi:include href="networking.xml" /> <xi:include href="networking.xml" />
<xi:include href="linux-kernel.xml" /> <xi:include href="linux-kernel.xml" />
<xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" /> <xi:include href="../generated/modules.xml" xpointer="xpointer(//section[@id='modules']/*)" />
<xi:include href="profiles.xml" />
<!-- Apache; libvirtd virtualisation --> <!-- Apache; libvirtd virtualisation -->
</part> </part>

View File

@ -0,0 +1,39 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="ch-profiles">
<title>Profiles</title>
<para>
In some cases, it may be desirable to take advantage of commonly-used,
predefined configurations provided by nixpkgs, but different from those that
come as default. This is a role fulfilled by NixOS's Profiles, which come as
files living in <filename>&lt;nixpkgs/nixos/modules/profiles&gt;</filename>.
That is to say, expected usage is to add them to the imports list of your
<filename>/etc/configuration.nix</filename> as such:
</para>
<programlisting>
imports = [
&lt;nixpkgs/nixos/modules/profiles/profile-name.nix&gt;
];
</programlisting>
<para>
Even if some of these profiles seem only useful in the context of
install media, many are actually intended to be used in real installs.
</para>
<para>
What follows is a brief explanation on the purpose and use-case for each
profile. Detailing each option configured by each one is out of scope.
</para>
<xi:include href="profiles/all-hardware.xml" />
<xi:include href="profiles/base.xml" />
<xi:include href="profiles/clone-config.xml" />
<xi:include href="profiles/demo.xml" />
<xi:include href="profiles/docker-container.xml" />
<xi:include href="profiles/graphical.xml" />
<xi:include href="profiles/hardened.xml" />
<xi:include href="profiles/headless.xml" />
<xi:include href="profiles/installation-device.xml" />
<xi:include href="profiles/minimal.xml" />
<xi:include href="profiles/qemu-guest.xml" />
</chapter>

View File

@ -0,0 +1,20 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-all-hardware">
<title>All Hardware</title>
<para>
Enables all hardware supported by NixOS: i.e., all firmware is
included, and all devices from which one may boot are enabled in the initrd.
Its primary use is in the NixOS installation CDs.
</para>
<para>
The enabled kernel modules include support for SATA and PATA, SCSI
(partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and
Hyper-V. Additionally, <xref linkend="opt-hardware.enableAllFirmware"/> is
enabled, and the firmware for the ZyDAS ZD1211 chipset is specifically
installed.
</para>
</section>

View File

@ -0,0 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-base">
<title>Base</title>
<para>
Defines the software packages included in the "minimal"
installation CD. It installs several utilities useful in a simple recovery or
install media, such as a text-mode web browser, and tools for manipulating
block devices, networking, hardware diagnostics, and filesystems (with their
respective kernel modules).
</para>
</section>

View File

@ -0,0 +1,14 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-clone-config">
<title>Clone Config</title>
<para>
This profile is used in installer images.
It provides an editable configuration.nix that imports all the modules that
were also used when creating the image in the first place.
As a result it allows users to edit and rebuild the live-system.
</para>
</section>

View File

@ -0,0 +1,13 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-demo">
<title>Demo</title>
<para>
This profile just enables a <systemitem class="username">demo</systemitem> user, with password <literal>demo</literal>, uid <literal>1000</literal>, <systemitem class="groupname">wheel</systemitem>
group and <link linkend="opt-services.xserver.displayManager.sddm.autoLogin">
autologin in the SDDM display manager</link>.
</para>
</section>

View File

@ -0,0 +1,15 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-docker-container">
<title>Docker Container</title>
<para>
This is the profile from which the Docker images are generated. It prepares a
working system by importing the <link linkend="sec-profile-minimal">Minimal</link> and
<link linkend="sec-profile-clone-config">Clone Config</link> profiles, and setting appropriate
configuration options that are useful inside a container context, like
<xref linkend="opt-boot.isContainer"/>.
</para>
</section>

View File

@ -0,0 +1,21 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-graphical">
<title>Graphical</title>
<para>
Defines a NixOS configuration with the Plasma 5 desktop. It's used by the
graphical installation CD.
</para>
<para>
It sets <xref linkend="opt-services.xserver.enable"/>,
<xref linkend="opt-services.xserver.displayManager.sddm.enable"/>,
<xref linkend="opt-services.xserver.desktopManager.plasma5.enable"/> (
<link linkend="opt-services.xserver.desktopManager.plasma5.enableQt4Support">
without Qt4 Support</link>), and
<xref linkend="opt-services.xserver.libinput.enable"/> to true. It also
includes glxinfo and firefox in the system packages list.
</para>
</section>

View File

@ -0,0 +1,22 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-hardened">
<title>Hardened</title>
<para>
A profile with most (vanilla) hardening options enabled by default,
potentially at the cost of features and performance.
</para>
<para>
This includes a hardened kernel, and limiting the system information
available to processes through the <filename>/sys</filename> and
<filename>/proc</filename> filesystems. It also disables the User Namespaces
feature of the kernel, which stops Nix from being able to build anything
(this particular setting can be overriden via
<xref linkend="opt-security.allowUserNamespaces"/>). See the <literal
xlink:href="https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix">
profile source</literal> for further detail on which settings are altered.
</para>
</section>

View File

@ -0,0 +1,18 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-headless">
<title>Headless</title>
<para>
Common configuration for headless machines (e.g., Amazon EC2 instances).
</para>
<para>
Disables <link linkend="opt-sound.enable">sound</link>,
<link linkend="opt-boot.vesa">vesa</link>, serial consoles,
<link linkend="opt-systemd.enableEmergencyMode">emergency mode</link>,
<link linkend="opt-boot.loader.grub.splashImage">grub splash images</link> and
configures the kernel to reboot automatically on panic.
</para>
</section>

View File

@ -0,0 +1,35 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-installation-device">
<title>Installation Device</title>
<para>
Provides a basic configuration for installation devices like CDs. This means
enabling hardware scans, using the <link linkend="sec-profile-clone-config">
Clone Config profile</link> to guarantee
<filename>/etc/nixos/configuration.nix</filename> exists (for
<command>nixos-rebuild</command> to work), a copy of the Nixpkgs channel
snapshot used to create the install media.
</para>
<para>
Additionally, documentation for <link linkend="opt-documentation.enable">
Nixpkgs</link> and <link linkend="opt-documentation.nixos.enable">NixOS
</link> are forcefully enabled (to override the
<link linkend="sec-profile-minimal">Minimal profile</link> preference); the
NixOS manual is shown automatically on TTY 8, sudo and udisks are disabled.
Autologin is enabled as root.
</para>
<para>
A message is shown to the user to start a display manager if needed,
ssh with <xref linkend="opt-services.openssh.permitRootLogin"/> are enabled (but
doesn't autostart). WPA Supplicant is also enabled without autostart.
</para>
<para>
Finally, vim is installed, root is set to not have a password, the kernel is
made more silent for remote public IP installs, and several settings are
tweaked so that the installer has a better chance of succeeding under
low-memory environments.
</para>
</section>

View File

@ -0,0 +1,17 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-minimal">
<title>Minimal</title>
<para>
This profile defines a small NixOS configuration. It does not contain any
graphical stuff. It's a very short file that enables
<link linkend="opt-environment.noXlibs">noXlibs</link>, sets
<link linkend="opt-i18n.supportedLocales">i18n.supportedLocales</link>
to only support the user-selected locale,
<link linkend="opt-documentation.enable">disables packages' documentation
</link>, and <link linkend="opt-sound.enable">disables sound</link>.
</para>
</section>

View File

@ -0,0 +1,16 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-profile-qemu-guest">
<title>QEMU Guest</title>
<para>
This profile contains common configuration for virtual machines running under
QEMU (using virtio).
</para>
<para>
It makes virtio modules available on the initrd, sets the system time from
the hardware clock to work around a bug in qemu-kvm, and
<link linkend="opt-security.rngd.enable">enables rngd</link>.
</para>
</section>

View File

@ -167,7 +167,7 @@ $ nixos-generate-config --root /mnt
{ {
imports = imports =
[ &lt;nixos/modules/installer/scan/not-detected.nix> [ &lt;nixos/modules/installer/scan/not-detected.nix&gt;
]; ];
boot.initrd.availableKernelModules = [ "ehci_hcd" "ahci" ]; boot.initrd.availableKernelModules = [ "ehci_hcd" "ahci" ];

View File

@ -19,7 +19,9 @@
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para /> <para>
The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6.
</para>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -220,6 +222,17 @@
reset to the default value (<literal>false</literal>). reset to the default value (<literal>false</literal>).
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Network interface indiscriminate NixOS firewall options
(<literal>networking.firewall.allow*</literal>) are now preserved when also
setting interface specific rules such as <literal>networking.firewall.interfaces.en0.allow*</literal>.
These rules continue to use the pseudo device "default"
(<literal>networking.firewall.interfaces.default.*</literal>), and assigning
to this pseudo device will override the (<literal>networking.firewall.allow*</literal>)
options.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>

View File

@ -1,4 +1,13 @@
{ system, pkgs, minimal ? false, config ? {} }: { system
, # Use a minimal kernel?
minimal ? false
, # Ignored
config ? null
# Nixpkgs, for qemu, lib and more
, pkgs
, # NixOS configuration to add to the VMs
extraConfigurations ? []
}:
with pkgs.lib; with pkgs.lib;
with import ../lib/qemu-flags.nix { inherit pkgs; }; with import ../lib/qemu-flags.nix { inherit pkgs; };
@ -28,7 +37,8 @@ rec {
../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs ../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
{ key = "no-manual"; documentation.nixos.enable = false; } { key = "no-manual"; documentation.nixos.enable = false; }
{ key = "qemu"; system.build.qemu = qemu; } { key = "qemu"; system.build.qemu = qemu; }
] ++ optional minimal ../modules/testing/minimal-kernel.nix; ] ++ optional minimal ../modules/testing/minimal-kernel.nix
++ extraConfigurations;
extraArgs = { inherit nodes; }; extraArgs = { inherit nodes; };
}; };

View File

@ -1,6 +1,13 @@
{ system, pkgs, minimal ? false, config ? {} }: { system
, pkgs
# Use a minimal kernel?
, minimal ? false
# Ignored
, config ? null
# Modules to add to each VM
, extraConfigurations ? [] }:
with import ./build-vms.nix { inherit system pkgs minimal config; }; with import ./build-vms.nix { inherit system pkgs minimal extraConfigurations; };
with pkgs; with pkgs;
let let

View File

@ -269,7 +269,7 @@ in
}; };
config = mkIf (config.fonts.fontconfig.enable && cfg.enable) { config = mkIf (config.fonts.fontconfig.enable && config.fonts.fontconfig.penultimate.enable) {
fonts.fontconfig.confPackages = [ penultimateConf ]; fonts.fontconfig.confPackages = [ penultimateConf ];

View File

@ -5,7 +5,7 @@ with lib;
options = { options = {
gtk.iconCache.enable = mkOption { gtk.iconCache.enable = mkOption {
type = types.bool; type = types.bool;
default = true; default = config.services.xserver.enable;
description = '' description = ''
Whether to build icon theme caches for GTK+ applications. Whether to build icon theme caches for GTK+ applications.
''; '';

View File

@ -247,6 +247,10 @@ in
# a collision with an apparently unrelated environment # a collision with an apparently unrelated environment
# variable with the same name exported by dhcpcd. # variable with the same name exported by dhcpcd.
interface_order='lo lo[0-9]*' interface_order='lo lo[0-9]*'
'' + optionalString config.services.nscd.enable ''
# Invalidate the nscd cache whenever resolv.conf is
# regenerated.
libc_restart='${pkgs.systemd}/bin/systemctl try-restart --no-block nscd.service 2> /dev/null'
'' + optionalString (length resolvconfOptions > 0) '' '' + optionalString (length resolvconfOptions > 0) ''
# Options as described in resolv.conf(5) # Options as described in resolv.conf(5)
resolv_conf_options='${concatStringsSep " " resolvconfOptions}' resolv_conf_options='${concatStringsSep " " resolvconfOptions}'

View File

@ -10,6 +10,15 @@ in
options.hardware.ckb-next = { options.hardware.ckb-next = {
enable = mkEnableOption "the Corsair keyboard/mouse driver"; enable = mkEnableOption "the Corsair keyboard/mouse driver";
gid = mkOption {
type = types.nullOr types.int;
default = null;
example = 100;
description = ''
Limit access to the ckb daemon to a particular group.
'';
};
package = mkOption { package = mkOption {
type = types.package; type = types.package;
default = pkgs.ckb-next; default = pkgs.ckb-next;
@ -26,8 +35,8 @@ in
systemd.services.ckb-next = { systemd.services.ckb-next = {
description = "Corsair Keyboards and Mice Daemon"; description = "Corsair Keyboards and Mice Daemon";
wantedBy = ["multi-user.target"]; wantedBy = ["multi-user.target"];
script = "exec ${cfg.package}/bin/ckb-next-daemon";
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/ckb-next-daemon ${optionalString (cfg.gid != null) "--gid=${builtins.toString cfg.gid}"}";
Restart = "on-failure"; Restart = "on-failure";
StandardOutput = "syslog"; StandardOutput = "syslog";
}; };

View File

@ -8,7 +8,7 @@ let
version = "2.40-13.0"; version = "2.40-13.0";
src = pkgs.fetchurl { src = pkgs.fetchurl {
url = "http://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/${name}_amd64.deb"; url = "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/${name}_amd64.deb";
sha256 = "11w7fwk93lmfw0yya4jpjwdmgjimqxx6412sqa166g1pz4jil4sw"; sha256 = "11w7fwk93lmfw0yya4jpjwdmgjimqxx6412sqa166g1pz4jil4sw";
}; };
@ -34,7 +34,7 @@ let
meta = with lib; { meta = with lib; {
description = "HP Smart Array CLI"; description = "HP Smart Array CLI";
homepage = http://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/; homepage = https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/;
license = licenses.unfreeRedistributable; license = licenses.unfreeRedistributable;
platforms = [ "x86_64-linux" ]; platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ volth ]; maintainers = with maintainers; [ volth ];

View File

@ -7,7 +7,7 @@ let nodes = import networkExpr; in
with import ../../../../lib/testing.nix { with import ../../../../lib/testing.nix {
inherit system; inherit system;
pkgs = import ../.. { inherit system config; }; pkgs = import ../../../../.. { inherit system config; };
}; };
(makeTest { inherit nodes; testScript = ""; }).driver (makeTest { inherit nodes; testScript = ""; }).driver

View File

@ -314,15 +314,17 @@ push @attrs, "services.xserver.videoDrivers = [ \"$videoDriver\" ];" if $videoDr
# Generate the swapDevices option from the currently activated swap # Generate the swapDevices option from the currently activated swap
# devices. # devices.
my @swaps = read_file("/proc/swaps"); my @swaps = read_file("/proc/swaps", err_mode => 'carp');
shift @swaps;
my @swapDevices; my @swapDevices;
if (@swaps) {
shift @swaps;
foreach my $swap (@swaps) { foreach my $swap (@swaps) {
$swap =~ /^(\S+)\s/; $swap =~ /^(\S+)\s/;
next unless -e $1; next unless -e $1;
my $dev = findStableDevPath $1; my $dev = findStableDevPath $1;
push @swapDevices, "{ device = \"$dev\"; }"; push @swapDevices, "{ device = \"$dev\"; }";
} }
}
# Generate the fileSystems option from the currently mounted # Generate the fileSystems option from the currently mounted

View File

@ -101,7 +101,7 @@
iodined = 66; iodined = 66;
#libvirtd = 67; # unused #libvirtd = 67; # unused
graphite = 68; graphite = 68;
statsd = 69; #statsd = 69; # removed 2018-11-14
transmission = 70; transmission = 70;
postgres = 71; postgres = 71;
#vboxusers = 72; # unused #vboxusers = 72; # unused
@ -175,7 +175,7 @@
dnsmasq = 141; dnsmasq = 141;
uhub = 142; uhub = 142;
yandexdisk = 143; yandexdisk = 143;
#collectd = 144; #unused mxisd = 144; # was once collectd
consul = 145; consul = 145;
mailpile = 146; mailpile = 146;
redmine = 147; redmine = 147;
@ -336,6 +336,7 @@
solr = 309; solr = 309;
alerta = 310; alerta = 310;
minetest = 311; minetest = 311;
rss2email = 312;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399! # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -411,7 +412,7 @@
iodined = 66; iodined = 66;
libvirtd = 67; libvirtd = 67;
graphite = 68; graphite = 68;
#statsd = 69; # unused #statsd = 69; # removed 2018-11-14
transmission = 70; transmission = 70;
postgres = 71; postgres = 71;
vboxusers = 72; vboxusers = 72;
@ -483,7 +484,7 @@
#dnsmasq = 141; # unused #dnsmasq = 141; # unused
uhub = 142; uhub = 142;
#yandexdisk = 143; # unused #yandexdisk = 143; # unused
#collectd = 144; # unused mxisd = 144; # was once collectd
#consul = 145; # unused #consul = 145; # unused
mailpile = 146; mailpile = 146;
redmine = 147; redmine = 147;
@ -632,6 +633,7 @@
solr = 309; solr = 309;
alerta = 310; alerta = 310;
minetest = 311; minetest = 311;
rss2email = 312;
# When adding a gid, make sure it doesn't match an existing # When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal # uid. Users and groups with the same name should have equal

View File

@ -43,6 +43,7 @@ in
nixos.codeName = mkOption { nixos.codeName = mkOption {
readOnly = true; readOnly = true;
type = types.str; type = types.str;
default = lib.trivial.codeName;
description = "The NixOS release code name (e.g. <literal>Emu</literal>)."; description = "The NixOS release code name (e.g. <literal>Emu</literal>).";
}; };
@ -79,9 +80,6 @@ in
version = mkDefault (cfg.release + cfg.versionSuffix); version = mkDefault (cfg.release + cfg.versionSuffix);
revision = mkIf (pathIsDirectory gitRepo) (mkDefault gitCommitId); revision = mkIf (pathIsDirectory gitRepo) (mkDefault gitCommitId);
versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId)); versionSuffix = mkIf (pathIsDirectory gitRepo) (mkDefault (".git." + gitCommitId));
# Note: the first letter is bumped on every release. It's an animal.
codeName = "Koi";
}; };
# Generate /etc/os-release. See # Generate /etc/os-release. See

View File

@ -64,7 +64,6 @@
./i18n/input-method/ibus.nix ./i18n/input-method/ibus.nix
./i18n/input-method/nabi.nix ./i18n/input-method/nabi.nix
./i18n/input-method/uim.nix ./i18n/input-method/uim.nix
./installer/tools/auto-upgrade.nix
./installer/tools/tools.nix ./installer/tools/tools.nix
./misc/assertions.nix ./misc/assertions.nix
./misc/crashdump.nix ./misc/crashdump.nix
@ -104,6 +103,7 @@
./programs/less.nix ./programs/less.nix
./programs/light.nix ./programs/light.nix
./programs/mosh.nix ./programs/mosh.nix
./programs/mininet.nix
./programs/mtr.nix ./programs/mtr.nix
./programs/nano.nix ./programs/nano.nix
./programs/npm.nix ./programs/npm.nix
@ -303,6 +303,7 @@
./services/logging/graylog.nix ./services/logging/graylog.nix
./services/logging/heartbeat.nix ./services/logging/heartbeat.nix
./services/logging/journalbeat.nix ./services/logging/journalbeat.nix
./services/logging/journaldriver.nix
./services/logging/journalwatch.nix ./services/logging/journalwatch.nix
./services/logging/klogd.nix ./services/logging/klogd.nix
./services/logging/logcheck.nix ./services/logging/logcheck.nix
@ -329,6 +330,7 @@
./services/mail/postgrey.nix ./services/mail/postgrey.nix
./services/mail/spamassassin.nix ./services/mail/spamassassin.nix
./services/mail/rspamd.nix ./services/mail/rspamd.nix
./services/mail/rss2email.nix
./services/mail/rmilter.nix ./services/mail/rmilter.nix
./services/mail/nullmailer.nix ./services/mail/nullmailer.nix
./services/misc/airsonic.nix ./services/misc/airsonic.nix
@ -452,7 +454,6 @@
./services/monitoring/riemann-tools.nix ./services/monitoring/riemann-tools.nix
./services/monitoring/scollector.nix ./services/monitoring/scollector.nix
./services/monitoring/smartd.nix ./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
./services/monitoring/sysstat.nix ./services/monitoring/sysstat.nix
./services/monitoring/systemhealth.nix ./services/monitoring/systemhealth.nix
./services/monitoring/teamviewer.nix ./services/monitoring/teamviewer.nix
@ -559,6 +560,7 @@
./services/networking/miredo.nix ./services/networking/miredo.nix
./services/networking/mstpd.nix ./services/networking/mstpd.nix
./services/networking/murmur.nix ./services/networking/murmur.nix
./services/networking/mxisd.nix
./services/networking/namecoind.nix ./services/networking/namecoind.nix
./services/networking/nat.nix ./services/networking/nat.nix
./services/networking/ndppd.nix ./services/networking/ndppd.nix
@ -621,6 +623,7 @@
./services/networking/supplicant.nix ./services/networking/supplicant.nix
./services/networking/supybot.nix ./services/networking/supybot.nix
./services/networking/syncthing.nix ./services/networking/syncthing.nix
./services/networking/syncthing-relay.nix
./services/networking/tcpcrypt.nix ./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix ./services/networking/teamspeak3.nix
./services/networking/tinc.nix ./services/networking/tinc.nix
@ -802,6 +805,7 @@
./system/boot/timesyncd.nix ./system/boot/timesyncd.nix
./system/boot/tmp.nix ./system/boot/tmp.nix
./system/etc/etc.nix ./system/etc/etc.nix
./tasks/auto-upgrade.nix
./tasks/bcache.nix ./tasks/bcache.nix
./tasks/cpu-freq.nix ./tasks/cpu-freq.nix
./tasks/encrypted-devices.nix ./tasks/encrypted-devices.nix

View File

@ -12,6 +12,8 @@ with lib;
boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened; boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened;
nix.allowedUsers = mkDefault [ "@users" ];
security.hideProcessInformation = mkDefault true; security.hideProcessInformation = mkDefault true;
security.lockKernelModules = mkDefault true; security.lockKernelModules = mkDefault true;

View File

@ -77,7 +77,7 @@ in
systemd.packages = [ pkgs.gnupg ]; systemd.packages = [ pkgs.gnupg ];
environment.extraInit = '' environment.interactiveShellInit = ''
# Bind gpg-agent to this TTY if gpg commands are used. # Bind gpg-agent to this TTY if gpg commands are used.
export GPG_TTY=$(tty) export GPG_TTY=$(tty)

View File

@ -0,0 +1,39 @@
# Global configuration for mininet
# kernel must have NETNS/VETH/SCHED
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.mininet;
generatedPath = with pkgs; makeSearchPath "bin" [
iperf ethtool iproute socat
];
pyEnv = pkgs.python.withPackages(ps: [ ps.mininet-python ]);
mnexecWrapped = pkgs.runCommand "mnexec-wrapper"
{ buildInputs = [ pkgs.makeWrapper pkgs.pythonPackages.wrapPython ]; }
''
makeWrapper ${pkgs.mininet}/bin/mnexec \
$out/bin/mnexec \
--prefix PATH : "${generatedPath}"
ln -s ${pyEnv}/bin/mn $out/bin/mn
# mn errors out without a telnet binary
# pkgs.telnet brings an undesired ifconfig into PATH see #43105
ln -s ${pkgs.telnet}/bin/telnet $out/bin/telnet
'';
in
{
options.programs.mininet.enable = mkEnableOption "Mininet";
config = mkIf cfg.enable {
virtualisation.vswitch.enable = true;
environment.systemPackages = [ mnexecWrapped ];
};
}

View File

@ -12,17 +12,28 @@ let
let let
pName = _p: (builtins.parseDrvName (_p.name)).name; pName = _p: (builtins.parseDrvName (_p.name)).name;
in pName mysql == pName pkgs.mariadb; in pName mysql == pName pkgs.mariadb;
isMysqlAtLeast57 =
let
pName = _p: (builtins.parseDrvName (_p.name)).name;
in (pName mysql == pName pkgs.mysql57)
&& ((builtins.compareVersions mysql.version "5.7") >= 0);
pidFile = "${cfg.pidDir}/mysqld.pid"; pidFile = "${cfg.pidDir}/mysqld.pid";
mysqldAndInstallOptions =
"--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${mysql}";
mysqldOptions = mysqldOptions =
"--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${mysql} " + "${mysqldAndInstallOptions} --pid-file=${pidFile}";
"--pid-file=${pidFile}"; # For MySQL 5.7+, --insecure creates the root user without password
# (earlier versions and MariaDB do this by default).
installOptions =
"${mysqldAndInstallOptions} ${lib.optionalString isMysqlAtLeast57 "--insecure"}";
myCnf = pkgs.writeText "my.cnf" myCnf = pkgs.writeText "my.cnf"
'' ''
[mysqld] [mysqld]
port = ${toString cfg.port} port = ${toString cfg.port}
datadir = ${cfg.dataDir}
${optionalString (cfg.bind != null) "bind-address = ${cfg.bind}" } ${optionalString (cfg.bind != null) "bind-address = ${cfg.bind}" }
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"} ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"}
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"} ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"}
@ -147,7 +158,7 @@ in
option is changed. This means that users created and permissions assigned once through this option or option is changed. This means that users created and permissions assigned once through this option or
otherwise have to be removed manually. otherwise have to be removed manually.
''; '';
example = [ example = literalExample ''[
{ {
name = "nextcloud"; name = "nextcloud";
ensurePermissions = { ensurePermissions = {
@ -160,7 +171,7 @@ in
"*.*" = "SELECT, LOCK TABLES"; "*.*" = "SELECT, LOCK TABLES";
}; };
} }
]; ]'';
}; };
# FIXME: remove this option; it's a really bad idea. # FIXME: remove this option; it's a really bad idea.
@ -252,7 +263,7 @@ in
if ! test -e ${cfg.dataDir}/mysql; then if ! test -e ${cfg.dataDir}/mysql; then
mkdir -m 0700 -p ${cfg.dataDir} mkdir -m 0700 -p ${cfg.dataDir}
chown -R ${cfg.user} ${cfg.dataDir} chown -R ${cfg.user} ${cfg.dataDir}
${mysql}/bin/mysql_install_db ${mysqldOptions} ${mysql}/bin/mysql_install_db ${installOptions}
touch /tmp/mysql_init touch /tmp/mysql_init
fi fi

View File

@ -39,14 +39,14 @@ with lib;
systemd.packages = [ pkgs.accountsservice ]; systemd.packages = [ pkgs.accountsservice ];
systemd.services.accounts-daemon = { systemd.services.accounts-daemon = recursiveUpdate {
wantedBy = [ "graphical.target" ]; wantedBy = [ "graphical.target" ];
# Accounts daemon looks for dbus interfaces in $XDG_DATA_DIRS/accountsservice # Accounts daemon looks for dbus interfaces in $XDG_DATA_DIRS/accountsservice
environment.XDG_DATA_DIRS = "${config.system.path}/share"; environment.XDG_DATA_DIRS = "${config.system.path}/share";
} // (optionalAttrs (!config.users.mutableUsers) { } (optionalAttrs (!config.users.mutableUsers) {
environment.NIXOS_USERS_PURE = "true"; environment.NIXOS_USERS_PURE = "true";
}); });
}; };

View File

@ -29,7 +29,7 @@ with lib;
config = mkIf config.services.gnome3.seahorse.enable { config = mkIf config.services.gnome3.seahorse.enable {
environment.systemPackages = [ pkgs.gnome3.seahorse ]; environment.systemPackages = [ pkgs.gnome3.seahorse pkgs.gnome3.dconf ];
services.dbus.packages = [ pkgs.gnome3.seahorse ]; services.dbus.packages = [ pkgs.gnome3.seahorse ];

View File

@ -45,7 +45,9 @@ let
else "${config.socket}${maybeOption "mode"}${maybeOption "owner"}${maybeOption "group"}"; else "${config.socket}${maybeOption "mode"}${maybeOption "owner"}${maybeOption "group"}";
}; };
workerOpts = { name, ... }: { traceWarning = w: x: builtins.trace "warning: ${w}" x;
workerOpts = { name, options, ... }: {
options = { options = {
enable = mkOption { enable = mkOption {
type = types.nullOr types.bool; type = types.nullOr types.bool;
@ -59,9 +61,18 @@ let
}; };
type = mkOption { type = mkOption {
type = types.nullOr (types.enum [ type = types.nullOr (types.enum [
"normal" "controller" "fuzzy_storage" "rspamd_proxy" "lua" "normal" "controller" "fuzzy_storage" "rspamd_proxy" "lua" "proxy"
]); ]);
description = "The type of this worker"; description = ''
The type of this worker. The type <literal>proxy</literal> is
deprecated and only kept for backwards compatibility and should be
replaced with <literal>rspamd_proxy</literal>.
'';
apply = let
from = "services.rspamd.workers.\”${name}\".type";
files = options.type.files;
warning = "The option `${from}` defined in ${showFiles files} has enum value `proxy` which has been renamed to `rspamd_proxy`";
in x: if x == "proxy" then traceWarning warning "rspamd_proxy" else x;
}; };
bindSockets = mkOption { bindSockets = mkOption {
type = types.listOf (types.either types.str (types.submodule bindSocketOpts)); type = types.listOf (types.either types.str (types.submodule bindSocketOpts));

View File

@ -0,0 +1,136 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.rss2email;
in {
###### interface
options = {
services.rss2email = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable rss2email.";
};
to = mkOption {
type = types.str;
description = "Mail address to which to send emails";
};
interval = mkOption {
type = types.str;
default = "12h";
description = "How often to check the feeds, in systemd interval format";
};
config = mkOption {
type = with types; attrsOf (either str (either int bool));
default = {};
description = ''
The configuration to give rss2email.
Default will use system-wide <literal>sendmail</literal> to send the
email. This is rss2email's default when running
<literal>r2e new</literal>.
This set contains key-value associations that will be set in the
<literal>[DEFAULT]</literal> block along with the
<literal>to</literal> parameter.
See
<literal>https://github.com/rss2email/rss2email/blob/master/r2e.1</literal>
for more information on which parameters are accepted.
'';
};
feeds = mkOption {
description = "The feeds to watch.";
type = types.attrsOf (types.submodule {
options = {
url = mkOption {
type = types.str;
description = "The URL at which to fetch the feed.";
};
to = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Email address to which to send feed items.
If <literal>null</literal>, this will not be set in the
configuration file, and rss2email will make it default to
<literal>rss2email.to</literal>.
'';
};
};
});
};
};
};
###### implementation
config = mkIf cfg.enable {
users.groups = {
rss2email.gid = config.ids.gids.rss2email;
};
users.users = {
rss2email = {
description = "rss2email user";
uid = config.ids.uids.rss2email;
group = "rss2email";
};
};
services.rss2email.config.to = cfg.to;
systemd.services.rss2email = let
conf = pkgs.writeText "rss2email.cfg" (lib.generators.toINI {} ({
DEFAULT = cfg.config;
} // lib.mapAttrs' (name: feed: nameValuePair "feed.${name}" (
{ inherit (feed) url; } //
lib.optionalAttrs (feed.to != null) { inherit (feed) to; }
)) cfg.feeds
));
in
{
preStart = ''
mkdir -p /var/rss2email
chmod 700 /var/rss2email
cp ${conf} /var/rss2email/conf.cfg
if [ ! -f /var/rss2email/db.json ]; then
echo '{"version":2,"feeds":[]}' > /var/rss2email/db.json
fi
chown -R rss2email:rss2email /var/rss2email
'';
path = [ pkgs.system-sendmail ];
serviceConfig = {
ExecStart =
"${pkgs.rss2email}/bin/r2e -c /var/rss2email/conf.cfg -d /var/rss2email/db.json run";
User = "rss2email";
PermissionsStartOnly = "true";
};
};
systemd.timers.rss2email = {
partOf = [ "rss2email.service" ];
wantedBy = [ "timers.target" ];
timerConfig.OnBootSec = "0";
timerConfig.OnUnitActiveSec = cfg.interval;
};
};
meta.maintainers = with lib.maintainers; [ ekleog ];
}

View File

@ -405,6 +405,9 @@ in
cp -Rf ${pkgs.zookeeper}/* ${cfg.baseDir}/zookeeper cp -Rf ${pkgs.zookeeper}/* ${cfg.baseDir}/zookeeper
chown -R zookeeper ${cfg.baseDir}/zookeeper/conf chown -R zookeeper ${cfg.baseDir}/zookeeper/conf
chmod -R u+w ${cfg.baseDir}/zookeeper/conf chmod -R u+w ${cfg.baseDir}/zookeeper/conf
replace_what=$(echo ${pkgs.zookeeper} | sed 's/[\/&]/\\&/g')
replace_with=$(echo ${cfg.baseDir}/zookeeper | sed 's/[\/&]/\\&/g')
sed -i 's/'"$replace_what"'/'"$replace_with"'/g' ${cfg.baseDir}/zookeeper/bin/zk*.sh
''; '';
}; };
users.users = singleton { users.users = singleton {

View File

@ -564,11 +564,11 @@ in {
[ -L /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log [ -L /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
[ -L /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp [ -L /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
[ -L /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads [ -L /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
${optionalString cfg.smtp.enable '' ${optionalString cfg.smtp.enable ''
ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
''} ''}
cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret ${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
# JSON is a subset of YAML # JSON is a subset of YAML

View File

@ -117,11 +117,11 @@ in
buildCores = mkOption { buildCores = mkOption {
type = types.int; type = types.int;
default = 1; default = 0;
example = 64; example = 64;
description = '' description = ''
This option defines the maximum number of concurrent tasks during This option defines the maximum number of concurrent tasks during
one build. It affects, e.g., -j option for make. The default is 1. one build. It affects, e.g., -j option for make.
The special value 0 means that the builder should use all The special value 0 means that the builder should use all
available CPU cores in the system. Some builds may become available CPU cores in the system. Some builds may become
non-deterministic with this option; use with care! Packages will non-deterministic with this option; use with care! Packages will

View File

@ -180,7 +180,7 @@ in
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}"; ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}";
TimeoutSec = 0; TimeoutSec = "infinity";
StandardOutput = "tty"; StandardOutput = "tty";
RemainAfterExit = "yes"; RemainAfterExit = "yes";
}; };

View File

@ -12,7 +12,7 @@ let
localConfig = { localConfig = {
global = { global = {
"plugins directory" = "${wrappedPlugins}/libexec/netdata/plugins.d ${pkgs.netdata}/libexec/netdata/plugins.d"; "plugins directory" = "${pkgs.netdata}/libexec/netdata/plugins.d ${wrappedPlugins}/libexec/netdata/plugins.d";
}; };
web = { web = {
"web files owner" = "root"; "web files owner" = "root";
@ -53,6 +53,31 @@ in {
''; '';
}; };
python = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable python-based plugins
'';
};
extraPackages = mkOption {
default = ps: [];
defaultText = "ps: []";
example = literalExample ''
ps: [
ps.psycopg2
ps.docker
ps.dnspython
]
'';
description = ''
Extra python packages available at runtime
to enable additional python plugins.
'';
};
};
config = mkOption { config = mkOption {
type = types.attrsOf types.attrs; type = types.attrsOf types.attrs;
default = {}; default = {};
@ -74,21 +99,27 @@ in {
message = "Cannot specify both config and configText"; message = "Cannot specify both config and configText";
} }
]; ];
systemd.tmpfiles.rules = [
"d /var/cache/netdata 0755 ${cfg.user} ${cfg.group} -"
"Z /var/cache/netdata - ${cfg.user} ${cfg.group} -"
"d /var/log/netdata 0755 ${cfg.user} ${cfg.group} -"
"Z /var/log/netdata - ${cfg.user} ${cfg.group} -"
"d /var/lib/netdata 0755 ${cfg.user} ${cfg.group} -"
"Z /var/lib/netdata - ${cfg.user} ${cfg.group} -"
"d /etc/netdata 0755 ${cfg.user} ${cfg.group} -"
"Z /etc/netdata - ${cfg.user} ${cfg.group} -"
];
systemd.services.netdata = { systemd.services.netdata = {
path = with pkgs; [ gawk curl ];
description = "Real time performance monitoring"; description = "Real time performance monitoring";
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
preStart = concatStringsSep "\n" (map (dir: '' path = (with pkgs; [ gawk curl ]) ++ lib.optional cfg.python.enable
mkdir -vp ${dir} (pkgs.python3.withPackages cfg.python.extraPackages);
chmod 750 ${dir}
chown -R ${cfg.user}:${cfg.group} ${dir}
'') [ "/var/cache/netdata"
"/var/log/netdata"
"/var/lib/netdata" ]);
serviceConfig = { serviceConfig = {
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
Environment="PYTHONPATH=${pkgs.netdata}/libexec/netdata/python.d/python_modules";
PermissionsStartOnly = true; PermissionsStartOnly = true;
ExecStart = "${pkgs.netdata}/bin/netdata -D -c ${configFile}"; ExecStart = "${pkgs.netdata}/bin/netdata -D -c ${configFile}";
TimeoutStopSec = 60; TimeoutStopSec = 60;
@ -96,7 +127,7 @@ in {
}; };
security.wrappers."apps.plugin" = { security.wrappers."apps.plugin" = {
source = "${pkgs.netdata}/libexec/netdata/plugins.d/apps.plugin"; source = "${pkgs.netdata}/libexec/netdata/plugins.d/apps.plugin.org";
capabilities = "cap_dac_read_search,cap_sys_ptrace+ep"; capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
owner = cfg.user; owner = cfg.user;
group = cfg.group; group = cfg.group;

View File

@ -78,7 +78,7 @@ in
mkdir -p "$(dirname ${escapeShellArg cfg.databasePath})" mkdir -p "$(dirname ${escapeShellArg cfg.databasePath})"
''; '';
serviceConfig = { serviceConfig = {
TimeoutStartSec = 0; TimeoutStartSec = "infinity";
ExecStart = "${pkgs.osquery}/bin/osqueryd --logger_path ${escapeShellArg cfg.loggerPath} --pidfile ${escapeShellArg cfg.pidfile} --database_path ${escapeShellArg cfg.databasePath}"; ExecStart = "${pkgs.osquery}/bin/osqueryd --logger_path ${escapeShellArg cfg.loggerPath} --pidfile ${escapeShellArg cfg.pidfile} --database_path ${escapeShellArg cfg.databasePath}";
KillMode = "process"; KillMode = "process";
KillSignal = "SIGTERM"; KillSignal = "SIGTERM";

View File

@ -5,10 +5,18 @@ with lib;
let let
cfg = config.services.prometheus.alertmanager; cfg = config.services.prometheus.alertmanager;
mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration); mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration);
alertmanagerYml =
if cfg.configText != null then checkedConfig = file: pkgs.runCommand "checked-config" { buildInputs = [ cfg.package ]; } ''
ln -s ${file} $out
amtool check-config $out
'';
alertmanagerYml = let
yml = if cfg.configText != null then
pkgs.writeText "alertmanager.yml" cfg.configText pkgs.writeText "alertmanager.yml" cfg.configText
else mkConfigFile; else mkConfigFile;
in checkedConfig yml;
cmdlineArgs = cfg.extraFlags ++ [ cmdlineArgs = cfg.extraFlags ++ [
"--config.file ${alertmanagerYml}" "--config.file ${alertmanagerYml}"
"--web.listen-address ${cfg.listenAddress}:${toString cfg.port}" "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
@ -23,6 +31,15 @@ in {
services.prometheus.alertmanager = { services.prometheus.alertmanager = {
enable = mkEnableOption "Prometheus Alertmanager"; enable = mkEnableOption "Prometheus Alertmanager";
package = mkOption {
type = types.package;
default = pkgs.prometheus-alertmanager;
defaultText = "pkgs.alertmanager";
description = ''
Package that should be used for alertmanager.
'';
};
user = mkOption { user = mkOption {
type = types.str; type = types.str;
default = "nobody"; default = "nobody";
@ -40,8 +57,8 @@ in {
}; };
configuration = mkOption { configuration = mkOption {
type = types.attrs; type = types.nullOr types.attrs;
default = {}; default = null;
description = '' description = ''
Alertmanager configuration as nix attribute set. Alertmanager configuration as nix attribute set.
''; '';
@ -119,15 +136,22 @@ in {
}; };
}; };
config = mkMerge [
config = mkIf cfg.enable { (mkIf cfg.enable {
assertions = singleton {
assertion = cfg.configuration != null || cfg.configText != null;
message = "Can not enable alertmanager without a configuration. "
+ "Set either the `configuration` or `configText` attribute.";
};
})
(mkIf cfg.enable {
networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port; networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
systemd.services.alertmanager = { systemd.services.alertmanager = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" ]; after = [ "network.target" ];
script = '' script = ''
${pkgs.prometheus-alertmanager.bin}/bin/alertmanager \ ${cfg.package}/bin/alertmanager \
${concatStringsSep " \\\n " cmdlineArgs} ${concatStringsSep " \\\n " cmdlineArgs}
''; '';
@ -140,5 +164,6 @@ in {
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
}; };
}; };
}; })
];
} }

View File

@ -8,7 +8,7 @@ let
systemhealth = with pkgs; stdenv.mkDerivation { systemhealth = with pkgs; stdenv.mkDerivation {
name = "systemhealth-1.0"; name = "systemhealth-1.0";
src = fetchurl { src = fetchurl {
url = "http://www.brianlane.com/static/downloads/systemhealth/systemhealth-1.0.tar.bz2"; url = "https://www.brianlane.com/downloads/systemhealth/systemhealth-1.0.tar.bz2";
sha256 = "1q69lz7hmpbdpbz36zb06nzfkj651413n9icx0njmyr3xzq1j9qy"; sha256 = "1q69lz7hmpbdpbz36zb06nzfkj651413n9icx0njmyr3xzq1j9qy";
}; };
buildInputs = [ python ]; buildInputs = [ python ];

View File

@ -176,10 +176,8 @@ in
''; '';
serviceConfig = { serviceConfig = {
Type="forking";
PIDFile="/run/glusterd.pid";
LimitNOFILE=65536; LimitNOFILE=65536;
ExecStart="${glusterfs}/sbin/glusterd -p /run/glusterd.pid --log-level=${cfg.logLevel} ${toString cfg.extraFlags}"; ExecStart="${glusterfs}/sbin/glusterd --no-daemon --log-level=${cfg.logLevel} ${toString cfg.extraFlags}";
KillMode=cfg.killMode; KillMode=cfg.killMode;
TimeoutStopSec=cfg.stopKillTimeout; TimeoutStopSec=cfg.stopKillTimeout;
}; };

View File

@ -185,7 +185,7 @@ in
PermissionsStartOnly = true; PermissionsStartOnly = true;
User = if cfg.dropPrivileges then "consul" else null; User = if cfg.dropPrivileges then "consul" else null;
Restart = "on-failure"; Restart = "on-failure";
TimeoutStartSec = "0"; TimeoutStartSec = "infinity";
} // (optionalAttrs (cfg.leaveOnStop) { } // (optionalAttrs (cfg.leaveOnStop) {
ExecStop = "${cfg.package.bin}/bin/consul leave"; ExecStop = "${cfg.package.bin}/bin/consul leave";
}); });

View File

@ -58,6 +58,9 @@ let
${text} ${text}
''; in "${dir}/bin/${name}"; ''; in "${dir}/bin/${name}";
defaultInterface = { default = mapAttrs (name: value: cfg."${name}") commonOptions; };
allInterfaces = defaultInterface // cfg.interfaces;
startScript = writeShScript "firewall-start" '' startScript = writeShScript "firewall-start" ''
${helpers} ${helpers}
@ -154,7 +157,7 @@ let
ip46tables -A nixos-fw -p tcp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"} ip46tables -A nixos-fw -p tcp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
'' ''
) cfg.allowedTCPPorts ) cfg.allowedTCPPorts
) cfg.interfaces)} ) allInterfaces)}
# Accept connections to the allowed TCP port ranges. # Accept connections to the allowed TCP port ranges.
${concatStrings (mapAttrsToList (iface: cfg: ${concatStrings (mapAttrsToList (iface: cfg:
@ -164,7 +167,7 @@ let
ip46tables -A nixos-fw -p tcp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"} ip46tables -A nixos-fw -p tcp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
'' ''
) cfg.allowedTCPPortRanges ) cfg.allowedTCPPortRanges
) cfg.interfaces)} ) allInterfaces)}
# Accept packets on the allowed UDP ports. # Accept packets on the allowed UDP ports.
${concatStrings (mapAttrsToList (iface: cfg: ${concatStrings (mapAttrsToList (iface: cfg:
@ -173,7 +176,7 @@ let
ip46tables -A nixos-fw -p udp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"} ip46tables -A nixos-fw -p udp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
'' ''
) cfg.allowedUDPPorts ) cfg.allowedUDPPorts
) cfg.interfaces)} ) allInterfaces)}
# Accept packets on the allowed UDP port ranges. # Accept packets on the allowed UDP port ranges.
${concatStrings (mapAttrsToList (iface: cfg: ${concatStrings (mapAttrsToList (iface: cfg:
@ -183,7 +186,7 @@ let
ip46tables -A nixos-fw -p udp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"} ip46tables -A nixos-fw -p udp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
'' ''
) cfg.allowedUDPPortRanges ) cfg.allowedUDPPortRanges
) cfg.interfaces)} ) allInterfaces)}
# Accept IPv4 multicast. Not a big security risk since # Accept IPv4 multicast. Not a big security risk since
# probably nobody is listening anyway. # probably nobody is listening anyway.
@ -508,15 +511,11 @@ in
}; };
interfaces = mkOption { interfaces = mkOption {
default = { default = { };
default = mapAttrs (name: value: cfg."${name}") commonOptions;
};
type = with types; attrsOf (submodule [ { options = commonOptions; } ]); type = with types; attrsOf (submodule [ { options = commonOptions; } ]);
description = description =
'' ''
Interface-specific open ports. Setting this value will override Interface-specific open ports.
all values of the <literal>networking.firewall.allowed*</literal>
options.
''; '';
}; };
} // commonOptions; } // commonOptions;

View File

@ -11,7 +11,7 @@ let
src = pkgs.fetchurl { src = pkgs.fetchurl {
name = "flashpolicyd_v0.6.zip"; name = "flashpolicyd_v0.6.zip";
url = "http://www.adobe.com/content/dotcom/en/devnet/flashplayer/articles/socket_policy_files/_jcr_content/articlePrerequistes/multiplefiles/node_1277808777771/file.res/flashpolicyd_v0.6%5B1%5D.zip"; url = "https://download.adobe.com/pub/adobe/devnet/flashplayer/articles/socket_policy_files/flashpolicyd_v0.6.zip";
sha256 = "16zk237233npwfq1m4ksy4g5lzy1z9fp95w7pz0cdlpmv0fv9sm3"; sha256 = "16zk237233npwfq1m4ksy4g5lzy1z9fp95w7pz0cdlpmv0fv9sm3";
}; };

View File

@ -0,0 +1,125 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.mxisd;
server = optionalAttrs (cfg.server.name != null) { inherit (cfg.server) name; }
// optionalAttrs (cfg.server.port != null) { inherit (cfg.server) port; };
baseConfig = {
matrix.domain = cfg.matrix.domain;
key.path = "${cfg.dataDir}/signing.key";
storage = {
provider.sqlite.database = "${cfg.dataDir}/mxisd.db";
};
} // optionalAttrs (server != {}) { inherit server; };
# merges baseConfig and extraConfig into a single file
fullConfig = recursiveUpdate baseConfig cfg.extraConfig;
configFile = pkgs.writeText "mxisd-config.yaml" (builtins.toJSON fullConfig);
in {
options = {
services.mxisd = {
enable = mkEnableOption "mxisd matrix federated identity server";
package = mkOption {
type = types.package;
default = pkgs.mxisd;
defaultText = "pkgs.mxisd";
description = "The mxisd package to use";
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/mxisd";
description = "Where data mxisd uses resides";
};
extraConfig = mkOption {
type = types.attrs;
default = {};
description = "Extra options merged into the mxisd configuration";
};
matrix = {
domain = mkOption {
type = types.str;
description = ''
the domain of the matrix homeserver
'';
};
};
server = {
name = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Public hostname of mxisd, if different from the Matrix domain.
'';
};
port = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
HTTP port to listen on (unencrypted)
'';
};
};
};
};
config = mkIf cfg.enable {
users.users = [
{
name = "mxisd";
group = "mxisd";
home = cfg.dataDir;
createHome = true;
shell = "${pkgs.bash}/bin/bash";
uid = config.ids.uids.mxisd;
}
];
users.groups = [
{
name = "mxisd";
gid = config.ids.gids.mxisd;
}
];
systemd.services.mxisd = {
description = "a federated identity server for the matrix ecosystem";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
# mxisd / spring.boot needs the configuration to be named "application.yaml"
preStart = ''
config=${cfg.dataDir}/application.yaml
cp ${configFile} $config
chmod 444 $config
'';
serviceConfig = {
Type = "simple";
User = "mxisd";
Group = "mxisd";
ExecStart = "${cfg.package}/bin/mxisd --spring.config.location=${cfg.dataDir}/ --spring.profiles.active=systemd --java.security.egd=file:/dev/./urandom";
WorkingDirectory = cfg.dataDir;
PermissionsStartOnly = true;
SuccessExitStatus = 143;
Restart = "on-failure";
};
};
};
}

View File

@ -28,8 +28,7 @@ with lib;
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "forking"; serviceConfig.Type = "forking";
script = "${pkgs.oidentd}/sbin/oidentd -u oidentd -g nogroup" + script = "${pkgs.oidentd}/sbin/oidentd -u oidentd -g nogroup";
optionalString config.networking.enableIPv6 " -a ::";
}; };
users.users.oidentd = { users.users.oidentd = {

View File

@ -0,0 +1,121 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.syncthing.relay;
dataDirectory = "/var/lib/syncthing-relay";
relayOptions =
[
"--keys=${dataDirectory}"
"--listen=${cfg.listenAddress}:${toString cfg.port}"
"--status-srv=${cfg.statusListenAddress}:${toString cfg.statusPort}"
"--provided-by=${escapeShellArg cfg.providedBy}"
]
++ optional (cfg.pools != null) "--pools=${escapeShellArg (concatStringsSep "," cfg.pools)}"
++ optional (cfg.globalRateBps != null) "--global-rate=${toString cfg.globalRateBps}"
++ optional (cfg.perSessionRateBps != null) "--per-session-rate=${toString cfg.perSessionRateBps}"
++ cfg.extraOptions;
in {
###### interface
options.services.syncthing.relay = {
enable = mkEnableOption "Syncthing relay service";
listenAddress = mkOption {
type = types.str;
default = "";
example = "1.2.3.4";
description = ''
Address to listen on for relay traffic.
'';
};
port = mkOption {
type = types.port;
default = 22067;
description = ''
Port to listen on for relay traffic. This port should be added to
<literal>networking.firewall.allowedTCPPorts</literal>.
'';
};
statusListenAddress = mkOption {
type = types.str;
default = "";
example = "1.2.3.4";
description = ''
Address to listen on for serving the relay status API.
'';
};
statusPort = mkOption {
type = types.port;
default = 22070;
description = ''
Port to listen on for serving the relay status API. This port should be
added to <literal>networking.firewall.allowedTCPPorts</literal>.
'';
};
pools = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = ''
Relay pools to join. If null, uses the default global pool.
'';
};
providedBy = mkOption {
type = types.str;
default = "";
description = ''
Human-readable description of the provider of the relay (you).
'';
};
globalRateBps = mkOption {
type = types.nullOr types.ints.positive;
default = null;
description = ''
Global bandwidth rate limit in bytes per second.
'';
};
perSessionRateBps = mkOption {
type = types.nullOr types.ints.positive;
default = null;
description = ''
Per session bandwidth rate limit in bytes per second.
'';
};
extraOptions = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Extra command line arguments to pass to strelaysrv.
'';
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.syncthing-relay = {
description = "Syncthing relay service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
StateDirectory = baseNameOf dataDirectory;
Restart = "on-failure";
ExecStart = "${pkgs.syncthing-relay}/bin/strelaysrv ${concatStringsSep " " relayOptions}";
};
};
};
}

View File

@ -92,6 +92,7 @@ let
# Hidden services # Hidden services
+ concatStrings (flip mapAttrsToList cfg.hiddenServices (n: v: '' + concatStrings (flip mapAttrsToList cfg.hiddenServices (n: v: ''
HiddenServiceDir ${torDirectory}/onion/${v.name} HiddenServiceDir ${torDirectory}/onion/${v.name}
${optionalString (v.version != null) "HiddenServiceVersion ${toString v.version}"}
${flip concatMapStrings v.map (p: '' ${flip concatMapStrings v.map (p: ''
HiddenServicePort ${toString p.port} ${p.destination} HiddenServicePort ${toString p.port} ${p.destination}
'')} '')}
@ -667,6 +668,12 @@ in
}; };
})); }));
}; };
version = mkOption {
default = null;
description = "Rendezvous service descriptor version to publish for the hidden service. Currently, versions 2 and 3 are supported. (Default: 2)";
type = types.nullOr (types.enum [ 2 3 ]);
};
}; };
config = { config = {

View File

@ -119,7 +119,7 @@ in
{ Type = "oneshot"; { Type = "oneshot";
ExecStart = "${pkgs.cloud-init}/bin/cloud-init init --local"; ExecStart = "${pkgs.cloud-init}/bin/cloud-init init --local";
RemainAfterExit = "yes"; RemainAfterExit = "yes";
TimeoutSec = "0"; TimeoutSec = "infinity";
StandardOutput = "journal+console"; StandardOutput = "journal+console";
}; };
}; };
@ -137,7 +137,7 @@ in
{ Type = "oneshot"; { Type = "oneshot";
ExecStart = "${pkgs.cloud-init}/bin/cloud-init init"; ExecStart = "${pkgs.cloud-init}/bin/cloud-init init";
RemainAfterExit = "yes"; RemainAfterExit = "yes";
TimeoutSec = "0"; TimeoutSec = "infinity";
StandardOutput = "journal+console"; StandardOutput = "journal+console";
}; };
}; };
@ -153,7 +153,7 @@ in
{ Type = "oneshot"; { Type = "oneshot";
ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=config"; ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=config";
RemainAfterExit = "yes"; RemainAfterExit = "yes";
TimeoutSec = "0"; TimeoutSec = "infinity";
StandardOutput = "journal+console"; StandardOutput = "journal+console";
}; };
}; };
@ -169,7 +169,7 @@ in
{ Type = "oneshot"; { Type = "oneshot";
ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=final"; ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=final";
RemainAfterExit = "yes"; RemainAfterExit = "yes";
TimeoutSec = "0"; TimeoutSec = "infinity";
StandardOutput = "journal+console"; StandardOutput = "journal+console";
}; };
}; };

View File

@ -21,8 +21,8 @@ let
db_database=${cfg.database.name} db_database=${cfg.database.name}
db_username=${cfg.database.user} db_username=${cfg.database.user}
db_password=${cfg.database.password} db_password=${cfg.database.password}
db_port=${if (cfg.database.port != null) then cfg.database.port db_port=${toString (if (cfg.database.port != null) then cfg.database.port
else default_port} else default_port)}
'' ''
} }
${cfg.extraConfig} ${cfg.extraConfig}

View File

@ -86,7 +86,7 @@ let
name= "mediawiki-1.29.1"; name= "mediawiki-1.29.1";
src = pkgs.fetchurl { src = pkgs.fetchurl {
url = "http://download.wikimedia.org/mediawiki/1.29/${name}.tar.gz"; url = "https://releases.wikimedia.org/mediawiki/1.29/${name}.tar.gz";
sha256 = "03mpazbxvb011s2nmlw5p6dc43yjgl5yrsilmj1imyykm57bwb3m"; sha256 = "03mpazbxvb011s2nmlw5p6dc43yjgl5yrsilmj1imyykm57bwb3m";
}; };
@ -311,7 +311,7 @@ in
description = '' description = ''
Any additional text to be appended to MediaWiki's Any additional text to be appended to MediaWiki's
configuration file. This is a PHP script. For configuration configuration file. This is a PHP script. For configuration
settings, see <link xlink:href='http://www.mediawiki.org/wiki/Manual:Configuration_settings'/>. settings, see <link xlink:href='https://www.mediawiki.org/wiki/Manual:Configuration_settings'/>.
''; '';
}; };

View File

@ -13,7 +13,8 @@ let
# Map video driver names to driver packages. FIXME: move into card-specific modules. # Map video driver names to driver packages. FIXME: move into card-specific modules.
knownVideoDrivers = { knownVideoDrivers = {
virtualbox = { modules = [ kernelPackages.virtualboxGuestAdditions ]; driverName = "vboxvideo"; }; # Alias so people can keep using "virtualbox" instead of "vboxvideo".
virtualbox = { modules = [ xorg.xf86videovboxvideo ]; driverName = "vboxvideo"; };
# modesetting does not have a xf86videomodesetting package as it is included in xorgserver # modesetting does not have a xf86videomodesetting package as it is included in xorgserver
modesetting = {}; modesetting = {};
@ -564,8 +565,6 @@ in
knownVideoDrivers; knownVideoDrivers;
in optional (driver != null) ({ inherit name; modules = []; driverName = name; } // driver)); in optional (driver != null) ({ inherit name; modules = []; driverName = name; } // driver));
nixpkgs.config = optionalAttrs (elem "vboxvideo" cfg.videoDrivers) { xorg.abiCompat = "1.18"; };
assertions = [ assertions = [
{ assertion = config.security.polkit.enable; { assertion = config.security.polkit.enable;
message = "X11 requires Polkit to be enabled (security.polkit.enable = true)."; message = "X11 requires Polkit to be enabled (security.polkit.enable = true).";

View File

@ -4,6 +4,6 @@ let self = {
"16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz"; "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
"17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz"; "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
"18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz"; "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
"18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
latest = self."18.03"; latest = self."18.09";
}; in self }; in self

View File

@ -165,7 +165,7 @@ in
ExecStop = "${gce}/bin/google_metadata_script_runner --debug --script-type shutdown"; ExecStop = "${gce}/bin/google_metadata_script_runner --debug --script-type shutdown";
Type = "oneshot"; Type = "oneshot";
RemainAfterExit = true; RemainAfterExit = true;
TimeoutStopSec = 0; TimeoutStopSec = "infinity";
}; };
}; };

View File

@ -99,7 +99,7 @@ in
hydra = handleTest ./hydra {}; hydra = handleTest ./hydra {};
i3wm = handleTest ./i3wm.nix {}; i3wm = handleTest ./i3wm.nix {};
iftop = handleTest ./iftop.nix {}; iftop = handleTest ./iftop.nix {};
incron = handleTest tests/incron.nix {}; incron = handleTest ./incron.nix {};
influxdb = handleTest ./influxdb.nix {}; influxdb = handleTest ./influxdb.nix {};
initrd-network-ssh = handleTest ./initrd-network-ssh {}; initrd-network-ssh = handleTest ./initrd-network-ssh {};
initrdNetwork = handleTest ./initrd-network.nix {}; initrdNetwork = handleTest ./initrd-network.nix {};
@ -179,6 +179,7 @@ in
radicale = handleTest ./radicale.nix {}; radicale = handleTest ./radicale.nix {};
redmine = handleTest ./redmine.nix {}; redmine = handleTest ./redmine.nix {};
rspamd = handleTest ./rspamd.nix {}; rspamd = handleTest ./rspamd.nix {};
rss2email = handleTest ./rss2email.nix {};
rsyslogd = handleTest ./rsyslogd.nix {}; rsyslogd = handleTest ./rsyslogd.nix {};
runInMachine = handleTest ./run-in-machine.nix {}; runInMachine = handleTest ./run-in-machine.nix {};
rxe = handleTest ./rxe.nix {}; rxe = handleTest ./rxe.nix {};
@ -190,10 +191,10 @@ in
smokeping = handleTest ./smokeping.nix {}; smokeping = handleTest ./smokeping.nix {};
snapper = handleTest ./snapper.nix {}; snapper = handleTest ./snapper.nix {};
solr = handleTest ./solr.nix {}; solr = handleTest ./solr.nix {};
#statsd = handleTest ./statsd.nix {}; # statsd is broken: #45946
strongswan-swanctl = handleTest ./strongswan-swanctl.nix {}; strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
sudo = handleTest ./sudo.nix {}; sudo = handleTest ./sudo.nix {};
switchTest = handleTest ./switch-test.nix {}; switchTest = handleTest ./switch-test.nix {};
syncthing-relay = handleTest ./syncthing-relay.nix {};
systemd = handleTest ./systemd.nix {}; systemd = handleTest ./systemd.nix {};
taskserver = handleTest ./taskserver.nix {}; taskserver = handleTest ./taskserver.nix {};
tomcat = handleTest ./tomcat.nix {}; tomcat = handleTest ./tomcat.nix {};

View File

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:blogChannel="http://backend.userland.com/blogChannelModule" version="2.0"><channel><title>NixOS News</title><link>https://nixos.org</link><description>News for NixOS, the purely functional Linux distribution.</description><image><title>NixOS</title><url>https://nixos.org/logo/nixos-logo-only-hires.png</url><link>https://nixos.org/</link></image><item><title>
NixOS 18.09 released
</title><link>https://nixos.org/news.html</link><description>
<a href="https://github.com/NixOS/nixos-artwork/blob/master/releases/18.09-jellyfish/jellyfish.png">
<img class="inline" src="logo/nixos-logo-18.09-jellyfish-lores.png" alt="18.09 Jellyfish logo" with="100" height="87"/>
</a>
NixOS 18.09 “Jellyfish” has been released, the tenth stable release branch.
See the <a href="/nixos/manual/release-notes.html#sec-release-18.09">release notes</a>
for details. You can get NixOS 18.09 ISOs and VirtualBox appliances
from the <a href="nixos/download.html">download page</a>.
For information on how to upgrade from older release branches
to 18.09, check out the
<a href="/nixos/manual/index.html#sec-upgrading">manual section on upgrading</a>.
</description><pubDate>Sat Oct 06 2018 00:00:00 GMT</pubDate></item></channel></rss>

View File

@ -27,6 +27,7 @@ import ./make-test.nix ({ pkgs, lib, ...} : with lib; {
enable = true; enable = true;
databasePassword = "dbPassword"; databasePassword = "dbPassword";
initialRootPassword = "notproduction"; initialRootPassword = "notproduction";
smtp.enable = true;
secrets = { secrets = {
secret = "secret"; secret = "secret";
otp = "otpsecret"; otp = "otpsecret";

View File

@ -10,6 +10,7 @@ import ./make-test.nix ({ pkgs, ...} : {
{ users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; }; { users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
users.users.sybil = { isNormalUser = true; group = "wheel"; }; users.users.sybil = { isNormalUser = true; group = "wheel"; };
imports = [ ../modules/profiles/hardened.nix ]; imports = [ ../modules/profiles/hardened.nix ];
nix.useSandbox = false;
virtualisation.emptyDiskImages = [ 4096 ]; virtualisation.emptyDiskImages = [ 4096 ];
boot.initrd.postDeviceCommands = '' boot.initrd.postDeviceCommands = ''
${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
@ -63,5 +64,11 @@ import ./make-test.nix ({ pkgs, ...} : {
$machine->succeed("mount /dev/disk/by-label/EFISYS /efi"); $machine->succeed("mount /dev/disk/by-label/EFISYS /efi");
$machine->succeed("mountpoint -q /efi"); # now mounted $machine->succeed("mountpoint -q /efi"); # now mounted
}; };
# Test Nix dæmon usage
subtest "nix-daemon", sub {
$machine->fail("su -l nobody -s /bin/sh -c 'nix ping-store'");
$machine->succeed("su -l alice -c 'nix ping-store'") =~ "OK";
};
''; '';
}) })

View File

@ -481,7 +481,7 @@ in {
# Test whether opening encrypted filesystem with keyfile # Test whether opening encrypted filesystem with keyfile
# Checks for regression of missing cryptsetup, when no luks device without # Checks for regression of missing cryptsetup, when no luks device without
# keyfile is configured # keyfile is configured
filesystemEncryptedWithKeyfile = makeInstallerTest "filesystemEncryptedWithKeyfile" encryptedFSWithKeyfile = makeInstallerTest "encryptedFSWithKeyfile"
{ createPartitions = '' { createPartitions = ''
$machine->succeed( $machine->succeed(
"flock /dev/vda parted --script /dev/vda -- mklabel msdos" "flock /dev/vda parted --script /dev/vda -- mklabel msdos"

View File

@ -99,7 +99,7 @@ in pkgs.lib.mapAttrs mkKeyboardTest {
homerow.expect = [ "a" "r" "s" "t" "n" "e" "i" "o" ]; homerow.expect = [ "a" "r" "s" "t" "n" "e" "i" "o" ];
}; };
extraConfig.i18n.consoleKeyMap = "en-latin9"; extraConfig.i18n.consoleKeyMap = "colemak/colemak";
extraConfig.services.xserver.layout = "us"; extraConfig.services.xserver.layout = "us";
extraConfig.services.xserver.xkbVariant = "colemak"; extraConfig.services.xserver.xkbVariant = "colemak";
}; };

21
nixos/tests/mxisd.nix Normal file
View File

@ -0,0 +1,21 @@
import ./make-test.nix ({ pkgs, ... } : {
name = "mxisd";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ mguentner ];
};
nodes = {
server_mxisd = args : {
services.mxisd.enable = true;
services.mxisd.matrix.domain = "example.org";
};
};
testScript = ''
startAll;
$server_mxisd->waitForUnit("mxisd.service");
$server_mxisd->waitForOpenPort(8090);
$server_mxisd->succeed("curl -Ssf \"http://127.0.0.1:8090/_matrix/identity/api/v1\"")
'';
})

View File

@ -13,6 +13,25 @@ import ./make-test.nix {
}]; }];
}]; }];
rules = [ ''testrule = count(up{job="prometheus"})'' ]; rules = [ ''testrule = count(up{job="prometheus"})'' ];
# a very simple version of the alertmanager configuration just to see if
# configuration checks & service startup are working
alertmanager = {
enable = true;
listenAddress = "[::1]";
port = 9093;
configuration = {
route.receiver = "webhook";
receivers = [
{
name = "webhook";
webhook_configs = [
{ url = "http://localhost"; }
];
}
];
};
};
}; };
}; };
}; };
@ -22,5 +41,8 @@ import ./make-test.nix {
$one->waitForUnit("prometheus.service"); $one->waitForUnit("prometheus.service");
$one->waitForOpenPort(9090); $one->waitForOpenPort(9090);
$one->succeed("curl -s http://127.0.0.1:9090/metrics"); $one->succeed("curl -s http://127.0.0.1:9090/metrics");
$one->waitForUnit("alertmanager.service");
$one->waitForOpenPort("9093");
$one->succeed("curl -f -s http://localhost:9093/");
''; '';
} }

View File

@ -235,6 +235,7 @@ in
services.rspamd = { services.rspamd = {
enable = true; enable = true;
postfix.enable = true; postfix.enable = true;
workers.rspamd_proxy.type = "proxy";
}; };
}; };
testScript = '' testScript = ''

66
nixos/tests/rss2email.nix Normal file
View File

@ -0,0 +1,66 @@
import ./make-test.nix {
name = "opensmtpd";
nodes = {
server = { pkgs, ... }: {
imports = [ common/user-account.nix ];
services.nginx = {
enable = true;
virtualHosts."127.0.0.1".root = ./common/webroot;
};
services.rss2email = {
enable = true;
to = "alice@localhost";
interval = "1";
config.from = "test@example.org";
feeds = {
nixos = { url = "http://127.0.0.1/news-rss.xml"; };
};
};
services.opensmtpd = {
enable = true;
extraServerArgs = [ "-v" ];
serverConfiguration = ''
listen on 127.0.0.1
action dovecot_deliver mda \
"${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
match from any for local action dovecot_deliver
'';
};
services.dovecot2 = {
enable = true;
enableImap = true;
mailLocation = "maildir:~/mail";
protocols = [ "imap" ];
};
environment.systemPackages = let
checkMailLanded = pkgs.writeScriptBin "check-mail-landed" ''
#!${pkgs.python3.interpreter}
import imaplib
with imaplib.IMAP4('127.0.0.1', 143) as imap:
imap.login('alice', 'foobar')
imap.select()
status, refs = imap.search(None, 'ALL')
print("=====> Result of search for all:", status, refs)
assert status == 'OK'
assert len(refs) > 0
status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
assert status == 'OK'
'';
in [ pkgs.opensmtpd checkMailLanded ];
};
};
testScript = ''
startAll;
$server->waitForUnit("network-online.target");
$server->waitForUnit("opensmtpd");
$server->waitForUnit("dovecot2");
$server->waitForUnit("nginx");
$server->waitForUnit("rss2email");
$server->waitUntilSucceeds('check-mail-landed >&2');
'';
}

View File

@ -1,51 +0,0 @@
import ./make-test.nix ({ pkgs, lib, ... }:
with lib;
{
name = "statsd";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ ma27 ];
};
machine = {
services.statsd.enable = true;
services.statsd.backends = [ "statsd-influxdb-backend" "console" ];
services.statsd.extraConfig = ''
influxdb: {
username: "root",
password: "root",
database: "statsd"
}
'';
services.influxdb.enable = true;
systemd.services.influx-init = {
description = "Setup Influx Test Base";
after = [ "influxdb.service" ];
before = [ "statsd.service" ];
script = ''
echo "CREATE DATABASE statsd" | ${pkgs.influxdb}/bin/influx
'';
};
};
testScript = ''
$machine->start();
$machine->waitForUnit("statsd.service");
$machine->waitForOpenPort(8126);
# check state of the `statsd` server
$machine->succeed('[ "health: up" = "$(echo health | nc 127.0.0.1 8126 -w 120 -N)" ];');
# confirm basic examples for metrics derived from docs:
# https://github.com/etsy/statsd/blob/v0.8.0/README.md#usage and
# https://github.com/etsy/statsd/blob/v0.8.0/docs/admin_interface.md
$machine->succeed("echo 'foo:1|c' | nc -u -w 0 127.0.0.1 8125");
$machine->succeed("echo counters | nc -w 120 127.0.0.1 8126 -N | grep foo");
$machine->succeed("echo 'delcounters foo' | nc -w 120 127.0.0.1 8126 -N");
$machine->fail("echo counters | nc -w 120 127.0.0.1 8126 -N | grep foo");
'';
})

View File

@ -0,0 +1,22 @@
import ./make-test.nix ({ lib, pkgs, ... }: {
name = "syncthing-relay";
meta.maintainers = with pkgs.stdenv.lib.maintainers; [ delroth ];
machine = {
environment.systemPackages = [ pkgs.jq ];
services.syncthing.relay = {
enable = true;
providedBy = "nixos-test";
pools = []; # Don't connect to any pool while testing.
port = 12345;
statusPort = 12346;
};
};
testScript = ''
$machine->waitForUnit("syncthing-relay.service");
$machine->waitForOpenPort(12345);
$machine->waitForOpenPort(12346);
$machine->succeed("curl http://localhost:12346/status | jq -r '.options.\"provided-by\"'") =~ /nixos-test/ or die;
'';
})

View File

@ -1,10 +1,10 @@
{ stdenv, fetchFromGitHub, cmake, pkgconfig, git, doxygen, graphviz { stdenv, fetchFromGitHub, cmake, pkgconfig, git, doxygen, graphviz
, boost, miniupnpc, openssl, unbound, cppzmq , boost, miniupnpc, openssl, unbound, cppzmq
, zeromq, pcsclite, readline , zeromq, pcsclite, readline, libsodium
}: }:
let let
version = "0.12.0.0"; version = "0.12.6.0";
in in
stdenv.mkDerivation { stdenv.mkDerivation {
name = "aeon-${version}"; name = "aeon-${version}";
@ -12,16 +12,16 @@ stdenv.mkDerivation {
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "aeonix"; owner = "aeonix";
repo = "aeon"; repo = "aeon";
rev = "v${version}"; rev = "v${version}-aeon";
fetchSubmodules = true; fetchSubmodules = true;
sha256 = "1schzlscslhqq7zcd68b1smqlaf7k789x1rwpplm7qi5iz9a8cfr"; sha256 = "19r1snqwixccl27jwv6i0s86qck036pdlhyhl891bbiyvi55h14n";
}; };
nativeBuildInputs = [ cmake pkgconfig git doxygen graphviz ]; nativeBuildInputs = [ cmake pkgconfig git doxygen graphviz ];
buildInputs = [ buildInputs = [
boost miniupnpc openssl unbound boost miniupnpc openssl unbound
cppzmq zeromq pcsclite readline cppzmq zeromq pcsclite readline libsodium
]; ];
cmakeFlags = [ cmakeFlags = [

View File

@ -1,22 +1,23 @@
{ stdenv, python3, pkgconfig, which, libtool, autoconf, automake, { stdenv, python3, pkgconfig, which, libtool, autoconf, automake,
autogen, sqlite, gmp, zlib, fetchFromGitHub }: autogen, sqlite, gmp, zlib, fetchFromGitHub, fetchpatch }:
with stdenv.lib; with stdenv.lib;
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "clightning-${version}"; name = "clightning-${version}";
version = "0.6.1"; version = "0.6.2";
src = fetchFromGitHub { src = fetchFromGitHub {
fetchSubmodules = true; fetchSubmodules = true;
owner = "ElementsProject"; owner = "ElementsProject";
repo = "lightning"; repo = "lightning";
rev = "v${version}"; rev = "v${version}";
sha256 = "0qx30i1c97ic4ii8bm0sk9dh76nfg4ihl9381gxjj14i4jr1q8y4"; sha256 = "18yns0yyf7kc4p4n1crxdqh37j9faxkx216nh2ip7cxj4x8bf9gx";
}; };
enableParallelBuilding = true; enableParallelBuilding = true;
buildInputs = [ which sqlite gmp zlib autoconf libtool automake autogen python3 pkgconfig ]; nativeBuildInputs = [ autoconf autogen automake libtool pkgconfig which ];
buildInputs = [ sqlite gmp zlib python3 ];
makeFlags = [ "prefix=$(out)" ]; makeFlags = [ "prefix=$(out)" ];
@ -24,6 +25,15 @@ stdenv.mkDerivation rec {
./configure --prefix=$out --disable-developer --disable-valgrind ./configure --prefix=$out --disable-developer --disable-valgrind
''; '';
# NOTE: remove me in 0.6.3
patches = [
(fetchpatch {
name = "clightning_0_6_2-compile-error.patch";
url = https://patch-diff.githubusercontent.com/raw/ElementsProject/lightning/pull/2070.patch;
sha256 = "1576fqik5zcpz5zsvp2ks939bgiz0jc22yf24iv61000dd5j6na9";
})
];
postPatch = '' postPatch = ''
echo "" > tools/refresh-submodules.sh echo "" > tools/refresh-submodules.sh
patchShebangs tools/generate-wire.py patchShebangs tools/generate-wire.py

View File

@ -6,19 +6,20 @@
, qtwebengine, qtx11extras, qtxmlpatterns , qtwebengine, qtx11extras, qtxmlpatterns
, monero, unbound, readline, boost, libunwind , monero, unbound, readline, boost, libunwind
, libsodium, pcsclite, zeromq, cppzmq, pkgconfig , libsodium, pcsclite, zeromq, cppzmq, pkgconfig
, hidapi
}: }:
with stdenv.lib; with stdenv.lib;
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "monero-gui-${version}"; name = "monero-gui-${version}";
version = "0.13.0.3"; version = "0.13.0.4";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "monero-project"; owner = "monero-project";
repo = "monero-gui"; repo = "monero-gui";
rev = "v${version}"; rev = "v${version}";
sha256 = "1rvxwz7p1yw9c817n07m60xvmv2p97s82sfzwkg2x880fpxb0gj9"; sha256 = "142yj5s15bhm300dislq3x5inw1f37shnrd5vyj78jjcvry3wymw";
}; };
nativeBuildInputs = [ qmake pkgconfig ]; nativeBuildInputs = [ qmake pkgconfig ];
@ -29,7 +30,7 @@ stdenv.mkDerivation rec {
qtwebchannel qtwebengine qtx11extras qtwebchannel qtwebengine qtx11extras
qtxmlpatterns monero unbound readline qtxmlpatterns monero unbound readline
boost libunwind libsodium pcsclite zeromq boost libunwind libsodium pcsclite zeromq
cppzmq makeWrapper cppzmq makeWrapper hidapi
]; ];
patches = [ patches = [
@ -86,7 +87,7 @@ stdenv.mkDerivation rec {
description = "Private, secure, untraceable currency"; description = "Private, secure, untraceable currency";
homepage = https://getmonero.org/; homepage = https://getmonero.org/;
license = licenses.bsd3; license = licenses.bsd3;
platforms = platforms.all; platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ rnhmjoj ]; maintainers = with maintainers; [ rnhmjoj ];
}; };
} }

View File

@ -11,12 +11,12 @@ with stdenv.lib;
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "monero-${version}"; name = "monero-${version}";
version = "0.13.0.3"; version = "0.13.0.4";
src = fetchgit { src = fetchgit {
url = "https://github.com/monero-project/monero.git"; url = "https://github.com/monero-project/monero.git";
rev = "v${version}"; rev = "v${version}";
sha256 = "03qx8y74zxnmabdi5r3a274pp8zvm3xhkdwi1xf5sb40vf4sfmwb"; sha256 = "1ambgakapijhsi1pd70vw8vvnlwa3nid944lqkbfq3wl25lmc70d";
}; };
nativeBuildInputs = [ cmake pkgconfig git ]; nativeBuildInputs = [ cmake pkgconfig git ];

View File

@ -3,13 +3,13 @@
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "nano-wallet-${version}"; name = "nano-wallet-${version}";
version = "16.2"; version = "16.3";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "nanocurrency"; owner = "nanocurrency";
repo = "raiblocks"; repo = "raiblocks";
rev = "V${version}"; rev = "V${version}";
sha256 = "18zp4xl5iwwrnzrqzsygdrym5565v8dpfz0jxczw21896kw1i9i7"; sha256 = "1rhq7qzfd8li33pmzcjxrhbbgdklxlcijam62s385f8yqjwy80dz";
fetchSubmodules = true; fetchSubmodules = true;
}; };

View File

@ -1,9 +1,9 @@
let let
version = "2.1.3"; version = "2.2.1";
sha256 = "0il18r229r32jzwsjksp8cc63rp6cf6c0j5dvbfzrnv1zndw0cg3"; sha256 = "1m65pks2jk83j82f1i901p03qb54xhcp6gfjngcm975187zzvmcq";
cargoSha256 = "08dyb0lgf66zfq9xmfkhcn6rj070d49dm0rjl3v39sfag6sryz20"; cargoSha256 = "1mf1jgphwvhlqkvzrgbhnqfyqgf3ljc1l9zckyilzmw5k4lf4g1w";
patches = [ patches = [
./patches/vendored-sources-2.1.patch ./patches/vendored-sources-2.2.patch
]; ];
in in
import ./parity.nix { inherit version sha256 cargoSha256 patches; } import ./parity.nix { inherit version sha256 cargoSha256 patches; }

View File

@ -1,7 +1,7 @@
let let
version = "2.0.8"; version = "2.1.6";
sha256 = "1bz6dvx8wxhs3g447s62d9091sard2x7w2zd6iy7hf76wg0p73hr"; sha256 = "0njkypszi0fjh9y0zfgxbycs4c1wpylk7wx6xn1pp6gqvvri6hav";
cargoSha256 = "0wj93md87fr7a9ag73h0rd9xxqscl0lhbj3g3kvnqrqz9xxajing"; cargoSha256 = "116sj7pi50k5gb1i618g4pgckqaf8kb13jh2a3shj2kwywzzcgjs";
patches = [ ./patches/vendored-sources-2.0.patch ]; patches = [ ./patches/vendored-sources-2.1.patch ];
in in
import ./parity.nix { inherit version sha256 cargoSha256 patches; } import ./parity.nix { inherit version sha256 cargoSha256 patches; }

View File

@ -14,7 +14,7 @@ index 72652ad2f..3c0eca89a 100644
+ +
+[source."https://github.com/nikvolf/parity-tokio-ipc"] +[source."https://github.com/nikvolf/parity-tokio-ipc"]
+git = "https://github.com/nikvolf/parity-tokio-ipc" +git = "https://github.com/nikvolf/parity-tokio-ipc"
+rev = "7c9bbe3bc45d8e72a92b0951acc877da228abd50" +rev = "c0f80b40399d7f08ef1e6869569640eb28645f56"
+replace-with = "vendored-sources" +replace-with = "vendored-sources"
+ +
+[source."https://github.com/nikvolf/tokio-named-pipes"] +[source."https://github.com/nikvolf/tokio-named-pipes"]

View File

@ -44,7 +44,7 @@ index 72652ad2f..3c0eca89a 100644
+ +
+[source."https://github.com/paritytech/jsonrpc.git"] +[source."https://github.com/paritytech/jsonrpc.git"]
+git = "https://github.com/paritytech/jsonrpc.git" +git = "https://github.com/paritytech/jsonrpc.git"
+branch = "parity-1.11" +branch = "parity-2.2"
+replace-with = "vendored-sources" +replace-with = "vendored-sources"
+ +
+[source."https://github.com/paritytech/libusb-rs"] +[source."https://github.com/paritytech/libusb-rs"]

View File

@ -1,4 +1,5 @@
{ stdenv, fetchurl, makeWrapper, pkgconfig, alsaLib, dbus, libjack2 { stdenv, fetchurl, makeWrapper, pkgconfig, alsaLib, dbus, libjack2
, wafHook
, python2Packages}: , python2Packages}:
let let
@ -8,19 +9,14 @@ in stdenv.mkDerivation rec {
version = "8"; version = "8";
src = fetchurl { src = fetchurl {
url = "http://repo.or.cz/a2jmidid.git/snapshot/7383d268c4bfe85df9f10df6351677659211d1ca.tar.gz"; url = "https://repo.or.cz/a2jmidid.git/snapshot/7383d268c4bfe85df9f10df6351677659211d1ca.tar.gz";
sha256 = "06dgf5655znbvrd7fhrv8msv6zw8vk0hjqglcqkh90960mnnmwz7"; sha256 = "06dgf5655znbvrd7fhrv8msv6zw8vk0hjqglcqkh90960mnnmwz7";
}; };
nativeBuildInputs = [ pkgconfig ]; nativeBuildInputs = [ pkgconfig makeWrapper wafHook ];
buildInputs = [ makeWrapper alsaLib dbus libjack2 python dbus-python ]; buildInputs = [ alsaLib dbus libjack2 python dbus-python ];
configurePhase = "${python.interpreter} waf configure --prefix=$out"; postInstall = ''
buildPhase = "${python.interpreter} waf";
installPhase = ''
${python.interpreter} waf install
wrapProgram $out/bin/a2j_control --set PYTHONPATH $PYTHONPATH wrapProgram $out/bin/a2j_control --set PYTHONPATH $PYTHONPATH
''; '';

Some files were not shown because too many files have changed in this diff Show More