Merge branch master into staging

This commit is contained in:
Eric Seidel 2015-04-06 18:58:08 -07:00
commit 6ec373d776
430 changed files with 8926 additions and 4887 deletions

View File

@ -612,15 +612,45 @@ sed -i '/ = data_files/d' setup.py</programlisting>
<section xml:id="ssec-language-ruby"><title>Ruby</title>
<para>For example, to package the yajl-ruby package, use gem-nix:</para>
<para>There is currently support for bundling applications that are packaged as Ruby gems. The utility "bundix" allows you to write a <filename>Gemfile</filename>, let bundler create a <filename>Gemfile.lock</filename>, and then convert
this into a Nix expression that contains all gem dependencies automatically.</para>
<para>For example, to package sensu, we did:</para>
<screen>
$ nix-env -i gem-nix
$ gem-nix --no-user-install --nix-file=pkgs/development/interpreters/ruby/generated.nix yajl-ruby
$ nix-build -A rubyPackages.yajl-ruby
</screen>
</section>
<![CDATA[$ cd pkgs/servers/monitoring
$ mkdir sensu
$ cat > Gemfile
source 'https://rubygems.org'
gem 'sensu'
$ bundler package --path /tmp/vendor/bundle
$ $(nix-build '<nixpkgs>' -A bundix)/bin/bundix
$ cat > default.nix
{ lib, bundlerEnv, ruby }:
bundlerEnv {
name = "sensu-0.17.1";
inherit ruby;
gemfile = ./Gemfile;
lockfile = ./Gemfile.lock;
gemset = ./gemset.nix;
meta = with lib; {
description = "A monitoring framework that aims to be simple, malleable,
and scalable.";
homepage = http://sensuapp.org/;
license = with licenses; mit;
maintainers = with maintainers; [ theuni ];
platforms = platforms.unix;
};
}]]>
</screen>
<para>Please check in the <filename>Gemfile</filename>, <filename>Gemfile.lock</filename> and the <filename>gemset.nix</filename> so future updates can be run easily.
</para>
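<para>The resulting expression can then be referenced from the top-level package set in the usual <literal>callPackage</literal> style (a sketch; the attribute name and path are illustrative):</para>
<screen>
<![CDATA[# e.g. in pkgs/top-level/all-packages.nix (illustrative location)
sensu = callPackage ../servers/monitoring/sensu { };]]>
</screen>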
</section>
<section xml:id="ssec-language-go"><title>Go</title>

View File

@ -54,6 +54,7 @@
copumpkin = "Dan Peebles <pumpkingod@gmail.com>";
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
cwoac = "Oliver Matthews <oliver@codersoffortune.net>";
DamienCassou = "Damien Cassou <damien.cassou@gmail.com>";
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
@ -102,6 +103,7 @@
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
joamaki = "Jussi Maki <joamaki@gmail.com>";
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
@ -153,6 +155,7 @@
pjones = "Peter Jones <pjones@devalot.com>";
pkmx = "Chih-Mao Chen <pkmx.tw@gmail.com>";
plcplc = "Philip Lykke Carlsen <plcplc@gmail.com>";
pmahoney = "Patrick Mahoney <pat@polycrystal.org>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
@ -189,6 +192,7 @@
tailhook = "Paul Colomiets <paul@colomiets.name>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
theuni = "Christian Theune <ct@flyingcircus.io>";
thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
tomberek = "Thomas Bereknyei <tomberek@gmail.com>";

View File

@ -9,25 +9,69 @@ rec {
/* Evaluate a set of modules. The result is a set of two
attributes: options: the nested set of all option declarations,
and config: the nested set of all option values. */
evalModules = { modules, prefix ? [], args ? {}, check ? true }:
and config: the nested set of all option values.
!!! Please think twice before adding to this argument list! The more
that is specified here instead of in the modules themselves, the harder
it is to transparently move a set of modules to be a submodule of another
config (as the proper arguments need to be replicated at each call to
evalModules), and the less declarative the module set is. */
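/* A minimal sketch of a call (the option name `foo' is hypothetical):
     (evalModules {
       modules = [ ({ lib, ... }: { options.foo = lib.mkOption { default = 1; }; }) ];
     }).config.foo
   evaluates to 1. */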
evalModules = { modules
, prefix ? []
, # This will be removed in the future. Prefer the _module.args option instead.
args ? {}
, # This will be removed in the future. Prefer the _module.check option instead.
check ? true
}:
let
args' = args // { lib = import ./.; } // result;
closed = closeModules modules args';
# This internal module declares internal options under the `_module'
# attribute. These options are fragile, as they are used by the
# module system to change the interpretation of modules.
internalModule = rec {
_file = ./modules.nix;
key = _file;
options = {
_module.args = mkOption {
type = types.attrsOf types.unspecified;
internal = true;
description = "Arguments passed to each module.";
};
_module.check = mkOption {
type = types.uniq types.bool;
internal = true;
default = check;
description = "Whether to check whether all option definitions have matching declarations.";
};
};
config = {
_module.args = args;
};
};
closed = closeModules (modules ++ [ internalModule ]) { inherit config options; lib = import ./.; };
# Note: the list of modules is reversed to maintain backward
# compatibility with the old module system. Not sure if this is
# the most sensible policy.
options = mergeModules prefix (reverseList closed);
# Traverse options and extract the option values into the final
# config set. At the same time, check whether all option
# definitions have matching declarations.
# !!! _module.check's value can't depend on any other config values
# without an infinite recursion. One way around this is to make the
# 'config' passed around to the modules be unconditionally unchecked,
# and only do the check in 'result'.
config = yieldConfig prefix options;
yieldConfig = prefix: set:
let res = removeAttrs (mapAttrs (n: v:
if isOption v then v.value
else yieldConfig (prefix ++ [n]) v) set) ["_definedNames"];
in
if check && set ? _definedNames then
if options._module.check.value && set ? _definedNames then
fold (m: res:
fold (name: res:
if set ? ${name} then res else throw "The option `${showOption (prefix ++ [name])}' defined in `${m.file}' does not exist.")
@ -43,7 +87,7 @@ rec {
let
toClosureList = file: parentKey: imap (n: x:
if isAttrs x || isFunction x then
unifyModuleSyntax file "${parentKey}:anon-${toString n}" (applyIfFunction x args)
unifyModuleSyntax file "${parentKey}:anon-${toString n}" (unpackSubmodule applyIfFunction x args)
else
unifyModuleSyntax (toString x) (toString x) (applyIfFunction (import x) args));
in
@ -74,7 +118,39 @@ rec {
config = removeAttrs m ["key" "_file" "require" "imports"];
};
applyIfFunction = f: arg: if isFunction f then f arg else f;
applyIfFunction = f: arg@{ config, options, lib }: if isFunction f then
let
# Module arguments are resolved in a strict manner when attribute set
# deconstruction is used. As the arguments are now defined with the
# config._module.args option, the strictness used on the attribute
# set argument would cause an infinite loop, if the result of the
# option is given as argument.
#
# To work around the strictness issue on the destructuring of the
# attribute set argument, we create a new attribute set which is
# constructed to satisfy the expected set of attributes. Thus calling
# a module will strictly resolve the attribute names used as arguments,
# but not their values; the values forward the result of the
# evaluation of the option.
requiredArgs = builtins.attrNames (builtins.functionArgs f);
extraArgs = builtins.listToAttrs (map (name: {
inherit name;
value = config._module.args.${name};
}) requiredArgs);
in f (extraArgs // arg)
else
f;
/* We have to pack and unpack submodules. We cannot wrap the expected
result of the function as we would no longer be able to list the arguments
of the submodule. (see applyIfFunction) */
unpackSubmodule = unpack: m: args:
if isType "submodule" m then
{ _file = m.file; } // (unpack m.submodule args)
else unpack m args;
packSubmodule = file: m:
{ _type = "submodule"; file = file; submodule = m; };
/* Merge a list of modules. This will recurse over the option
declarations in all modules, combining them into a single set.
@ -106,12 +182,9 @@ rec {
else []
) configs);
nrOptions = count (m: isOption m.options) decls;
# Process mkMerge and mkIf properties.
defns' = concatMap (m:
if m.config ? ${name}
then map (m': { inherit (m) file; value = m'; }) (dischargeProperties m.config.${name})
else []
) configs;
# Extract the definitions for this loc
defns' = map (m: { inherit (m) file; value = m.config.${name}; })
(filter (m: m.config ? ${name}) configs);
in
if nrOptions == length decls then
let opt = fixupOptionType loc (mergeOptionDecls loc decls);
@ -156,15 +229,12 @@ rec {
current option declaration as the file used for the submodule. If the
submodule defines any filename, then we ignore the enclosing option file. */
options' = toList opt.options.options;
addModuleFile = m:
if isFunction m then args: { _file = opt.file; } // (m args)
else { _file = opt.file; } // m;
coerceOption = file: opt:
if isFunction opt then args: { _file = file; } // (opt args)
else { _file = file; options = opt; };
if isFunction opt then packSubmodule file opt
else packSubmodule file { options = opt; };
getSubModules = opt.options.type.getSubModules or null;
submodules =
if getSubModules != null then map addModuleFile getSubModules ++ res.options
if getSubModules != null then map (packSubmodule opt.file) getSubModules ++ res.options
else if opt.options ? options then map (coerceOption opt.file) options' ++ res.options
else res.options;
in opt.options // res //
@ -177,27 +247,17 @@ rec {
config value. */
evalOptionValue = loc: opt: defs:
let
# Process mkOverride properties, adding in the default
# value specified in the option declaration (if any).
defsFinal' = filterOverrides
((if opt ? default then [{ file = head opt.declarations; value = mkOptionDefault opt.default; }] else []) ++ defs);
# Sort mkOrder properties.
defsFinal =
# Avoid sorting if we don't have to.
if any (def: def.value._type or "" == "order") defsFinal'
then sortProperties defsFinal'
else defsFinal';
# Add in the default value for this option, if any.
defs' = (optional (opt ? default)
{ file = head opt.declarations; value = mkOptionDefault opt.default; }) ++ defs;
# Handle properties, check types, and merge everything together
inherit (mergeDefinitions loc opt.type defs') isDefined defsFinal mergedValue;
files = map (def: def.file) defsFinal;
# Type-check the remaining definitions, and merge them if
# possible.
merged =
if defsFinal == [] then
throw "The option `${showOption loc}' is used but not defined."
else
fold (def: res:
if opt.type.check def.value then res
else throw "The option value `${showOption loc}' in `${def.file}' is not a ${opt.type.name}.")
(opt.type.merge loc defsFinal) defsFinal;
if isDefined then mergedValue
else throw "The option `${showOption loc}' is used but not defined.";
# Finally, apply the apply function to the merged
# value. This allows options to yield a value computed
# from the definitions.
@ -205,10 +265,42 @@ rec {
in opt //
{ value = addErrorContext "while evaluating the option `${showOption loc}':" value;
definitions = map (def: def.value) defsFinal;
isDefined = defsFinal != [];
inherit files;
inherit isDefined files;
};
# Merge definitions of a value of a given type
mergeDefinitions = loc: type: defs: rec {
defsFinal =
let
# Process mkMerge and mkIf properties
processIfAndMerge = defs: concatMap (m:
map (value: { inherit (m) file; inherit value; }) (dischargeProperties m.value)
) defs;
# Process mkOverride properties
processOverride = defs: filterOverrides defs;
# Sort mkOrder properties
processOrder = defs:
# Avoid sorting if we don't have to.
if any (def: def.value._type or "" == "order") defs
then sortProperties defs
else defs;
in
processOrder (processOverride (processIfAndMerge defs));
# Type-check the remaining definitions, and merge them
mergedValue = fold (def: res:
if type.check def.value then res
else throw "The option value `${showOption loc}' in `${def.file}' is not a ${type.name}.")
(type.merge loc defsFinal) defsFinal;
isDefined = defsFinal != [];
optionalValue =
if isDefined then { value = mergedValue; }
else {};
};
/* Given a config set, expand mkMerge properties, and push down the
other properties into the children. The result is a list of
config sets that do not have properties at top-level. For

View File

@ -57,13 +57,17 @@ checkConfigError() {
fi
}
# Check boolean option.
checkConfigOutput "false" config.enable ./declare-enable.nix
checkConfigError 'The option .* defined in .* does not exist.' config.enable ./define-enable.nix
# Check mkForce without submodules.
set -- config.enable ./declare-enable.nix ./define-enable.nix
checkConfigOutput "true" "$@"
checkConfigOutput "false" "$@" ./define-force-enable.nix
checkConfigOutput "false" "$@" ./define-enable-force.nix
# Check mkForce with option and submodules.
checkConfigError 'attribute .*foo.* .* not found' config.loaOfSub.foo.enable ./declare-loaOfSub-any-enable.nix
checkConfigOutput 'false' config.loaOfSub.foo.enable ./declare-loaOfSub-any-enable.nix ./define-loaOfSub-foo.nix
set -- config.loaOfSub.foo.enable ./declare-loaOfSub-any-enable.nix ./define-loaOfSub-foo-enable.nix
@ -73,6 +77,7 @@ checkConfigOutput 'false' "$@" ./define-loaOfSub-force-foo-enable.nix
checkConfigOutput 'false' "$@" ./define-loaOfSub-foo-force-enable.nix
checkConfigOutput 'false' "$@" ./define-loaOfSub-foo-enable-force.nix
# Check overriding effect of mkForce on submodule definitions.
checkConfigError 'attribute .*bar.* .* not found' config.loaOfSub.bar.enable ./declare-loaOfSub-any-enable.nix ./define-loaOfSub-foo.nix
checkConfigOutput 'false' config.loaOfSub.bar.enable ./declare-loaOfSub-any-enable.nix ./define-loaOfSub-foo.nix ./define-loaOfSub-bar.nix
set -- config.loaOfSub.bar.enable ./declare-loaOfSub-any-enable.nix ./define-loaOfSub-foo.nix ./define-loaOfSub-bar-enable.nix
@ -82,6 +87,26 @@ checkConfigError 'attribute .*bar.* .* not found' "$@" ./define-loaOfSub-force-f
checkConfigOutput 'true' "$@" ./define-loaOfSub-foo-force-enable.nix
checkConfigOutput 'true' "$@" ./define-loaOfSub-foo-enable-force.nix
# Check mkIf with submodules.
checkConfigError 'attribute .*foo.* .* not found' config.loaOfSub.foo.enable ./declare-enable.nix ./declare-loaOfSub-any-enable.nix
set -- config.loaOfSub.foo.enable ./declare-enable.nix ./declare-loaOfSub-any-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-if-loaOfSub-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-loaOfSub-if-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-loaOfSub-foo-if-enable.nix
checkConfigOutput 'false' "$@" ./define-loaOfSub-foo-enable-if.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-if-loaOfSub-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-loaOfSub-if-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-loaOfSub-foo-if-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-loaOfSub-foo-enable-if.nix
# Check _module.args.
checkConfigOutput "true" config.enable ./declare-enable.nix ./custom-arg-define-enable.nix
# Check _module.check.
set -- config.enable ./declare-enable.nix ./define-enable.nix ./define-loaOfSub-foo.nix
checkConfigError 'The option .* defined in .* does not exist.' "$@"
checkConfigOutput "true" "$@" ./define-module-check.nix
cat <<EOF
====== module tests ======
$pass Pass

View File

@ -0,0 +1,8 @@
{ lib, custom, ... }:
{
config = {
_module.args.custom = true;
enable = custom;
};
}

View File

@ -0,0 +1,5 @@
{ config, lib, ... }:
lib.mkIf config.enable {
loaOfSub.foo.enable = true;
}

View File

@ -0,0 +1,5 @@
{ config, lib, ... }:
{
loaOfSub.foo.enable = lib.mkIf config.enable true;
}

View File

@ -0,0 +1,7 @@
{ config, lib, ... }:
{
loaOfSub.foo = lib.mkIf config.enable {
enable = true;
};
}

View File

@ -0,0 +1,7 @@
{ config, lib, ... }:
{
loaOfSub = lib.mkIf config.enable {
foo.enable = true;
};
}

View File

@ -0,0 +1,3 @@
{
_module.check = false;
}

View File

@ -6,6 +6,7 @@ with import ./attrsets.nix;
with import ./options.nix;
with import ./trivial.nix;
with import ./strings.nix;
with {inherit (import ./modules.nix) mergeDefinitions; };
rec {
@ -109,11 +110,15 @@ rec {
listOf = elemType: mkOptionType {
name = "list of ${elemType.name}s";
check = value: isList value && all elemType.check value;
check = isList;
merge = loc: defs:
concatLists (imap (n: def: imap (m: def':
elemType.merge (loc ++ ["[${toString n}-${toString m}]"])
[{ inherit (def) file; value = def'; }]) def.value) defs);
map (x: x.value) (filter (x: x ? value) (concatLists (imap (n: def: imap (m: def':
(mergeDefinitions
(loc ++ ["[definition ${toString n}-entry ${toString m}]"])
elemType
[{ inherit (def) file; value = def'; }]
).optionalValue
) def.value) defs)));
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["*"]);
getSubModules = elemType.getSubModules;
substSubModules = m: listOf (elemType.substSubModules m);
@ -121,12 +126,14 @@ rec {
attrsOf = elemType: mkOptionType {
name = "attribute set of ${elemType.name}s";
check = x: isAttrs x && all elemType.check (attrValues x);
check = isAttrs;
merge = loc: defs:
zipAttrsWith (name: elemType.merge (loc ++ [name]))
mapAttrs (n: v: v.value) (filterAttrs (n: v: v ? value) (zipAttrsWith (name: defs:
(mergeDefinitions (loc ++ [name]) elemType defs).optionalValue
)
# Push down position info.
(map (def: listToAttrs (mapAttrsToList (n: def':
{ name = n; value = { inherit (def) file; value = def'; }; }) def.value)) defs);
{ name = n; value = { inherit (def) file; value = def'; }; }) def.value)) defs)));
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name>"]);
getSubModules = elemType.getSubModules;
substSubModules = m: attrsOf (elemType.substSubModules m);
@ -150,10 +157,7 @@ rec {
attrOnly = attrsOf elemType;
in mkOptionType {
name = "list or attribute set of ${elemType.name}s";
check = x:
if isList x then listOnly.check x
else if isAttrs x then attrOnly.check x
else false;
check = x: isList x || isAttrs x;
merge = loc: defs: attrOnly.merge loc (imap convertIfList defs);
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name?>"]);
getSubModules = elemType.getSubModules;
@ -194,7 +198,11 @@ rec {
let
coerce = def: if isFunction def then def else { config = def; };
modules = opts' ++ map (def: { _file = def.file; imports = [(coerce def.value)]; }) defs;
in (evalModules { inherit modules; args.name = last loc; prefix = loc; }).config;
in (evalModules {
inherit modules;
args.name = last loc;
prefix = loc;
}).config;
getSubOptions = prefix: (evalModules
{ modules = opts'; inherit prefix;
# FIXME: hack to get shit to evaluate.

View File

@ -25,6 +25,22 @@
<arg choice='plain'><option>--root</option></arg>
<replaceable>root</replaceable>
</arg>
<arg>
<group choice='req'>
<arg choice='plain'><option>--max-jobs</option></arg>
<arg choice='plain'><option>-j</option></arg>
</group>
<replaceable>number</replaceable>
</arg>
<arg>
<option>--cores</option>
<replaceable>number</replaceable>
</arg>
<arg>
<option>--option</option>
<replaceable>name</replaceable>
<replaceable>value</replaceable>
</arg>
<arg>
<arg choice='plain'><option>--show-trace</option></arg>
</arg>
@ -96,6 +112,37 @@ it.</para>
</listitem>
</varlistentry>
<varlistentry><term><option>--max-jobs</option></term>
<term><option>-j</option></term>
<listitem><para>Sets the maximum number of build jobs that Nix will
perform in parallel to the specified number. The default is <literal>1</literal>.
A higher value is useful on SMP systems or to exploit I/O latency.</para></listitem>
</varlistentry>
<varlistentry><term><option>--cores</option></term>
<listitem><para>Sets the value of the <envar>NIX_BUILD_CORES</envar>
environment variable in the invocation of builders. Builders can
use this variable at their discretion to control the maximum amount
of parallelism. For instance, in Nixpkgs, if the derivation
attribute <varname>enableParallelBuilding</varname> is set to
<literal>true</literal>, the builder passes the
<option>-j<replaceable>N</replaceable></option> flag to GNU Make.
The value <literal>0</literal> means that the builder should use all
available CPU cores in the system.</para></listitem>
</varlistentry>
<varlistentry><term><option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
<listitem><para>Set the Nix configuration option
<replaceable>name</replaceable> to <replaceable>value</replaceable>.</para></listitem>
</varlistentry>
<varlistentry>
<term><option>--show-trace</option></term>
<listitem>

View File

@ -2,27 +2,51 @@
# configuration object (`config') from which we can retrieve option
# values.
{ system ? builtins.currentSystem
, pkgs ? null
, baseModules ? import ../modules/module-list.nix
, extraArgs ? {}
# !!! Please think twice before adding to this argument list!
# Ideally eval-config.nix would be an extremely thin wrapper
# around lib.evalModules, so that modular systems that have nixos configs
# as subcomponents (e.g. the container feature, or nixops if network
# expressions are ever made modular at the top level) can just use
# types.submodule instead of using eval-config.nix
{ # !!! system can be set modularly, would be nice to remove
system ? builtins.currentSystem
, # !!! is this argument needed any more? The pkgs argument can
# be set modularly anyway.
pkgs ? null
, # !!! what do we gain by making this configurable?
baseModules ? import ../modules/module-list.nix
, # !!! See comment about args in lib/modules.nix
extraArgs ? {}
, modules
, check ? true
, # !!! See comment about check in lib/modules.nix
check ? true
, prefix ? []
, lib ? import ../../lib
}:
let extraArgs_ = extraArgs; pkgs_ = pkgs; system_ = system;
extraModules = let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import (builtins.toPath e))];
in
let
pkgsModule = rec {
_file = ./eval-config.nix;
key = _file;
config = {
nixpkgs.system = lib.mkDefault system_;
_module.args.pkgs = lib.mkIf (pkgs_ != null) (lib.mkForce pkgs_);
};
};
in rec {
# Merge the option definitions in all modules, forming the full
# system configuration.
inherit (pkgs.lib.evalModules {
inherit prefix;
modules = modules ++ extraModules ++ baseModules;
inherit (lib.evalModules {
inherit prefix check;
modules = modules ++ extraModules ++ baseModules ++ [ pkgsModule ];
args = extraArgs;
check = check && options.environment.checkConfigurationOptions.value;
}) config options;
# These are the extra arguments passed to every module. In
@ -33,40 +57,8 @@ in rec {
# the 64-bit package anyway. However, it would be cleaner to respect
# nixpkgs.config here.
extraArgs = extraArgs_ // {
inherit pkgs modules baseModules;
modulesPath = ../modules;
pkgs_i686 = import ./nixpkgs.nix { system = "i686-linux"; config.allowUnfree = true; };
utils = import ./utils.nix pkgs;
inherit modules baseModules;
};
# Import Nixpkgs, allowing the NixOS option nixpkgs.config to
# specify the Nixpkgs configuration (e.g., to set package options
# such as firefox.enableGeckoMediaPlayer, or to apply global
# overrides such as changing GCC throughout the system), and the
# option nixpkgs.system to override the platform type. This is
# tricky, because we have to prevent an infinite recursion: "pkgs"
# is passed as an argument to NixOS modules, but the value of "pkgs"
# depends on config.nixpkgs.config, which we get from the modules.
# So we call ourselves here with "pkgs" explicitly set to an
# instance that doesn't depend on nixpkgs.config.
pkgs =
if pkgs_ != null
then pkgs_
else import ./nixpkgs.nix (
let
system = if nixpkgsOptions.system != "" then nixpkgsOptions.system else system_;
nixpkgsOptions = (import ./eval-config.nix {
inherit system extraArgs modules prefix;
# For efficiency, leave out most NixOS modules; they don't
# define nixpkgs.config, so it's pointless to evaluate them.
baseModules = [ ../modules/misc/nixpkgs.nix ../modules/config/no-x-libs.nix ];
pkgs = import ./nixpkgs.nix { system = system_; config = {}; };
check = false;
}).config.nixpkgs;
in
{
inherit system;
inherit (nixpkgsOptions) config;
});
inherit (config._module.args) pkgs;
}

View File

@ -1,4 +1,4 @@
{ stdenv, perl, cdrkit, pathsFromGraph
{ stdenv, perl, pathsFromGraph, xorriso, syslinux
, # The file name of the resulting ISO image.
isoName ? "cd.iso"
@ -22,12 +22,18 @@
, # Whether this should be an efi-bootable El-Torito CD.
efiBootable ? false
, # Whether this should be a hybrid CD (bootable from USB as well as from CD).
usbBootable ? false
, # The path (in the ISO file system) of the boot image.
bootImage ? ""
, # The path (in the ISO file system) of the efi boot image.
efiBootImage ? ""
, # The path (outside the ISO file system) of the isohybrid-mbr image.
isohybridMbrImage ? ""
, # Whether to compress the resulting ISO image with bzip2.
compressImage ? false
@ -38,13 +44,14 @@
assert bootable -> bootImage != "";
assert efiBootable -> efiBootImage != "";
assert usbBootable -> isohybridMbrImage != "";
stdenv.mkDerivation {
name = "iso9660-image";
builder = ./make-iso9660-image.sh;
buildInputs = [perl cdrkit];
buildInputs = [perl xorriso syslinux];
inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable;
inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable isohybridMbrImage usbBootable;
# !!! should use XML.
sources = map (x: x.source) contents;

View File

@ -13,6 +13,20 @@ stripSlash() {
if test "${res:0:1}" = /; then res=${res:1}; fi
}
# Escape potential equal signs (=) with backslash (\=)
escapeEquals() {
echo "$1" | sed -e 's/\\/\\\\/g' -e 's/=/\\=/g'
}
# Queues a file/directory to be placed on the ISO.
# An entry consists of a destination path on the ISO ($1)
# and a local source path ($2).
addPath() {
target="$1"
source="$2"
echo "$(escapeEquals "$target")=$(escapeEquals "$source")" >> pathlist
}
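# For example (hypothetical store path),
#   addPath "boot/bzImage" "/nix/store/abc-linux/bzImage"
# appends the graft-point line "boot/bzImage=/nix/store/abc-linux/bzImage"
# to the path list consumed by xorriso below.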
stripSlash "$bootImage"; bootImage="$res"
@ -31,11 +45,20 @@ if test -n "$bootable"; then
fi
done
bootFlags="-b $bootImage -c .boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table"
isoBootFlags="-eltorito-boot ${bootImage}
-eltorito-catalog .boot.cat
-no-emul-boot -boot-load-size 4 -boot-info-table"
fi
if test -n "$usbBootable"; then
usbBootFlags="-isohybrid-mbr ${isohybridMbrImage}"
fi
if test -n "$efiBootable"; then
bootFlags="$bootFlags -eltorito-alt-boot -e $efiBootImage -no-emul-boot"
efiBootFlags="-eltorito-alt-boot
-e $efiBootImage
-no-emul-boot
-isohybrid-gpt-basdat"
fi
touch pathlist
@ -44,14 +67,14 @@ touch pathlist
# Add the individual files.
for ((i = 0; i < ${#targets_[@]}; i++)); do
stripSlash "${targets_[$i]}"
echo "$res=${sources_[$i]}" >> pathlist
addPath "$res" "${sources_[$i]}"
done
# Add the closures of the top-level store objects.
storePaths=$(perl $pathsFromGraph closure-*)
for i in $storePaths; do
echo "${i:1}=$i" >> pathlist
addPath "${i:1}" "$i"
done
@ -59,7 +82,7 @@ done
# nix-store --load-db.
if [ -n "$object" ]; then
printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration
echo "nix-path-registration=nix-path-registration" >> pathlist
addPath "nix-path-registration" "nix-path-registration"
fi
@ -70,22 +93,39 @@ for ((n = 0; n < ${#objects[*]}; n++)); do
if test "$symlink" != "none"; then
mkdir -p $(dirname ./$symlink)
ln -s $object ./$symlink
echo "$symlink=./$symlink" >> pathlist
addPath "$symlink" "./$symlink"
fi
done
# !!! what does this do?
cat pathlist | sed -e 's/=\(.*\)=\(.*\)=/\\=\1=\2\\=/' | tee pathlist.safer
mkdir -p $out/iso
genCommand="genisoimage -iso-level 4 -r -J $bootFlags -hide-rr-moved -graft-points -path-list pathlist.safer ${volumeID:+-V $volumeID}"
if test -z "$compressImage"; then
$genCommand -o $out/iso/$isoName
else
$genCommand | bzip2 > $out/iso/$isoName.bz2
xorriso="xorriso
-as mkisofs
-iso-level 3
-volid ${volumeID}
-appid nixos
-publisher nixos
-graft-points
-full-iso9660-filenames
${isoBootFlags}
${usbBootFlags}
${efiBootFlags}
-r
-path-list pathlist
--sort-weight 0 /
--sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
$xorriso -output $out/iso/$isoName
if test -n "$usbBootable"; then
echo "Making image hybrid..."
isohybrid --uefi $out/iso/$isoName
fi
if test -n "$compressImage"; then
echo "Compressing image..."
bzip2 $out/iso/$isoName
fi
mkdir -p $out/nix-support
echo $system > $out/nix-support/system

View File

@ -37,6 +37,10 @@ sub new {
if defined $args->{hda};
$startCommand .= "-cdrom $args->{cdrom} "
if defined $args->{cdrom};
$startCommand .= "-device piix3-usb-uhci -drive id=usbdisk,file=$args->{usb},if=none,readonly -device usb-storage,drive=usbdisk "
if defined $args->{usb};
$startCommand .= "-bios $args->{bios} "
if defined $args->{bios};
$startCommand .= $args->{qemuFlags} || "";
} else {
$startCommand = Cwd::abs_path $startCommand;

View File

@ -1,6 +1,6 @@
{ config, pkgs, ... }:
{ config, pkgs, lib, ... }:
with pkgs.lib;
with lib;
let fcBool = x: if x then "<bool>true</bool>" else "<bool>false</bool>";
in

View File

@ -27,6 +27,6 @@ with lib;
fonts.fontconfig.enable = false;
nixpkgs.config.packageOverrides = pkgs:
{ dbus = pkgs.dbus.override { useX11 = false; }; };
{ dbus = pkgs.dbus.override { x11Support = false; }; };
};
}

View File

@ -64,6 +64,6 @@ in
#
# Removed under grsecurity.
boot.kernel.sysctl."kernel.kptr_restrict" =
if config.security.grsecurity.enable then null else 1;
if (config.boot.kernelPackages.kernel.features.grsecurity or false) then null else 1;
};
}

View File

@ -110,7 +110,7 @@ let
shell = mkOption {
type = types.str;
default = "/run/current-system/sw/sbin/nologin";
default = "/run/current-system/sw/bin/nologin";
description = "The path to the user's shell.";
};

View File

@ -0,0 +1,18 @@
{ config, lib, ... }:
{
options.hardware.enableKSM = lib.mkEnableOption "Kernel Same-Page Merging";
config = lib.mkIf config.hardware.enableKSM {
systemd.services.enable-ksm = {
description = "Enable Kernel Same-Page Merging";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
script = ''
if [ -e /sys/kernel/mm/ksm ]; then
echo 1 > /sys/kernel/mm/ksm/run
fi
'';
};
};
}

View File

@ -13,7 +13,10 @@ let
# driver.
nvidiaForKernel = kernelPackages:
if elem "nvidia" drivers then
kernelPackages.nvidia_x11
if versionAtLeast kernelPackages.kernel.version "4.0" then
kernelPackages.nvidia_x11_beta
else
kernelPackages.nvidia_x11
else if elem "nvidiaLegacy173" drivers then
kernelPackages.nvidia_x11_legacy173
else if elem "nvidiaLegacy304" drivers then

View File

@ -36,6 +36,9 @@ with lib;
# EFI booting
isoImage.makeEfiBootable = true;
# USB booting
isoImage.makeUsbBootable = true;
# Add Memtest86+ to the CD.
boot.loader.grub.memtest86.enable = true;

View File

@ -7,53 +7,65 @@
with lib;
let
# Timeout in syslinux is in units of 1/10 of a second.
# 0 is used to disable timeouts.
syslinuxTimeout = if config.boot.loader.timeout == null then
0
else
max (config.boot.loader.timeout * 10) 1;
# The Grub image.
grubImage = pkgs.runCommand "grub_eltorito" {}
max = x: y: if x > y then x else y;
# The configuration file for syslinux.
# Notes on syslinux configuration and UNetbootin compatibility:
# * Do not use '/syslinux/syslinux.cfg' as the path for this
# configuration. UNetbootin will not parse the file and use it as-is.
# This results in a broken configuration if the partition label does
# not match the specified config.isoImage.volumeID. For this reason
# we're using '/isolinux/isolinux.cfg'.
# * Use APPEND instead of adding command-line arguments directly after
# the LINUX entries.
# * COM32 entries (chainload, reboot, poweroff) are not recognized. They
# result in incorrect boot entries.
baseIsolinuxCfg =
''
${pkgs.grub2}/bin/grub-mkimage -p /boot/grub -O i386-pc -o tmp biosdisk iso9660 help linux linux16 chain png jpeg echo gfxmenu reboot
cat ${pkgs.grub2}/lib/grub/*/cdboot.img tmp > $out
''; # */
SERIAL 0 38400
TIMEOUT ${builtins.toString syslinuxTimeout}
UI vesamenu.c32
MENU TITLE NixOS
MENU BACKGROUND /isolinux/background.png
DEFAULT boot
# The configuration file for Grub.
grubCfg =
''
set default=${builtins.toString config.boot.loader.grub.default}
set timeout=${builtins.toString config.boot.loader.grub.timeout}
if loadfont /boot/grub/unicode.pf2; then
set gfxmode=640x480
insmod gfxterm
insmod vbe
terminal_output gfxterm
insmod png
if background_image /boot/grub/splash.png; then
set color_normal=white/black
set color_highlight=black/white
else
set menu_color_normal=cyan/blue
set menu_color_highlight=white/blue
fi
fi
${config.boot.loader.grub.extraEntries}
LABEL boot
MENU LABEL NixOS ${config.system.nixosVersion} Installer
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
INITRD /boot/initrd
'';
isolinuxMemtest86Entry = ''
LABEL memtest
MENU LABEL Memtest86+
LINUX /boot/memtest.bin
APPEND ${toString config.boot.loader.grub.memtest86.params}
'';
isolinuxCfg = baseIsolinuxCfg + (optionalString config.boot.loader.grub.memtest86.enable isolinuxMemtest86Entry);
# The efi boot image
efiDir = pkgs.runCommand "efi-directory" {} ''
mkdir -p $out/efi/boot
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/efi/boot/boot${targetArch}.efi
mkdir -p $out/EFI/boot
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
mkdir -p $out/loader/entries
echo "title NixOS LiveCD" > $out/loader/entries/nixos-livecd.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
echo "default nixos-livecd" > $out/loader/loader.conf
echo "timeout 5" >> $out/loader/loader.conf
echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf
'';
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }
@ -163,6 +175,22 @@ in
'';
};
isoImage.makeUsbBootable = mkOption {
default = false;
description = ''
Whether the ISO image should be bootable from CD as well as USB.
'';
};
isoImage.splashImage = mkOption {
default = pkgs.fetchurl {
url = https://raw.githubusercontent.com/NixOS/nixos-artwork/5729ab16c6a5793c10a2913b5a1b3f59b91c36ee/ideas/grub-splash/grub-nixos-1.png;
sha256 = "43fd8ad5decf6c23c87e9026170a13588c2eba249d9013cb9f888da5e2002217";
};
description = ''
The splash image to use in the bootloader.
'';
};
};
@ -176,7 +204,7 @@ in
# !!! Hack - attributes expected by other modules.
system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 ];
environment.systemPackages = [ pkgs.grub2 pkgs.syslinux ];
# In stage 1 of the boot, mount the CD as the root FS by label so
# that we don't need to know its device. We pass the label of the
@ -226,7 +254,7 @@ in
options = "allow_other,cow,nonempty,chroot=/mnt-root,max_files=32768,hide_meta_files,dirs=/nix/.rw-store=rw:/nix/.ro-store=ro";
};
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" ];
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" "usb-storage" ];
boot.initrd.kernelModules = [ "loop" ];
@ -246,15 +274,12 @@ in
# Individual files to be included on the CD, outside of the Nix
# store on the CD.
isoImage.contents =
[ { source = grubImage;
target = "/boot/grub/grub_eltorito";
}
{ source = pkgs.substituteAll {
name = "grub.cfg";
src = pkgs.writeText "grub.cfg-in" grubCfg;
[ { source = pkgs.substituteAll {
name = "isolinux.cfg";
src = pkgs.writeText "isolinux.cfg-in" isolinuxCfg;
bootRoot = "/boot";
};
target = "/boot/grub/grub.cfg";
target = "/isolinux/isolinux.cfg";
}
{ source = config.boot.kernelPackages.kernel + "/bzImage";
target = "/boot/bzImage";
@ -262,51 +287,44 @@ in
{ source = config.system.build.initialRamdisk + "/initrd";
target = "/boot/initrd";
}
{ source = "${pkgs.grub2}/share/grub/unicode.pf2";
target = "/boot/grub/unicode.pf2";
}
{ source = config.boot.loader.grub.splashImage;
target = "/boot/grub/splash.png";
}
{ source = config.system.build.squashfsStore;
target = "/nix-store.squashfs";
}
{ source = "${pkgs.syslinux}/share/syslinux";
target = "/isolinux";
}
{ source = config.isoImage.splashImage;
target = "/isolinux/background.png";
}
] ++ optionals config.isoImage.makeEfiBootable [
{ source = efiImg;
target = "/boot/efi.img";
}
{ source = "${efiDir}/efi";
target = "/efi";
{ source = "${efiDir}/EFI";
target = "/EFI";
}
{ source = "${efiDir}/loader";
target = "/loader";
}
] ++ mapAttrsToList (n: v: { source = v; target = "/boot/${n}"; }) config.boot.loader.grub.extraFiles;
# The Grub menu.
boot.loader.grub.extraEntries =
''
menuentry "NixOS ${config.system.nixosVersion} Installer" {
linux /boot/bzImage init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
initrd /boot/initrd
] ++ optionals config.boot.loader.grub.memtest86.enable [
{ source = "${pkgs.memtest86plus}/memtest.bin";
target = "/boot/memtest.bin";
}
];
menuentry "Boot from hard disk" {
set root=(hd0)
chainloader +1
}
'';
boot.loader.grub.timeout = 10;
boot.loader.timeout = 10;
# Create the ISO image.
system.build.isoImage = import ../../../lib/make-iso9660-image.nix ({
inherit (pkgs) stdenv perl cdrkit pathsFromGraph;
inherit (pkgs) stdenv perl pathsFromGraph xorriso syslinux;
inherit (config.isoImage) isoName compressImage volumeID contents;
bootable = true;
bootImage = "/boot/grub/grub_eltorito";
bootImage = "/isolinux/isolinux.bin";
} // optionalAttrs config.isoImage.makeUsbBootable {
usbBootable = true;
isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
} // optionalAttrs config.isoImage.makeEfiBootable {
efiBootable = true;
efiBootImage = "boot/efi.img";

View File

@ -28,9 +28,14 @@ chrootCommand=(/run/current-system/sw/bin/bash)
while [ "$#" -gt 0 ]; do
i="$1"; shift 1
case "$i" in
-I)
given_path="$1"; shift 1
extraBuildFlags+=("$i" "$given_path")
--max-jobs|-j|--cores|-I)
j="$1"; shift 1
extraBuildFlags+=("$i" "$j")
;;
--option)
j="$1"; shift 1
k="$1"; shift 1
extraBuildFlags+=("$i" "$j" "$k")
;;
--root)
mountPoint="$1"; shift 1

View File

@ -1,15 +0,0 @@
{ lib, ... }:
with lib;
{
options = {
environment.checkConfigurationOptions = mkOption {
type = types.bool;
default = true;
description = ''
Whether to check the validity of the entire configuration.
'';
};
};
}

View File

@ -0,0 +1,14 @@
{ lib, pkgs, config, ... }:
{
_module.args = {
modulesPath = ../.;
pkgs_i686 = import ../../lib/nixpkgs.nix {
system = "i686-linux";
config.allowUnfree = true;
};
utils = import ../../lib/utils.nix pkgs;
};
}

View File

@ -212,6 +212,8 @@
uptimed = 184;
zope2 = 185;
ripple-data-api = 186;
mediatomb = 187;
rdnssd = 188;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -401,6 +403,8 @@
#uptimed = 184; # unused
#zope2 = 185; # unused
#ripple-data-api = 186; #unused
mediatomb = 187;
#rdnssd = 188; # unused
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

View File

@ -60,6 +60,7 @@ in
nixpkgs.system = mkOption {
type = types.str;
default = builtins.currentSystem;
description = ''
Specifies the Nix platform type for which NixOS should be built.
If unset, it defaults to the platform type of your host system.
@ -71,6 +72,10 @@ in
};
config = {
nixpkgs.system = mkDefault pkgs.stdenv.system;
_module.args.pkgs = import ../../lib/nixpkgs.nix {
system = config.nixpkgs.system;
inherit (config.nixpkgs) config;
};
};
}

View File

@ -29,6 +29,7 @@
./hardware/all-firmware.nix
./hardware/cpu/amd-microcode.nix
./hardware/cpu/intel-microcode.nix
./hardware/ksm.nix
./hardware/network/b43.nix
./hardware/network/intel-2100bg.nix
./hardware/network/intel-2200bg.nix
@ -43,8 +44,8 @@
./installer/tools/nixos-checkout.nix
./installer/tools/tools.nix
./misc/assertions.nix
./misc/check-config.nix
./misc/crashdump.nix
./misc/extra-arguments.nix
./misc/ids.nix
./misc/lib.nix
./misc/locate.nix
@ -146,6 +147,7 @@
./services/desktops/telepathy.nix
./services/games/ghost-one.nix
./services/games/minecraft-server.nix
./services/games/minetest-server.nix
./services/hardware/acpid.nix
./services/hardware/amd-hybrid-graphics.nix
./services/hardware/bluetooth.nix
@ -191,6 +193,7 @@
./services/misc/gitlab.nix
./services/misc/gitolite.nix
./services/misc/gpsd.nix
./services/misc/mediatomb.nix
./services/misc/mesos-master.nix
./services/misc/mesos-slave.nix
./services/misc/nix-daemon.nix
@ -223,6 +226,7 @@
./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
./services/monitoring/systemhealth.nix
./services/monitoring/teamviewer.nix
./services/monitoring/ups.nix
./services/monitoring/uptime.nix
./services/monitoring/zabbix-agent.nix

View File

@ -100,7 +100,7 @@ in
chgpasswd = { rootOK = true; };
};
security.setuidPrograms = [ "passwd" "chfn" "su" "newgrp"
security.setuidPrograms = [ "passwd" "chfn" "su" "sg" "newgrp"
"newuidmap" "newgidmap" # new in shadow 4.2.x
];

View File

@ -107,7 +107,6 @@ in zipModules ([]
++ obsolete [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ]
++ obsolete [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ]
++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ]
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "xbmc" ]
# VirtualBox
++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ]
@ -136,6 +135,12 @@ in zipModules ([]
++ obsolete [ "services" "mysql55" ] [ "services" "mysql" ]
++ obsolete [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ]
# XBMC
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
++ obsolete [ "services" "xserver" "desktopManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
# Options that are obsolete and have no replacement.
++ obsolete' [ "boot" "loader" "grub" "bootDevice" ]
++ obsolete' [ "boot" "initrd" "luks" "enable" ]

View File

@ -44,53 +44,41 @@ in
config = {
mode = mkOption {
type = types.str;
type = types.enum [ "auto" "custom" ];
default = "auto";
example = "custom";
description = ''
grsecurity configuration mode. This specifies whether
grsecurity is auto-configured or otherwise completely
manually configured. Can either be
<literal>custom</literal> or <literal>auto</literal>.
<literal>auto</literal> is recommended.
manually configured.
'';
};
priority = mkOption {
type = types.str;
type = types.enum [ "security" "performance" ];
default = "security";
example = "performance";
description = ''
grsecurity configuration priority. This specifies whether
the kernel configuration should emphasize speed or
security. Can either be <literal>security</literal> or
<literal>performance</literal>.
security.
'';
};
system = mkOption {
type = types.str;
default = "";
example = "desktop";
type = types.enum [ "desktop" "server" ];
default = "desktop";
description = ''
grsecurity system configuration. This specifies whether
the kernel configuration should be suitable for a Desktop
or a Server. Can either be <literal>server</literal> or
<literal>desktop</literal>.
grsecurity system configuration.
'';
};
virtualisationConfig = mkOption {
type = types.str;
default = "none";
example = "host";
type = types.nullOr (types.enum [ "host" "guest" ]);
default = null;
description = ''
grsecurity virtualisation configuration. This specifies
the virtualisation role of the machine - that is, whether
it will be a virtual machine guest, a virtual machine
host, or neither. Can be one of <literal>none</literal>,
<literal>host</literal>, or <literal>guest</literal>.
host, or neither.
'';
};
@ -106,17 +94,10 @@ in
};
virtualisationSoftware = mkOption {
type = types.str;
default = "";
example = "kvm";
type = types.nullOr (types.enum [ "kvm" "xen" "vmware" "virtualbox" ]);
default = null;
description = ''
grsecurity virtualisation software. Set this to the
specified virtual machine technology if the machine is
running as a guest, or a host.
Can be one of <literal>kvm</literal>,
<literal>xen</literal>, <literal>vmware</literal> or
<literal>virtualbox</literal>.
Configure grsecurity for use with this virtualisation software.
'';
};
@ -262,25 +243,13 @@ in
&& config.boot.kernelPackages.kernel.features.grsecurity;
message = "grsecurity enabled, but kernel doesn't have grsec support";
}
{ assertion = elem cfg.config.mode [ "auto" "custom" ];
message = "grsecurity mode must either be 'auto' or 'custom'.";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.system [ "desktop" "server" ];
message = "when using auto grsec mode, system must be either 'desktop' or 'server'";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.priority [ "performance" "security" ];
message = "when using auto grsec mode, priority must be 'performance' or 'security'.";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.virtualisationConfig [ "host" "guest" "none" ];
message = "when using auto grsec mode, 'virt' must be 'host', 'guest' or 'none'.";
}
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) ->
{ assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
cfg.config.hardwareVirtualisation != null;
message = "when using auto grsec mode with virtualisation, you must specify if your hardware has virtualisation extensions";
}
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) ->
elem cfg.config.virtualisationSoftware [ "kvm" "xen" "virtualbox" "vmware" ];
message = "virtualisation software must be 'kvm', 'xen', 'vmware' or 'virtualbox'";
{ assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
cfg.config.virtualisationSoftware != null;
message = "grsecurity configured for virtualisation but no virtualisation software specified";
}
];

View File

@ -6,8 +6,9 @@
with lib;
let
parentConfig = config;
pamOpts = args: {
pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in {
options = {
@ -180,8 +181,8 @@ let
};
config = let cfg = args.config; in {
name = mkDefault args.name;
config = {
name = mkDefault name;
setLoginUid = mkDefault cfg.startSession;
limits = mkDefault config.security.pam.loginLimits;

View File

@ -77,7 +77,7 @@ in
root ALL=(ALL) SETENV: ALL
# Users in the "wheel" group can do anything.
%wheel ALL=(ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
%wheel ALL=(ALL:ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
${cfg.extraConfig}
'';

View File

@ -8,7 +8,7 @@ let
mysql = cfg.package;
is55 = mysql.mysqlVersion == "5.5";
atLeast55 = versionAtLeast mysql.mysqlVersion "5.5";
pidFile = "${cfg.pidDir}/mysqld.pid";
@ -22,7 +22,7 @@ let
port = ${toString cfg.port}
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"}
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"}
${optionalString (cfg.replication.role == "slave" && !is55)
${optionalString (cfg.replication.role == "slave" && !atLeast55)
''
master-host = ${cfg.replication.masterHost}
master-user = ${cfg.replication.masterUser}
@ -73,7 +73,7 @@ in
};
pidDir = mkOption {
default = "/var/run/mysql";
default = "/run/mysqld";
description = "Location of the file which stores the PID of the MySQL server";
};
@ -178,6 +178,10 @@ in
mkdir -m 0700 -p ${cfg.pidDir}
chown -R ${cfg.user} ${cfg.pidDir}
# Make the socket directory
mkdir -m 0700 -p /run/mysqld
chown -R ${cfg.user} /run/mysqld
'';
serviceConfig.ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
@ -186,7 +190,7 @@ in
''
# Wait until the MySQL server is available for use
count=0
while [ ! -e /tmp/mysql.sock ]
while [ ! -e /run/mysqld/mysqld.sock ]
do
if [ $count -eq 30 ]
then
@ -220,7 +224,7 @@ in
fi
'') cfg.initialDatabases}
${optionalString (cfg.replication.role == "slave" && is55)
${optionalString (cfg.replication.role == "slave" && atLeast55)
''
# Set up the replication master

View File

@ -0,0 +1,104 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.minetest-server;
flag = val: name: if val != null then "--${name} ${toString val} " else "";
flags = [
(flag cfg.gameId "gameid")
(flag cfg.world "world")
(flag cfg.configPath "config")
(flag cfg.logPath "logfile")
(flag cfg.port "port")
];
in
{
options = {
services.minetest-server = {
enable = mkOption {
type = types.bool;
default = false;
description = "If enabled, starts a Minetest Server.";
};
gameId = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Id of the game to use. To list available games run
`minetestserver --gameid list`.
If only one game exists, this option can be null.
'';
};
world = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Name of the world to use. To list available worlds run
`minetestserver --world list`.
If only one world exists, this option can be null.
'';
};
configPath = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path to the config to use.
If set to null, the config of the running user will be used:
`~/.minetest/minetest.conf`.
'';
};
logPath = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path to logfile for logging.
If set to null, logging will be output to stdout, which means
all output will be caught by systemd.
'';
};
port = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
Port number to bind to.
If set to null, the default 30000 will be used.
'';
};
};
};
config = mkIf cfg.enable {
users.extraUsers.minetest = {
description = "Minetest Server Service user";
home = "/var/lib/minetest";
createHome = true;
uid = config.ids.uids.minetest;
};
systemd.services.minetest-server = {
description = "Minetest Server Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = "always";
serviceConfig.User = "minetest";
script = ''
cd /var/lib/minetest
exec ${pkgs.minetest}/bin/minetestserver ${concatStrings flags}
'';
};
};
}
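For reference, a configuration enabling this service could look like the following sketch (the game id and port are illustrative values):

services.minetest-server = {
  enable = true;
  gameId = "minetest_game"; # illustrative; see `minetestserver --gameid list`
  port = 30000;
};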

View File

@ -237,7 +237,10 @@ in
system.activationScripts.udevd =
''
echo "" > /proc/sys/kernel/hotplug
# The deprecated hotplug uevent helper is not used anymore
if [ -e /proc/sys/kernel/hotplug ]; then
echo "" > /proc/sys/kernel/hotplug
fi
# Regenerate the hardware database /var/lib/udev/hwdb.bin
# whenever systemd changes.

View File

@ -0,0 +1,282 @@
{ config, lib, pkgs, ... }:
with lib;
let
uid = config.ids.uids.mediatomb;
gid = config.ids.gids.mediatomb;
cfg = config.services.mediatomb;
mtConf = pkgs.writeText "config.xml" ''
<?xml version="1.0" encoding="UTF-8"?>
<config version="2" xmlns="http://mediatomb.cc/config/2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://mediatomb.cc/config/2 http://mediatomb.cc/config/2.xsd">
<server>
<ui enabled="yes" show-tooltips="yes">
<accounts enabled="no" session-timeout="30">
<account user="mediatomb" password="mediatomb"/>
</accounts>
</ui>
<name>${cfg.serverName}</name>
<udn>uuid:${cfg.uuid}</udn>
<home>${cfg.dataDir}</home>
<webroot>${pkgs.mediatomb}/share/mediatomb/web</webroot>
<storage>
<sqlite3 enabled="yes">
<database-file>mediatomb.db</database-file>
</sqlite3>
</storage>
<protocolInfo extend="${if cfg.ps3Support then "yes" else "no"}"/>
${if cfg.dsmSupport then ''
<custom-http-headers>
<add header="X-User-Agent: redsonic"/>
</custom-http-headers>
<manufacturerURL>redsonic.com</manufacturerURL>
<modelNumber>105</modelNumber>
'' else ""}
${if cfg.tg100Support then ''
<upnp-string-limit>101</upnp-string-limit>
'' else ""}
<extended-runtime-options>
<mark-played-items enabled="yes" suppress-cds-updates="yes">
<string mode="prepend">*</string>
<mark>
<content>video</content>
</mark>
</mark-played-items>
</extended-runtime-options>
</server>
<import hidden-files="no">
<scripting script-charset="UTF-8">
<common-script>${pkgs.mediatomb}/share/mediatomb/js/common.js</common-script>
<playlist-script>${pkgs.mediatomb}/share/mediatomb/js/playlists.js</playlist-script>
<virtual-layout type="builtin">
<import-script>${pkgs.mediatomb}/share/mediatomb/js/import.js</import-script>
</virtual-layout>
</scripting>
<mappings>
<extension-mimetype ignore-unknown="no">
<map from="mp3" to="audio/mpeg"/>
<map from="ogx" to="application/ogg"/>
<map from="ogv" to="video/ogg"/>
<map from="oga" to="audio/ogg"/>
<map from="ogg" to="audio/ogg"/>
<map from="ogm" to="video/ogg"/>
<map from="asf" to="video/x-ms-asf"/>
<map from="asx" to="video/x-ms-asf"/>
<map from="wma" to="audio/x-ms-wma"/>
<map from="wax" to="audio/x-ms-wax"/>
<map from="wmv" to="video/x-ms-wmv"/>
<map from="wvx" to="video/x-ms-wvx"/>
<map from="wm" to="video/x-ms-wm"/>
<map from="wmx" to="video/x-ms-wmx"/>
<map from="m3u" to="audio/x-mpegurl"/>
<map from="pls" to="audio/x-scpls"/>
<map from="flv" to="video/x-flv"/>
<map from="mkv" to="video/x-matroska"/>
<map from="mka" to="audio/x-matroska"/>
${if cfg.ps3Support then ''
<map from="avi" to="video/divx"/>
'' else ""}
${if cfg.dsmSupport then ''
<map from="avi" to="video/avi"/>
'' else ""}
</extension-mimetype>
<mimetype-upnpclass>
<map from="audio/*" to="object.item.audioItem.musicTrack"/>
<map from="video/*" to="object.item.videoItem"/>
<map from="image/*" to="object.item.imageItem"/>
</mimetype-upnpclass>
<mimetype-contenttype>
<treat mimetype="audio/mpeg" as="mp3"/>
<treat mimetype="application/ogg" as="ogg"/>
<treat mimetype="audio/ogg" as="ogg"/>
<treat mimetype="audio/x-flac" as="flac"/>
<treat mimetype="audio/x-ms-wma" as="wma"/>
<treat mimetype="audio/x-wavpack" as="wv"/>
<treat mimetype="image/jpeg" as="jpg"/>
<treat mimetype="audio/x-mpegurl" as="playlist"/>
<treat mimetype="audio/x-scpls" as="playlist"/>
<treat mimetype="audio/x-wav" as="pcm"/>
<treat mimetype="audio/L16" as="pcm"/>
<treat mimetype="video/x-msvideo" as="avi"/>
<treat mimetype="video/mp4" as="mp4"/>
<treat mimetype="audio/mp4" as="mp4"/>
<treat mimetype="application/x-iso9660" as="dvd"/>
<treat mimetype="application/x-iso9660-image" as="dvd"/>
</mimetype-contenttype>
</mappings>
<online-content>
<YouTube enabled="no" refresh="28800" update-at-start="no" purge-after="604800" racy-content="exclude" format="mp4" hd="no">
<favorites user="mediatomb"/>
<standardfeed feed="most_viewed" time-range="today"/>
<playlists user="mediatomb"/>
<uploads user="mediatomb"/>
<standardfeed feed="recently_featured" time-range="today"/>
</YouTube>
</online-content>
</import>
<transcoding enabled="${if cfg.transcoding then "yes" else "no"}">
<mimetype-profile-mappings>
<transcode mimetype="video/x-flv" using="vlcmpeg"/>
<transcode mimetype="application/ogg" using="vlcmpeg"/>
<transcode mimetype="application/ogg" using="oggflac2raw"/>
<transcode mimetype="audio/x-flac" using="oggflac2raw"/>
</mimetype-profile-mappings>
<profiles>
<profile name="oggflac2raw" enabled="no" type="external">
<mimetype>audio/L16</mimetype>
<accept-url>no</accept-url>
<first-resource>yes</first-resource>
<accept-ogg-theora>no</accept-ogg-theora>
<agent command="ogg123" arguments="-d raw -o byteorder:big -f %out %in"/>
<buffer size="1048576" chunk-size="131072" fill-size="262144"/>
</profile>
<profile name="vlcmpeg" enabled="no" type="external">
<mimetype>video/mpeg</mimetype>
<accept-url>yes</accept-url>
<first-resource>yes</first-resource>
<accept-ogg-theora>yes</accept-ogg-theora>
<agent command="vlc" arguments="-I dummy %in --sout #transcode{venc=ffmpeg,vcodec=mp2v,vb=4096,fps=25,aenc=ffmpeg,acodec=mpga,ab=192,samplerate=44100,channels=2}:standard{access=file,mux=ps,dst=%out} vlc:quit"/>
<buffer size="14400000" chunk-size="512000" fill-size="120000"/>
</profile>
</profiles>
</transcoding>
</config>
'';
in {
###### interface
options = {
services.mediatomb = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the mediatomb DLNA server.
'';
};
serverName = mkOption {
type = types.string;
default = "mediatomb";
description = ''
How to identify the server on the network.
'';
};
ps3Support = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable ps3 specific tweaks.
WARNING: incompatible with DSM 320 support.
'';
};
dsmSupport = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable D-Link DSM 320 specific tweaks.
WARNING: incompatible with ps3 support.
'';
};
tg100Support = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable Telegent TG100 specific tweaks.
'';
};
transcoding = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable transcoding.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/mediatomb";
description = ''
The directory where mediatomb stores its state, data, etc.
'';
};
user = mkOption {
default = "mediatomb";
description = "User account under which mediatomb runs.";
};
group = mkOption {
default = "mediatomb";
description = "Group account under which mediatomb runs.";
};
port = mkOption {
default = 49152;
description = ''
The network port to listen on.
'';
};
uuid = mkOption {
default = "fdfc8a4e-a3ad-4c1d-b43d-a2eedb03a687";
description = ''
A unique identifier (on your network) for the server.
'';
};
customCfg = mkOption {
type = types.bool;
default = false;
description = ''
Allow mediatomb to create and use its own config file inside ${cfg.dataDir}.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.mediatomb = {
description = "MediaTomb media Server";
after = [ "local-fs.target" "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.mediatomb ];
serviceConfig.ExecStart = "${pkgs.mediatomb}/bin/mediatomb -p ${toString cfg.port} ${if cfg.customCfg then "" else "-c ${mtConf}"} -m ${cfg.dataDir}";
serviceConfig.User = "${cfg.user}";
};
users.extraGroups = optionalAttrs (cfg.group == "mediatomb") (singleton {
name = "mediatomb";
gid = gid;
});
users.extraUsers = optionalAttrs (cfg.user == "mediatomb") (singleton {
name = "mediatomb";
isSystemUser = true;
group = cfg.group;
home = "${cfg.dataDir}";
createHome = true;
description = "Mediatomb DLNA Server User";
});
networking.firewall = {
allowedUDPPorts = [ 1900 cfg.port ];
allowedTCPPorts = [ cfg.port ];
};
};
}
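
For reference, a minimal sketch of how the module defined above could be used from a system configuration. Only options declared in this module are referenced; the values are illustrative:

  services.mediatomb = {
    enable = true;
    serverName = "media";   # name announced to DLNA clients on the network
    port = 49152;
    transcoding = true;
    ps3Support = true;      # mutually exclusive with dsmSupport (see the warnings above)
  };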


@ -379,9 +379,6 @@ in
/nix/var/nix/gcroots/per-user \
/nix/var/nix/profiles/per-user \
/nix/var/nix/gcroots/tmp
ln -sf /nix/var/nix/profiles /nix/var/nix/gcroots/
ln -sf /nix/var/nix/manifests /nix/var/nix/gcroots/
'';
};


@ -3,7 +3,7 @@
# of the virtual consoles. The latter is useful for the installation
# CD.
{ config, lib, pkgs, baseModules, ... } @ extraArgs:
{ config, lib, pkgs, baseModules, ... }:
with lib;
@ -18,7 +18,7 @@ let
eval = evalModules {
modules = [ versionModule ] ++ baseModules;
args = (removeAttrs extraArgs ["config" "options"]) // { modules = [ ]; };
args = (config._module.args) // { modules = [ ]; };
};
manual = import ../../../doc/manual {


@ -34,7 +34,7 @@ let
cap=$(sed -nr 's/.*#%#\s+capabilities\s*=\s*(.+)/\1/p' $file)
wrapProgram $file \
--set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" \
--set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" \
--set MUNIN_LIBDIR "${pkgs.munin}/lib" \
--set MUNIN_PLUGSTATE "/var/run/munin"
@ -194,7 +194,7 @@ in
mkdir -p /etc/munin/plugins
rm -rf /etc/munin/plugins/*
PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
'';
serviceConfig = {
ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/";


@ -0,0 +1,45 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.teamviewer;
in
{
###### interface
options = {
services.teamviewer.enable = mkEnableOption "teamviewer daemon";
};
###### implementation
config = mkIf (cfg.enable) {
environment.systemPackages = [ pkgs.teamviewer ];
systemd.services.teamviewerd = {
description = "TeamViewer remote control daemon";
wantedBy = [ "graphical.target" ];
after = [ "NetworkManager-wait-online.service" "network.target" ];
serviceConfig = {
Type = "forking";
ExecStart = "${pkgs.teamviewer}/bin/teamviewerd -d";
PIDFile = "/run/teamviewerd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "on-abort";
StartLimitInterval = "60";
StartLimitBurst = "10";
};
};
};
}
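
The new module only exposes an enable switch, so turning the daemon on from a configuration is a one-liner (using the option declared above):

  services.teamviewer.enable = true;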


@ -44,7 +44,7 @@ let cfg = config.services.drbd; in
boot.extraModprobeConfig =
''
options drbd usermode_helper=/run/current-system/sw/sbin/drbdadm
options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
'';
environment.etc = singleton


@ -4,6 +4,9 @@ with lib;
let
cfg = config.services.btsync;
bittorrentSync = cfg.package;
listenAddr = cfg.httpListenAddr + ":" + (toString cfg.httpListenPort);
boolStr = x: if x then "true" else "false";
@ -57,7 +60,7 @@ let
''
{
"device_name": "${cfg.deviceName}",
"storage_path": "/var/lib/btsync/",
"storage_path": "${cfg.storagePath}",
"listening_port": ${toString cfg.listeningPort},
"use_gui": false,
@ -195,6 +198,24 @@ in
'';
};
package = mkOption {
type = types.package;
default = pkgs.bittorrentSync14;
example = literalExample "pkgs.bittorrentSync20";
description = ''
Branch of bittorrent sync to use.
'';
};
storagePath = mkOption {
type = types.path;
default = "/var/lib/btsync";
example = "/var/lib/btsync";
description = ''
Where to store the bittorrent sync files.
'';
};
apiKey = mkOption {
type = types.str;
default = "";
@ -258,7 +279,7 @@ in
users.extraUsers.btsync = {
description = "Bittorrent Sync Service user";
home = "/var/lib/btsync";
home = cfg.storagePath;
createHome = true;
uid = config.ids.uids.btsync;
group = "btsync";
@ -292,6 +313,6 @@ in
};
};
environment.systemPackages = [ pkgs.bittorrentSync ];
environment.systemPackages = [ cfg.package ];
};
}
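
A sketch of how the newly added options could be set from a configuration. 'package' and 'storagePath' are the options introduced above; the 'enable' option is not part of this hunk and is assumed to exist elsewhere in the module:

  services.btsync = {
    enable = true;                    # assumed existing option, not shown in this hunk
    package = pkgs.bittorrentSync20;  # pick the 2.0 branch instead of the 1.4 default
    storagePath = "/var/lib/btsync";  # previously hard-coded, now configurable
  };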


@ -183,6 +183,9 @@ in {
{ source = "${networkmanager_pptp}/etc/NetworkManager/VPN/nm-pptp-service.name";
target = "NetworkManager/VPN/nm-pptp-service.name";
}
{ source = "${networkmanager_l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
target = "NetworkManager/VPN/nm-l2tp-service.name";
}
] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])
{ source = overrideNameserversScript;
target = "NetworkManager/dispatcher.d/02overridedns";
@ -197,6 +200,7 @@ in {
networkmanager_vpnc
networkmanager_openconnect
networkmanager_pptp
networkmanager_l2tp
modemmanager
];
@ -240,6 +244,7 @@ in {
networkmanager_vpnc
networkmanager_openconnect
networkmanager_pptp
networkmanager_l2tp
modemmanager
];


@ -4,7 +4,12 @@
{ config, lib, pkgs, ... }:
with lib;
let
mergeHook = pkgs.writeScript "rdnssd-merge-hook" ''
#! ${pkgs.stdenv.shell} -e
${pkgs.openresolv}/bin/resolvconf -u
'';
in
{
###### interface
@ -30,18 +35,39 @@ with lib;
config = mkIf config.services.rdnssd.enable {
jobs.rdnssd =
{ description = "RDNSS daemon";
systemd.services.rdnssd = {
description = "RDNSS daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
# Start before the network interfaces are brought up so that
# the daemon receives RDNSS advertisements from the kernel.
startOn = "starting network-interfaces";
preStart = ''
# Create the proper run directory
mkdir -p /run/rdnssd
touch /run/rdnssd/resolv.conf
chown -R rdnssd /run/rdnssd
# !!! Should write to /var/run/rdnssd/resolv.conf and run the daemon under another uid.
exec = "${pkgs.ndisc6}/sbin/rdnssd --resolv-file /etc/resolv.conf -u root";
# Link the resolvconf interfaces to rdnssd
rm -f /run/resolvconf/interfaces/rdnssd
ln -s /run/rdnssd/resolv.conf /run/resolvconf/interfaces/rdnssd
${mergeHook}
'';
daemonType = "fork";
postStop = ''
rm -f /run/resolvconf/interfaces/rdnssd
${mergeHook}
'';
serviceConfig = {
ExecStart = "@${pkgs.ndisc6}/bin/rdnssd rdnssd -p /run/rdnssd/rdnssd.pid -r /run/rdnssd/resolv.conf -u rdnssd -H ${mergeHook}";
Type = "forking";
PIDFile = "/run/rdnssd/rdnssd.pid";
};
};
users.extraUsers.rdnssd = {
description = "RDNSSD Daemon User";
uid = config.ids.uids.rdnssd;
};
};


@ -130,6 +130,9 @@ in
config.system.path
];
# Don't restart dbus-daemon. Bad things tend to happen if we do.
systemd.services.dbus.reloadIfChanged = true;
environment.pathsToLink = [ "/etc/dbus-1" "/share/dbus-1" ];
};


@ -19,7 +19,7 @@ in
# E.g., if KDE is enabled, it supersedes xterm.
imports = [
./none.nix ./xterm.nix ./xfce.nix ./kde4.nix ./kde5.nix
./e19.nix ./gnome3.nix ./xbmc.nix ./kodi.nix
./e19.nix ./gnome3.nix ./kodi.nix
];
options = {


@ -1,31 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.desktopManager.xbmc;
in
{
options = {
services.xserver.desktopManager.xbmc = {
enable = mkOption {
default = false;
example = true;
description = "Enable the xbmc multimedia center.";
};
};
};
config = mkIf cfg.enable {
services.xserver.desktopManager.session = [{
name = "xbmc";
start = ''
${pkgs.xbmc}/bin/xbmc --lircdev /var/run/lirc/lircd --standalone &
waitPID=$!
'';
}];
environment.systemPackages = [ pkgs.xbmc ];
};
}


@ -55,7 +55,7 @@ let
[UserList]
minimum-uid=500
hidden-users=${concatStringsSep " " dmcfg.hiddenUsers}
hidden-shells=/run/current-system/sw/sbin/nologin
hidden-shells=/run/current-system/sw/bin/nologin
'';
lightdmConf = writeText "lightdm.conf"


@ -26,7 +26,7 @@ let
[Users]
MaximumUid=${toString config.ids.uids.nixbld}
HideUsers=${concatStringsSep "," dmcfg.hiddenUsers}
HideShells=/run/current-system/sw/sbin/nologin
HideShells=/run/current-system/sw/bin/nologin
[XDisplay]
MinimumVT=${toString xcfg.tty}


@ -384,9 +384,13 @@ system("@systemd@/bin/systemctl", "reset-failed");
# Make systemd reload its units.
system("@systemd@/bin/systemctl", "daemon-reload") == 0 or $res = 3;
# Signal dbus to reload its configuration before starting other units.
# Other units may rely on newly installed policy files under /etc/dbus-1
system("@systemd@/bin/systemctl", "reload-or-restart", "dbus.service");
# Reload units that need it. This includes remounting changed mount
# units.
if (scalar(keys %unitsToReload) > 0) {
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
unlink($reloadListFile);
}
# Restart changed services (those that have to be restarted rather
# than stopped and started).
@ -407,14 +411,6 @@ print STDERR "starting the following units: ", join(", ", @unitsToStartFiltered)
system("@systemd@/bin/systemctl", "start", "--", sort(keys %unitsToStart)) == 0 or $res = 4;
unlink($startListFile);
# Reload units that need it. This includes remounting changed mount
# units.
if (scalar(keys %unitsToReload) > 0) {
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
unlink($reloadListFile);
}
# Print failed and new units.
my (@failed, @new, @restarting);


@ -1,7 +1,7 @@
{ config, lib, pkgs, utils, ... }:
with lib;
with utils;
with lib;
with import ./systemd-unit-options.nix { inherit config lib; };
let


@ -1,5 +1,3 @@
{ config, pkgs, modulesPath, ... }:
{
imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
imports = [ ./amazon-image.nix ];
}


@ -57,6 +57,7 @@ in rec {
(all nixos.tests.installer.simple)
(all nixos.tests.installer.simpleLabels)
(all nixos.tests.installer.simpleProvided)
(all nixos.tests.installer.swraid)
(all nixos.tests.installer.btrfsSimple)
(all nixos.tests.installer.btrfsSubvols)
(all nixos.tests.installer.btrfsSubvolDefault)


@ -260,6 +260,7 @@ in rec {
tests.installer.simple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simple.test);
tests.installer.simpleLabels = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleLabels.test);
tests.installer.simpleProvided = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleProvided.test);
tests.installer.swraid = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).swraid.test);
tests.installer.btrfsSimple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSimple.test);
tests.installer.btrfsSubvols = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
tests.installer.btrfsSubvolDefault = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test);
@ -297,6 +298,7 @@ in rec {
# TODO: put in networking.nix after the test becomes more complete
tests.networkingProxy = callTest tests/networking-proxy.nix {};
tests.nfs3 = callTest tests/nfs.nix { version = 3; };
tests.nfs4 = callTest tests/nfs.nix { version = 4; };
tests.nsd = callTest tests/nsd.nix {};
tests.openssh = callTest tests/openssh.nix {};
tests.panamax = hydraJob (import tests/panamax.nix { system = "x86_64-linux"; });
@ -308,8 +310,12 @@ in rec {
tests.simple = callTest tests/simple.nix {};
tests.tomcat = callTest tests/tomcat.nix {};
tests.udisks2 = callTest tests/udisks2.nix {};
tests.virtualbox = callTest tests/virtualbox.nix {};
tests.virtualbox = hydraJob (import tests/virtualbox.nix { system = "x86_64-linux"; });
tests.xfce = callTest tests/xfce.nix {};
tests.bootBiosCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosCdrom);
tests.bootBiosUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosUsb);
tests.bootUefiCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiCdrom);
tests.bootUefiUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiUsb);
/* Build a bunch of typical closures so that Hydra can keep track of

nixos/tests/boot.nix (new file)

@ -0,0 +1,63 @@
{ system ? builtins.currentSystem }:
with import ../lib/testing.nix { inherit system; };
with import ../lib/qemu-flags.nix;
with pkgs.lib;
let
iso =
(import ../lib/eval-config.nix {
inherit system;
modules =
[ ../modules/installer/cd-dvd/installation-cd-minimal.nix
../modules/testing/test-instrumentation.nix
{ key = "serial";
boot.loader.grub.timeout = mkOverride 0 0;
# The test cannot access the network, so any sources we
# need must be included in the ISO.
isoImage.storeContents =
[ pkgs.glibcLocales
pkgs.sudo
pkgs.docbook5
pkgs.docbook5_xsl
pkgs.grub
pkgs.perlPackages.XMLLibXML
pkgs.unionfs-fuse
pkgs.gummiboot
];
}
];
}).config.system.build.isoImage;
makeBootTest = name: machineConfig:
makeTest {
inherit iso;
name = "boot-" + name;
nodes = { };
testScript =
''
my $machine = createMachine({ ${machineConfig}, qemuFlags => '-m 768' });
$machine->start;
$machine->waitForUnit("multi-user.target");
$machine->shutdown;
'';
};
in {
bootBiosCdrom = makeBootTest "bios-cdrom" ''
cdrom => glob("${iso}/iso/*.iso")
'';
bootBiosUsb = makeBootTest "bios-usb" ''
usb => glob("${iso}/iso/*.iso")
'';
bootUefiCdrom = makeBootTest "uefi-cdrom" ''
cdrom => glob("${iso}/iso/*.iso"),
bios => '${pkgs.OVMF}/FV/OVMF.fd'
'';
bootUefiUsb = makeBootTest "uefi-usb" ''
usb => glob("${iso}/iso/*.iso"),
bios => '${pkgs.OVMF}/FV/OVMF.fd'
'';
}
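
Assuming the attributes added to release.nix earlier in this diff, one of the new boot tests could then be built on its own, for example (attribute path illustrative):

  nix-build nixos/release.nix -A tests.bootBiosCdrom.x86_64-linux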


@ -109,7 +109,12 @@ import ./make-test.nix (
$machine->waitUntilSucceeds("${xdo "check-startup" ''
search --sync --onlyvisible --name "startup done"
# close first start help popup
key Escape
key -delay 1000 Escape
# XXX: This is to make sure the popup is closed, but we had better take
# screenshots to detect visual changes.
key -delay 2000 Escape
key -delay 3000 Escape
key -delay 4000 Escape
windowfocus --sync
windowactivate --sync
''}");


@ -327,12 +327,12 @@ in {
$machine->succeed(
"parted /dev/vda --"
. " mklabel msdos"
. " mkpart primary ext2 1M 30MB" # /boot
. " mkpart extended 30M -1s"
. " mkpart logical 31M 1531M" # md0 (root), first device
. " mkpart logical 1540M 3040M" # md0 (root), second device
. " mkpart logical 3050M 3306M" # md1 (swap), first device
. " mkpart logical 3320M 3576M", # md1 (swap), second device
. " mkpart primary ext2 1M 100MB" # /boot
. " mkpart extended 100M -1s"
. " mkpart logical 102M 1602M" # md0 (root), first device
. " mkpart logical 1603M 3103M" # md0 (root), second device
. " mkpart logical 3104M 3360M" # md1 (swap), first device
. " mkpart logical 3361M 3617M", # md1 (swap), second device
"udevadm settle",
"ls -l /dev/vda* >&2",
"cat /proc/partitions >&2",


@ -1,4 +1,4 @@
import ./make-test.nix ({ version, ... }:
import ./make-test.nix ({ version ? 4, ... }:
let


@ -244,6 +244,7 @@ import ./make-test.nix ({ pkgs, ... }: with pkgs.lib; let
for (my $i = 0; $i <= 120; $i += 10) {
$machine->sleep(10);
return if checkRunning_${name};
eval { $_[0]->() } if defined $_[0];
}
die "VirtualBox VM didn't start up within 2 minutes";
}
@ -335,7 +336,9 @@ in {
$machine->screenshot("gui_manager_started");
$machine->sendKeys("ret");
$machine->screenshot("gui_manager_sent_startup");
waitForStartup_simple;
waitForStartup_simple (sub {
$machine->sendKeys("ret");
});
$machine->screenshot("gui_started");
waitForVMBoot_simple;
$machine->screenshot("gui_booted");


@ -9,7 +9,9 @@ stdenv.mkDerivation rec{
version = "0.10.0";
src = fetchurl {
url = "https://bitcoin.org/bin/bitcoin-core-0.10.0/bitcoin-${version}.tar.gz";
url = [ "https://bitcoin.org/bin/bitcoin-core-0.10.0/bitcoin-${version}.tar.gz"
"mirror://sourceforge/bitcoin/Bitcoin/bitcoin-0.10.0/bitcoin-${version}.tar.gz"
];
sha256 = "a516cf6d9f58a117607148405334b35d3178df1ba1c59229609d2bcd08d30624";
};


@ -17,7 +17,7 @@ stdenv.mkDerivation rec {
QT_PLUGIN_PATH="${qtscriptgenerator}/lib/qt4/plugins";
buildInputs = [ qtscriptgenerator stdenv.cc.libc gettext curl
libxml2 mysql taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi
libxml2 mysql.lib taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi
soprano qca2 libmtp liblastfm libgpod pkgconfig qjson ffmpeg libofa nepomuk_core ];
cmakeFlags = "-DKDE4_BUILD_TESTS=OFF";


@ -1,110 +0,0 @@
{ fetchgit, stdenv, unzip, pkgconfig, makeWrapper, libsndfile, libmicrohttpd, vim }:
stdenv.mkDerivation rec {
version = "8-1-2015";
name = "faust-compiler-${version}";
src = fetchgit {
url = git://git.code.sf.net/p/faudiostream/code;
rev = "4db76fdc02b6aec8d15a5af77fcd5283abe963ce";
sha256 = "f1ac92092ee173e4bcf6b2cb1ac385a7c390fb362a578a403b2b6edd5dc7d5d0";
};
# this version has a bug that manifests when doing faust2jack:
/*version = "0.9.67";*/
/*name = "faust-compiler-${version}";*/
/*src = fetchurl {*/
/*url = "http://downloads.sourceforge.net/project/faudiostream/faust-${version}.zip";*/
/*sha256 = "068vl9536zn0j4pknwfcchzi90rx5pk64wbcbd67z32w0csx8xm1";*/
/*};*/
buildInputs = [ unzip pkgconfig makeWrapper libsndfile libmicrohttpd vim];
makeFlags="PREFIX = $(out)";
FPATH="$out"; # <- where to search
patchPhase = ''
sed -i 's@?= $(shell uname -s)@:= Linux@g' architecture/osclib/oscpack/Makefile
sed -i 's@faust/misc.h@../../architecture/faust/misc.h@g' tools/sound2faust/sound2faust.cpp
sed -i 's@faust/gui/@../../architecture/faust/gui/@g' architecture/faust/misc.h
'';
buildPhase = ''
make -C compiler -f Makefile.unix
make -C architecture/osclib
g++ -O3 tools/sound2faust/sound2faust.cpp `pkg-config --cflags --static --libs sndfile` -o tools/sound2faust/sound2faust
make httpd
'';
installPhase = ''
echo install faust itself
mkdir -p $out/bin/
mkdir -p $out/include/
mkdir -p $out/include/faust/
mkdir -p $out/include/faust/osc/
install compiler/faust $out/bin/
echo install architecture and faust library files
mkdir -p $out/lib/faust
cp architecture/*.lib $out/lib/faust/
cp architecture/*.cpp $out/lib/faust/
echo install math documentation files
cp architecture/mathdoctexts-*.txt $out/lib/faust/
cp architecture/latexheader.tex $out/lib/faust/
echo install additional binary libraries: osc, http
([ -e architecture/httpdlib/libHTTPDFaust.a ] && cp architecture/httpdlib/libHTTPDFaust.a $out/lib/faust/) || echo libHTTPDFaust not available
cp architecture/osclib/*.a $out/lib/faust/
cp -r architecture/httpdlib/html/js $out/lib/faust/js
([ -e architecture/httpdlib/src/hexa/stylesheet ] && cp architecture/httpdlib/src/hexa/stylesheet $out/lib/faust/js/stylesheet.js) || echo stylesheet not available
([ -e architecture/httpdlib/src/hexa/jsscripts ] && cp architecture/httpdlib/src/hexa/jsscripts $out/lib/faust/js/jsscripts.js) || echo jsscripts not available
echo install includes files for architectures
cp -r architecture/faust $out/include/
echo install additional includes files for binary libraries: osc, http
cp architecture/osclib/faust/faust/OSCControler.h $out/include/faust/gui/
cp architecture/osclib/faust/faust/osc/*.h $out/include/faust/osc/
cp architecture/httpdlib/src/include/*.h $out/include/faust/gui/
echo patch header and cpp files
find $out/include/ -name "*.h" -type f | xargs sed "s@#include \"faust/@#include \"$out/include/faust/@g" -i
find $out/lib/faust/ -name "*.cpp" -type f | xargs sed "s@#include \"faust/@#include \"$out/include/faust/@g" -i
sed -i "s@../../architecture/faust/gui/@$out/include/faust/gui/@g" $out/include/faust/misc.h
wrapProgram $out/bin/faust \
--set FAUSTLIB $out/lib/faust \
--set FAUST_LIB_PATH $out/lib/faust \
--set FAUSTINC $out/include/
'';
meta = with stdenv.lib; {
description = "A functional programming language for realtime audio signal processing";
longDescription = ''
FAUST (Functional Audio Stream) is a functional programming
language specifically designed for real-time signal processing
and synthesis. FAUST targets high-performance signal processing
applications and audio plug-ins for a variety of platforms and
standards.
The Faust compiler translates DSP specifications into very
efficient C++ code. Thanks to the notion of architecture,
FAUST programs can be easily deployed on a large variety of
audio platforms and plugin formats (jack, alsa, ladspa, maxmsp,
puredata, csound, supercollider, pure, vst, coreaudio) without
any change to the FAUST code.
This package has just the compiler. Install faust for the full
set of faust2somethingElse tools.
'';
homepage = http://faust.grame.fr/;
downloadPage = http://sourceforge.net/projects/faudiostream/files/;
license = licenses.gpl2;
platforms = platforms.linux;
maintainers = [ maintainers.magnetophon ];
};
}


@ -1,100 +1,209 @@
{ fetchgit, stdenv, bash, alsaLib, atk, cairo, faust-compiler, fontconfig, freetype
, gcc, gdk_pixbuf, glib, gtk, jack2, makeWrapper, opencv, pango, pkgconfig, unzip
, gtkSupport ? true
, jackaudioSupport ? true
{ stdenv
, coreutils
, fetchgit
, makeWrapper
, pkgconfig
}:
stdenv.mkDerivation rec {
with stdenv.lib.strings;
let
version = "8-1-2015";
name = "faust-${version}";
src = fetchgit {
url = git://git.code.sf.net/p/faudiostream/code;
rev = "4db76fdc02b6aec8d15a5af77fcd5283abe963ce";
sha256 = "f1ac92092ee173e4bcf6b2cb1ac385a7c390fb362a578a403b2b6edd5dc7d5d0";
};
# this version has a bug that manifests when doing faust2jack:
/*version = "0.9.67";*/
/*name = "faust-${version}";*/
/*src = fetchurl {*/
/*url = "http://downloads.sourceforge.net/project/faudiostream/faust-${version}.zip";*/
/*sha256 = "068vl9536zn0j4pknwfcchzi90rx5pk64wbcbd67z32w0csx8xm1";*/
/*};*/
buildInputs = [ bash unzip faust-compiler gcc makeWrapper pkgconfig ]
++ stdenv.lib.optionals gtkSupport [
alsaLib atk cairo fontconfig freetype gdk_pixbuf glib gtk pango
]
++ stdenv.lib.optional jackaudioSupport jack2
;
makeFlags="PREFIX=$(out)";
FPATH="$out"; # <- where to search
phases = [ "unpackPhase installPhase postInstall" ];
installPhase = ''
sed -i 23,24d tools/faust2appls/faust2jack
mkdir $out/bin
install tools/faust2appls/faust2alsaconsole $out/bin
install tools/faust2appls/faustpath $out/bin
install tools/faust2appls/faustoptflags $out/bin
install tools/faust2appls/faust2alsa $out/bin
install tools/faust2appls/faust2jack $out/bin
patchShebangs $out/bin
wrapProgram $out/bin/faust2alsaconsole \
--prefix PKG_CONFIG_PATH : ${alsaLib}/lib/pkgconfig \
--set FAUSTLIB ${faust-compiler}/lib/faust \
--set FAUSTINC ${faust-compiler}/include/
GTK_PKGCONFIG_PATHS=${gtk}/lib/pkgconfig:${pango}/lib/pkgconfig:${glib}/lib/pkgconfig:${cairo}/lib/pkgconfig:${gdk_pixbuf}/lib/pkgconfig:${atk}/lib/pkgconfig:${freetype}/lib/pkgconfig:${fontconfig}/lib/pkgconfig
wrapProgram $out/bin/faust2alsa \
--prefix PKG_CONFIG_PATH : ${alsaLib}/lib/pkgconfig:$GTK_PKGCONFIG_PATHS \
--set FAUSTLIB ${faust-compiler}/lib/faust \
--set FAUSTINC ${faust-compiler}/include/ \
wrapProgram $out/bin/faust2jack \
--prefix PKG_CONFIG_PATH : ${jack2}/lib/pkgconfig:${opencv}/lib/pkgconfig:$GTK_PKGCONFIG_PATHS \
--set FAUSTLIB ${faust-compiler}/lib/faust \
--set FAUSTINC ${faust-compiler}/include/ \
''
+ stdenv.lib.optionalString (!gtkSupport) "rm $out/bin/faust2alsa"
+ stdenv.lib.optionalString (!gtkSupport || !jackaudioSupport) "rm $out/bin/faust2jack"
;
postInstall = ''
sed -e "s@\$FAUST_INSTALL /usr/local /usr /opt /opt/local@${faust-compiler}@g" -i $out/bin/faustpath
sed -i "s@/bin/bash@${bash}/bin/bash@g" $out/bin/faustoptflags
find $out/bin/ -name "*faust2*" -type f | xargs sed "s@pkg-config@${pkgconfig}/bin/pkg-config@g" -i
find $out/bin/ -name "*faust2*" -type f | xargs sed "s@CXX=g++@CXX=${gcc}/bin/g++@g" -i
find $out/bin/ -name "*faust2*" -type f | xargs sed "s@faust -i -a @${faust-compiler}/bin/faust -i -a ${faust-compiler}/lib/faust/@g" -i
'';
meta = with stdenv.lib; {
description = "A functional programming language for realtime audio signal processing";
longDescription = ''
FAUST (Functional Audio Stream) is a functional programming
language specifically designed for real-time signal processing
and synthesis. FAUST targets high-performance signal processing
applications and audio plug-ins for a variety of platforms and
standards.
The Faust compiler translates DSP specifications into very
efficient C++ code. Thanks to the notion of architecture,
FAUST programs can be easily deployed on a large variety of
audio platforms and plugin formats (jack, alsa, ladspa, maxmsp,
puredata, csound, supercollider, pure, vst, coreaudio) without
any change to the FAUST code.
'';
homepage = http://faust.grame.fr/;
downloadPage = http://sourceforge.net/projects/faudiostream/files/;
license = licenses.gpl2;
platforms = platforms.linux;
maintainers = [ maintainers.magnetophon ];
maintainers = with maintainers; [ magnetophon pmahoney ];
};
}
faust = stdenv.mkDerivation {
name = "faust-${version}";
inherit src;
buildInputs = [ makeWrapper ];
passthru = {
inherit wrap wrapWithBuildEnv;
};
preConfigure = ''
makeFlags="$makeFlags prefix=$out"
# The faust makefiles use 'system ?= $(shell uname -s)' but nix
# defines a 'system' env var, so unset it so that faust detects the
# correct system.
unset system
'';
# Remove most faust2appl scripts since they won't run properly
# without additional path setup. See faust.wrap,
# faust.wrapWithBuildEnv.
postInstall = ''
# syntax error when eval'd directly
pattern="faust2!(svg)"
(shopt -s extglob; rm "$out"/bin/$pattern)
'';
postFixup = ''
# Set faustpath explicitly.
substituteInPlace "$out"/bin/faustpath \
--replace "/usr/local /usr /opt /opt/local" "$out"
# The 'faustoptflags' script is 'source'd into other faust scripts and
# not used as an executable, so patch 'uname' usage directly
# rather than use makeWrapper.
substituteInPlace "$out"/bin/faustoptflags \
--replace uname "${coreutils}/bin/uname"
# wrapper for scripts that don't need faust.wrap*
for script in "$out"/bin/faust2*; do
wrapProgram "$script" \
--prefix PATH : "$out"/bin
done
'';
meta = meta // {
description = "A functional programming language for realtime audio signal processing";
longDescription = ''
FAUST (Functional Audio Stream) is a functional programming
language specifically designed for real-time signal processing
and synthesis. FAUST targets high-performance signal processing
applications and audio plug-ins for a variety of platforms and
standards.
The Faust compiler translates DSP specifications into very
efficient C++ code. Thanks to the notion of architecture,
FAUST programs can be easily deployed on a large variety of
audio platforms and plugin formats (jack, alsa, ladspa, maxmsp,
puredata, csound, supercollider, pure, vst, coreaudio) without
any change to the FAUST code.
This package has just the compiler, libraries, and headers.
Install faust2* for specific faust2appl scripts.
'';
};
};
# Default values for faust2appl.
faust2ApplBase =
{ baseName
, dir ? "tools/faust2appls"
, scripts ? [ baseName ]
, ...
}@args:
args // {
name = "${baseName}-${version}";
inherit src;
configurePhase = ":";
buildPhase = ":";
installPhase = ''
runHook preInstall
mkdir -p "$out/bin"
for script in ${concatStringsSep " " scripts}; do
cp "${dir}/$script" "$out/bin/"
done
runHook postInstall
'';
postInstall = ''
# For the faust2appl script, change 'faustpath' and
# 'faustoptflags' to absolute paths.
for script in "$out"/bin/*; do
substituteInPlace "$script" \
--replace ". faustpath" ". '${faust}/bin/faustpath'" \
--replace ". faustoptflags" ". '${faust}/bin/faustoptflags'"
done
'';
meta = meta // {
description = "The ${baseName} script, part of faust functional programming language for realtime audio signal processing";
};
};
# Some 'faust2appl' scripts, such as faust2alsa, run faust to
# generate cpp code, then invoke the c++ compiler to build the code.
# This builder wraps these scripts in parts of the stdenv such that
# when the scripts are called outside any nix build, they behave as
# if they were running inside a nix build in terms of compilers and
# paths being configured (e.g. rpath is set so that compiled
# binaries link to the libs inside the nix store)
#
# The function takes two main args: the appl name (e.g.
# 'faust2alsa') and an optional list of propagatedBuildInputs. It
# returns a derivation that contains only the bin/${appl} script,
# wrapped up so that it will run as if it was inside a nix build
# with those build inputs.
#
# The build input 'faust' is automatically added to the
# propagatedBuildInputs.
wrapWithBuildEnv =
{ baseName
, propagatedBuildInputs ? [ ]
, ...
}@args:
stdenv.mkDerivation ((faust2ApplBase args) // {
buildInputs = [ makeWrapper pkgconfig ];
propagatedBuildInputs = [ faust ] ++ propagatedBuildInputs;
postFixup = ''
# export parts of the build environment
for script in "$out"/bin/*; do
wrapProgram "$script" \
--set FAUST_LIB_PATH "${faust}/lib/faust" \
--prefix PATH : "$PATH" \
--prefix PKG_CONFIG_PATH : "$PKG_CONFIG_PATH" \
--set NIX_CFLAGS_COMPILE "\"$NIX_CFLAGS_COMPILE\"" \
--set NIX_LDFLAGS "\"$NIX_LDFLAGS\""
done
'';
});
# Builder for 'faust2appl' scripts, such as faust2firefox that
# simply need to be wrapped with some dependencies on PATH.
#
# The build input 'faust' is automatically added to the PATH.
wrap =
{ baseName
, runtimeInputs ? [ ]
, ...
}@args:
let
runtimePath = concatStringsSep ":" (map (p: "${p}/bin") ([ faust ] ++ runtimeInputs));
in stdenv.mkDerivation ((faust2ApplBase args) // {
buildInputs = [ makeWrapper ];
postFixup = ''
for script in "$out"/bin/*; do
wrapProgram "$script" --prefix PATH : "${runtimePath}"
done
'';
});
in faust


@ -0,0 +1,15 @@
{ faust
, alsaLib
, qt4
}:
faust.wrapWithBuildEnv {
baseName = "faust2alqt";
propagatedBuildInputs = [
alsaLib
qt4
];
}


@ -0,0 +1,29 @@
{ faust
, alsaLib
, atk
, cairo
, fontconfig
, freetype
, gdk_pixbuf
, glib
, gtk
, pango
}:
faust.wrapWithBuildEnv {
baseName = "faust2alsa";
propagatedBuildInputs = [
alsaLib
atk
cairo
fontconfig
freetype
gdk_pixbuf
glib
gtk
pango
];
}


@ -0,0 +1,20 @@
{ faust
, csound
}:
faust.wrapWithBuildEnv {
baseName = "faust2csound";
propagatedBuildInputs = [
csound
];
# faust2csound generated .cpp files have
# #include "csdl.h"
# but that file is in the csound/ subdirectory
preFixup = ''
NIX_CFLAGS_COMPILE="$(printf '%s' "$NIX_CFLAGS_COMPILE" | sed 's%${csound}/include%${csound}/include/csound%')"
'';
}


@ -0,0 +1,14 @@
{ faust
, xdg_utils
}:
# This just runs faust2svg, then attempts to open a browser using
# 'xdg-open'.
faust.wrap {
baseName = "faust2firefox";
runtimeInputs = [ xdg_utils ];
}


@ -0,0 +1,23 @@
{ faust
, gtk
, jack2
, opencv
}:
faust.wrapWithBuildEnv {
baseName = "faust2jack";
scripts = [
"faust2jack"
"faust2jackinternal"
"faust2jackconsole"
];
propagatedBuildInputs = [
gtk
jack2
opencv
];
}


@ -0,0 +1,22 @@
{ faust
, jack2
, opencv
, qt4
}:
faust.wrapWithBuildEnv {
baseName = "faust2jaqt";
scripts = [
"faust2jaqt"
"faust2jackserver"
];
propagatedBuildInputs = [
jack2
opencv
qt4
];
}


@ -0,0 +1,11 @@
{ faust
, lv2
}:
faust.wrapWithBuildEnv {
baseName = "faust2lv2";
propagatedBuildInputs = [ lv2 ];
}


@ -0,0 +1,17 @@
{ stdenv, fetchurl, pkgconfig, gtk2, libsndfile, portaudio }:
stdenv.mkDerivation rec {
name = "gnaural-1.0.20110606";
buildInputs = [ pkgconfig gtk2 libsndfile portaudio ];
src = fetchurl {
url = "mirror://sourceforge/gnaural/Gnaural/${name}.tar.gz";
sha256 = "0p9rasz1jmxf16vnpj17g3vzdjygcyz3l6nmbq6wr402l61f1vy5";
};
meta = with stdenv.lib;
{ description = "Auditory binaural-beat generator";
homepage = http://gnaural.sourceforge.net/;
license = licenses.gpl2;
maintainers = [ maintainers.emery ];
platforms = platforms.linux;
};
}


@ -10,11 +10,11 @@
stdenv.mkDerivation rec {
name = "kid3-${version}";
version = "3.1.1";
version = "3.1.2";
src = fetchurl {
url = "http://downloads.sourceforge.net/project/kid3/kid3/${version}/${name}.tar.gz";
sha256 = "0mr617k712zpd99rgsy313jrb6jcjn1malj4lirzqhp7307wsf34";
sha256 = "0ik2bxg2im7nwcgi85g2dj148n80mfhks20rsxnzazl7afk9fl08";
};
buildInputs = with stdenv.lib;


@ -3,11 +3,11 @@
pythonPackages.buildPythonPackage rec {
name = "mopidy-moped-${version}";
version = "0.3.3";
version = "0.5.0";
src = fetchurl {
url = "https://github.com/martijnboland/moped/archive/v${version}.tar.gz";
sha256 = "19f3asqx7wmla53nhrxzdwj6qlkjv2rcwh34jxp27bz7nkhn0ihv";
sha256 = "1bkx0c4yi48nxm1vzacdil9scn0ilwkbd1rgiga34p77lcg16qb2";
};
propagatedBuildInputs = [ mopidy ];


@ -3,11 +3,11 @@
pythonPackages.buildPythonPackage rec {
name = "mopidy-mopify-${version}";
version = "0.1.6";
version = "1.4.1";
src = fetchurl {
url = "https://github.com/dirkgroenen/mopidy-mopify/archive/${version}.tar.gz";
sha256 = "3581de6b0b42d2ece63bc153dcdba0594fbbeaacf695f2cd1e5d199670d83775";
sha256 = "1i752vnkgqfps5vym63rbsh1xm141z8r68d80bi076zr6zbzdjj9";
};
propagatedBuildInputs = [ mopidy ];


@ -3,11 +3,11 @@
pythonPackages.buildPythonPackage rec {
name = "mopidy-spotify-${version}";
version = "1.2.0";
version = "1.3.0";
src = fetchurl {
url = "https://github.com/mopidy/mopidy-spotify/archive/v${version}.tar.gz";
sha256 = "1fgxakylsx0nggis11v6bxfy8h3dl1n1v86liyfcj0xazb1mx69m";
sha256 = "0pwgg9xw86sjhv6w735fm0k81v0lv3gqlidgw90hr47hc4wajnzx";
};
propagatedBuildInputs = [ mopidy pythonPackages.pyspotify ];


@ -5,11 +5,11 @@
pythonPackages.buildPythonPackage rec {
name = "mopidy-${version}";
version = "0.19.4";
version = "1.0.0";
src = fetchurl {
url = "https://github.com/mopidy/mopidy/archive/v${version}.tar.gz";
sha256 = "13dyn9pgq0jns6915diizviqyn64yfysb08k77xsmxrr4bhm1156";
sha256 = "15cz6mqw8ihqxhlssnbbssl3bi1xxbmq7krf3hv0zmmdj73ilsd6";
};
propagatedBuildInputs = with pythonPackages; [


@ -0,0 +1,35 @@
{stdenv, fetchurl, scons, boost, ladspaH, pkgconfig }:
stdenv.mkDerivation rec {
version = "0.2-2";
name = "nova-filters-${version}";
src = fetchurl {
url = http://klingt.org/~tim/nova-filters/nova-filters_0.2-2.tar.gz;
sha256 = "16064vvl2w5lz4xi3lyjk4xx7fphwsxc14ajykvndiz170q32s6i";
};
buildInputs = [ scons boost ladspaH pkgconfig ];
patchPhase = ''
# remove TERM:
sed -i -e '4d' SConstruct
sed -i "s@mfpmath=sse@mfpmath=sse -I ${boost.dev}/include@g" SConstruct
sed -i "s@ladspa.h@${ladspaH}/include/ladspa.h@g" filters.cpp
sed -i "s/= check/= detail::filter_base<internal_type, checked>::check/" nova/source/dsp/filter.hpp
'';
buildPhase = ''
scons
'';
installPhase = ''
scons $sconsFlags "prefix=$out" install
'';
meta = {
homepage = http://klingt.org/~tim/nova-filters/;
description = "LADSPA plugins based on filters of nova";
license = stdenv.lib.licenses.gpl2Plus;
};
}


@ -0,0 +1,50 @@
{ stdenv, fetchurl, pkgconfig, gtk3, intltool, itstool, libxml2, brasero
, libcanberra_gtk3, gnome3, gst_all_1, libmusicbrainz5, libdiscid, isocodes
, makeWrapper }:
let
major = "3.15";
minor = "92";
GST_PLUGIN_PATH = stdenv.lib.makeSearchPath "lib/gstreamer-1.0" [
gst_all_1.gst-plugins-base
gst_all_1.gst-plugins-good
gst_all_1.gst-plugins-bad
gst_all_1.gst-libav ];
in stdenv.mkDerivation rec {
version = "${major}.${minor}";
name = "sound-juicer-${version}";
src = fetchurl {
url = "http://download.gnome.org/sources/sound-juicer/${major}/${name}.tar.xz";
sha256 = "b1420f267a4c553f6ca242d3b6082b60682c3d7b431ac3c979bd1ccfbf2687dd";
};
buildInputs = [ pkgconfig gtk3 intltool itstool libxml2 brasero libcanberra_gtk3
gnome3.gsettings_desktop_schemas libmusicbrainz5 libdiscid isocodes
makeWrapper gnome3.dconf
gst_all_1.gstreamer gst_all_1.gst-plugins-base
gst_all_1.gst-plugins-good gst_all_1.gst-plugins-bad ];
preFixup = ''
for f in $out/bin/* $out/libexec/*; do
wrapProgram "$f" \
--prefix XDG_DATA_DIRS : "${gnome3.gnome_themes_standard}/share:$XDG_ICON_DIRS:$GSETTINGS_SCHEMAS_PATH" \
--prefix GST_PLUGIN_SYSTEM_PATH_1_0 : "$GST_PLUGIN_SYSTEM_PATH_1_0" \
--prefix GIO_EXTRA_MODULES : "${gnome3.dconf}/lib/gio/modules" \
--prefix GST_PLUGIN_PATH : "${GST_PLUGIN_PATH}"
done
'';
postInstall = ''
rm $out/share/icons/hicolor/icon-theme.cache
'';
meta = with stdenv.lib; {
description = "A Gnome CD Ripper";
homepage = https://wiki.gnome.org/Apps/SoundJuicer;
maintainers = [ maintainers.bdimcheff ];
license = licenses.gpl2;
platforms = platforms.linux;
};
}


@ -1,5 +1,5 @@
{ stdenv, fetchurl, cmake, pkgconfig, attica, boost, gnutls, libechonest
, liblastfm, lucenepp, phonon, phonon_backend_vlc, qca2, qca2_ossl, qjson, qt4
, liblastfm, lucenepp, phonon, phonon_backend_vlc, qca2, qjson, qt4
, qtkeychain, quazip, sparsehash, taglib, websocketpp, makeWrapper
, enableXMPP ? true, libjreen ? null
@ -38,7 +38,6 @@ in stdenv.mkDerivation rec {
postInstall = let
pluginPath = stdenv.lib.concatStringsSep ":" [
"${phonon_backend_vlc}/lib/kde4/plugins"
"${qca2_ossl}/lib/qt4/plugins"
];
in ''
for i in "$out"/bin/*; do


@ -26,20 +26,17 @@ stdenv.mkDerivation rec {
"--sysconfdir=/etc"
] ++ stdenv.lib.optional useGTK2 "--with-gtk2";
installFlags = [ "DESTDIR=\${out}" ];
installFlags = [
"localstatedir=\${TMPDIR}"
"sysconfdir=\${out}/etc"
];
postInstall = ''
mv $out/$out/* $out
DIR=$out/$out
while rmdir $DIR 2>/dev/null; do
DIR="$(dirname "$DIR")"
done
substituteInPlace "$out/share/xgreeters/lightdm-gtk-greeter.desktop" \
--replace "Exec=lightdm-gtk-greeter" "Exec=$out/sbin/lightdm-gtk-greeter"
wrapProgram "$out/sbin/lightdm-gtk-greeter" \
--prefix XDG_DATA_DIRS ":" "${hicolor_icon_theme}/share"
'';
substituteInPlace "$out/share/xgreeters/lightdm-gtk-greeter.desktop" \
--replace "Exec=lightdm-gtk-greeter" "Exec=$out/sbin/lightdm-gtk-greeter"
wrapProgram "$out/sbin/lightdm-gtk-greeter" \
--prefix XDG_DATA_DIRS ":" "${hicolor_icon_theme}/share"
'';
meta = with stdenv.lib; {
homepage = http://launchpad.net/lightdm-gtk-greeter;


@ -27,16 +27,10 @@ stdenv.mkDerivation rec {
] ++ stdenv.lib.optional (qt4 != null) "--enable-liblightdm-qt"
++ stdenv.lib.optional (qt5 != null) "--enable-liblightdm-qt5";
installFlags = [ "DESTDIR=\${out}" ];
# Correct for the nested nix folder tree
postInstall = ''
mv $out/$out/* $out
DIR=$out/$out
while rmdir $DIR 2>/dev/null; do
DIR="$(dirname "$DIR")"
done
'';
installFlags = [
"sysconfdir=\${out}/etc"
"localstatedir=\${TMPDIR}"
];
meta = with stdenv.lib; {
homepage = http://launchpad.net/lightdm;


@ -1,26 +1,61 @@
{stdenv, fetchgit
{ stdenv, fetchurl, fetchgit
, autoconf, automake, pkgconfig, shared_mime_info, intltool
, glib, mono, gtk-sharp, gnome-sharp
, glib, mono, gtk-sharp, gnome, gnome-sharp, unzip
}:
stdenv.mkDerivation rec {
version = "5.1.4.0";
revision = "7d45bbe2ee22625f125d0c52548524f02d005cca";
version = "5.7.0.660";
revision = "6a74f9bdb90d9415b597064d815c9be38b401fee";
name = "monodevelop-${version}";
src = fetchgit {
url = https://github.com/mono/monodevelop.git;
rev = revision;
sha256 = "0qy12zdvb0jiic3pq1w9mcsz2wwxrn0m92abd184q06yg5m48g1b";
};
srcs = [
(fetchurl {
url = "http://download.mono-project.com/sources/monodevelop/${name}.tar.bz2";
sha256 = "0i9fpjkcys991dhxh02zf9imar3aj6fldk9ymy09vmr10f4d7vbf";
})
(fetchurl {
url = "https://launchpadlibrarian.net/153448659/NUnit-2.6.3.zip";
sha256 = "0vzbziq44zy7fyyhb44mf9ypfi7gvs17rxpg8c9d9lvvdpkshhcp";
})
(fetchurl {
url = "https://launchpadlibrarian.net/68057829/NUnit-2.5.10.11092.zip";
sha256 = "0k5h5bz1p2v3d0w0hpkpbpvdkcszgp8sr9ik498r1bs72w5qlwnc";
})
(fetchgit {
url = "https://github.com/mono/nuget-binary.git";
rev = "ecb27dd49384d70b6c861d28763906f2b25b7c8";
sha256 = "0dj0yglgwn07xw2crr66vl0vcgnr6m041pynyq0kdd0z8nlp92ki";
})
];
sourceRoot = "monodevelop-5.7";
postPatch = ''
# From https://bugzilla.xamarin.com/show_bug.cgi?id=23696#c19
# it seems parts of MonoDevelop 5.2+ need NUnit 2.6.4, which isn't included
# (?), so download it and put it in the right place in the tree
mkdir -v -p packages/NUnit.2.6.3/lib
cp -vfR ../NUnit-2.6.3/bin/framework/* packages/NUnit.2.6.3/lib
mkdir -v -p packages/NUnit.Runners.2.6.3/tools/lib
cp -vfR ../NUnit-2.6.3/bin/lib/* packages/NUnit.Runners.2.6.3/tools/lib
# cecil needs NUnit 2.5.10 - this is also missing from the tar
cp -vfR ../NUnit-2.5.10.11092/bin/net-2.0/framework/* external/cecil/Test/libs/nunit-2.5.10
# the tar doesn't include the nuget binary, so grab it from github and copy it
# into the right place
cp -vfR ../nuget-binary-*/* external/nuget-binary/
'';
buildInputs = [
autoconf automake pkgconfig shared_mime_info intltool
mono gtk-sharp gnome-sharp
mono gtk-sharp gnome-sharp unzip
];
preConfigure = "patchShebangs ./configure";
preBuild = ''
cat > ./main/buildinfo <<EOF
cat > ./buildinfo <<EOF
Release ID: ${version}
Git revision: ${revision}
Build date: 1970-01-01 00:00:01
@ -31,9 +66,9 @@ stdenv.mkDerivation rec {
for prog in monodevelop mdtool; do
patch -p 0 $out/bin/$prog <<EOF
2a3,5
> export MONO_GAC_PREFIX=${gtk-sharp}:\$MONO_GAC_PREFIX
> export MONO_GAC_PREFIX=${gnome-sharp}:${gtk-sharp}:\$MONO_GAC_PREFIX
> export PATH=${mono}/bin:\$PATH
> export LD_LIBRARY_PATH=${glib}/lib:${gnome-sharp}/lib:${gtk-sharp}/lib:${gtk-sharp.gtk}/lib:\$LD_LIBRARY_PATH
> export LD_LIBRARY_PATH=${glib}/lib:${gnome.libgnomeui}/lib:${gnome.gnome_vfs}/lib:${gnome-sharp}/lib:${gtk-sharp}/lib:${gtk-sharp.gtk}/lib:\$LD_LIBRARY_PATH
>
EOF
done
@ -43,5 +78,6 @@ stdenv.mkDerivation rec {
meta = with stdenv.lib; {
platforms = platforms.linux;
maintainers = with maintainers; [ obadz ];
};
}


@ -3,7 +3,7 @@
assert stdenv.system == "i686-linux" || stdenv.system == "x86_64-linux";
let
build = "3065";
build = "3083";
libPath = stdenv.lib.makeLibraryPath [glib xlibs.libX11 gtk cairo pango];
in let
# package with just the binaries
@ -13,15 +13,15 @@ in let
src =
if stdenv.system == "i686-linux" then
fetchurl {
name = "sublimetext-3.0.65.tar.bz2";
name = "sublimetext-3.0.83.tar.bz2";
url = "http://c758482.r82.cf2.rackcdn.com/sublime_text_3_build_${build}_x32.tar.bz2";
sha256 = "e25f84fe0d0c02ce71274d334fd42ce6313adcd4ec1d588b165d25f5e93ad78d";
sha256 = "0r9irk2gdwdx0dk7lgssr4krfvf3lf71pzaz5hyjc704zaxf5s49";
}
else
fetchurl {
name = "sublimetext-3.0.65.tar.bz2";
name = "sublimetext-3.0.83.tar.bz2";
url = "http://c758482.r82.cf2.rackcdn.com/sublime_text_3_build_${build}_x64.tar.bz2";
sha256 = "fe548e6d86d72cd7e90eee9d5396b590ae6e8f8b0dfc661d86c814214e60faea";
sha256 = "1vhlrqz7xscmjnxpz60mdpvflanl26d7673ml7psd75n0zvcfra5";
};
dontStrip = true;


@ -1,4 +1,4 @@
{ stdenv, fetchurl, qt4, popplerQt4, zlib, pkgconfig, poppler}:
{ stdenv, fetchurl, qt4, poppler_qt4, zlib, pkgconfig, poppler}:
stdenv.mkDerivation rec {
pname = "texmaker";
@ -10,7 +10,7 @@ stdenv.mkDerivation rec {
sha256 = "1h5rxdq6f05wk3lnlw96fxwrb14k77cx1mwy648127h2c8nsgw4z";
};
buildInputs = [ qt4 popplerQt4 zlib ];
buildInputs = [ qt4 poppler_qt4 zlib ];
nativeBuildInputs = [ pkgconfig poppler ];


@ -1,4 +1,4 @@
{ stdenv, fetchurl, qt4, popplerQt4, zlib}:
{ stdenv, fetchurl, qt4, poppler_qt4, zlib}:
stdenv.mkDerivation rec {
pname = "texstudio";
@ -11,10 +11,10 @@ stdenv.mkDerivation rec {
sha256 = "167d78nfk265jjvl129nr70v8ladb2rav2qyhw7ngr6m54gak831";
};
buildInputs = [ qt4 popplerQt4 zlib ];
buildInputs = [ qt4 poppler_qt4 zlib ];
preConfigure = ''
export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -I$(echo ${popplerQt4}/include/poppler/qt4) "
export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -I$(echo ${poppler_qt4}/include/poppler/qt4) "
qmake PREFIX=$out texstudio.pro
'';


@ -40,7 +40,7 @@ stdenv.mkDerivation rec {
(mkEnable (libcl != null) "opencl")
(mkWith true "modules")
(mkWith true "gcc-arch=${arch}")
(mkEnable true "hdri")
#(mkEnable true "hdri") This breaks some dependencies
(mkWith (perl != null) "perl")
(mkWith (jemalloc != null) "jemalloc")
(mkWith true "frozenpaths")
@ -79,6 +79,18 @@ stdenv.mkDerivation rec {
libxml2
];
propagatedBuildInputs = []
++ (stdenv.lib.optional (lcms2 != null) lcms2)
++ (stdenv.lib.optional (liblqr1 != null) liblqr1)
++ (stdenv.lib.optional (fftw != null) fftw)
++ (stdenv.lib.optional (libtool != null) libtool)
++ (stdenv.lib.optional (jemalloc != null) jemalloc)
++ (stdenv.lib.optional (libXext != null) libXext)
++ (stdenv.lib.optional (libX11 != null) libX11)
++ (stdenv.lib.optional (libXt != null) libXt)
++ (stdenv.lib.optional (bzip2 != null) bzip2)
;
postInstall = ''(cd "$out/include" && ln -s ImageMagick* ImageMagick)'';
meta = with stdenv.lib; {

View File

@ -1,13 +1,13 @@
{stdenv, fetchurl, fetchurlGnome, gtk, pkgconfig, perl, perlXMLParser, libxml2, gettext
{stdenv, fetchurl, gtk, pkgconfig, perl, perlXMLParser, libxml2, gettext
, python, libxml2Python, docbook5, docbook_xsl, libxslt, intltool, libart_lgpl
, withGNOME ? false, libgnomeui }:
stdenv.mkDerivation rec {
name = src.pkgname;
name = "dia-${minVer}.3";
minVer = "0.97";
src = fetchurlGnome {
project = "dia";
major = "0"; minor = "97"; patchlevel = "3"; extension = "xz";
src = fetchurl {
url = "mirror://gnome/sources/dia/${minVer}/${name}.tar.xz";
sha256 = "0d3x6w0l6fwd0l8xx06y1h56xf8ss31yzia3a6xr9y28xx44x492";
};


@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
buildInputs = [ qt4 kdelibs phonon qimageblitz qca2 eigen lcms libjpeg libtiff
jasper libgphoto2 kdepimlibs gettext soprano liblqr1 lensfun qjson libkdcraw
opencv libkexiv2 libkipi boost shared_desktop_ontologies marble mysql ];
opencv libkexiv2 libkipi boost shared_desktop_ontologies marble mysql.lib ];
# Make digikam find some FindXXXX.cmake
KDEDIRS="${marble}:${qjson}";


@ -18,7 +18,7 @@ stdenv.mkDerivation rec {
buildInputs = [
boost eigen gettext jasper kdelibs kdepimlibs lcms lensfun
libgphoto2 libjpeg libkdcraw libkexiv2 libkipi liblqr1 libpgf
libtiff marble mysql opencv phonon qca2 qimageblitz qjson qt4
libtiff marble mysql.lib opencv phonon qca2 qimageblitz qjson qt4
shared_desktop_ontologies soprano
];


@ -28,12 +28,13 @@ stdenv.mkDerivation rec {
#configureFlags = [ "--disable-print" ];
# "screenshot" needs this.
NIX_LDFLAGS = "-rpath ${xlibs.libX11}/lib";
NIX_LDFLAGS = "-rpath ${xlibs.libX11}/lib"
+ stdenv.lib.optionalString stdenv.isDarwin " -lintl";
meta = {
description = "The GNU Image Manipulation Program";
homepage = http://www.gimp.org/;
license = stdenv.lib.licenses.gpl3Plus;
platforms = stdenv.lib.platforms.linux;
platforms = stdenv.lib.platforms.unix;
};
}


@ -1,33 +1,32 @@
{ stdenv, fetchurl, xulrunner }:
stdenv.mkDerivation rec {
name = "pencil-2.0.5";
version = "2.0.8";
name = "pencil-${version}";
src = fetchurl {
url = "http://evoluspencil.googlecode.com/files/${name}.tar.gz";
sha256 = "0rn5nb08p8wph5s5gajkil6y06zgrm86p4gnjdgv76czx1fqazm0";
url = "https://github.com/prikhi/pencil/releases/download/v${version}/Pencil-${version}-linux-pkg.tar.gz";
sha256 = "3426d0222b213649e448b06384556718c833667394f442682ff66da3cda1b881";
};
# Pre-built package
buildPhase = "true";
buildPhase = "";
installPhase = ''
mkdir -p "$out"
cp -r usr/* "$out"
cp COPYING "$out/share/pencil"
sed -e "s|/usr/bin/xulrunner|${xulrunner}/bin/xulrunner|" \
-e "s|/usr/share/pencil|$out/share/pencil|" \
-e "s|/usr/share/evolus-pencil|$out/share/evolus-pencil|" \
-i "$out/bin/pencil"
sed -e "s|/usr/bin/pencil|$out/bin/pencil|" \
-e "s|Icon=.*|Icon=$out/share/pencil/skin/classic/icon.svg|" \
-e "s|Icon=.*|Icon=$out/share/evolus-pencil/skin/classic/icon.svg|" \
-i "$out/share/applications/pencil.desktop"
'';
meta = with stdenv.lib; {
description = "GUI prototyping/mockup tool";
homepage = http://pencil.evolus.vn/;
homepage = http://github.com/prikhi/pencil;
license = licenses.gpl2; # Commercial license is also available
maintainers = [ maintainers.bjornfor ];
maintainers = with maintainers; [ bjornfor prikhi ];
platforms = platforms.linux;
};
}


@ -0,0 +1,40 @@
{ stdenv, fetchurl, cairo, colord, glib, gtk3, intltool, itstool, libxml2
, makeWrapper, pkgconfig, saneBackends, systemd, vala }:
let version = "3.16.0.1"; in
stdenv.mkDerivation rec {
name = "simple-scan-${version}";
src = fetchurl {
sha256 = "0p1knmbrdwrnjjk5x0szh3ja2lfamaaynj2ai92zgci2ma5xh2ma";
url = "https://launchpad.net/simple-scan/3.16/${version}/+download/${name}.tar.xz";
};
meta = with stdenv.lib; {
description = "Simple scanning utility";
longDescription = ''
A really easy way to scan both documents and photos. You can crop out the
bad parts of a photo and rotate it if it is the wrong way round. You can
print your scans, export them to pdf, or save them in a range of image
formats. Basically a frontend for SANE - which is the same backend as
XSANE uses. This means that all existing scanners will work and the
interface is well tested.
'';
homepage = https://launchpad.net/simple-scan;
license = with licenses; gpl3Plus;
platforms = with platforms; linux;
maintainers = with maintainers; [ nckx ];
};
buildInputs = [ cairo colord glib gtk3 intltool itstool libxml2 makeWrapper
pkgconfig saneBackends systemd vala ];
enableParallelBuilding = true;
doCheck = true;
preFixup = ''
wrapProgram "$out/bin/simple-scan" \
--prefix XDG_DATA_DIRS : "$GSETTINGS_SCHEMAS_PATH"
'';
}


@ -107,7 +107,7 @@ let
OggVorbis = libvorbis;
OpenAL = openal;
OpenEXR = openexr;
Poppler = poppler.poppler_qt4;
Poppler = poppler_qt4;
Prison = prison;
PulseAudio = pulseaudio;
PythonLibrary = python;


@ -10,11 +10,11 @@
with lib;
stdenv.mkDerivation rec {
name = "blender-2.73a";
name = "blender-2.74";
src = fetchurl {
url = "http://download.blender.org/source/${name}.tar.gz";
sha256 = "114ipidrja6ryi6wv0w55wmh10ikazy24r8js596g7b9fpkzpymc";
sha256 = "178i19pz7jl79b4wn92869j6qymawsa0kaw1dxaprbjnqsvcx8qc";
};
patches = [ ./sm52.patch ];


@ -1,10 +1,10 @@
{ stdenv, cmake, fetchurl, pkgconfig, qt4, zlib, bzip2 }:
stdenv.mkDerivation rec {
name = "doomseeker-0.12.2b";
name = "doomseeker-1.0";
src = fetchurl {
url = "http://doomseeker.drdteam.org/files/${name}_src.tar.bz2";
sha256 = "1bcrxc3g9c6b4d8dbm2rx0ldxkqc5fc91jndkwiaykf8hajm0jnr";
sha256 = "172ybxg720r64hp6aah0hqvxklqv1cf8v7kwx0ng5ap0h20jydbw";
};
cmakeFlags = ''


@ -1,13 +1,12 @@
{ stdenv, fetchurl, buildPythonPackage, pythonPackages, slowaes }:
buildPythonPackage rec {
namePrefix = "";
name = "electrum-${version}";
version = "2.0.3";
version = "2.0.4";
src = fetchurl {
url = "https://download.electrum.org/Electrum-${version}.tar.gz";
sha256 = "1kzrbnkl5jps0kf0420vzpiqjk3v1jxvlrxwhc0f58xbqyc7l4mj";
sha256 = "0q9vrrzy2iypfg2zvs3glzvqyq65dnwn1ijljvfqfwrkpvpp0zxp";
};
propagatedBuildInputs = with pythonPackages; [
@ -24,16 +23,21 @@ buildPythonPackage rec {
tlslite
];
postPatch = ''
preInstall = ''
mkdir -p $out/share
sed -i 's@usr_share = .*@usr_share = os.getenv("out")+"/share"@' setup.py
'';
meta = {
description = "Bitcoin thin-wallet";
long-description = "Electrum is an easy to use Bitcoin client. It protects you from losing coins in a backup mistake or computer failure, because your wallet can be recovered from a secret phrase that you can write on paper or learn by heart. There is no waiting time when you start the client, because it does not download the Bitcoin blockchain.";
homepage = "https://electrum.org";
license = stdenv.lib.licenses.gpl3;
maintainers = [ "emery@vfemail.net" stdenv.lib.maintainers.joachifm ];
meta = with stdenv.lib; {
description = "Bitcoin thin-client";
longDescription = ''
An easy-to-use Bitcoin client featuring wallets generated from
mnemonic seeds (in addition to other, more advanced, wallet options)
and the ability to perform transactions without downloading a copy
of the blockchain.
'';
homepage = https://electrum.org;
license = licenses.gpl3;
maintainers = with maintainers; [ emery joachifm ];
};
}


@ -1,12 +1,13 @@
{stdenv, fetchgit, bzip2, qt4, libX11}:
stdenv.mkDerivation rec {
name = "evopedia-0.4.2";
name = "evopedia-${version}";
version = "0.4.4";
src = fetchgit {
url = git://gitorious.org/evopedia/evopedia.git;
rev = "v0.4.2" ;
md5 = "a2f19ed6e4d936c28cee28d44387b682";
url = https://github.com/evopedia/evopedia_qt;
rev = "refs/tags/v${version}";
sha256 = "1biq9zaj8nhxx1pixidsn97iwp9qy1yslgl0znpa4d4p35jcg48g";
};
configurePhase = ''
@ -19,7 +20,7 @@ stdenv.mkDerivation rec {
description = "Offline Wikipedia Viewer";
homepage = http://www.evopedia.info;
license = stdenv.lib.licenses.gpl3Plus;
maintainers = with stdenv.lib.maintainers; [viric];
maintainers = with stdenv.lib.maintainers; [ qknight ];
platforms = with stdenv.lib.platforms; linux;
};
}

Some files were not shown because too many files have changed in this diff.