diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
index ad7d17c9c283..17f2dde31a71 100644
--- a/nixos/modules/services/cluster/kubernetes/addon-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -62,50 +62,19 @@ in
       '';
     };
 
-    enable = mkEnableOption "Kubernetes addon manager";
-
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
-    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
+    enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
   };
 
   ###### implementation
-  config = let
-
-    addonManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    bootstrapAddonsPaths = filter (a: a != null) [
-      cfg.bootstrapAddonsKubeconfig.caFile
-      cfg.bootstrapAddonsKubeconfig.certFile
-      cfg.bootstrapAddonsKubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     environment.etc."kubernetes/addons".source = "${addons}/";
 
-    #TODO: Get rid of kube-addon-manager in the future for the following reasons
-    # - it is basically just a shell script wrapped around kubectl
-    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
-    # - it is designed to be used with k8s system components only
-    # - it would be better with a more Nix-oriented way of managing addons
     systemd.services.kube-addon-manager = {
       description = "Kubernetes addon manager";
       wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-node-online.target" ];
-      before = [ "kubernetes.target" ];
-      environment = {
-        ADDON_PATH = "/etc/kubernetes/addons/";
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
-      };
-      path = with pkgs; [ gawk kubectl ];
-      preStart = ''
-        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
-          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
-          sleep 2
-        done
-      '';
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = "${top.package}/bin/kube-addons";
@@ -115,52 +84,8 @@ in
         Restart = "on-failure";
        RestartSec = 10;
       };
-      unitConfig.ConditionPathExists = addonManagerPaths;
     };
 
-    systemd.paths.kube-addon-manager = {
-      wantedBy = [ "kube-addon-manager.service" ];
-      pathConfig = {
-        PathExists = addonManagerPaths;
-        PathChanged = addonManagerPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
-
-    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
-      wantedBy = [ "kube-control-plane-online.target" ];
-      after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      path = [ pkgs.kubectl ];
-      environment = {
-        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
-      };
-      preStart = with pkgs; let
-        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
-          cfg.bootstrapAddons;
-      in ''
-        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
-          echo kubectl auth can-i '*' '*': exit status $?
-          sleep 2
-        done
-
-        kubectl apply -f ${concatStringsSep " \\\n -f " files}
-      '';
-      script = "echo Ok";
-      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
-    };
-
-    systemd.paths.kube-addon-manager-bootstrap = {
-      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
-      pathConfig = {
-        PathExists = bootstrapAddonsPaths;
-        PathChanged = bootstrapAddonsPaths;
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
-
     services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
     (let
       name = "system:kube-addon-manager";
diff --git a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index 2295694ffc74..454e7d35bc01 100644
--- a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -169,23 +169,6 @@ in {
         };
       };
 
-      kubernetes-dashboard-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          labels = {
-            k8s-app = "kubernetes-dashboard";
-            # Allows editing resource and makes sure it is created first.
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
-          name = "kubernetes-dashboard-settings";
-          namespace = "kube-system";
-        };
-      };
-    };
-
-    services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
-
       kubernetes-dashboard-sa = {
         apiVersion = "v1";
         kind = "ServiceAccount";
@@ -227,9 +210,20 @@ in {
         };
         type = "Opaque";
       };
-    }
-
-    (optionalAttrs cfg.rbac.enable
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    } // (optionalAttrs cfg.rbac.enable
     (let
       subjects = [{
        kind = "ServiceAccount";
@@ -329,6 +323,6 @@ in {
         inherit subjects;
       };
     })
-    ))];
+    ));
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
index f293dd79f42a..33796bf2e080 100644
--- a/nixos/modules/services/cluster/kubernetes/apiserver.nix
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -290,32 +290,11 @@ in
 
   ###### implementation
   config = mkMerge [
-    (let
-
-      apiserverPaths = filter (a: a != null) [
-        cfg.clientCaFile
-        cfg.etcd.caFile
-        cfg.etcd.certFile
-        cfg.etcd.keyFile
-        cfg.kubeletClientCaFile
-        cfg.kubeletClientCertFile
-        cfg.kubeletClientKeyFile
-        cfg.serviceAccountKeyFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-      etcdPaths = filter (a: a != null) [
-        config.services.etcd.trustedCaFile
-        config.services.etcd.certFile
-        config.services.etcd.keyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
       systemd.services.kube-apiserver = {
         description = "Kubernetes APIServer Service";
-        wantedBy = [ "kube-control-plane-online.target" ];
-        after = [ "certmgr.service" ];
-        before = [ "kube-control-plane-online.target" ];
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" ];
         serviceConfig = {
           Slice = "kubernetes.slice";
           ExecStart = ''${top.package}/bin/kube-apiserver \
@@ -386,15 +365,6 @@ in
           Restart = "on-failure";
           RestartSec = 5;
         };
-        unitConfig.ConditionPathExists = apiserverPaths;
-      };
-
-      systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
-        wantedBy = [ "kube-apiserver.service" ];
-        pathConfig = {
-          PathExists = apiserverPaths;
-          PathChanged = apiserverPaths;
-        };
       };
 
       services.etcd = {
@@ -408,18 +378,6 @@ in
         initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
       };
 
-      systemd.services.etcd = {
-        unitConfig.ConditionPathExists = etcdPaths;
-      };
-
-      systemd.paths.etcd = {
-        wantedBy = [ "etcd.service" ];
-        pathConfig = {
-          PathExists = etcdPaths;
-          PathChanged = etcdPaths;
-        };
-      };
-
       services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
 
         apiserver-kubelet-api-admin-crb = {
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
index b94e8bd86d4c..0b73d090f241 100644
--- a/nixos/modules/services/cluster/kubernetes/controller-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -104,31 +104,11 @@ in
   };
 
   ###### implementation
-  config = let
-
-    controllerManagerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-      cfg.rootCaFile
-      cfg.serviceAccountKeyFile
-      cfg.tlsCertFile
-      cfg.tlsKeyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-controller-manager = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
       description = "Kubernetes Controller Manager Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         RestartSec = "30s";
         Restart = "on-failure";
@@ -140,7 +120,7 @@ in
             "--cluster-cidr=${cfg.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
          ${optionalString (cfg.rootCaFile!=null)
            "--root-ca-file=${cfg.rootCaFile}"} \
@@ -161,16 +141,7 @@ in
         User = "kubernetes";
         Group = "kubernetes";
       };
-      path = top.path ++ [ pkgs.kubectl ];
-      unitConfig.ConditionPathExists = controllerManagerPaths;
-    };
-
-    systemd.paths.kube-controller-manager = {
-      wantedBy = [ "kube-controller-manager.service" ];
-      pathConfig = {
-        PathExists = controllerManagerPaths;
-        PathChanged = controllerManagerPaths;
-      };
+      path = top.path;
     };
 
     services.kubernetes.pki.certs = with top.lib; {
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 143b41f57f6a..3790ac9b6918 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -256,29 +256,6 @@ in {
       wantedBy = [ "multi-user.target" ];
     };
 
-    systemd.targets.kube-control-plane-online = {
-      wantedBy = [ "kubernetes.target" ];
-      before = [ "kubernetes.target" ];
-    };
-
-    systemd.services.kube-control-plane-online = rec {
-      description = "Kubernetes control plane is online";
-      wantedBy = [ "kube-control-plane-online.target" ];
-      after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      path = [ pkgs.curl ];
-      preStart = ''
-        until curl -Ssf ${cfg.apiserverAddress}/healthz do
-          echo curl -Ssf ${cfg.apiserverAddress}/healthz: exit status $?
-          sleep 3
-        done
-      '';
-      script = "echo Ok";
-      serviceConfig = {
-        TimeoutSec = "500";
-      };
-    };
-
     systemd.tmpfiles.rules = [
       "d /opt/cni/bin 0755 root root -"
       "d /run/kubernetes 0755 kubernetes kubernetes -"
@@ -302,8 +279,6 @@ in {
       services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
                           then cfg.apiserver.advertiseAddress
                           else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
-
-      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
     })
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index d9437427d6d1..93ee2fd65eeb 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -23,27 +23,17 @@ in
 {
   ###### interface
   options.services.kubernetes.flannel = {
-    enable = mkEnableOption "flannel networking";
-    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
+    enable = mkEnableOption "enable flannel networking";
   };
 
   ###### implementation
-  config = let
-
-    flannelPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
-
-  in mkIf cfg.enable {
+  config = mkIf cfg.enable {
     services.flannel = {
 
       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
-      inherit storageBackend kubeconfig;
-      nodeName = top.kubelet.hostname;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
     };
 
     services.kubernetes.kubelet = {
@@ -58,66 +48,24 @@ in
       }];
     };
 
-    systemd.services.mk-docker-opts = {
+    systemd.services."mk-docker-opts" = {
       description = "Pre-Docker Actions";
-      wantedBy = [ "flannel.target" ];
-      before = [ "flannel.target" ];
       path = with pkgs; [ gawk gnugrep ];
       script = ''
         ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
         systemctl restart docker
       '';
-      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
       serviceConfig.Type = "oneshot";
     };
 
-    systemd.paths.flannel-subnet-env = {
-      wantedBy = [ "mk-docker-opts.service" ];
+    systemd.paths."flannel-subnet-env" = {
+      wantedBy = [ "flannel.service" ];
       pathConfig = {
-        PathExists = [ "/run/flannel/subnet.env" ];
-        PathChanged = [ "/run/flannel/subnet.env" ];
+        PathModified = "/run/flannel/subnet.env";
         Unit = "mk-docker-opts.service";
       };
     };
 
-    systemd.targets.flannel = {
-      wantedBy = [ "kube-node-online.target" ];
-      before = [ "kube-node-online.target" ];
-    };
-
-    systemd.services.flannel = {
-      wantedBy = [ "flannel.target" ];
-      after = [ "kubelet.target" ];
-      before = [ "flannel.target" ];
-      path = with pkgs; [ iptables kubectl ];
-      environment.KUBECONFIG = kubeconfig;
-      preStart = let
-        args = [
-          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
-          # flannel exits if node is not registered yet, before that there is no podCIDR
-          "--output=jsonpath={.items[0].spec.podCIDR}"
-          # if jsonpath cannot be resolved exit with status 1
-          "--allow-missing-template-keys=false"
-        ];
-      in ''
-        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
-          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
-          sleep 1
-        done
-      '';
-      unitConfig.ConditionPathExists = flannelPaths;
-    };
-
-    systemd.paths.flannel = {
-      wantedBy = [ "flannel.service" ];
-      pathConfig = {
-        PathExists = flannelPaths;
-        PathChanged = flannelPaths;
-      };
-    };
-
-    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
-
     systemd.services.docker = {
       environment.DOCKER_OPTS = "-b none";
       serviceConfig.EnvironmentFile = "-/run/flannel/docker";
@@ -144,6 +92,7 @@ in
     # give flannel som kubernetes rbac permissions if applicable
     services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
       flannel-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -179,6 +128,7 @@ in
         name = "flannel-client";
       }];
     };
+
   };
  };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
index 4c5df96bcc6a..e93062d97437 100644
--- a/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -234,28 +234,21 @@ in
 
   ###### implementation
   config = mkMerge [
-    (let
-
-      kubeletPaths = filter (a: a != null) [
-        cfg.kubeconfig.caFile
-        cfg.kubeconfig.certFile
-        cfg.kubeconfig.keyFile
-        cfg.clientCaFile
-        cfg.tlsCertFile
-        cfg.tlsKeyFile
-      ];
-
-    in mkIf cfg.enable {
+    (mkIf cfg.enable {
       services.kubernetes.kubelet.seedDockerImages = [infraContainer];
 
       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubelet.target" ];
-        after = [ "kube-control-plane-online.target" ];
-        before = [ "kubelet.target" ];
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
         path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
         preStart = ''
-          rm -f /opt/cni/bin/* || true
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
             ln -fs ${package}/bin/* /opt/cni/bin
@@ -308,56 +301,6 @@ in
           '';
           WorkingDirectory = top.dataDir;
         };
-        unitConfig.ConditionPathExists = kubeletPaths;
-      };
-
-      systemd.paths.kubelet = {
-        wantedBy = [ "kubelet.service" ];
-        pathConfig = {
-          PathExists = kubeletPaths;
-          PathChanged = kubeletPaths;
-        };
-      };
-
-      systemd.services.docker.before = [ "kubelet.service" ];
-
-      systemd.services.docker-seed-images = {
-        wantedBy = [ "docker.service" ];
-        after = [ "docker.service" ];
-        before = [ "kubelet.service" ];
-        path = with pkgs; [ docker ];
-        preStart = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.seedDockerImages}
-        '';
-        script = "echo Ok";
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        serviceConfig.Slice = "kubernetes.slice";
-      };
-
-      systemd.services.kubelet-online = {
-        wantedBy = [ "kube-node-online.target" ];
-        after = [ "flannel.target" "kubelet.target" ];
-        before = [ "kube-node-online.target" ];
-        # it is complicated. flannel needs kubelet to run the pause container before
-        # it discusses the node CIDR with apiserver and afterwards configures and restarts
-        # dockerd. Until then prevent creating any pods because they have to be recreated anyway
-        # because the network of docker0 has been changed by flannel.
-        script = let
-          docker-env = "/run/flannel/docker";
-          flannel-date = "stat --print=%Y ${docker-env}";
-          docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
-        in ''
-          until test -f ${docker-env} ; do sleep 1 ; done
-          while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
-            sleep 1
-          done
-        '';
-        serviceConfig.Type = "oneshot";
-        serviceConfig.Slice = "kubernetes.slice";
       };
 
       # Allways include cni plugins
@@ -404,16 +347,5 @@ in
         };
       };
     })
-
-    {
-      systemd.targets.kubelet = {
-        wantedBy = [ "kube-node-online.target" ];
-        before = [ "kube-node-online.target" ];
-      };
-
-      systemd.targets.kube-node-online = {
-        wantedBy = [ "kubernetes.target" ];
-        before = [ "kubernetes.target" ];
-      };
-    }
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 47384ae50a07..733479e24c97 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -27,11 +27,12 @@ let
   certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
   cfsslAPITokenLength = 32;
 
-  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
-    server = top.apiserverAddress;
-    certFile = cert;
-    keyFile = key;
-  };
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+      server = top.apiserverAddress;
+      certFile = cert;
+      keyFile = key;
+    };
 
   remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
 in
@@ -118,11 +119,6 @@ in
     cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
     cfsslCert = "${cfsslCertPathPrefix}.pem";
     cfsslKey = "${cfsslCertPathPrefix}-key.pem";
-
-    certmgrPaths = [
-      top.caFile
-      certmgrAPITokenPath
-    ];
   in
   {
@@ -172,40 +168,13 @@ in
         chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
       '')]);
 
-    systemd.targets.cfssl-online = {
-      wantedBy = [ "network-online.target" ];
-      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
-    };
-
-    systemd.services.cfssl-online = {
-      description = "Wait for ${remote} to be reachable.";
-      wantedBy = [ "cfssl-online.target" ];
-      before = [ "cfssl-online.target" ];
-      path = [ pkgs.curl ];
-      preStart = ''
-        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
-          echo curl ${remote}/api/v1/cfssl/info: exit status $?
-          sleep 2
-        done
-      '';
-      script = "echo Ok";
-      serviceConfig = {
-        TimeoutSec = "300";
-      };
-    };
-
     systemd.services.kube-certmgr-bootstrap = {
       description = "Kubernetes certmgr bootstrapper";
-      wantedBy = [ "cfssl-online.target" ];
-      after = [ "cfssl-online.target" ];
-      before = [ "certmgr.service" ];
-      path = with pkgs; [ curl cfssl ];
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
       script = concatStringsSep "\n" [''
         set -e
-        mkdir -p $(dirname ${certmgrAPITokenPath})
-        mkdir -p $(dirname ${top.caFile})
-
         # If there's a cfssl (cert issuer) running locally, then don't rely on user to
         # manually paste it in place. Just symlink.
         # otherwise, create the target file, ready for users to insert the token
@@ -217,18 +186,15 @@ in
         fi
       ''
       (optionalString (cfg.pkiTrustOnBootstrap) ''
-        if [ ! -s "${top.caFile}" ]; then
-          until test -s ${top.caFile}.json; do
-            sleep 2
-            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
-          done
-          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
-          rm ${top.caFile}.json
-f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then + ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \ + ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile} fi '') ]; serviceConfig = { - TimeoutSec = "500"; + RestartSec = "10s"; + Restart = "on-failure"; }; }; @@ -264,28 +230,35 @@ in mapAttrs mkSpec cfg.certs; }; - systemd.services.certmgr = { - wantedBy = [ "cfssl-online.target" ]; - after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ]; - preStart = '' - while ! test -s ${certmgrAPITokenPath} ; do - sleep 1 - echo Waiting for ${certmgrAPITokenPath} - done - ''; - unitConfig.ConditionPathExists = certmgrPaths; - }; + #TODO: Get rid of kube-addon-manager in the future for the following reasons + # - it is basically just a shell script wrapped around kubectl + # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount + # - it is designed to be used with k8s system components only + # - it would be better with a more Nix-oriented way of managing addons + systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{ + environment.KUBECONFIG = with cfg.certs.addonManager; + top.lib.mkKubeConfig "addon-manager" { + server = top.apiserverAddress; + certFile = cert; + keyFile = key; + }; + } - systemd.paths.certmgr = { - wantedBy = [ "certmgr.service" ]; - pathConfig = { - PathExists = certmgrPaths; - PathChanged = certmgrPaths; - }; - }; + (optionalAttrs (top.addonManager.bootstrapAddons != {}) { + serviceConfig.PermissionsStartOnly = true; + preStart = with pkgs; + let + files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v)) + top.addonManager.bootstrapAddons; + in + '' + export KUBECONFIG=${clusterAdminKubeconfig} + ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files} + ''; + })]); - environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null) - (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig); + environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig) + clusterAdminKubeconfig; environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [ (pkgs.writeScriptBin "nixos-kubernetes-node-join" '' @@ -311,22 +284,38 @@ in exit 1 fi - do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n) - echo $token > ${certmgrAPITokenPath} chmod 600 ${certmgrAPITokenPath} - if [ y = $do_restart ]; then - echo "Restarting certmgr..." >&1 - systemctl restart certmgr - fi + echo "Restarting certmgr..." >&1 + systemctl restart certmgr - echo "Node joined succesfully" >&1 + echo "Waiting for certs to appear..." >&1 + + ${optionalString top.kubelet.enable '' + while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done + echo "Restarting kubelet..." >&1 + systemctl restart kubelet + ''} + + ${optionalString top.proxy.enable '' + while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done + echo "Restarting kube-proxy..." >&1 + systemctl restart kube-proxy + ''} + + ${optionalString top.flannel.enable '' + while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done + echo "Restarting flannel..." >&1 + systemctl restart flannel + ''} + + echo "Node joined succesfully" '')]; # isolate etcd on loopback at the master node # easyCerts doesn't support multimaster clusters anyway atm. 
-    services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
+    services.etcd = with cfg.certs.etcd; {
       listenClientUrls = ["https://127.0.0.1:2379"];
       listenPeerUrls = ["https://127.0.0.1:2380"];
       advertiseClientUrls = ["https://etcd.local:2379"];
@@ -335,11 +324,19 @@ in
       certFile = mkDefault cert;
       keyFile = mkDefault key;
       trustedCaFile = mkDefault caCert;
-    });
+    };
 
     networking.extraHosts = mkIf (config.services.etcd.enable) ''
       127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
     '';
 
+    services.flannel = with cfg.certs.flannelClient; {
+      kubeconfig = top.lib.mkKubeConfig "flannel" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+      };
+    };
+
     services.kubernetes = {
 
       apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
@@ -359,13 +356,6 @@ in
         proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
         proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
       });
-      addonManager = mkIf top.addonManager.enable {
-        kubeconfig = with cfg.certs.addonManager; {
-          certFile = mkDefault cert;
-          keyFile = mkDefault key;
-        };
-        bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
-      };
       controllerManager = mkIf top.controllerManager.enable {
         serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
         rootCaFile = cfg.certs.controllerManagerClient.caCert;
@@ -374,12 +364,6 @@ in
           keyFile = mkDefault key;
         };
       };
-      flannel = mkIf top.flannel.enable {
-        kubeconfig = with cfg.certs.flannelClient; {
-          certFile = cert;
-          keyFile = key;
-        };
-      };
       scheduler = mkIf top.scheduler.enable {
         kubeconfig = with cfg.certs.schedulerClient; {
           certFile = mkDefault cert;
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
index 23f4d97b7030..bd4bf04ea833 100644
--- a/nixos/modules/services/cluster/kubernetes/proxy.nix
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -45,28 +45,12 @@ in
   };
 
   ###### implementation
-  config = let
-
-    proxyPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-proxy = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
       description = "Kubernetes Proxy Service";
-      wantedBy = [ "kube-node-online.target" ];
-      after = [ "kubelet-online.service" ];
-      before = [ "kube-node-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
-      path = with pkgs; [ iptables conntrack_tools kubectl ];
-      preStart = ''
-        until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
-          echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
-          sleep 2
-        done
-      '';
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack_tools ];
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-proxy \
@@ -75,7 +59,7 @@ in
             "--cluster-cidr=${top.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
           ${cfg.extraOpts}
         '';
@@ -83,15 +67,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = proxyPaths;
-    };
-
-    systemd.paths.kube-proxy = {
-      wantedBy = [ "kube-proxy.service" ];
-      pathConfig = {
-        PathExists = proxyPaths;
-        PathChanged = proxyPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
index a0e484542951..5f6113227d9d 100644
--- a/nixos/modules/services/cluster/kubernetes/scheduler.nix
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -56,35 +56,18 @@ in
   };
 
   ###### implementation
-  config = let
-
-    schedulerPaths = filter (a: a != null) [
-      cfg.kubeconfig.caFile
-      cfg.kubeconfig.certFile
-      cfg.kubeconfig.keyFile
-    ];
-
-  in mkIf cfg.enable {
-    systemd.services.kube-scheduler = rec {
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
       description = "Kubernetes Scheduler Service";
-      wantedBy = [ "kube-control-plane-online.target" ];
+      wantedBy = [ "kubernetes.target" ];
       after = [ "kube-apiserver.service" ];
-      before = [ "kube-control-plane-online.target" ];
-      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
-      path = [ pkgs.kubectl ];
-      preStart = ''
-        until kubectl auth can-i get /api -q 2>/dev/null; do
-          echo kubectl auth can-i get /api: exit status $?
-          sleep 2
-        done
-      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-scheduler \
           --address=${cfg.address} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${environment.KUBECONFIG} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
           --leader-elect=${boolToString cfg.leaderElect} \
           --port=${toString cfg.port} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
@@ -96,15 +79,6 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
-      unitConfig.ConditionPathExists = schedulerPaths;
-    };
-
-    systemd.paths.kube-scheduler = {
-      wantedBy = [ "kube-scheduler.service" ];
-      pathConfig = {
-        PathExists = schedulerPaths;
-        PathChanged = schedulerPaths;
-      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index 212023859f6d..ec1a75e74c41 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -30,10 +30,7 @@ let
             { config, pkgs, lib, nodes, ... }:
               mkMerge [
                 {
-                  boot = {
-                    postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
-                    kernel.sysctl = { "fs.inotify.max_user_instances" = 256; };
-                  };
+                  boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
                   virtualisation.memorySize = mkDefault 1536;
                   virtualisation.diskSize = mkDefault 4096;
                   networking = {
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index e7db0a58ab61..46bcb01a5265 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -77,7 +77,6 @@ let
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
-      $machine1->waitForUnit("kubernetes.target");
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
@@ -103,8 +102,6 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
-      $machine1->waitForUnit("kubernetes.target");
-      $machine2->waitForUnit("kubernetes.target");
 
       # prepare machines for test
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 967fe506004f..3ce7adcd0d71 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -94,8 +94,6 @@ let
   singlenode = base // {
     test = ''
-      $machine1->waitForUnit("kubernetes.target");
-
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
 
       $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
@@ -118,8 +116,6 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
-      $machine1->waitForUnit("kubernetes.target");
-      $machine2->waitForUnit("kubernetes.target");
 
       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");