// CreateTokenAuthFile generates a bootstrap token if needed and writes it to
// a static token file for the API server to consume via --token-auth-file.
func CreateTokenAuthFile(s *kubeadmapi.Secrets) error {
	tokenAuthFilePath := path.Join(kubeadmapi.GetEnvParams()["host_pki_path"], "tokens.csv")
	if err := generateTokenIfNeeded(s); err != nil {
		return fmt.Errorf("<master/tokens> failed to generate token(s) [%v]", err)
	}
	if err := os.MkdirAll(kubeadmapi.GetEnvParams()["host_pki_path"], 0700); err != nil {
		return fmt.Errorf("<master/tokens> failed to create directory %q [%v]", kubeadmapi.GetEnvParams()["host_pki_path"], err)
	}
	serialized := []byte(fmt.Sprintf("%s,kubeadm-node-csr,%s,system:kubelet-bootstrap\n", s.BearerToken, uuid.NewUUID()))
	// DumpReaderToFile creates a file with mode 0600.
	if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), tokenAuthFilePath); err != nil {
		return fmt.Errorf("<master/tokens> failed to save token auth file (%q) [%v]", tokenAuthFilePath, err)
	}
	return nil
}
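// For reference, tokens.csv above uses the static token file format the API
// server consumes via --token-auth-file: one token,user,uid,"group" record
// per line. A minimal sketch of matching a presented token against that file;
// tokenMatchesAuthFile is a hypothetical helper (assumes "io/ioutil" and
// "strings" are imported), not part of kubeadm:
func tokenMatchesAuthFile(token, authFilePath string) (bool, error) {
	data, err := ioutil.ReadFile(authFilePath)
	if err != nil {
		return false, err
	}
	for _, record := range strings.Split(string(data), "\n") {
		// A well-formed record has at least token, user and uid columns.
		fields := strings.Split(record, ",")
		if len(fields) >= 3 && fields[0] == token {
			return true, nil
		}
	}
	return false, nil
}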
// k8sVolume exposes the Kubernetes configuration directory on the host so
// control plane containers can read kubeconfig and PKI material from it.
func k8sVolume(s *kubeadmapi.MasterConfiguration) api.Volume {
	envParams := kubeadmapi.GetEnvParams()
	return api.Volume{
		Name: "pki",
		VolumeSource: api.VolumeSource{
			HostPath: &api.HostPathVolumeSource{Path: envParams["kubernetes_dir"]},
		},
	}
}
// etcdVolume exposes a path on the host in order to guarantee data survival during reboot.
func etcdVolume(s *kubeadmapi.MasterConfiguration) api.Volume {
	envParams := kubeadmapi.GetEnvParams()
	return api.Volume{
		Name: "etcd",
		VolumeSource: api.VolumeSource{
			HostPath: &api.HostPathVolumeSource{Path: envParams["host_etcd_path"]},
		},
	}
}
// getComponentBaseCommand returns the base invocation for a control plane
// component: either hyperkube with the component as a subcommand, or the
// standalone kube-<component> binary, plus the configured log level flag.
func getComponentBaseCommand(component string) (command []string) {
	envParams := kubeadmapi.GetEnvParams()
	if envParams["hyperkube_image"] != "" {
		command = []string{"/hyperkube", component}
	} else {
		command = []string{"kube-" + component}
	}
	command = append(command, envParams["component_loglevel"])
	return
}
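// Illustrative only (assumed values): with hyperkube_image unset and
// component_loglevel set to "--v=2", getComponentBaseCommand("scheduler")
// returns []string{"kube-scheduler", "--v=2"}; with a hyperkube image
// configured it returns []string{"/hyperkube", "scheduler", "--v=2"}.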
// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
// It first generates a self-signed CA certificate, a server certificate (signed by the CA) and a key for
// signing service account tokens. It returns the CA key and certificate, which is convenient for use with
// client config funcs.
func CreatePKIAssets(s *kubeadmapi.MasterConfiguration) (*rsa.PrivateKey, *x509.Certificate, error) {
	var (
		err      error
		altNames certutil.AltNames
	)

	for _, a := range s.API.AdvertiseAddresses {
		if ip := net.ParseIP(a); ip != nil {
			altNames.IPs = append(altNames.IPs, ip)
		} else {
			return nil, nil, fmt.Errorf("could not parse ip %q", a)
		}
	}
	altNames.DNSNames = append(altNames.DNSNames, s.API.ExternalDNSNames...)

	pkiPath := kubeadmapi.GetEnvParams()["host_pki_path"]

	caKey, caCert, err := newCertificateAuthority()
	if err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while creating CA keys and certificate [%v]", err)
	}
	if err := writeKeysAndCert(pkiPath, "ca", caKey, caCert); err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while saving CA keys and certificate [%v]", err)
	}
	fmt.Printf("<master/pki> generated Certificate Authority key and certificate:\n%s\n", certutil.FormatCert(caCert))
	pub, prv, cert := pathsKeysCerts(pkiPath, "ca")
	fmt.Printf("Public: %s\nPrivate: %s\nCert: %s\n", pub, prv, cert)

	apiKey, apiCert, err := newServerKeyAndCert(s, caCert, caKey, altNames)
	if err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while creating API server keys and certificate [%v]", err)
	}
	if err := writeKeysAndCert(pkiPath, "apiserver", apiKey, apiCert); err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while saving API server keys and certificate [%v]", err)
	}
	fmt.Printf("<master/pki> generated API Server key and certificate:\n%s\n", certutil.FormatCert(apiCert))
	pub, prv, cert = pathsKeysCerts(pkiPath, "apiserver")
	fmt.Printf("Public: %s\nPrivate: %s\nCert: %s\n", pub, prv, cert)

	saKey, err := newServiceAccountKey()
	if err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while creating service account signing keys [%v]", err)
	}
	if err := writeKeysAndCert(pkiPath, "sa", saKey, nil); err != nil {
		return nil, nil, fmt.Errorf("<master/pki> failure while saving service account signing keys [%v]", err)
	}
	fmt.Printf("<master/pki> generated Service Account Signing keys:\n")
	pub, prv, _ = pathsKeysCerts(pkiPath, "sa")
	fmt.Printf("Public: %s\nPrivate: %s\n", pub, prv)

	fmt.Printf("<master/pki> created keys and certificates in %q\n", pkiPath)
	return caKey, caCert, nil
}
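// pathsKeysCerts is called above but defined elsewhere in the package; a
// plausible sketch, assuming writeKeysAndCert lays files out as
// <name>-pub.pem / <name>-key.pem / <name>.pem (the naming is an assumption):
func pathsKeysCerts(pkiPath, name string) (string, string, string) {
	return path.Join(pkiPath, name+"-pub.pem"), // public key
		path.Join(pkiPath, name+"-key.pem"), // private key
		path.Join(pkiPath, name+".pem") // certificate
}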
// TODO(phase1+): kube-proxy should be a daemonset; three different daemonsets should not be here
func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration, architecture string) api.PodSpec {
	envParams := kubeadmapi.GetEnvParams()
	privilegedTrue := true
	return api.PodSpec{
		SecurityContext: &api.PodSecurityContext{HostNetwork: true},
		NodeSelector: map[string]string{
			"beta.kubernetes.io/arch": architecture,
		},
		Containers: []api.Container{{
			Name:            kubeProxy,
			Image:           images.GetCoreImage(images.KubeProxyImage, cfg, envParams["hyperkube_image"]),
			Command:         append(getComponentCommand("proxy", cfg), "--kubeconfig=/run/kubeconfig"),
			SecurityContext: &api.SecurityContext{Privileged: &privilegedTrue},
			VolumeMounts: []api.VolumeMount{
				{
					Name:      "dbus",
					MountPath: "/var/run/dbus",
					ReadOnly:  false,
				},
				{
					// TODO: there are a handful of clever options to get around this, but it's
					// easier to just mount the kubelet's kubeconfig here; we should probably
					// just make sure that proxy reads the token and CA cert from /run/secrets
					// and accepts `--master` at the same time
					//
					// clever options include:
					//  - do the CSR dance, create a kubeconfig and mount it as a secret
					//  - create a service account with a second secret encoding a kubeconfig
					//  - use an init container to convert known information to a kubeconfig
					//  - ...whatever
					Name:      "kubeconfig",
					MountPath: "/run/kubeconfig",
					ReadOnly:  false,
				},
			},
		}},
		Volumes: []api.Volume{
			{
				Name: "kubeconfig",
				VolumeSource: api.VolumeSource{
					HostPath: &api.HostPathVolumeSource{Path: path.Join(envParams["kubernetes_dir"], "kubelet.conf")},
				},
			},
			{
				Name: "dbus",
				VolumeSource: api.VolumeSource{
					HostPath: &api.HostPathVolumeSource{Path: "/var/run/dbus"},
				},
			},
		},
	}
}
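// Illustrative only: the architecture parameter feeds the nodeSelector above,
// so a caller would create one kube-proxy workload per supported CPU
// architecture (e.g. "amd64", "arm", "arm64" are plausible values), each
// scheduled only onto nodes of the matching architecture.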
// WriteKubeconfigIfNotExists writes the given kubeconfig to <kubernetes_dir>/<name>.conf,
// failing if a file with that name already exists.
func WriteKubeconfigIfNotExists(name string, kubeconfig *clientcmdapi.Config) error {
	envParams := kubeadmapi.GetEnvParams()
	if err := os.MkdirAll(envParams["kubernetes_dir"], 0700); err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to create directory %q [%v]", envParams["kubernetes_dir"], err)
	}

	filename := path.Join(envParams["kubernetes_dir"], fmt.Sprintf("%s.conf", name))
	// Create and open the file, only if it does not already exist.
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0600)
	if err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to create %q, it already exists [%v]", filename, err)
	}
	f.Close()

	if err := clientcmd.WriteToFile(*kubeconfig, filename); err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to write to %q [%v]", filename, err)
	}

	fmt.Printf("<util/kubeconfig> created %q\n", filename)
	return nil
}
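// A minimal usage sketch (hypothetical caller and values): persisting a client
// config as <kubernetes_dir>/admin.conf, failing if it already exists:
//
//	cfg := clientcmdapi.NewConfig()
//	// ...populate cfg.Clusters, cfg.AuthInfos and cfg.Contexts...
//	if err := WriteKubeconfigIfNotExists("admin", cfg); err != nil {
//		// an already-existing admin.conf also lands here, by design
//	}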
func newKubeDiscoveryPodSpec() api.PodSpec {
	envParams := kubeadmapi.GetEnvParams()

	return api.PodSpec{
		// We have to use the host network namespace, as `HostPort`/`HostIP` are Docker's
		// business and CNI support isn't quite there yet (except for kubenet)
		// (see https://github.com/kubernetes/kubernetes/issues/31307)
		// TODO update this when #31307 is resolved
		SecurityContext: &api.PodSecurityContext{HostNetwork: true},
		Containers: []api.Container{{
			Name:    kubeDiscoveryName,
			Image:   envParams["discovery_image"],
			Command: []string{"/usr/local/bin/kube-discovery"},
			VolumeMounts: []api.VolumeMount{{
				Name:      kubeDiscoverySecretName,
				MountPath: "/tmp/secret", // TODO use a shared constant
				ReadOnly:  true,
			}},
			Ports: []api.ContainerPort{
				// TODO when the CNI issue (#31307) is resolved, we should consider adding
				// `HostIP: s.API.AdvertiseAddresses[0]` if there is only one address
				{Name: "http", ContainerPort: 9898, HostPort: 9898},
			},
			SecurityContext: &api.SecurityContext{
				SELinuxOptions: &api.SELinuxOptions{
					// TODO: This implies our discovery container is not being restricted by
					// SELinux. This is not optimal and would be nice to adjust in future
					// so it can read /tmp/secret, but for now this avoids recommending
					// setenforce 0 system-wide.
					Type: "unconfined_t",
				},
			},
		}},
		Volumes: []api.Volume{{
			Name: kubeDiscoverySecretName,
			VolumeSource: api.VolumeSource{
				Secret: &api.SecretVolumeSource{SecretName: kubeDiscoverySecretName},
			},
		}},
	}
}
// WriteStaticPodManifests builds manifest objects based on user-provided configuration and then dumps them
// to disk in the manifests directory, where the kubelet picks them up and runs them as static pods.
func WriteStaticPodManifests(s *kubeadmapi.MasterConfiguration) error {
	envParams := kubeadmapi.GetEnvParams()

	// Prepare static pod specs
	staticPodSpecs := map[string]api.Pod{
		kubeAPIServer: componentPod(api.Container{
			Name:          kubeAPIServer,
			Image:         images.GetCoreImage(images.KubeAPIServerImage, s, envParams["hyperkube_image"]),
			Command:       getComponentCommand(apiServer, s),
			VolumeMounts:  []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
			LivenessProbe: componentProbe(8080, "/healthz"),
			Resources:     componentResources("250m"),
		}, certsVolume(s), k8sVolume(s)),
		kubeControllerManager: componentPod(api.Container{
			Name:          kubeControllerManager,
			Image:         images.GetCoreImage(images.KubeControllerManagerImage, s, envParams["hyperkube_image"]),
			Command:       getComponentCommand(controllerManager, s),
			VolumeMounts:  []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
			LivenessProbe: componentProbe(10252, "/healthz"),
			Resources:     componentResources("200m"),
		}, certsVolume(s), k8sVolume(s)),
		kubeScheduler: componentPod(api.Container{
			Name:          kubeScheduler,
			Image:         images.GetCoreImage(images.KubeSchedulerImage, s, envParams["hyperkube_image"]),
			Command:       getComponentCommand(scheduler, s),
			LivenessProbe: componentProbe(10251, "/healthz"),
			Resources:     componentResources("100m"),
		}),
	}

	// Add the etcd static pod spec only if external etcd is not configured
	if len(s.Etcd.Endpoints) == 0 {
		staticPodSpecs[etcd] = componentPod(api.Container{
			Name: etcd,
			Command: []string{
				"etcd",
				"--listen-client-urls=http://127.0.0.1:2379",
				"--advertise-client-urls=http://127.0.0.1:2379",
				"--data-dir=/var/etcd/data",
			},
			VolumeMounts:  []api.VolumeMount{certsVolumeMount(), etcdVolumeMount(), k8sVolumeMount()},
			Image:         images.GetCoreImage(images.KubeEtcdImage, s, envParams["etcd_image"]),
			LivenessProbe: componentProbe(2379, "/health"),
			Resources:     componentResources("200m"),
			SecurityContext: &api.SecurityContext{
				SELinuxOptions: &api.SELinuxOptions{
					// TODO: This implies our etcd container is not being restricted by
					// SELinux. This is not optimal and would be nice to adjust in future
					// so it can create and write /var/lib/etcd, but for now this avoids
					// recommending setenforce 0 system-wide.
					Type: "unconfined_t",
				},
			},
		}, certsVolume(s), etcdVolume(s), k8sVolume(s))
	}

	manifestsPath := path.Join(envParams["kubernetes_dir"], "manifests")
	if err := os.MkdirAll(manifestsPath, 0700); err != nil {
		return fmt.Errorf("<master/manifests> failed to create directory %q [%v]", manifestsPath, err)
	}
	for name, spec := range staticPodSpecs {
		filename := path.Join(manifestsPath, name+".json")
		serialized, err := json.MarshalIndent(spec, "", " ")
		if err != nil {
			return fmt.Errorf("<master/manifests> failed to marshal manifest for %q to JSON [%v]", name, err)
		}
		if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), filename); err != nil {
			return fmt.Errorf("<master/manifests> failed to create static pod manifest file for %q (%q) [%v]", name, filename, err)
		}
	}
	return nil
}
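// componentPod, componentProbe and componentResources are used above but
// defined elsewhere in the package; a plausible sketch of componentProbe,
// assuming a plain HTTP liveness check against localhost (the delay/timeout
// values are assumptions):
func componentProbe(port int, path string) *api.Probe {
	return &api.Probe{
		Handler: api.Handler{
			HTTPGet: &api.HTTPGetAction{
				Host: "127.0.0.1",
				Path: path,
				Port: intstr.FromInt(port), // assumes k8s.io/kubernetes/pkg/util/intstr is imported
			},
		},
		InitialDelaySeconds: 15,
		TimeoutSeconds:      15,
	}
}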
func getComponentCommand(component string, s *kubeadmapi.MasterConfiguration) (command []string) {
	baseFlags := map[string][]string{
		apiServer: {
			"--insecure-bind-address=127.0.0.1",
			// --etcd-servers is appended below, depending on whether an
			// external etcd cluster is configured
			"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
			"--service-cluster-ip-range=" + s.Networking.ServiceSubnet,
			"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
			"--client-ca-file=" + pkiDir + "/ca.pem",
			"--tls-cert-file=" + pkiDir + "/apiserver.pem",
			"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
			"--token-auth-file=" + pkiDir + "/tokens.csv",
			"--secure-port=443",
			"--allow-privileged",
		},
		controllerManager: {
			"--address=127.0.0.1",
			"--leader-elect",
			"--master=127.0.0.1:8080",
			"--cluster-name=" + DefaultClusterName,
			"--root-ca-file=" + pkiDir + "/ca.pem",
			"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
			"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
			"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
			"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
		},
		scheduler: {
			"--address=127.0.0.1",
			"--leader-elect",
			"--master=127.0.0.1:8080",
		},
		proxy: {},
	}

	envParams := kubeadmapi.GetEnvParams()

	if envParams["hyperkube_image"] != "" {
		command = []string{"/hyperkube", component}
	} else {
		command = []string{"/usr/local/bin/kube-" + component}
	}
	command = append(command, envParams["component_loglevel"])
	command = append(command, baseFlags[component]...)

	if component == apiServer {
		// Use the first address we are given
		if len(s.API.AdvertiseAddresses) > 0 {
			command = append(command, fmt.Sprintf("--advertise-address=%s", s.API.AdvertiseAddresses[0]))
		}

		// Check if the user decided to use an external etcd cluster;
		// otherwise fall back to the local static etcd pod
		if len(s.Etcd.Endpoints) > 0 {
			command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(s.Etcd.Endpoints, ",")))
		} else {
			command = append(command, "--etcd-servers=http://127.0.0.1:2379")
		}

		// Is etcd secured?
		if s.Etcd.CAFile != "" {
			command = append(command, fmt.Sprintf("--etcd-cafile=%s", s.Etcd.CAFile))
		}
		if s.Etcd.CertFile != "" && s.Etcd.KeyFile != "" {
			etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", s.Etcd.CertFile)
			etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", s.Etcd.KeyFile)
			command = append(command, etcdClientFileArg, etcdKeyFileArg)
		}
	}

	if component == controllerManager {
		if s.CloudProvider != "" {
			command = append(command, "--cloud-provider="+s.CloudProvider)

			// Only append the --cloud-config option if such a file exists
			// TODO(phase1+) this won't work unless it's in one of the few directories we bind-mount
			if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
				command = append(command, "--cloud-config="+DefaultCloudConfigPath)
			}
		}

		// Let the controller-manager allocate Node CIDRs for the Pod network.
		// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
		if s.Networking.PodSubnet != "" {
			command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+s.Networking.PodSubnet)
		}
	}

	return
}
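// Illustrative only (assumed values): with no hyperkube image, a loglevel of
// "--v=2", ServiceSubnet "10.96.0.0/12", a single advertise address
// "192.168.1.10" and no external etcd, getComponentCommand(apiServer, s)
// yields roughly:
//
//	/usr/local/bin/kube-apiserver --v=2 --insecure-bind-address=127.0.0.1
//	    --admission-control=... --service-cluster-ip-range=10.96.0.0/12
//	    ... --secure-port=443 --allow-privileged
//	    --advertise-address=192.168.1.10 --etcd-servers=http://127.0.0.1:2379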