func createDummyDeployment(client *clientset.Clientset) { fmt.Println("[apiclient] Creating a test deployment") dummyDeployment := NewDeployment("dummy", 1, v1.PodSpec{ HostNetwork: true, SecurityContext: &v1.PodSecurityContext{}, Containers: []v1.Container{{ Name: "dummy", Image: images.GetAddonImage("pause"), }}, }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: we should check the error, as some cases may be fatal if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil { fmt.Printf("[apiclient] Failed to create test deployment [%v] (will retry)\n", err) return false, nil } return true, nil }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{}) if err != nil { fmt.Printf("[apiclient] Failed to get test deployment [%v] (will retry)\n", err) return false, nil } if d.Status.AvailableReplicas < 1 { return false, nil } return true, nil }) fmt.Println("[apiclient] Test deployment succeeded") // TODO: In the future, make sure the ReplicaSet and Pod are garbage collected if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil { fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err) } }
func createDummyDeployment(client *clientset.Clientset) { fmt.Println("<master/apiclient> attempting a test deployment") dummyDeployment := NewDeployment("dummy", 1, api.PodSpec{ SecurityContext: &api.PodSecurityContext{HostNetwork: true}, Containers: []api.Container{{ Name: "dummy", Image: images.GetAddonImage("pause"), }}, }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: we should check the error, as some cases may be fatal if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil { fmt.Printf("<master/apiclient> failed to create test deployment [%v] (will retry)", err) return false, nil } return true, nil }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy") if err != nil { fmt.Printf("<master/apiclient> failed to get test deployment [%v] (will retry)", err) return false, nil } if d.Status.AvailableReplicas < 1 { return false, nil } return true, nil }) fmt.Println("<master/apiclient> test deployment succeeded") if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &api.DeleteOptions{}); err != nil { fmt.Printf("<master/apiclient> failed to delete test deployment [%v] (will ignore)", err) } }
// createKubeDNSPodSpec builds the pod spec for the kube-dns addon: the
// kube-dns server itself, a dnsmasq cache fronting it on port 53, and an
// exechealthz sidecar that health-checks both via nslookup.
func createKubeDNSPodSpec(s *kubeadmapi.MasterConfiguration) api.PodSpec {
	// Requests equal limits, giving the DNS containers Guaranteed QoS.
	dnsPodResources := api.ResourceList{
		api.ResourceName(api.ResourceCPU):    resource.MustParse("100m"),
		api.ResourceName(api.ResourceMemory): resource.MustParse("170Mi"),
	}
	healthzPodResources := api.ResourceList{
		api.ResourceName(api.ResourceCPU):    resource.MustParse("10m"),
		api.ResourceName(api.ResourceMemory): resource.MustParse("50Mi"),
	}

	// kube-dns serves on 10053; dnsmasq fronts it on the standard DNS port 53.
	kubeDNSPort := int32(10053)
	dnsmasqPort := int32(53)

	// Build the exechealthz -cmd argument: resolve the kubernetes service
	// through dnsmasq (port 53) AND through kube-dns directly (port 10053).
	nslookup := fmt.Sprintf("nslookup kubernetes.default.svc.%s 127.0.0.1", s.Networking.DNSDomain)
	nslookup = fmt.Sprintf("-cmd=%s:%d >/dev/null && %s:%d >/dev/null",
		nslookup, dnsmasqPort,
		nslookup, kubeDNSPort,
	)

	return api.PodSpec{
		// Addon images are architecture-specific, so pin the pod to nodes of
		// the arch this binary was built for.
		NodeSelector: map[string]string{
			"beta.kubernetes.io/arch": runtime.GOARCH,
		},
		Containers: []api.Container{
			// DNS server
			{
				Name:  "kube-dns",
				Image: images.GetAddonImage(images.KubeDNSImage),
				Resources: api.ResourceRequirements{
					Limits:   dnsPodResources,
					Requests: dnsPodResources,
				},
				Args: []string{
					fmt.Sprintf("--domain=%s", s.Networking.DNSDomain),
					fmt.Sprintf("--dns-port=%d", kubeDNSPort),
					// TODO __PILLAR__FEDERATIONS__DOMAIN__MAP__
				},
				LivenessProbe: &api.Probe{
					Handler: api.Handler{
						HTTPGet: &api.HTTPGetAction{
							Path:   "/healthz",
							Port:   intstr.FromInt(8080),
							Scheme: api.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    1,
				},
				// # we poll on pod startup for the Kubernetes master service and
				// # only setup the /readiness HTTP server once that's available.
				ReadinessProbe: &api.Probe{
					Handler: api.Handler{
						HTTPGet: &api.HTTPGetAction{
							Path:   "/readiness",
							Port:   intstr.FromInt(8081),
							Scheme: api.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 30,
					TimeoutSeconds:      5,
				},
				Ports: []api.ContainerPort{
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-local",
						Protocol:      api.ProtocolUDP,
					},
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-tcp-local",
						Protocol:      api.ProtocolTCP,
					},
				},
			},
			// dnsmasq: caching layer in front of kube-dns on the standard port.
			{
				Name:  "dnsmasq",
				Image: images.GetAddonImage(images.KubeDNSmasqImage),
				Resources: api.ResourceRequirements{
					Limits:   dnsPodResources,
					Requests: dnsPodResources,
				},
				Args: []string{
					"--cache-size=1000",
					"--no-resolv",
					// Forward everything to kube-dns on 127.0.0.1:10053.
					fmt.Sprintf("--server=127.0.0.1#%d", kubeDNSPort),
				},
				Ports: []api.ContainerPort{
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns",
						Protocol:      api.ProtocolUDP,
					},
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns-tcp",
						Protocol:      api.ProtocolTCP,
					},
				},
			},
			// healthz: exechealthz sidecar that exposes the nslookup check
			// built above over HTTP on port 8080.
			{
				Name:  "healthz",
				Image: images.GetAddonImage(images.KubeExechealthzImage),
				Resources: api.ResourceRequirements{
					Limits:   healthzPodResources,
					Requests: healthzPodResources,
				},
				Args: []string{
					nslookup,
					"-port=8080",
					"-quiet",
				},
				Ports: []api.ContainerPort{{
					ContainerPort: 8080,
					Protocol:      api.ProtocolTCP,
				}},
			},
		},
		// The DNS pod must resolve through the node's resolver, not through
		// cluster DNS (which would be itself).
		DNSPolicy: api.DNSDefault,
	}
}
// createKubeDNSPodSpec builds the pod spec for the kube-dns addon: the
// kube-dns server, a dnsmasq cache fronting it on port 53, and a "sidecar"
// container that probes both and exports metrics.
func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
	// kube-dns serves on 10053; dnsmasq fronts it on the standard DNS port 53.
	kubeDNSPort := int32(10053)
	dnsmasqPort := int32(53)

	return v1.PodSpec{
		ServiceAccountName: KubeDNS,
		Containers: []v1.Container{
			// DNS server
			{
				Name:  "kubedns",
				Image: images.GetAddonImage(images.KubeDNSImage),
				Resources: v1.ResourceRequirements{
					// Memory-limited only; CPU is requested but not capped.
					Limits: v1.ResourceList{
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("170Mi"),
					},
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("100m"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("70Mi"),
					},
				},
				// Liveness is checked through the sidecar's health endpoint
				// on port 10054.
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/healthcheck/kubedns",
							Port:   intstr.FromInt(10054),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				// # we poll on pod startup for the Kubernetes master service and
				// # only setup the /readiness HTTP server once that's available.
				ReadinessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/readiness",
							Port:   intstr.FromInt(8081),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 3,
					TimeoutSeconds:      5,
				},
				Args: []string{
					fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
					fmt.Sprintf("--dns-port=%d", kubeDNSPort),
					"--config-map=kube-dns",
					"--v=2",
				},
				Env: []v1.EnvVar{
					{
						Name:  "PROMETHEUS_PORT",
						Value: "10055",
					},
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-local",
						Protocol:      v1.ProtocolUDP,
					},
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-tcp-local",
						Protocol:      v1.ProtocolTCP,
					},
					{
						ContainerPort: 10055,
						Name:          "metrics",
						Protocol:      v1.ProtocolTCP,
					},
				},
			},
			// dnsmasq: caching layer in front of kube-dns on the standard port.
			{
				Name:  "dnsmasq",
				Image: images.GetAddonImage(images.KubeDNSmasqImage),
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/healthcheck/dnsmasq",
							Port:   intstr.FromInt(10054),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				Args: []string{
					"--cache-size=1000",
					"--no-resolv",
					// Forward everything to kube-dns on 127.0.0.1:10053.
					fmt.Sprintf("--server=127.0.0.1#%d", kubeDNSPort),
					// Log to stderr.
					"--log-facility=-",
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns",
						Protocol:      v1.ProtocolUDP,
					},
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns-tcp",
						Protocol:      v1.ProtocolTCP,
					},
				},
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("150m"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("10Mi"),
					},
				},
			},
			// sidecar: probes kubedns and dnsmasq periodically (A-record
			// lookups of the kubernetes service) and serves the /healthcheck
			// endpoints the liveness probes above hit, plus /metrics.
			{
				Name:  "sidecar",
				Image: images.GetAddonImage(images.KubeDNSSidecarImage),
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/metrics",
							Port:   intstr.FromInt(10054),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				Args: []string{
					"--v=2",
					"--logtostderr",
					fmt.Sprintf("--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.%s,5,A", cfg.Networking.DNSDomain),
					fmt.Sprintf("--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.%s,5,A", cfg.Networking.DNSDomain),
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: 10054,
						Name:          "metrics",
						Protocol:      v1.ProtocolTCP,
					},
				},
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("20Mi"),
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10m"),
					},
				},
			},
		},
		// The DNS pod must resolve through the node's resolver, not through
		// cluster DNS (which would be itself).
		DNSPolicy: v1.DNSDefault,
	}
}
// createKubeDNSPodSpec builds the pod spec for the kube-dns addon: the
// kube-dns server, a dnsmasq cache fronting it on port 53, a dnsmasq-metrics
// exporter, and an exechealthz container that serves the liveness endpoints.
func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
	// kube-dns serves on 10053; dnsmasq fronts it on the standard DNS port 53.
	kubeDNSPort := int32(10053)
	dnsmasqPort := int32(53)
	// UID 0 (root) for the dnsmasq-metrics container; see the workaround
	// comment on its SecurityContext below.
	dnsMasqMetricsUser := int64(0)

	return v1.PodSpec{
		Containers: []v1.Container{
			// DNS server
			{
				Name:  "kube-dns",
				Image: images.GetAddonImage(images.KubeDNSImage),
				Resources: v1.ResourceRequirements{
					// Memory-limited only; CPU is requested but not capped.
					Limits: v1.ResourceList{
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("170Mi"),
					},
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("100m"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("70Mi"),
					},
				},
				// Liveness goes through the healthz container's endpoint on 8080.
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/healthz-kubedns",
							Port:   intstr.FromInt(8080),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				// # we poll on pod startup for the Kubernetes master service and
				// # only setup the /readiness HTTP server once that's available.
				ReadinessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/readiness",
							Port:   intstr.FromInt(8081),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 3,
					TimeoutSeconds:      5,
				},
				Args: []string{
					fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
					fmt.Sprintf("--dns-port=%d", kubeDNSPort),
					"--config-map=kube-dns",
					"--v=2",
				},
				Env: []v1.EnvVar{
					{
						Name:  "PROMETHEUS_PORT",
						Value: "10055",
					},
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-local",
						Protocol:      v1.ProtocolUDP,
					},
					{
						ContainerPort: kubeDNSPort,
						Name:          "dns-tcp-local",
						Protocol:      v1.ProtocolTCP,
					},
					{
						ContainerPort: 10055,
						Name:          "metrics",
						Protocol:      v1.ProtocolTCP,
					},
				},
			},
			// dnsmasq: caching layer in front of kube-dns on the standard port.
			{
				Name:  "dnsmasq",
				Image: images.GetAddonImage(images.KubeDNSmasqImage),
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/healthz-dnsmasq",
							Port:   intstr.FromInt(8080),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				Args: []string{
					"--cache-size=1000",
					"--no-resolv",
					// Forward everything to kube-dns on 127.0.0.1:10053.
					fmt.Sprintf("--server=127.0.0.1#%d", kubeDNSPort),
					// Log to stderr.
					"--log-facility=-",
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns",
						Protocol:      v1.ProtocolUDP,
					},
					{
						ContainerPort: dnsmasqPort,
						Name:          "dns-tcp",
						Protocol:      v1.ProtocolTCP,
					},
				},
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("150m"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("10Mi"),
					},
				},
			},
			// dnsmasq-metrics: exports dnsmasq stats on /metrics (port 10054).
			{
				Name:  "dnsmasq-metrics",
				Image: images.GetAddonImage(images.KubeDNSmasqMetricsImage),
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						HTTPGet: &v1.HTTPGetAction{
							Path:   "/metrics",
							Port:   intstr.FromInt(10054),
							Scheme: v1.URISchemeHTTP,
						},
					},
					InitialDelaySeconds: 60,
					TimeoutSeconds:      5,
					SuccessThreshold:    1,
					FailureThreshold:    5,
				},
				// The code below is a workaround for https://github.com/kubernetes/contrib/blob/master/dnsmasq-metrics/Dockerfile.in#L21
				// This is just the normal mode (to run with user 0), all other containers do it except for this one, which may lead to
				// that the DNS pod fails if the "nobody" _group_ doesn't exist. I think it's a typo in the Dockerfile manifest and
				// that it should be "USER nobody:nogroup" instead of "USER nobody:nobody". However, this fixes the problem.
				SecurityContext: &v1.SecurityContext{
					RunAsUser: &dnsMasqMetricsUser,
				},
				Args: []string{
					"--v=2",
					"--logtostderr",
				},
				Ports: []v1.ContainerPort{
					{
						ContainerPort: 10054,
						Name:          "metrics",
						Protocol:      v1.ProtocolTCP,
					},
				},
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("10Mi"),
					},
				},
			},
			// healthz: exechealthz container that runs nslookup against both
			// dnsmasq (port 53) and kube-dns (port 10053) and serves the
			// results on /healthz-dnsmasq and /healthz-kubedns (port 8080),
			// which the liveness probes above consume.
			{
				Name:  "healthz",
				Image: images.GetAddonImage(images.KubeExechealthzImage),
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("50Mi"),
					},
					Requests: v1.ResourceList{
						v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10m"),
						v1.ResourceName(v1.ResourceMemory): resource.MustParse("50Mi"),
					},
				},
				Args: []string{
					fmt.Sprintf("--cmd=nslookup kubernetes.default.svc.%s 127.0.0.1 >/dev/null", cfg.Networking.DNSDomain),
					"--url=/healthz-dnsmasq",
					fmt.Sprintf("--cmd=nslookup kubernetes.default.svc.%s 127.0.0.1:%d >/dev/null", cfg.Networking.DNSDomain, kubeDNSPort),
					"--url=/healthz-kubedns",
					"--port=8080",
					"--quiet",
				},
				Ports: []v1.ContainerPort{{
					ContainerPort: 8080,
					Protocol:      v1.ProtocolTCP,
				}},
			},
		},
		// The DNS pod must resolve through the node's resolver, not through
		// cluster DNS (which would be itself).
		DNSPolicy: v1.DNSDefault,
	}
}