func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Garbage Collector: Initializing") for _, monitor := range gc.monitors { go monitor.controller.Run(stopCh) } wait.PollInfinite(10*time.Second, func() (bool, error) { for _, monitor := range gc.monitors { if !monitor.controller.HasSynced() { glog.Infof("Garbage Collector: Waiting for resource monitors to be synced...") return false, nil } } return true, nil }) glog.Infof("Garbage Collector: All monitored resources synced. Proceeding to collect garbage") // worker go wait.Until(gc.propagator.processEvent, 0, stopCh) for i := 0; i < workers; i++ { go wait.Until(gc.worker, 0, stopCh) go wait.Until(gc.orphanFinalizer, 0, stopCh) } Register() <-stopCh glog.Infof("Garbage Collector: Shutting down") gc.dirtyQueue.ShutDown() gc.orphanQueue.ShutDown() gc.propagator.eventQueue.ShutDown() }
func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, caCert *x509.Certificate) error { kd := newKubeDiscovery(cfg, caCert) if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kd.Deployment); err != nil { return fmt.Errorf("failed to create %q deployment [%v]", kubeDiscoveryName, err) } if _, err := client.Secrets(api.NamespaceSystem).Create(kd.Secret); err != nil { return fmt.Errorf("failed to create %q secret [%v]", kubeDiscoverySecretName, err) } fmt.Println("[token-discovery] Created the kube-discovery deployment, waiting for it to become ready") start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { d, err := client.Extensions().Deployments(api.NamespaceSystem).Get(kubeDiscoveryName, metav1.GetOptions{}) if err != nil { return false, nil } if d.Status.AvailableReplicas < 1 { return false, nil } return true, nil }) fmt.Printf("[token-discovery] kube-discovery is ready after %f seconds\n", time.Since(start).Seconds()) return nil }
func createDummyDeployment(client *clientset.Clientset) { fmt.Println("[apiclient] Creating a test deployment") dummyDeployment := NewDeployment("dummy", 1, v1.PodSpec{ HostNetwork: true, SecurityContext: &v1.PodSecurityContext{}, Containers: []v1.Container{{ Name: "dummy", Image: images.GetAddonImage("pause"), }}, }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: we should check the error, as some cases may be fatal if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil { fmt.Printf("[apiclient] Failed to create test deployment [%v] (will retry)\n", err) return false, nil } return true, nil }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{}) if err != nil { fmt.Printf("[apiclient] Failed to get test deployment [%v] (will retry)\n", err) return false, nil } if d.Status.AvailableReplicas < 1 { return false, nil } return true, nil }) fmt.Println("[apiclient] Test deployment succeeded") // TODO: In the future, make sure the ReplicaSet and Pod are garbage collected if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil { fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err) } }
func (s *statusSync) run() { err := wait.PollInfinite(updateInterval, func() (bool, error) { if s.syncQueue.IsShuttingDown() { return true, nil } // send a dummy object to the queue to force a sync s.syncQueue.Enqueue("dummy") return false, nil }) if err != nil { glog.Errorf("error waiting shutdown: %v", err) } }
func RetrieveTrustedClusterInfo(s *kubeadmapi.NodeConfiguration) (*kubeadmapi.ClusterInfo, error) { host, port := s.MasterAddresses[0], 9898 requestURL := fmt.Sprintf("http://%s:%d/cluster-info/v1/?token-id=%s", host, port, s.Secrets.TokenID) req, err := http.NewRequest("GET", requestURL, nil) if err != nil { return nil, fmt.Errorf("<node/discovery> failed to consturct an HTTP request [%v]", err) } fmt.Printf("<node/discovery> created cluster info discovery client, requesting info from %q\n", requestURL) var res *http.Response wait.PollInfinite(discoveryRetryTimeout, func() (bool, error) { res, err = http.DefaultClient.Do(req) if err != nil { fmt.Printf("<node/discovery> failed to request cluster info, will try again: [%s]\n", err) return false, nil } return true, nil }) buf := new(bytes.Buffer) io.Copy(buf, res.Body) res.Body.Close() object, err := jose.ParseSigned(buf.String()) if err != nil { return nil, fmt.Errorf("<node/discovery> failed to parse response as JWS object [%v]", err) } fmt.Println("<node/discovery> cluster info object received, verifying signature using given token") output, err := object.Verify(s.Secrets.Token) if err != nil { return nil, fmt.Errorf("<node/discovery> failed to verify JWS signature of received cluster info object [%v]", err) } clusterInfo := kubeadmapi.ClusterInfo{} if err := json.Unmarshal(output, &clusterInfo); err != nil { return nil, fmt.Errorf("<node/discovery> failed to decode received cluster info object [%v]", err) } if len(clusterInfo.CertificateAuthorities) == 0 || len(clusterInfo.Endpoints) == 0 { return nil, fmt.Errorf("<node/discovery> cluster info object is invalid - no endpoint(s) and/or root CA certificate(s) found") } // TODO(phase1+) print summary info about the CA certificate, along with the the checksum signature // we also need an ability for the user to configure the client to validate received CA cert against a checksum fmt.Printf("<node/discovery> cluster info signature and contents are valid, will use API 
endpoints %v\n", clusterInfo.Endpoints) return &clusterInfo, nil }
func createDummyDeployment(client *clientset.Clientset) { fmt.Println("<master/apiclient> attempting a test deployment") dummyDeployment := NewDeployment("dummy", 1, api.PodSpec{ SecurityContext: &api.PodSecurityContext{HostNetwork: true}, Containers: []api.Container{{ Name: "dummy", Image: images.GetAddonImage("pause"), }}, }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { // TODO: we should check the error, as some cases may be fatal if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil { fmt.Printf("<master/apiclient> failed to create test deployment [%v] (will retry)", err) return false, nil } return true, nil }) wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy") if err != nil { fmt.Printf("<master/apiclient> failed to get test deployment [%v] (will retry)", err) return false, nil } if d.Status.AvailableReplicas < 1 { return false, nil } return true, nil }) fmt.Println("<master/apiclient> test deployment succeeded") if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &api.DeleteOptions{}); err != nil { fmt.Printf("<master/apiclient> failed to delete test deployment [%v] (will ignore)", err) } }
func startKubeletConfigSyncLoop(s *options.KubeletServer, currentKC string) { glog.Infof("Starting Kubelet configuration sync loop") go func() { wait.PollInfinite(30*time.Second, func() (bool, error) { glog.Infof("Checking API server for new Kubelet configuration.") remoteKC, err := getRemoteKubeletConfig(s, nil) if err == nil { // Detect new config by comparing with the last JSON string we extracted. if remoteKC != currentKC { glog.Info("Found new Kubelet configuration via API server, restarting!") os.Exit(0) } } else { glog.Infof("Did not find a configuration for this Kubelet via API server: %v", err) } return false, nil // Always return (false, nil) so we poll forever. }) }() }
// Start brings up the openshift-sdn node plugin. The sequence is strictly
// ordered: fetch network info, install iptables rules, determine the local
// subnet, set up the SDN, start the subnet/policy machinery, wait for the
// kubelet, start the pod manager, and finally (if the network configuration
// changed) re-plumb every existing local pod. Returns a non-nil error on the
// first failed step; pod-update failures during re-plumbing are only logged.
func (node *OsdnNode) Start() error {
	var err error
	node.networkInfo, err = getNetworkInfo(node.osClient)
	if err != nil {
		return fmt.Errorf("Failed to get network information: %v", err)
	}

	// Install base iptables rules for the cluster network before any pods run.
	nodeIPTables := newNodeIPTables(node.networkInfo.ClusterNetwork.String(), node.iptablesSyncPeriod)
	if err = nodeIPTables.Setup(); err != nil {
		return fmt.Errorf("Failed to set up iptables: %v", err)
	}

	node.localSubnetCIDR, err = node.getLocalSubnet()
	if err != nil {
		return err
	}

	// networkChanged reports whether the SDN configuration differs from the
	// previous run; if so, existing local pods are re-plumbed below.
	networkChanged, err := node.SetupSDN()
	if err != nil {
		return err
	}

	err = node.SubnetStartNode()
	if err != nil {
		return err
	}

	if err = node.policy.Start(node); err != nil {
		return err
	}

	// Service watching runs for the lifetime of the process.
	go kwait.Forever(node.watchServices, 0)

	// Wait for kubelet to init the plugin so we get a knetwork.Host
	log.V(5).Infof("Waiting for kubelet network plugin initialization")
	<-node.kubeletInitReady
	// Wait for kubelet itself to finish initializing: poll every 100ms until
	// the host reports a non-nil runtime.
	kwait.PollInfinite(100*time.Millisecond, func() (bool, error) {
		if node.host.GetRuntime() == nil {
			return false, nil
		}
		return true, nil
	})

	log.V(5).Infof("Creating and initializing openshift-sdn pod manager")
	node.podManager, err = newPodManager(node.host, node.localSubnetCIDR, node.networkInfo, node.kClient, node.policy, node.mtu)
	if err != nil {
		return err
	}
	if err := node.podManager.Start(cniserver.CNIServerSocketPath); err != nil {
		return err
	}

	if networkChanged {
		var pods []kapi.Pod
		pods, err = node.GetLocalPods(kapi.NamespaceAll)
		if err != nil {
			return err
		}
		for _, p := range pods {
			// Re-plumb each pod; a failure is logged and skipped so one bad pod
			// does not abort node startup.
			err = node.UpdatePod(p)
			if err != nil {
				log.Warningf("Could not update pod %q: %s", p.Name, err)
				continue
			}
			// Restore the VNID reference count for the pod's namespace, if known.
			if vnid, err := node.policy.GetVNID(p.Namespace); err == nil {
				node.policy.RefVNID(vnid)
			}
		}
	}

	log.V(5).Infof("openshift-sdn network plugin ready")
	node.markPodNetworkReady()
	return nil
}
func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) { adminClientConfig, err := clientcmd.NewDefaultClientConfig( *adminConfig, &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err) } fmt.Println("<master/apiclient> created API client configuration") client, err := clientset.NewForConfig(adminClientConfig) if err != nil { return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err) } fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready") start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { cs, err := client.ComponentStatuses().List(api.ListOptions{}) if err != nil { return false, nil } // TODO(phase2) must revisit this when we implement HA if len(cs.Items) < 3 { fmt.Println("<master/apiclient> not all control plane components are ready yet") return false, nil } for _, item := range cs.Items { for _, condition := range item.Conditions { if condition.Type != api.ComponentHealthy { fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions) return false, nil } } } fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds()) return true, nil }) fmt.Println("<master/apiclient> waiting for at least one node to register and become ready") start = time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { nodeList, err := client.Nodes().List(api.ListOptions{}) if err != nil { fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)") return false, nil } if len(nodeList.Items) < 1 { return false, nil } n := &nodeList.Items[0] if !api.IsNodeReady(n) { fmt.Println("<master/apiclient> first node has registered, but is not ready yet") return false, nil } 
fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds()) return true, nil }) return client, nil }