// Delete a Replication Controller and all pods it spawned
func DeleteRC(c *client.Client, ns, name string) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
	}
	rc.Spec.Replicas = 0
	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
	}
	// Wait up to 20 minutes until all replicas are killed.
	endTime := time.Now().Add(time.Minute * 20)
	for {
		if time.Now().After(endTime) {
			return fmt.Errorf("Timeout while waiting for replication controller %s replicas to reach 0", name)
		}
		remainingTime := endTime.Sub(time.Now())
		err := wait.Poll(time.Second, remainingTime, client.ControllerHasDesiredReplicas(c, rc))
		if err != nil {
			Logf("Error while waiting for replication controller %s replicas to reach 0: %v", name, err)
		} else {
			break
		}
	}
	// Delete the replication controller.
	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
	}
	return nil
}
func runReplicationControllerTest(c *client.Client) { data, err := ioutil.ReadFile("api/examples/controller.json") if err != nil { glog.Fatalf("Unexpected error: %v", err) } var controller api.ReplicationController if err := api.Scheme.DecodeInto(data, &controller); err != nil { glog.Fatalf("Unexpected error: %v", err) } glog.Infof("Creating replication controllers") if _, err := c.ReplicationControllers(api.NamespaceDefault).Create(&controller); err != nil { glog.Fatalf("Unexpected error: %v", err) } glog.Infof("Done creating replication controllers") // Give the controllers some time to actually create the pods if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, &controller)); err != nil { glog.Fatalf("FAILED: pods never created %v", err) } // wait for minions to indicate they have info about the desired pods pods, err := c.Pods(api.NamespaceDefault).List(labels.Set(controller.Spec.Selector).AsSelector()) if err != nil { glog.Fatalf("FAILED: unable to get pods to list: %v", err) } if err := wait.Poll(time.Second, time.Second*30, podsOnMinions(c, *pods)); err != nil { glog.Fatalf("FAILED: pods never started running %v", err) } glog.Infof("Pods created") }
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")
	// Give the controllers some time to actually create the pods
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}
	// Poll till we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//   - The scheduler must assign all pods to a minion
	//   - The assignment must reflect in a `List` operation against the apiserver, for labels matching the selector
	//   - We need to be able to query the kubelet on that minion for information about the pod
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}
	glog.Infof("Pods created")
}
func (r *RollingUpdater) updateAndWait(rc *api.ReplicationController, interval, timeout time.Duration) (*api.ReplicationController, error) {
	rc, err := r.c.ReplicationControllers(r.ns).Update(rc)
	if err != nil {
		return nil, err
	}
	if err := wait.Poll(interval, timeout, client.ControllerHasDesiredReplicas(r.c, rc)); err != nil {
		return nil, err
	}
	return r.c.ReplicationControllers(r.ns).Get(rc.ObjectMeta.Name)
}
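// Every snippet above relies on the same contract: wait.Poll invokes a
// wait.ConditionFunc (func() (done bool, err error)) once per interval until
// it returns true, returns a non-nil error, or the timeout elapses. A minimal
// hand-rolled condition, as a sketch only; rcHasReplicas is a hypothetical
// helper, not part of the sources above:
func rcHasReplicas(c *client.Client, ns, name string, want int) wait.ConditionFunc {
	return func() (bool, error) {
		rc, err := c.ReplicationControllers(ns).Get(name)
		if err != nil {
			return false, err // a returned error aborts the poll immediately
		}
		return rc.Status.Replicas == want, nil
	}
}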
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	updaterClient := &rollingUpdaterClient{
		ControllerHasDesiredReplicasFn: func(rc *kapi.ReplicationController) wait.ConditionFunc {
			return kclient.ControllerHasDesiredReplicas(client, rc)
		},
		GetReplicationControllerFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		UpdateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Update(rc)
		},
		// This guards against the RollingUpdater's built-in behavior to create
		// RCs when the supplied old RC is nil. We won't pass nil, but it doesn't
		// hurt to further guard against it since we would have no way to identify
		// or clean up orphaned RCs RollingUpdater might inadvertently create.
		CreateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return nil, fmt.Errorf("unexpected attempt to create Deployment: %#v", rc)
		},
		// We give the RollingUpdater a policy which should prevent it from
		// deleting the source deployment after the transition, but it doesn't
		// hurt to guard by removing its ability to delete.
		DeleteReplicationControllerFn: func(namespace, name string) error {
			return fmt.Errorf("unexpected attempt to delete Deployment %s/%s", namespace, name)
		},
	}
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          updaterClient,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, updaterClient)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) kubectl.UpdateAcceptor {
			return stratsupport.NewFirstContainerReady(client, timeout, NewFirstContainerReadyInterval)
		},
	}
}
func (reaper *ReplicationControllerReaper) Stop(namespace, name string) (string, error) { rc := reaper.ReplicationControllers(namespace) controller, err := rc.Get(name) if err != nil { return "", err } controller.Spec.Replicas = 0 // TODO: do retry on 409 errors here? if _, err := rc.Update(controller); err != nil { return "", err } if err := wait.Poll(reaper.pollInterval, reaper.timeout, client.ControllerHasDesiredReplicas(reaper, controller)); err != nil { return "", err } if err := rc.Delete(name); err != nil { return "", err } return fmt.Sprintf("%s stopped", name), nil }
// Delete a Replication Controller and all pods it spawned
func DeleteRC(c *client.Client, ns, name string) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
	}
	rc.Spec.Replicas = 0
	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
	}
	if err := wait.Poll(time.Second, time.Minute*20, client.ControllerHasDesiredReplicas(c, rc)); err != nil {
		return fmt.Errorf("Error waiting for replication controller %s replicas to reach 0: %v", name, err)
	}
	// Delete the replication controller.
	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
	}
	return nil
}
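// A usage sketch for DeleteRC: resize to zero, wait, then delete. The
// namespace and controller name here are hypothetical; Logf is the test
// logger already used above:
func cleanupExampleRC(c *client.Client) {
	if err := DeleteRC(c, "e2e-tests", "my-nginx"); err != nil {
		Logf("failed to clean up replication controller: %v", err)
	}
}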
func (reaper *ReplicationControllerReaper) Stop(namespace, name string) (string, error) { rc := reaper.ReplicationControllers(namespace) controller, err := rc.Get(name) if err != nil { return "", err } resizer, err := ResizerFor("ReplicationController", *reaper) if err != nil { return "", err } cond := ResizeCondition(resizer, &ResizePrecondition{-1, ""}, namespace, name, 0) if err = wait.Poll(shortInterval, reaper.timeout, cond); err != nil { return "", err } if err := wait.Poll(reaper.pollInterval, reaper.timeout, client.ControllerHasDesiredReplicas(reaper, controller)); err != nil { return "", err } if err := rc.Delete(name); err != nil { return "", err } return fmt.Sprintf("%s stopped", name), nil }
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")
	// In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
	// systems have been observed to vary unpredictably, so give the controller enough time to create pods.
	// Our e2e scalability tests will catch controllers that are *actually* slow.
	if err := wait.Poll(time.Second, time.Second*60, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}
	// Poll till we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//   - The scheduler must assign all pods to a minion
	//   - The assignment must reflect in a `List` operation against the apiserver, for labels matching the selector
	//   - We need to be able to query the kubelet on that minion for information about the pod
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}
	glog.Infof("Pods created")
}
func (c *realRollingUpdaterClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
	return client.ControllerHasDesiredReplicas(c.client, rc)
}
// ControllerHasDesiredReplicas checks whether the provided replication
// controller has reached its desired number of replicas.
func (c *realScalerClient) ControllerHasDesiredReplicas(rc *kapi.ReplicationController) wait.ConditionFunc {
	return kclient.ControllerHasDesiredReplicas(c.kc, rc)
}
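// Usage sketch for the wrapper: callers can poll without depending on
// kclient directly. The interval and timeout below are illustrative, not
// taken from the sources above:
func waitForDesiredReplicas(s *realScalerClient, rc *kapi.ReplicationController) error {
	return wait.Poll(time.Second, 2*time.Minute, s.ControllerHasDesiredReplicas(rc))
}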