// waitForJobFinish waits for a job to reach the given number of completions.
func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}
// waitForJobsAtLeast waits for at least the given number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(jobs.Items) >= atLeast, nil
	})
}
// waitForJobFail waits for a job to fail within the given timeout.
func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		for _, c := range curr.Status.Conditions {
			if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range jobs.Items {
			if job.IsJobFinished(&jobs.Items[i]) {
				return true, nil
			}
		}
		return false, nil
	})
}
// waitForJobReplaced waits for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(jobs.Items) > 1 {
			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
		} else if len(jobs.Items) == 0 {
			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
			return false, nil
		}
		return jobs.Items[0].Name != previousJobName, nil
	})
}
// NewJobInformer returns a SharedIndexInformer that lists and watches all jobs.
func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Batch().Jobs(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Batch().Jobs(v1.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	return sharedIndexInformer
}
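A minimal sketch of wiring this informer into a caller: register an event handler and run it until a stop channel closes. The stop channel and the handler body are assumptions for illustration, not part of the source.

// Hypothetical usage sketch; stopCh and the logging call are assumed.
informer := NewJobInformer(client, 30*time.Second)
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) {
		j := obj.(*batch.Job)
		glog.Infof("observed job %s/%s", j.Namespace, j.Name)
	},
})
go informer.Run(stopCh)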
func deleteJob(c clientset.Interface, ns, name string) error {
	return c.Batch().Jobs(ns).Delete(name, nil)
}

func updateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Update(job)
}

func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Get(name)
}

func deleteV1Job(c clientset.Interface, ns, name string) error {
	return c.Batch().Jobs(ns).Delete(name, v1.NewDeleteOptions(0))
}

func createV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Create(job)
}

func getV1Job(c clientset.Interface, ns, name string) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
}
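A hedged sketch of how a test body might chain these helpers: create a job, wait for it to finish, then delete it. The job object testJob and the e2e framework instance f are assumptions and not defined in the source.

// Hypothetical test flow; testJob and f (an e2e framework instance) are assumed.
created, err := createV1Job(f.ClientSet, f.Namespace.Name, testJob)
if err != nil {
	framework.Failf("failed to create job: %v", err)
}
if err := waitForJobFinish(f.ClientSet, f.Namespace.Name, created.Name, *created.Spec.Completions); err != nil {
	framework.Failf("job did not reach %d completions: %v", *created.Spec.Completions, err)
}
if err := deleteV1Job(f.ClientSet, f.Namespace.Name, created.Name); err != nil {
	framework.Failf("failed to delete job: %v", err)
}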
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain, as well as some extra information
// about possibly problematic pods (unreplicated pods and daemon set pods).
func GetPodsForDeletionOnNodeDrain(
	podList []*apiv1.Pod,
	decoder runtime.Decoder,
	deleteAll bool,
	skipNodesWithSystemPods bool,
	skipNodesWithLocalStorage bool,
	checkReferences bool, // Setting this to true requires client to be not-null.
	client client.Interface,
	minReplica int32) ([]*apiv1.Pod, error) {

	pods := []*apiv1.Pod{}
	for _, pod := range podList {
		if IsMirrorPod(pod) {
			continue
		}

		daemonsetPod := false
		replicated := false

		sr, err := CreatorRef(pod)
		if err != nil {
			return []*apiv1.Pod{}, fmt.Errorf("failed to obtain refkind: %v", err)
		}
		refKind := ""
		if sr != nil {
			refKind = sr.Reference.Kind
		}

		if refKind == "ReplicationController" {
			if checkReferences {
				rc, err := client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume a reason for an error is because the RC is either
				// gone/missing or that the RC has too few replicas configured.
				// TODO: replace the minReplica check with pod disruption budget.
				if err == nil && rc != nil {
					if rc.Spec.Replicas != nil && *rc.Spec.Replicas < minReplica {
						return []*apiv1.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, *rc.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "DaemonSet" {
			if checkReferences {
				ds, err := client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the DaemonSet is
				// gone/missing, not for any other cause.
				// TODO(mml): something more sophisticated than this.
				if err == nil && ds != nil {
					// Treat daemonset-managed pods as unmanaged since the
					// DaemonSet Controller currently ignores the unschedulable bit.
					// FIXME(mml): Add link to the issue concerning a proper way to drain
					// daemonset pods, probably using taints.
					daemonsetPod = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				daemonsetPod = true
			}
		} else if refKind == "Job" {
			if checkReferences {
				job, err := client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the Job is
				// gone/missing, not for any other cause.
				// TODO(mml): something more sophisticated than this.
				if err == nil && job != nil {
					replicated = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		} else if refKind == "ReplicaSet" {
			if checkReferences {
				rs, err := client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the ReplicaSet is
				// gone/missing, not for any other cause.
				// TODO(mml): something more sophisticated than this.
				if err == nil && rs != nil {
					if rs.Spec.Replicas != nil && *rs.Spec.Replicas < minReplica {
						return []*apiv1.Pod{}, fmt.Errorf("replica set for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, *rs.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*apiv1.Pod{}, fmt.Errorf("replica set for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		}

		if daemonsetPod {
			continue
		}
		if !deleteAll {
			if !replicated {
				return []*apiv1.Pod{}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
			}
			if pod.Namespace == "kube-system" && skipNodesWithSystemPods {
				return []*apiv1.Pod{}, fmt.Errorf("non-daemon-set, non-mirrored, kube-system pod present: %s", pod.Name)
			}
			if HasLocalStorage(pod) && skipNodesWithLocalStorage {
				return []*apiv1.Pod{}, fmt.Errorf("pod with local storage present: %s", pod.Name)
			}
		}
		pods = append(pods, pod)
	}
	return pods, nil
}
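A hypothetical call site sketching how a drain routine might use this helper. The inputs podsOnNode, decoder, and kubeClient are assumed to exist in the caller and are not defined in the source.

// Assumed inputs: podsOnNode ([]*apiv1.Pod scheduled to the node being drained),
// decoder (runtime.Decoder), kubeClient (client.Interface).
podsToDelete, err := GetPodsForDeletionOnNodeDrain(
	podsOnNode,
	decoder,
	false, // deleteAll: refuse to drain if any pod is not safely deletable
	true,  // skipNodesWithSystemPods
	true,  // skipNodesWithLocalStorage
	true,  // checkReferences: requires a non-nil client
	kubeClient,
	1) // minReplica
if err != nil {
	return fmt.Errorf("node cannot be drained: %v", err)
}
for _, pod := range podsToDelete {
	// evict or delete the pod here
	_ = pod
}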