// InstallRegistry checks whether a registry is installed and installs one if not already installed func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images string, out io.Writer) error { _, err := kubeClient.Services("default").Get(svcDockerRegistry) if err == nil { // If there's no error, the registry already exists return nil } if !apierrors.IsNotFound(err) { return errors.NewError("error retrieving docker registry service").WithCause(err) } imageTemplate := variable.NewDefaultImageTemplate() imageTemplate.Format = images cfg := ®istry.RegistryConfig{ Name: "registry", Type: "docker-registry", ImageTemplate: imageTemplate, Ports: "5000", Replicas: 1, Labels: "docker-registry=default", Volume: "/registry", ServiceAccount: "registry", } cmd := registry.NewCmdRegistry(f, "", "registry", out) output := &bytes.Buffer{} err = registry.RunCmdRegistry(f, cmd, output, cfg, []string{}) glog.V(4).Infof("Registry command output:\n%s", output.String()) return err }
func deleteServices(kubeClient client.Interface, ns string) error { items, err := kubeClient.Services(ns).List(labels.Everything(), fields.Everything()) if err != nil { return err } for i := range items.Items { err := kubeClient.Services(ns).Delete(items.Items[i].Name) if err != nil && !errors.IsNotFound(err) { return err } } return nil }
func deleteServices(kubeClient client.Interface, ns string) error { items, err := kubeClient.Services(ns).List(unversioned.ListOptions{}) if err != nil { return err } for i := range items.Items { err := kubeClient.Services(ns).Delete(items.Items[i].Name) if err != nil && !errors.IsNotFound(err) { return err } } return nil }
func unloadServiceLabel(client kclient.Interface, application *api.Application, labelSelector labels.Selector) error { resourceList, _ := client.Services(application.Namespace).List(kapi.ListOptions{LabelSelector: labelSelector, FieldSelector: fields.Everything()}) errs := []error{} for _, resource := range resourceList.Items { if !hasItem(application.Spec.Items, api.Item{Kind: "Service", Name: resource.Name}) { delete(resource.Labels, fmt.Sprintf("%s.application.%s", application.Namespace, application.Name)) if _, err := client.Services(application.Namespace).Update(&resource); err != nil { errs = append(errs, err) } } } return nil }
// GetServicePods gets list of pods targeted by given label selector in given namespace. func GetServicePods(client k8sClient.Interface, heapsterClient client.HeapsterClient, namespace, name string, dsQuery *dataselect.DataSelectQuery) (*pod.PodList, error) { service, err := client.Services(namespace).Get(name) if err != nil { return nil, err } labelSelector := labels.SelectorFromSet(service.Spec.Selector) channels := &common.ResourceChannels{ PodList: common.GetPodListChannelWithOptions(client, common.NewSameNamespaceQuery(namespace), api.ListOptions{ LabelSelector: labelSelector, FieldSelector: fields.Everything(), }, 1), } apiPodList := <-channels.PodList.List if err := <-channels.PodList.Error; err != nil { return nil, err } podList := pod.CreatePodList(apiPodList.Items, dsQuery, heapsterClient) return &podList, nil }
// Based on given selector returns list of services that are candidates for deletion. // Services are matched by daemon sets' label selector. They are deleted if given // label selector is targeting only 1 daemon set. func getServicesForDSDeletion(client client.Interface, labelSelector labels.Selector, namespace string) ([]api.Service, error) { daemonSet, err := client.Extensions().DaemonSets(namespace).List(api.ListOptions{ LabelSelector: labelSelector, FieldSelector: fields.Everything(), }) if err != nil { return nil, err } // if label selector is targeting only 1 daemon set // then we can delete services targeted by this label selector, // otherwise we can not delete any services so just return empty list if len(daemonSet.Items) != 1 { return []api.Service{}, nil } services, err := client.Services(namespace).List(api.ListOptions{ LabelSelector: labelSelector, FieldSelector: fields.Everything(), }) if err != nil { return nil, err } return services.Items, nil }
// DeleteDaemonSetServices deletes services related to daemon set with given name in given namespace. func DeleteDaemonSetServices(client k8sClient.Interface, namespace, name string) error { log.Printf("Deleting services related to %s daemon set from %s namespace", name, namespace) daemonSet, err := client.Extensions().DaemonSets(namespace).Get(name) if err != nil { return err } labelSelector, err := unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector) if err != nil { return err } services, err := getServicesForDSDeletion(client, labelSelector, namespace) if err != nil { return err } for _, service := range services { if err := client.Services(namespace).Delete(service.Name); err != nil { return err } } log.Printf("Successfully deleted services related to %s daemon set from %s namespace", name, namespace) return nil }
// ValidateAppName validates application name. When error is returned, name validity could not be // determined. func ValidateAppName(spec *AppNameValiditySpec, client client.Interface) (*AppNameValidity, error) { log.Printf("Validating %s application name in %s namespace", spec.Name, spec.Namespace) isValidRc := false isValidService := false _, err := client.ReplicationControllers(spec.Namespace).Get(spec.Name) if err != nil { if isNotFoundError(err) { isValidRc = true } else { return nil, err } } _, err = client.Services(spec.Namespace).Get(spec.Name) if err != nil { if isNotFoundError(err) { isValidService = true } else { return nil, err } } isValid := isValidRc && isValidService log.Printf("Validation result for %s application name in %s namespace is %t", spec.Name, spec.Namespace, isValid) return &AppNameValidity{Valid: isValid}, nil }
// Deletes services related to replication controller with given name in given namespace. func DeleteReplicationControllerServices(client client.Interface, namespace, name string) error { log.Printf("Deleting services related to %s replication controller from %s namespace", name, namespace) replicationController, err := client.ReplicationControllers(namespace).Get(name) if err != nil { return err } labelSelector, err := toLabelSelector(replicationController.Spec.Selector) if err != nil { return err } services, err := getServicesForDeletion(client, labelSelector, namespace) if err != nil { return err } for _, service := range services { if err := client.Services(namespace).Delete(service.Name); err != nil { return err } } log.Printf("Successfully deleted services related to %s replication controller from %s namespace", name, namespace) return nil }
// Based on given selector returns list of services that are candidates for deletion. // Services are matched by replication controllers' label selector. They are deleted if given // label selector is targeting only 1 replication controller. func getServicesForDeletion(client client.Interface, labelSelector labels.Selector, namespace string) ([]api.Service, error) { replicationControllers, err := client.ReplicationControllers(namespace).List(api.ListOptions{ LabelSelector: labelSelector, FieldSelector: fields.Everything(), }) if err != nil { return nil, err } // if label selector is targeting only 1 replication controller // then we can delete services targeted by this label selector, // otherwise we can not delete any services so just return empty list if len(replicationControllers.Items) != 1 { return []api.Service{}, nil } services, err := client.Services(namespace).List(api.ListOptions{ LabelSelector: labelSelector, FieldSelector: fields.Everything(), }) if err != nil { return nil, err } return services.Items, nil }
func createService(kc kclient.Interface, name string, typeLoadBalancer bool) (*kapi.Service, error) { serviceType := kapi.ServiceTypeClusterIP if typeLoadBalancer { serviceType = kapi.ServiceTypeLoadBalancer } service := &kapi.Service{ ObjectMeta: kapi.ObjectMeta{ GenerateName: "service-", Name: name, }, Spec: kapi.ServiceSpec{ Type: serviceType, Ports: []kapi.ServicePort{{ Protocol: "TCP", Port: 8080, }}, }, } return kc.Services(kapi.NamespaceDefault).Create(service) }
// InstallRegistry checks whether a registry is installed and installs one if not already installed func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images string, out, errout io.Writer) error { _, err := kubeClient.Services(DefaultNamespace).Get(SvcDockerRegistry) if err == nil { // If there's no error, the registry already exists return nil } if !apierrors.IsNotFound(err) { return errors.NewError("error retrieving docker registry service").WithCause(err).WithDetails(h.OriginLog()) } imageTemplate := variable.NewDefaultImageTemplate() imageTemplate.Format = images opts := ®istry.RegistryOptions{ Config: ®istry.RegistryConfig{ Name: "registry", Type: "docker-registry", ImageTemplate: imageTemplate, Ports: "5000", Replicas: 1, Labels: "docker-registry=default", Volume: "/registry", ServiceAccount: "registry", }, } cmd := registry.NewCmdRegistry(f, "", "registry", out, errout) output := &bytes.Buffer{} err = opts.Complete(f, cmd, output, output, []string{}) if err != nil { return errors.NewError("error completing the registry configuration").WithCause(err) } err = opts.RunCmdRegistry() glog.V(4).Infof("Registry command output:\n%s", output.String()) if err != nil { return errors.NewError("cannot install registry").WithCause(err).WithDetails(h.OriginLog()) } return nil }
// GetDaemonSetDetail returns detailed information about the given daemon set in
// the given namespace: object metadata, pod info, matching services, container
// images and the pod list.
func GetDaemonSetDetail(client k8sClient.Interface, heapsterClient client.HeapsterClient, namespace, name string) (*DaemonSetDetail, error) {
	log.Printf("Getting details of %s daemon set in %s namespace", name, namespace)

	// Fetch the daemon set together with the pods it controls.
	daemonSetWithPods, err := getRawDaemonSetWithPods(client, namespace, name)
	if err != nil {
		return nil, err
	}
	daemonSet := daemonSetWithPods.DaemonSet
	pods := daemonSetWithPods.Pods

	// List every service in the namespace; the matching ones are filtered below.
	services, err := client.Services(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	daemonSetDetail := &DaemonSetDetail{
		ObjectMeta:    common.NewObjectMeta(daemonSet.ObjectMeta),
		TypeMeta:      common.NewTypeMeta(common.ResourceKindDaemonSet),
		LabelSelector: daemonSet.Spec.Selector,
		PodInfo:       getDaemonSetPodInfo(daemonSet, pods.Items),
		// Initialized non-nil so an empty result serializes as [] rather than null.
		ServiceList: resourceService.ServiceList{Services: make([]resourceService.Service, 0)},
	}

	// Keep only the services whose selectors target this daemon set.
	matchingServices := getMatchingServicesforDS(services.Items, daemonSet)
	for _, service := range matchingServices {
		daemonSetDetail.ServiceList.Services = append(daemonSetDetail.ServiceList.Services, resourceService.ToService(&service))
	}

	for _, container := range daemonSet.Spec.Template.Spec.Containers {
		daemonSetDetail.ContainerImages = append(daemonSetDetail.ContainerImages, container.Image)
	}

	daemonSetDetail.Pods = pod.CreatePodList(pods.Items, heapsterClient)

	return daemonSetDetail, nil
}
// GetReplicationControllerDetail returns detailed information about the given replication
// controller in the given namespace: object metadata, pod info, matching services,
// container images and the pod list.
func GetReplicationControllerDetail(client k8sClient.Interface, heapsterClient client.HeapsterClient, namespace, name string) (*ReplicationControllerDetail, error) {
	log.Printf("Getting details of %s replication controller in %s namespace", name, namespace)

	// Fetch the controller together with the pods it manages.
	replicationControllerWithPods, err := getRawReplicationControllerWithPods(client, namespace, name)
	if err != nil {
		return nil, err
	}
	replicationController := replicationControllerWithPods.ReplicationController
	pods := replicationControllerWithPods.Pods

	// List every service in the namespace; the matching ones are filtered below.
	services, err := client.Services(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	replicationControllerDetail := &ReplicationControllerDetail{
		ObjectMeta:    common.NewObjectMeta(replicationController.ObjectMeta),
		TypeMeta:      common.NewTypeMeta(common.ResourceKindReplicationController),
		LabelSelector: replicationController.Spec.Selector,
		PodInfo:       getReplicationPodInfo(replicationController, pods.Items),
		// Initialized non-nil so an empty result serializes as [] rather than null.
		ServiceList: resourceService.ServiceList{Services: make([]resourceService.Service, 0)},
	}

	// Keep only the services whose selectors target this controller.
	matchingServices := getMatchingServices(services.Items, replicationController)
	for _, service := range matchingServices {
		replicationControllerDetail.ServiceList.Services = append(
			replicationControllerDetail.ServiceList.Services, resourceService.ToService(&service))
	}

	for _, container := range replicationController.Spec.Template.Spec.Containers {
		replicationControllerDetail.ContainerImages = append(replicationControllerDetail.ContainerImages, container.Image)
	}

	replicationControllerDetail.Pods = pod.CreatePodList(pods.Items, heapsterClient)

	return replicationControllerDetail, nil
}
// GetServiceDetail gets service details. func GetServiceDetail(client k8sClient.Interface, heapsterClient client.HeapsterClient, namespace, name string) (*ServiceDetail, error) { log.Printf("Getting details of %s service in %s namespace", name, namespace) // TODO(maciaszczykm): Use channels. serviceData, err := client.Services(namespace).Get(name) if err != nil { return nil, err } podList, err := GetServicePods(client, heapsterClient, namespace, serviceData.Spec.Selector) if err != nil { return nil, err } service := ToServiceDetail(serviceData) service.PodList = *podList return &service, nil }
// Validates application name. When error is returned, name validity could not be determined. func ValidateAppName(spec *AppNameValiditySpec, client client.Interface) (*AppNameValidity, error) { isValidRc := false isValidService := false _, err := client.ReplicationControllers(spec.Namespace).Get(spec.Name) if err != nil { if isNotFoundError(err) { isValidRc = true } else { return nil, err } } _, err = client.Services(spec.Namespace).Get(spec.Name) if err != nil { if isNotFoundError(err) { isValidService = true } else { return nil, err } } return &AppNameValidity{Valid: isValidRc && isValidService}, nil }
// DeployApp deploys an app based on the given configuration. The app is deployed using the given
// client. App deployment consists of a replication controller and an optional service. Both of them
// share common labels.
// TODO(bryk): Write tests for this function.
func DeployApp(spec *AppDeploymentSpec, client client.Interface) error {
	annotations := map[string]string{}
	if spec.Description != nil {
		annotations[DescriptionAnnotationKey] = *spec.Description
	}
	labels := getLabelsMap(spec.Labels)
	// Shared metadata for the controller, its pod template, and the optional service.
	objectMeta := api.ObjectMeta{
		Annotations: annotations,
		Name:        spec.Name,
		Labels:      labels,
	}
	containerSpec := api.Container{
		Name:  spec.Name,
		Image: spec.ContainerImage,
		SecurityContext: &api.SecurityContext{
			Privileged: &spec.RunAsPrivileged,
		},
	}
	// NOTE(review): command and args are each wrapped as a single-element slice,
	// so a multi-word value travels as one string — confirm callers expect that.
	if spec.ContainerCommand != nil {
		containerSpec.Command = []string{*spec.ContainerCommand}
	}
	if spec.ContainerCommandArgs != nil {
		containerSpec.Args = []string{*spec.ContainerCommandArgs}
	}
	podTemplate := &api.PodTemplateSpec{
		ObjectMeta: objectMeta,
		Spec: api.PodSpec{
			Containers: []api.Container{containerSpec},
		},
	}
	replicaSet := &api.ReplicationController{
		ObjectMeta: objectMeta,
		Spec: api.ReplicationControllerSpec{
			Replicas: spec.Replicas,
			Selector: labels,
			Template: podTemplate,
		},
	}
	_, err := client.ReplicationControllers(spec.Namespace).Create(replicaSet)
	if err != nil {
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}
	// A service is only created when port mappings were requested.
	if len(spec.PortMappings) > 0 {
		service := &api.Service{
			ObjectMeta: objectMeta,
			Spec: api.ServiceSpec{
				Selector: labels,
			},
		}
		// External apps get a LoadBalancer; internal ones are exposed via NodePort.
		if spec.IsExternal {
			service.Spec.Type = api.ServiceTypeLoadBalancer
		} else {
			service.Spec.Type = api.ServiceTypeNodePort
		}
		for _, portMapping := range spec.PortMappings {
			servicePort := api.ServicePort{
				Protocol: portMapping.Protocol,
				Port:     portMapping.Port,
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: portMapping.TargetPort,
				},
			}
			service.Spec.Ports = append(service.Spec.Ports, servicePort)
		}
		_, err = client.Services(spec.Namespace).Create(service)
		// TODO(bryk): Roll back created resources in case of error.
		return err
	} else {
		return nil
	}
}
// DeployApp deploys an app based on the given configuration. The app is deployed using the given
// client. App deployment consists of a replication controller and an optional service. Both of them
// share common labels.
func DeployApp(spec *AppDeploymentSpec, client client.Interface) error {
	log.Printf("Deploying %s application into %s namespace", spec.Name, spec.Namespace)
	annotations := map[string]string{}
	if spec.Description != nil {
		annotations[DescriptionAnnotationKey] = *spec.Description
	}
	labels := getLabelsMap(spec.Labels)
	// Shared metadata for the controller, its pod template, and the optional service.
	objectMeta := api.ObjectMeta{
		Annotations: annotations,
		Name:        spec.Name,
		Labels:      labels,
	}
	containerSpec := api.Container{
		Name:  spec.Name,
		Image: spec.ContainerImage,
		SecurityContext: &api.SecurityContext{
			Privileged: &spec.RunAsPrivileged,
		},
		Resources: api.ResourceRequirements{
			Requests: make(map[api.ResourceName]resource.Quantity),
		},
		Env: convertEnvVarsSpec(spec.Variables),
	}
	// NOTE(review): command and args are each wrapped as a single-element slice,
	// so a multi-word value travels as one string — confirm callers expect that.
	if spec.ContainerCommand != nil {
		containerSpec.Command = []string{*spec.ContainerCommand}
	}
	if spec.ContainerCommandArgs != nil {
		containerSpec.Args = []string{*spec.ContainerCommandArgs}
	}
	// Optional CPU/memory requests are only set when provided.
	if spec.CpuRequirement != nil {
		containerSpec.Resources.Requests[api.ResourceCPU] = *spec.CpuRequirement
	}
	if spec.MemoryRequirement != nil {
		containerSpec.Resources.Requests[api.ResourceMemory] = *spec.MemoryRequirement
	}
	podSpec := api.PodSpec{
		Containers: []api.Container{containerSpec},
	}
	if spec.ImagePullSecret != nil {
		podSpec.ImagePullSecrets = []api.LocalObjectReference{{Name: *spec.ImagePullSecret}}
	}
	podTemplate := &api.PodTemplateSpec{
		ObjectMeta: objectMeta,
		Spec:       podSpec,
	}
	replicationController := &api.ReplicationController{
		ObjectMeta: objectMeta,
		Spec: api.ReplicationControllerSpec{
			Replicas: spec.Replicas,
			Selector: labels,
			Template: podTemplate,
		},
	}
	_, err := client.ReplicationControllers(spec.Namespace).Create(replicationController)
	if err != nil {
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}
	// A service is only created when port mappings were requested.
	if len(spec.PortMappings) > 0 {
		service := &api.Service{
			ObjectMeta: objectMeta,
			Spec: api.ServiceSpec{
				Selector: labels,
			},
		}
		// External apps get a LoadBalancer; internal ones a ClusterIP service.
		if spec.IsExternal {
			service.Spec.Type = api.ServiceTypeLoadBalancer
		} else {
			service.Spec.Type = api.ServiceTypeClusterIP
		}
		for _, portMapping := range spec.PortMappings {
			servicePort := api.ServicePort{
				Protocol: portMapping.Protocol,
				Port:     portMapping.Port,
				Name:     generatePortMappingName(portMapping),
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: portMapping.TargetPort,
				},
			}
			service.Spec.Ports = append(service.Spec.Ports, servicePort)
		}
		_, err = client.Services(spec.Namespace).Create(service)
		// TODO(bryk): Roll back created resources in case of error.
		return err
	} else {
		return nil
	}
}
// GetReplicationControllerDetail returns detailed information about the given
// replication controller in the given namespace, including matching services,
// container images, and per-pod details with optional Heapster metrics.
func GetReplicationControllerDetail(client client.Interface, heapsterClient HeapsterClient, namespace, name string) (*ReplicationControllerDetail, error) {
	log.Printf("Getting details of %s replication controller in %s namespace", name, namespace)

	// Fetch the controller together with the pods it manages.
	replicationControllerWithPods, err := getRawReplicationControllerWithPods(client, namespace, name)
	if err != nil {
		return nil, err
	}
	replicationController := replicationControllerWithPods.ReplicationController
	pods := replicationControllerWithPods.Pods

	// Metrics are best-effort: a Heapster failure is logged, not fatal.
	replicationControllerMetricsByPod, err := getReplicationControllerPodsMetrics(pods, heapsterClient, namespace, name)
	if err != nil {
		log.Printf("Skipping Heapster metrics because of error: %s\n", err)
	}

	// List every service in the namespace; the matching ones are filtered below.
	services, err := client.Services(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.Everything()},
		FieldSelector: unversioned.FieldSelector{fields.Everything()},
	})
	if err != nil {
		return nil, err
	}

	replicationControllerDetail := &ReplicationControllerDetail{
		Name:          replicationController.Name,
		Namespace:     replicationController.Namespace,
		Labels:        replicationController.ObjectMeta.Labels,
		LabelSelector: replicationController.Spec.Selector,
		PodInfo:       getReplicationControllerPodInfo(replicationController, pods.Items),
	}

	// Keep only the services whose selectors target this controller.
	matchingServices := getMatchingServices(services.Items, replicationController)
	for _, service := range matchingServices {
		replicationControllerDetail.Services = append(replicationControllerDetail.Services, getServiceDetail(service))
	}

	for _, container := range replicationController.Spec.Template.Spec.Containers {
		replicationControllerDetail.ContainerImages = append(replicationControllerDetail.ContainerImages, container.Image)
	}

	// Build the per-pod view; metrics are attached only when Heapster data arrived.
	for _, pod := range pods.Items {
		podDetail := ReplicationControllerPod{
			Name:         pod.Name,
			PodPhase:     pod.Status.Phase,
			StartTime:    pod.Status.StartTime,
			PodIP:        pod.Status.PodIP,
			NodeName:     pod.Spec.NodeName,
			RestartCount: getRestartCount(pod),
		}
		if replicationControllerMetricsByPod != nil {
			// Copy the map value so the pointer below is stable per iteration.
			metric := replicationControllerMetricsByPod.MetricsMap[pod.Name]
			podDetail.Metrics = &metric
			replicationControllerDetail.HasMetrics = true
		}
		replicationControllerDetail.Pods = append(replicationControllerDetail.Pods, podDetail)
	}

	return replicationControllerDetail, nil
}
// InstallRouter installs a default router on the OpenShift server.
//
// Steps: skip if the router service already exists; create the router service
// account; add it to the privileged SCC; generate a server certificate for
// <hostIP>.xip.io and bundle it into router.pem; then run the oadm router
// command with that configuration.
func (h *Helper) InstallRouter(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images, hostIP string, portForwarding bool, out io.Writer) error {
	_, err := kubeClient.Services(DefaultNamespace).Get(SvcRouter)
	if err == nil {
		// Router service already exists, nothing to do
		return nil
	}
	if !apierrors.IsNotFound(err) {
		return errors.NewError("error retrieving router service").WithCause(err).WithDetails(h.OriginLog())
	}
	masterDir := filepath.Join(configDir, "master")
	// Create service account for router
	// NOTE(review): the literal "default" is used here while the service lookup
	// above uses DefaultNamespace — confirm they refer to the same namespace.
	routerSA := &kapi.ServiceAccount{}
	routerSA.Name = "router"
	_, err = kubeClient.ServiceAccounts("default").Create(routerSA)
	if err != nil {
		return errors.NewError("cannot create router service account").WithCause(err).WithDetails(h.OriginLog())
	}
	// Add router SA to privileged SCC
	privilegedSCC, err := kubeClient.SecurityContextConstraints().Get("privileged")
	if err != nil {
		return errors.NewError("cannot retrieve privileged SCC").WithCause(err).WithDetails(h.OriginLog())
	}
	privilegedSCC.Users = append(privilegedSCC.Users, serviceaccount.MakeUsername("default", "router"))
	_, err = kubeClient.SecurityContextConstraints().Update(privilegedSCC)
	if err != nil {
		return errors.NewError("cannot update privileged SCC").WithCause(err).WithDetails(h.OriginLog())
	}
	// Create router cert
	cmdOutput := &bytes.Buffer{}
	createCertOptions := &admin.CreateServerCertOptions{
		SignerCertOptions: &admin.SignerCertOptions{
			CertFile:   filepath.Join(masterDir, "ca.crt"),
			KeyFile:    filepath.Join(masterDir, "ca.key"),
			SerialFile: filepath.Join(masterDir, "ca.serial.txt"),
		},
		Overwrite: true,
		Hostnames: []string{fmt.Sprintf("%s.xip.io", hostIP)},
		CertFile:  filepath.Join(masterDir, "router.crt"),
		KeyFile:   filepath.Join(masterDir, "router.key"),
		Output:    cmdOutput,
	}
	_, err = createCertOptions.CreateServerCert()
	if err != nil {
		return errors.NewError("cannot create router cert").WithCause(err)
	}
	// Concatenate cert, key and CA into the single PEM file the router consumes.
	err = catFiles(filepath.Join(masterDir, "router.pem"),
		filepath.Join(masterDir, "router.crt"),
		filepath.Join(masterDir, "router.key"),
		filepath.Join(masterDir, "ca.crt"))
	if err != nil {
		return err
	}
	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = images
	cfg := &router.RouterConfig{
		Name:               "router",
		Type:               "haproxy-router",
		ImageTemplate:      imageTemplate,
		Ports:              "80:80,443:443",
		Replicas:           1,
		Labels:             "router=<name>",
		Credentials:        filepath.Join(masterDir, "admin.kubeconfig"),
		DefaultCertificate: filepath.Join(masterDir, "router.pem"),
		StatsPort:          1936,
		// NOTE(review): "******" looks like a redacted placeholder rather than a
		// real stats username — confirm the intended value.
		StatsUsername: "******",
		// Host networking is used unless port forwarding is requested.
		HostNetwork:    !portForwarding,
		HostPorts:      true,
		ServiceAccount: "router",
	}
	output := &bytes.Buffer{}
	cmd := router.NewCmdRouter(f, "", "router", out)
	cmd.SetOutput(output)
	err = router.RunCmdRouter(f, cmd, output, cfg, []string{})
	glog.V(4).Infof("Router command output:\n%s", output.String())
	if err != nil {
		return errors.NewError("cannot install router").WithCause(err).WithDetails(h.OriginLog())
	}
	return nil
}
// generateServiceEvents drives a randomized sequence of service create/update/
// delete operations in the default namespace, then creates a well-known sentinel
// service last so a controller's processing of it signals completion.
func generateServiceEvents(t *testing.T, kc kclient.Interface) {
	maxMillisecondInterval := 25
	minServiceCount := 10
	maxOperations := minServiceCount + 30
	var services []*kapi.Service
	// NOTE: i is incremented at the bottom of the loop body, so an operation that
	// hits `continue` (e.g. a failed Get/Update/Delete) does NOT count toward
	// maxOperations.
	for i := 0; i < maxOperations; {
		op := createOp
		// Only start updating/deleting once a minimum population of services exists.
		if len(services) > minServiceCount {
			op = rand.Intn(deleteOp + 1)
		}
		switch op {
		case createOp:
			// Randomly choose between ClusterIP and LoadBalancer services.
			typeChoice := rand.Intn(2)
			typeLoadBalancer := false
			if typeChoice == 1 {
				typeLoadBalancer = true
			}
			s, err := createService(kc, "", typeLoadBalancer)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			services = append(services, s)
			t.Logf("Added service %s", s.Name)
		case updateOp:
			targetIndex := rand.Intn(len(services))
			name := services[targetIndex].Name
			s, err := kc.Services(kapi.NamespaceDefault).Get(name)
			if err != nil {
				continue
			}
			// Flip the service type
			if s.Spec.Type == kapi.ServiceTypeLoadBalancer {
				s.Spec.Type = kapi.ServiceTypeClusterIP
				// Clear the allocated node port when downgrading to ClusterIP.
				s.Spec.Ports[0].NodePort = 0
			} else {
				s.Spec.Type = kapi.ServiceTypeLoadBalancer
			}
			s, err = kc.Services(kapi.NamespaceDefault).Update(s)
			if err != nil {
				continue
			}
			t.Logf("Updated service %s", name)
		case deleteOp:
			targetIndex := rand.Intn(len(services))
			name := services[targetIndex].Name
			err := kc.Services(kapi.NamespaceDefault).Delete(name)
			if err != nil {
				continue
			}
			// Drop the deleted service from the local tracking slice.
			services = append(services[:targetIndex], services[targetIndex+1:]...)
			t.Logf("Deleted service %s", name)
		}
		i++
		time.Sleep(time.Duration(rand.Intn(maxMillisecondInterval)) * time.Millisecond)
	}
	// Create one last service to serve as a sentinel. The service
	// will be created after a slight delay so that it can be assured
	// of being the last service a controller will see, and with a
	// known name so its processing can be detected.
	time.Sleep(time.Millisecond * 100)
	_, err := createService(kc, sentinelName, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}