// TODO: quinton: This is largely a cut 'n paste of the above. Yuck! Refactor
// as soon as we have a common interface implemented by both
// fedclientset.Clientset and kubeclientset.Clientset.
func deleteClusterIngressOrFail(clusterName string, clientset *release_1_3.Clientset, namespace string, ingressName string) {
	if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteClusterIngressOrFail: cluster: %q, clientset: %v, namespace: %v, ingress: %v", clusterName, clientset, namespace, ingressName))
	}
	err := clientset.Ingresses(namespace).Delete(ingressName, api.NewDeleteOptions(0))
	framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
}
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service reaches the expected value, i.e. numSvcs, in the given individual Kubernetes
// cluster. If the shard count, i.e. numSvcs, is expected to be at least one, then
// it also checks that the first shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *v1.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list services: %+v", err)
	}
	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability.
		clSvc := clSvcList.Items[0]
		Expect(clSvc.Name).To(Equal(service.Name))
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
func cleanupServiceShard(clientset *release_1_3.Clientset, clusterName, namespace string, service *v1.Service, timeout time.Duration) error {
	return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		err := clientset.Services(namespace).Delete(service.Name, &api.DeleteOptions{})
		if err != nil && !errors.IsNotFound(err) {
			// Deletion failed with an error; try again.
			framework.Logf("Failed to delete service %q in namespace %q, in cluster %q", service.Name, namespace, clusterName)
			return false, nil
		}
		By(fmt.Sprintf("Service %q in namespace %q in cluster %q deleted", service.Name, namespace, clusterName))
		return true, nil
	})
}
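// A minimal usage sketch for cleanupServiceShard, assuming the caller tracks
// per-cluster clientsets in a map. The map type and the function name
// cleanupServiceShardsOrFail are illustrative, not from the original code.
func cleanupServiceShardsOrFail(clusters map[string]*release_1_3.Clientset, namespace string, service *v1.Service) {
	for name, clientset := range clusters {
		// Each shard is deleted independently; a failure in any one cluster fails the test.
		err := cleanupServiceShard(clientset, name, namespace, service, FederatedServiceTimeout)
		framework.ExpectNoError(err, "Failed to delete service %q shard in cluster %q", service.Name, name)
	}
}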
func (s *ServiceController) deleteClusterService(clusterName string, cachedService *cachedService, clientset *release_1_3.Clientset) error {
	service := cachedService.lastState
	glog.V(4).Infof("Deleting service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		err = clientset.Core().Services(service.Namespace).Delete(service.Name, &api.DeleteOptions{})
		if err == nil || errors.IsNotFound(err) {
			glog.V(4).Infof("Service %s/%s deleted from cluster %s", service.Namespace, service.Name, clusterName)
			return nil
		}
		time.Sleep(cachedService.nextRetryDelay())
	}
	glog.V(4).Infof("Failed to delete service %s/%s from cluster %s, %+v", service.Namespace, service.Name, clusterName, err)
	return err
}
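// The retry pacing above comes from cachedService.nextRetryDelay(), which is
// not shown in this section. A minimal sketch of the capped exponential
// backoff it might implement; the Sketch suffix and the field/constant names
// (lastRetryDelay, minRetryDelay, maxRetryDelay) are assumptions, not the
// controller's actual state.
func (s *cachedService) nextRetryDelaySketch() time.Duration {
	// Double the previous delay, clamped to [minRetryDelay, maxRetryDelay].
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}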
/*
waitForIngressUpdateOrFail waits until an ingress in the specified cluster is
updated to match the spec of the federated ingress. If the condition is not
met within timeout, it fails the calling test.
*/
func waitForIngressUpdateOrFail(clientset *release_1_3.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name)
		if err == nil {
			// The Get succeeded; check whether the shard has caught up with the federated spec.
			if equivalentIngress(*clusterIngress, *ingress) {
				By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is updated", ingress.Name, namespace))
				return true, nil
			}
		}
		By(fmt.Sprintf("Ingress %q in namespace %q in cluster, waiting for the ingress to be updated, trying again in %s (err=%v)", ingress.Name, namespace, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster", ingress.Name, namespace)
}
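// equivalentIngress is referenced above but defined elsewhere. A plausible
// sketch under the assumption that equivalence ignores cluster-managed
// metadata and status and compares only the user-supplied spec; the Sketch
// suffix marks this as an illustration, not the original helper. Uses
// reflect.DeepEqual from the standard library.
func equivalentIngressSketch(a, b v1beta1.Ingress) bool {
	// Status (e.g. load balancer addresses) is cluster-specific, so only the spec matters.
	return reflect.DeepEqual(a.Spec, b.Spec)
}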
func waitForSecretUpdateOrFail(clientset *release_1_3.Clientset, namespace string, secret *v1.Secret, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, namespace))
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterSecret, err := clientset.Core().Secrets(namespace).Get(secret.Name)
		if err == nil {
			// The Get succeeded; check whether the shard has caught up with the federated secret.
			if equivalentSecret(*clusterSecret, *secret) {
				By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is updated", secret.Name, namespace))
				return true, nil
			}
		}
		By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for the secret to be updated, trying again in %s (err=%v)", secret.Name, namespace, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster", secret.Name, namespace)
}
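// equivalentSecret is likewise defined elsewhere. A minimal sketch, assuming
// two secret shards match when their type and payload match (illustrative
// only; the real helper may also compare annotations or other metadata):
func equivalentSecretSketch(a, b v1.Secret) bool {
	return a.Type == b.Type && reflect.DeepEqual(a.Data, b.Data)
}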
// NewKubernetesClient gives a KubernetesClient with the given dependencies.
func NewKubernetesClient(namespace, deployment, pod, container string, clientset *client.Clientset) KubernetesClient {
	result := &kubernetesClient{
		namespace:  namespace,
		deployment: deployment,
		pod:        pod,
		container:  container,
		clientset:  clientset,
		nodeStore:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	// Start propagating the contents of the nodeStore.
	nodeListWatch := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return clientset.Core().Nodes().List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return clientset.Core().Nodes().Watch(options)
		},
	}
	cache.NewReflector(nodeListWatch, &api.Node{}, result.nodeStore, 0).Run()
	return result
}
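// A usage sketch for NewKubernetesClient. The namespace and resource names
// below are placeholders, not values from this code base. Note that the
// reflector's Run() starts its watch loop in the background, so the node
// store is populated asynchronously and may initially be empty.
func exampleNewKubernetesClient(clientset *client.Clientset) KubernetesClient {
	return NewKubernetesClient("kube-system", "example-deployment", "example-pod", "example-container", clientset)
}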
/*
waitForServiceOrFail waits until a service is either present or absent in the
cluster specified by clientset. If the condition is not met within timeout, it
fails the calling test.
*/
func waitForServiceOrFail(clientset *release_1_3.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
	var clusterService *v1.Service
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		// Assign to the outer clusterService (":=" would shadow it and leave
		// the equivalence check below with a nil service).
		var err error
		clusterService, err = clientset.Services(namespace).Get(service.Name)
		if errors.IsNotFound(err) {
			// Get returns a non-nil empty object alongside a NotFound error,
			// so normalize to nil for the absence check below.
			clusterService = nil
		} else if err != nil {
			return false, err
		}
		if (clusterService != nil && present) || (clusterService == nil && !present) {
			By(fmt.Sprintf("Success: federated service shard of service %q in namespace %q in cluster: %v", service.Name, namespace, present))
			return true, nil
		}
		By(fmt.Sprintf("Service found: %v, waiting for service found: %v, trying again in %s", clusterService != nil, present, framework.Poll))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to get service %q in namespace %q", service.Name, namespace)
	if present && clusterService != nil {
		Expect(equivalent(*clusterService, *service))
	}
}
/*
waitForServiceOrFail waits until a service is either present or absent in the
cluster specified by clientset. If the condition is not met within timeout, it
fails the calling test.
*/
func waitForServiceOrFail(clientset *release_1_3.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
	var clusterService *v1.Service
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		// Assign to the outer clusterService (":=" would shadow it and leave
		// the equivalence check below with a nil service).
		var err error
		clusterService, err = clientset.Services(namespace).Get(service.Name)
		if !present && errors.IsNotFound(err) {
			// We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil {
			// We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is present", service.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("Service %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", service.Name, namespace, clusterService != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)
	if present && clusterService != nil {
		Expect(equivalent(*clusterService, *service))
	}
}
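// equivalent is called by both variants of waitForServiceOrFail above but is
// not shown in this section. A plausible sketch, assuming it compares specs
// after blanking the fields each cluster assigns itself; the Sketch suffix
// marks this as an illustration, and the real helper may normalize more fields.
func equivalentSketch(a, b v1.Service) bool {
	// ClusterIP and NodePorts are assigned per cluster. Work on copies of the
	// port slices so the callers' objects are not mutated.
	a.Spec.ClusterIP, b.Spec.ClusterIP = "", ""
	clearNodePorts := func(ports []v1.ServicePort) []v1.ServicePort {
		out := make([]v1.ServicePort, len(ports))
		copy(out, ports)
		for i := range out {
			out[i].NodePort = 0
		}
		return out
	}
	a.Spec.Ports = clearNodePorts(a.Spec.Ports)
	b.Spec.Ports = clearNodePorts(b.Spec.Ports)
	return reflect.DeepEqual(a.Spec, b.Spec)
}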
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service reaches the expected value, i.e. numSvcs, in the given individual Kubernetes
// cluster. If the shard count, i.e. numSvcs, is expected to be at least one, then
// it also checks that the first shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *v1.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list services: %+v", err)
	}
	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability.
		clSvc := clSvcList.Items[0]
		Expect(clSvc.Name).To(Equal(service.Name))
		// Some fields are expected to differ per cluster, so make them the same before checking equality.
		clSvc.Spec.ClusterIP = service.Spec.ClusterIP
		clSvc.Spec.ExternalIPs = service.Spec.ExternalIPs
		clSvc.Spec.DeprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
		clSvc.Spec.LoadBalancerIP = service.Spec.LoadBalancerIP
		clSvc.Spec.LoadBalancerSourceRanges = service.Spec.LoadBalancerSourceRanges
		// N.B. We cannot iterate over the port objects directly, as their values
		// would only be copied and our updates would be lost.
		for i := range clSvc.Spec.Ports {
			clSvc.Spec.Ports[i].NodePort = service.Spec.Ports[i].NodePort
		}
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service reaches the expected value, i.e. numSvcs, in the given individual Kubernetes
// cluster. If the shard count, i.e. numSvcs, is expected to be at least one, then
// it also checks that the first shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *v1.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list services: %+v", err)
	}
	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability.
		clSvc := clSvcList.Items[0]
		Expect(clSvc.Name).To(Equal(service.Name))
		// The federated service has no cluster IP, and these fields are
		// expected to differ per cluster, so make them the same before
		// checking spec equality.
		clSvc.Spec.ClusterIP = service.Spec.ClusterIP
		clSvc.Spec.ExternalIPs = service.Spec.ExternalIPs
		clSvc.Spec.DeprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
		clSvc.Spec.LoadBalancerIP = service.Spec.LoadBalancerIP
		clSvc.Spec.LoadBalancerSourceRanges = service.Spec.LoadBalancerSourceRanges
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
// FromUnversionedClient adapts an unversioned.Client to a release_1_3.Clientset.
// This function is temporary. We will remove it when everyone has moved to using
// Clientset. New code should NOT use this function.
func FromUnversionedClient(c *unversioned.Client) *release_1_3.Clientset {
	var clientset release_1_3.Clientset
	if c != nil {
		clientset.CoreClient = v1core.New(c.RESTClient)
	} else {
		clientset.CoreClient = v1core.New(nil)
	}
	if c != nil && c.ExtensionsClient != nil {
		clientset.ExtensionsClient = v1beta1extensions.New(c.ExtensionsClient.RESTClient)
	} else {
		clientset.ExtensionsClient = v1beta1extensions.New(nil)
	}
	if c != nil && c.DiscoveryClient != nil {
		clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient)
	} else {
		clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil)
	}
	return &clientset
}
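// A usage sketch for the adapter above: code that still holds an
// unversioned.Client can obtain a versioned view of the same connection
// without rebuilding its configuration. The function and variable names here
// are illustrative.
func exampleFromUnversionedClient(oldClient *unversioned.Client) {
	clientset := FromUnversionedClient(oldClient)
	// The versioned group interfaces are now available, e.g.:
	_, _ = clientset.Core().Services("default").List(api.ListOptions{})
}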
func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *v1.Service, client *release_1_3.Clientset) error {
	var err error
	var needUpdate bool
	for i := 0; i < clientRetryCount; i++ {
		// Assign to the outer err (":=" would shadow it and make the final
		// "return err" always return nil).
		var svc *v1.Service
		svc, err = client.Core().Services(service.Namespace).Get(service.Name)
		if err == nil {
			// The service already exists in this cluster.
			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
			// Preserve immutable fields.
			service.Spec.ClusterIP = svc.Spec.ClusterIP
			// Preserve auto-assigned fields.
			for i, oldPort := range svc.Spec.Ports {
				for _, port := range service.Spec.Ports {
					if port.NodePort == 0 {
						if !portEqualExcludeNodePort(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					} else {
						if !portEqualForLB(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					}
				}
			}
			if needUpdate {
				// We only apply the spec update.
				svc.Spec = service.Spec
				_, err = client.Core().Services(svc.Namespace).Update(svc)
				if err == nil {
					glog.V(5).Infof("Service %s/%s successfully updated in cluster %s", svc.Namespace, svc.Name, clusterName)
					return nil
				}
				glog.V(4).Infof("Failed to update %+v", err)
			} else {
				glog.V(5).Infof("Service %s/%s not updated in cluster %s as the specs are identical", svc.Namespace, svc.Name, clusterName)
				return nil
			}
		} else if errors.IsNotFound(err) {
			// Create the service, since it was not found.
			glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create a new one", service.Namespace, service.Name, clusterName)
			service.ResourceVersion = ""
			_, err = client.Core().Services(service.Namespace).Create(service)
			if err == nil {
				glog.V(5).Infof("Service %s/%s successfully created in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
			glog.V(4).Infof("Failed to create %+v", err)
			if errors.IsAlreadyExists(err) {
				glog.V(5).Infof("Service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v", service.Namespace, service.Name, err)
		}
		// TODO: should we reuse the same retry delay for all clusters?
		time.Sleep(cachedService.nextRetryDelay())
	}
	return err
}
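// portEqualExcludeNodePort and portEqualForLB are referenced above but defined
// elsewhere. A sketch of the intended distinction, with the Sketch suffix
// marking these as illustrations rather than the originals; the real helpers
// may compare additional fields.
func portEqualExcludeNodePortSketch(a, b *v1.ServicePort) bool {
	// NodePort is auto-assigned by each cluster, so it is ignored here.
	return a.Name == b.Name && a.Protocol == b.Protocol && a.Port == b.Port && a.TargetPort == b.TargetPort
}

func portEqualForLBSketch(a, b *v1.ServicePort) bool {
	// When the federated spec pins a NodePort (e.g. for a load balancer), it must match too.
	return portEqualExcludeNodePortSketch(a, b) && a.NodePort == b.NodePort
}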