Example #1
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service in the given individual Kubernetes cluster reaches the expected value,
// numSvcs. If numSvcs is expected to be at least one, it also checks that the first
// shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *api.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list registered clusters: %+v", err)
	}

	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability
		clSvc := clSvcList.Items[0]
		Expect(clSvc.Name).To(Equal(service.Name))
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
Example #2
// deleteClusterService deletes the cached service's last known state from the given
// cluster, retrying up to clientRetryCount times with a backoff between attempts.
// A NotFound error is treated as success.
func (s *ServiceController) deleteClusterService(clusterName string, cachedService *cachedService, clientset *release_1_3.Clientset) error {
	service := cachedService.lastState
	glog.V(4).Infof("Deleting service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		err = clientset.Core().Services(service.Namespace).Delete(service.Name, &api.DeleteOptions{})
		if err == nil || errors.IsNotFound(err) {
			glog.V(4).Infof("Service %s/%s deleted from cluster %s", service.Namespace, service.Name, clusterName)
			return nil
		}
		time.Sleep(cachedService.nextRetryDelay())
	}
	glog.V(4).Infof("Failed to delete service %s/%s from cluster %s, %+v", service.Namespace, service.Name, clusterName, err)
	return err
}
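The retry loop above sleeps for cachedService.nextRetryDelay() between attempts, but that helper is not included in the example. A minimal sketch of a capped exponential backoff of that shape might look like the following; the type, field, and constant names are illustrative assumptions, not the actual controller code.

// Hypothetical sketch only: a capped exponential backoff between retries.
// Each call doubles the previous delay, bounded below and above.
type retryState struct {
	lastRetryDelay time.Duration
}

func (s *retryState) nextRetryDelay() time.Duration {
	const (
		minRetryDelay = 5 * time.Second
		maxRetryDelay = 5 * time.Minute
	)
	s.lastRetryDelay *= 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}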
Example #3
// waitForSecretUpdateOrFail polls the given cluster until its shard of the federated
// secret matches the expected secret, failing the test if that does not happen within
// the given timeout.
func waitForSecretUpdateOrFail(clientset *release_1_3.Clientset, namespace string, secret *v1.Secret, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, namespace))
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterSecret, err := clientset.Core().Secrets(namespace).Get(secret.Name)
		if err == nil { // We want it present, and the Get succeeded, so we're all good.
			if equivalentSecret(*clusterSecret, *secret) {
				By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is updated", secret.Name, namespace))
				return true, nil
			}
			By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for service being updated, trying again in %s (err=%v)", secret.Name, namespace, framework.Poll, err))
			return false, nil
		}
		By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for being updated, trying again in %s (err=%v)", secret.Name, namespace, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster", secret.Name, namespace)
}
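The comparison above delegates to equivalentSecret, which is not shown in this example. A plausible sketch, assuming equivalence means matching name, type, and data while ignoring cluster-managed metadata such as UID and resource version, could be:

// Hypothetical sketch only; the real equivalentSecret helper is not shown in
// this example. Compares the fields a test typically cares about and ignores
// metadata that the cluster manages.
func equivalentSecret(a, b v1.Secret) bool {
	return a.Name == b.Name &&
		a.Type == b.Type &&
		reflect.DeepEqual(a.Data, b.Data)
}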
Example #4
// NewKubernetesClient returns a KubernetesClient with the given dependencies and starts
// a reflector that keeps its node store in sync with the cluster's nodes.
func NewKubernetesClient(namespace, deployment, pod, container string, clientset *client.Clientset) KubernetesClient {
	result := &kubernetesClient{
		namespace:  namespace,
		deployment: deployment,
		pod:        pod,
		container:  container,
		clientset:  clientset,
		nodeStore:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	// Start populating the nodeStore with the cluster's nodes.
	nodeListWatch := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return clientset.Core().Nodes().List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return clientset.Core().Nodes().Watch(options)
		},
	}
	cache.NewReflector(nodeListWatch, &api.Node{}, result.nodeStore, 0).Run()
	return result
}
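The reflector started above keeps result.nodeStore synchronized with the cluster's nodes. A hypothetical accessor (not part of the example) that reads the synchronized store might look like this:

// Hypothetical sketch only, not part of the example above: reads the nodes that
// the reflector keeps synchronized in the client's store.
func (k *kubernetesClient) nodesFromStore() []*api.Node {
	var nodes []*api.Node
	for _, obj := range k.nodeStore.List() {
		if node, ok := obj.(*api.Node); ok {
			nodes = append(nodes, node)
		}
	}
	return nodes
}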
Example #5
// waitForSecretOrFail polls the given cluster until its shard of the federated secret
// is present or absent (according to the present argument), failing the test if the
// expected state is not reached within the given timeout. If the shard is expected to
// be present, its contents are also compared against the given secret.
func waitForSecretOrFail(clientset *release_1_3.Clientset, namespace string, secret *v1.Secret, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, namespace))
	var clusterSecret *v1.Secret
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		// Assign to the outer clusterSecret (no ":=") so the check after the poll can use it.
		var err error
		clusterSecret, err = clientset.Core().Secrets(namespace).Get(secret.Name)
		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is present", secret.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("Secret %q in namespace %q in cluster.  Found: %v, waiting for Found: %v, trying again in %s (err=%v)", secret.Name, namespace, clusterSecret != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster: Present=%v", secret.Name, namespace, present)

	if present && clusterSecret != nil {
		Expect(util.SecretEquivalent(*clusterSecret, *secret)).To(BeTrue())
	}
}
Example #6
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service in the given individual Kubernetes cluster reaches the expected value,
// numSvcs. If numSvcs is expected to be at least one, it also checks that the first
// shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *v1.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list services in namespace %q: %+v", namespace, err)
	}

	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability
		clSvc := clSvcList.Items[0]

		Expect(clSvc.Name).To(Equal(service.Name))
		// Some fields are expected to be different, so make them the same before checking equality.
		clSvc.Spec.ClusterIP = service.Spec.ClusterIP
		clSvc.Spec.ExternalIPs = service.Spec.ExternalIPs
		clSvc.Spec.DeprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
		clSvc.Spec.LoadBalancerIP = service.Spec.LoadBalancerIP
		clSvc.Spec.LoadBalancerSourceRanges = service.Spec.LoadBalancerSourceRanges
		// N.B. We cannot iterate over the port objects directly, as their values
		// only get copied and our updates will get lost.
		for i := range clSvc.Spec.Ports {
			clSvc.Spec.Ports[i].NodePort = service.Spec.Ports[i].NodePort
		}
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service in the given individual Kubernetes cluster reaches the expected value,
// numSvcs. If numSvcs is expected to be at least one, it also checks that the first
// shard's name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *v1.Service, numSvcs int) {
	By("Fetching a federated service shard")
	var clSvcList *v1.ServiceList
	if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
		var err error
		clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		n := len(clSvcList.Items)
		if n == numSvcs {
			return true, nil
		}
		framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
		return false, nil
	}); err != nil {
		framework.Failf("Failed to list services in namespace %q: %+v", namespace, err)
	}

	if numSvcs > 0 && service != nil {
		// Renaming for clarity/readability
		clSvc := clSvcList.Items[0]

		Expect(clSvc.Name).To(Equal(service.Name))
		// Some fields are expected to be different, so make them the same before checking equality.
		clSvc.Spec.ClusterIP = service.Spec.ClusterIP
		clSvc.Spec.ExternalIPs = service.Spec.ExternalIPs
		clSvc.Spec.DeprecatedPublicIPs = service.Spec.DeprecatedPublicIPs
		clSvc.Spec.LoadBalancerIP = service.Spec.LoadBalancerIP
		clSvc.Spec.LoadBalancerSourceRanges = service.Spec.LoadBalancerSourceRanges
		Expect(clSvc.Spec).To(Equal(service.Spec))
	}
}
// ensureClusterService makes sure the given service exists in the given cluster with the
// desired spec, creating it if it is missing and updating it if it differs, retrying up
// to clientRetryCount times.
func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *v1.Service, client *release_1_3.Clientset) error {
	var err error
	var needUpdate bool
	for i := 0; i < clientRetryCount; i++ {
		// Assign to the outer err (no ":=") so the error from the final attempt is returned below.
		var svc *v1.Service
		svc, err = client.Core().Services(service.Namespace).Get(service.Name)
		if err == nil {
			// service exists
			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
			// Preserve immutable fields.
			service.Spec.ClusterIP = svc.Spec.ClusterIP

			// Preserve auto-assigned fields such as NodePort.
			for i, oldPort := range svc.Spec.Ports {
				for _, port := range service.Spec.Ports {
					if port.NodePort == 0 {
						if !portEqualExcludeNodePort(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					} else {
						if !portEqualForLB(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					}
				}
			}

			if needUpdate {
				// We only apply the spec update.
				svc.Spec = service.Spec
				_, err = client.Core().Services(svc.Namespace).Update(svc)
				if err == nil {
					glog.V(5).Infof("Service %s/%s successfully updated in cluster %s", svc.Namespace, svc.Name, clusterName)
					return nil
				} else {
					glog.V(4).Infof("Failed to update service: %+v", err)
				}
			} else {
				glog.V(5).Infof("Service %s/%s is not updated to cluster %s as the spec are identical", svc.Namespace, svc.Name, clusterName)
				return nil
			}
		} else if errors.IsNotFound(err) {
			// Create service if it is not found
			glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create new",
				service.Namespace, service.Name, clusterName)
			service.ResourceVersion = ""
			_, err = client.Core().Services(service.Namespace).Create(service)
			if err == nil {
				glog.V(5).Infof("Service %s/%s successfully created in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
			glog.V(4).Infof("Failed to create service: %+v", err)
			if errors.IsAlreadyExists(err) {
				glog.V(5).Infof("service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
		}
		// Should we reuse the same retry delay for all clusters?
		time.Sleep(cachedService.nextRetryDelay())
	}
	return err
}
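The port comparisons above rely on portEqualExcludeNodePort and portEqualForLB, which are not included in this example. A rough sketch of the first, assuming it compares all port fields except the cluster-assigned NodePort, might be:

// Hypothetical sketch only; the real helper is not shown in this example.
// Compares two service ports while ignoring NodePort, which the cluster assigns.
func portEqualExcludeNodePort(a, b *v1.ServicePort) bool {
	return a.Name == b.Name &&
		a.Protocol == b.Protocol &&
		a.Port == b.Port &&
		a.TargetPort == b.TargetPort
}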