// waitForQuota creates the given quota and blocks until the quota controller
// populates its status (a non-empty Status.Hard), or the one-minute watch times out.
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}
		switch cast := event.Object.(type) {
		case *v1.ResourceQuota:
			if len(cast.Status.Hard) > 0 {
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
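// Illustrative usage sketch, not part of the original source: a test that
// builds a quota and hands it to waitForQuota, which creates it and blocks
// until the quota controller fills in Status.Hard. The namespace, limits, and
// resource.MustParse (from pkg/api/resource) are assumptions for the example.
func exampleQuotaTest(t *testing.T, clientset *clientset.Clientset) {
	quota := &v1.ResourceQuota{
		ObjectMeta: v1.ObjectMeta{
			Name:      "quota",
			Namespace: "quotaed",
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse("1000"),
			},
		},
	}
	waitForQuota(t, quota, clientset)
}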
// createConfig builds a scheduler.Config either from the policy config file
// (when one is provided and readable) or from the named algorithm provider.
func createConfig(s *options.SchedulerServer, kubecli *clientset.Clientset) (*scheduler.Config, error) {
	configFactory := factory.NewConfigFactory(kubecli, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
	if _, err := os.Stat(s.PolicyConfigFile); err == nil {
		var (
			policy     schedulerapi.Policy
			configData []byte
		)
		configData, err := ioutil.ReadFile(s.PolicyConfigFile)
		if err != nil {
			return nil, fmt.Errorf("unable to read policy config: %v", err)
		}
		if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
			return nil, fmt.Errorf("invalid configuration: %v", err)
		}
		return configFactory.CreateFromConfig(policy)
	}

	// if the config file isn't provided, use the specified (or default) provider
	config, err := configFactory.CreateFromProvider(s.AlgorithmProvider)
	if err != nil {
		return nil, err
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubecli.Core().Events("")})
	return config, nil
}
// scale creates a replication controller with 100 replicas and waits up to
// three minutes for all of them to be observed in the controller's status.
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := int32(100)
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo",
			Namespace: namespace,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &target,
			Selector: map[string]string{"foo": "bar"},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "container",
							Image: "busybox",
						},
					},
				},
			},
		},
	}

	w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}
		switch cast := event.Object.(type) {
		case *v1.ReplicationController:
			fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
			if cast.Status.Replicas == target {
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		// Guard the List result: dereferencing pods.Items on a failed List would panic.
		pods, listErr := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
		if listErr != nil {
			t.Fatalf("unexpected error: %v, failed to list pods: %v", err, listErr)
		}
		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
	}
}
// getReferencedServiceAccountToken returns the name and contents of a service
// account token secret referenced by the named service account. With
// shouldWait it polls for up to ten seconds for the token controller to
// populate the token; otherwise a single lookup is attempted.
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.Core().ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Core().Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != v1.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[v1.ServiceAccountNameKey]
			uid := secret.Annotations[v1.ServiceAccountUIDKey]
			tokenData := secret.Data[v1.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("no token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
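// Illustrative usage sketch, not part of the original source: fetch the token
// bound to the "default" service account, waiting for the token controller to
// mint it, and return the raw token for use as a bearer credential.
func exampleDefaultToken(c *clientset.Clientset, ns string) (string, error) {
	_, token, err := getReferencedServiceAccountToken(c, ns, "default", true)
	if err != nil {
		return "", err
	}
	return token, nil
}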
func (s *ServiceController) deleteClusterService(clusterName string, cachedService *cachedService, clientset *kubeclientset.Clientset) error {
	service := cachedService.lastState
	glog.V(4).Infof("Deleting service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		err = clientset.Core().Services(service.Namespace).Delete(service.Name, &v1.DeleteOptions{})
		if err == nil || errors.IsNotFound(err) {
			glog.V(4).Infof("Service %s/%s deleted from cluster %s", service.Namespace, service.Name, clusterName)
			delete(cachedService.endpointMap, clusterName)
			return nil
		}
		time.Sleep(cachedService.nextRetryDelay())
	}
	glog.V(4).Infof("Failed to delete service %s/%s from cluster %s, %+v", service.Namespace, service.Name, clusterName, err)
	return err
}
// doServiceAccountAPIRequests exercises read and write API calls as the
// current client identity and asserts the expected authentication and
// authorization outcomes.
func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &v1.Secret{
		ObjectMeta: v1.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error {
			_, err := c.Core().Secrets(ns).List(v1.ListOptions{})
			return err
		},
		func() error {
			_, err := c.Core().Pods(ns).List(v1.ListOptions{})
			return err
		},
	}
	writeOps := []testOperation{
		func() error {
			_, err := c.Core().Secrets(ns).Create(testSecret)
			return err
		},
		func() error {
			return c.Core().Secrets(ns).Delete(testSecret.Name, nil)
		},
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
// getServiceAccount fetches the named service account, optionally polling for
// up to ten seconds until it exists.
func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*v1.ServiceAccount, error) {
	if !shouldWait {
		return c.Core().ServiceAccounts(ns).Get(name)
	}

	var user *v1.ServiceAccount
	var err error
	err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
		user, err = c.Core().ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return true, nil
	})
	return user, err
}
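// Illustrative usage sketch, not part of the original source: block until the
// "default" service account exists in a freshly created namespace; pods cannot
// be admitted before the service account controller creates it.
func exampleAwaitDefaultServiceAccount(c *clientset.Clientset, ns string) error {
	_, err := getServiceAccount(c, ns, "default", true)
	return err
}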
// waitForSecretUpdateOrFail polls until the cluster's shard of the federated
// secret matches the desired secret, failing the test on timeout.
func waitForSecretUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, secret *v1.Secret, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, namespace))
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterSecret, err := clientset.Core().Secrets(namespace).Get(secret.Name)
		if err == nil {
			// The Get succeeded; check that the shard matches the desired state.
			if util.SecretEquivalent(*clusterSecret, *secret) {
				By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is updated", secret.Name, namespace))
				return true, nil
			}
			By(fmt.Sprintf("Expected equal secrets. expected: %+v\nactual: %+v", *secret, *clusterSecret))
			By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for secret to be updated, trying again in %s (err=%v)", secret.Name, namespace, framework.Poll, err))
			return false, nil
		}
		By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for it to be updated, trying again in %s (err=%v)", secret.Name, namespace, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster", secret.Name, namespace)
}
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
	// Check if the claim is already in requested phase
	claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
	if err == nil && claim.Status.Phase == phase {
		return
	}

	// Wait for the phase
	for {
		event := <-w.ResultChan()
		claim, ok := event.Object.(*v1.PersistentVolumeClaim)
		if !ok {
			continue
		}
		if claim.Status.Phase == phase && claim.Name == claimName {
			glog.V(2).Infof("claim %q is %s", claim.Name, phase)
			break
		}
	}
}
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
	// Check if the volume is already in requested phase
	volume, err := client.Core().PersistentVolumes().Get(pvName)
	if err == nil && volume.Status.Phase == phase {
		return
	}

	// Wait for the phase
	for {
		event := <-w.ResultChan()
		volume, ok := event.Object.(*v1.PersistentVolume)
		if !ok {
			continue
		}
		if volume.Status.Phase == phase && volume.Name == pvName {
			glog.V(2).Infof("volume %q is %s", volume.Name, phase)
			break
		}
	}
}
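// Illustrative usage sketch, not part of the original source: open a watch on
// PersistentVolumes before creating the PV so no phase transition is missed,
// then block until the volume reports Bound. The same pattern applies to
// waitForPersistentVolumeClaimPhase with a claim watch.
func exampleAwaitBoundVolume(client *clientset.Clientset, pv *v1.PersistentVolume) error {
	w, err := client.Core().PersistentVolumes().Watch(v1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	if _, err := client.Core().PersistentVolumes().Create(pv); err != nil {
		return err
	}
	waitForPersistentVolumePhase(client, pv.Name, w, v1.VolumeBound)
	return nil
}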
// waitForSecretOrFail polls until the cluster's shard of the federated secret
// is present (or absent, per the present argument), failing the test on timeout.
func waitForSecretOrFail(clientset *kubeclientset.Clientset, namespace string, secret *v1.Secret, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, namespace))
	var clusterSecret *v1.Secret
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		var err error
		// Assign to the outer clusterSecret; a := here would shadow it and
		// leave the equivalence check below comparing against nil.
		clusterSecret, err = clientset.Core().Secrets(namespace).Get(secret.Name)
		if !present && errors.IsNotFound(err) {
			// We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil {
			// We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is present", secret.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("Secret %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", secret.Name, namespace, clusterSecret != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster: Present=%v", secret.Name, namespace, present)
	if present && clusterSecret != nil {
		// Without .To(BeTrue()) the Gomega assertion is a no-op.
		Expect(util.SecretEquivalent(*clusterSecret, *secret)).To(BeTrue())
	}
}
// ensureClusterService creates or updates the given service in the target
// cluster, retrying up to clientRetryCount times.
func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *v1.Service, client *kubeclientset.Clientset) error {
	var err error
	var needUpdate bool
	for i := 0; i < clientRetryCount; i++ {
		var svc *v1.Service
		// Assign to the outer err; a := here would shadow it and make the
		// function always return nil once the retries are exhausted.
		svc, err = client.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
		if err == nil {
			// service exists
			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
			// preserve immutable fields
			service.Spec.ClusterIP = svc.Spec.ClusterIP

			// preserve auto-assigned fields
			for i, oldPort := range svc.Spec.Ports {
				for _, port := range service.Spec.Ports {
					if port.NodePort == 0 {
						if !portEqualExcludeNodePort(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					} else {
						if !portEqualForLB(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					}
				}
			}

			if needUpdate {
				// we only apply spec update
				svc.Spec = service.Spec
				_, err = client.Core().Services(svc.Namespace).Update(svc)
				if err == nil {
					glog.V(5).Infof("Service %s/%s successfully updated to cluster %s", svc.Namespace, svc.Name, clusterName)
					return nil
				}
				glog.V(4).Infof("Failed to update %+v", err)
			} else {
				glog.V(5).Infof("Service %s/%s is not updated to cluster %s as the spec is identical", svc.Namespace, svc.Name, clusterName)
				return nil
			}
		} else if errors.IsNotFound(err) {
			// Create service if it is not found
			glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create new", service.Namespace, service.Name, clusterName)
			service.ResourceVersion = ""
			_, err = client.Core().Services(service.Namespace).Create(service)
			if err == nil {
				glog.V(5).Infof("Service %s/%s successfully created to cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
			glog.V(4).Infof("Failed to create %+v", err)
			if errors.IsAlreadyExists(err) {
				glog.V(5).Infof("service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v", service.Namespace, service.Name, err)
		}
		// should we reuse the same retry delay for all clusters?
		time.Sleep(cachedService.nextRetryDelay())
	}
	return err
}
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) {
	lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", v1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
	newSourceApiserverFromLW(lw, updates)
}
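// Illustrative usage sketch, not part of the original source: wire the
// apiserver pod source for one node into an updates channel; the consumer of
// the channel (e.g. the kubelet's config aggregator) is elided here.
func exampleApiserverSource(c *clientset.Clientset, nodeName types.NodeName) <-chan interface{} {
	updates := make(chan interface{})
	NewSourceApiserver(c, nodeName, updates)
	return updates
}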