func createPVC(clientset *client.Clientset, namespace, svcName string) (*api.PersistentVolumeClaim, error) { capacity, err := resource.ParseQuantity("10Gi") if err != nil { return nil, err } pvc := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("%s-etcd-claim", svcName), Namespace: namespace, Labels: componentLabel, Annotations: map[string]string{ "volume.alpha.kubernetes.io/storage-class": "yes", }, }, Spec: api.PersistentVolumeClaimSpec{ AccessModes: []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, }, Resources: api.ResourceRequirements{ Requests: api.ResourceList{ api.ResourceStorage: capacity, }, }, }, } return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc) }
func waitForLoadBalancerAddress(clientset *client.Clientset, svc *api.Service) ([]string, []string, error) { ips := []string{} hostnames := []string{} err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) { pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name) if err != nil { return false, nil } if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 { for _, ing := range ings { if len(ing.IP) > 0 { ips = append(ips, ing.IP) } if len(ing.Hostname) > 0 { hostnames = append(hostnames, ing.Hostname) } } if len(ips) > 0 || len(hostnames) > 0 { return true, nil } } return false, nil }) if err != nil { return nil, nil, err } return ips, hostnames, nil }
// WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists // and that the cache for the admission control that checks for pod tokens has caught up to allow // pod creation. func WaitForPodCreationServiceAccounts(clientset *kclientset.Clientset, namespace string) error { if err := WaitForServiceAccounts(clientset, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil { return err } testPod := &kapi.Pod{} testPod.GenerateName = "test" testPod.Spec.Containers = []kapi.Container{ { Name: "container", Image: "openshift/origin-pod:latest", }, } return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) { pod, err := clientset.Core().Pods(namespace).Create(testPod) if err != nil { glog.Warningf("Error attempting to create test pod: %v", err) return false, nil } err = clientset.Core().Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0)) if err != nil { return false, err } return true, nil }) }
func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.Clientset) { w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(api.SingleObject(api.ObjectMeta{Name: quota.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil { t.Fatalf("unexpected error: %v", err) } _, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: default: return false, nil } switch cast := event.Object.(type) { case *api.ResourceQuota: if len(cast.Status.Hard) > 0 { return true, nil } } return false, nil }) if err != nil { t.Fatalf("unexpected error: %v", err) } }
// Find the names of all zones and the region in which we have nodes in this cluster. func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) { zoneNames := sets.NewString() nodes, err := client.Core().Nodes().List(api.ListOptions{}) if err != nil { glog.Errorf("Failed to list nodes while getting zone names: %v", err) return nil, "", err } for i, node := range nodes.Items { // TODO: quinton-hoole make this more efficient. // For non-multi-zone clusters the zone will // be identical for all nodes, so we only need to look at one node // For multi-zone clusters we know at build time // which zones are included. Rather get this info from there, because it's cheaper. zoneName, err := getZoneNameForNode(node) if err != nil { return nil, "", err } zoneNames.Insert(zoneName) if i == 0 { region, err = getRegionNameForNode(node) if err != nil { return nil, "", err } } } return zoneNames.List(), region, nil }
func createService(clientset *client.Clientset, namespace, svcName string, dryRun bool) (*api.Service, error) { svc := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: svcName, Namespace: namespace, Labels: componentLabel, }, Spec: api.ServiceSpec{ Type: api.ServiceTypeLoadBalancer, Selector: apiserverSvcSelector, Ports: []api.ServicePort{ { Name: "https", Protocol: "TCP", Port: 443, TargetPort: intstr.FromInt(443), }, }, }, } if dryRun { return svc, nil } return clientset.Core().Services(namespace).Create(svc) }
func GetNodes(kubeClient *kclientset.Clientset) ([]kapi.Node, error) { nodeList, err := kubeClient.Core().Nodes().List(kapi.ListOptions{}) if err != nil { return nil, fmt.Errorf("Listing nodes in the cluster failed. Error: %s", err) } return nodeList.Items, nil }
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) { target := 100 rc := &api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: namespace, }, Spec: api.ReplicationControllerSpec{ Replicas: int32(target), Selector: map[string]string{"foo": "bar"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ Labels: map[string]string{ "foo": "bar", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "container", Image: "busybox", }, }, }, }, }, } w, err := clientset.Core().ReplicationControllers(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: rc.Name})) if err != nil { t.Fatalf("unexpected error: %v", err) } if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil { t.Fatalf("unexpected error: %v", err) } _, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) { switch event.Type { case watch.Modified: default: return false, nil } switch cast := event.Object.(type) { case *api.ReplicationController: fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target) if int(cast.Status.Replicas) == target { return true, nil } } return false, nil }) if err != nil { pods, _ := clientset.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}) t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items)) } }
func createNamespace(clientset *client.Clientset, namespace string) (*api.Namespace, error) { ns := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: namespace, }, } return clientset.Core().Namespaces().Create(ns) }
// NewServer creates a server from the provided config and client. func NewServer(config *server.Config, client *kclientset.Clientset) *Server { stop := make(chan struct{}) return &Server{ Config: config, Services: NewCachedServiceAccessor(client.CoreClient.RESTClient(), stop), Endpoints: client.Core(), Stop: stop, } }
// newProjectAuthorizationCache wires an AuthorizationCache to the reviewer,
// the namespace client, and the cluster/local policy and binding listers.
func newProjectAuthorizationCache(authorizer authorizer.Authorizer, kubeClient *kclientset.Clientset, informerFactory shared.InformerFactory) *projectauth.AuthorizationCache {
	reviewer := projectauth.NewAuthorizerReviewer(authorizer)
	return projectauth.NewAuthorizationCache(
		reviewer,
		kubeClient.Core().Namespaces(),
		informerFactory.ClusterPolicies().Lister(),
		informerFactory.ClusterPolicyBindings().Lister(),
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
	)
}
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) { tokenName := "" token := "" findToken := func() (bool, error) { user, err := c.Core().ServiceAccounts(ns).Get(name) if errors.IsNotFound(err) { return false, nil } if err != nil { return false, err } for _, ref := range user.Secrets { secret, err := c.Core().Secrets(ns).Get(ref.Name) if errors.IsNotFound(err) { continue } if err != nil { return false, err } if secret.Type != api.SecretTypeServiceAccountToken { continue } name := secret.Annotations[api.ServiceAccountNameKey] uid := secret.Annotations[api.ServiceAccountUIDKey] tokenData := secret.Data[api.ServiceAccountTokenKey] if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 { tokenName = secret.Name token = string(tokenData) return true, nil } } return false, nil } if shouldWait { err := wait.Poll(time.Second, 10*time.Second, findToken) if err != nil { return "", "", err } } else { ok, err := findToken() if err != nil { return "", "", err } if !ok { return "", "", fmt.Errorf("No token found for %s/%s", ns, name) } } return tokenName, token, nil }
func getServiceAccountPullSecret(client *kclientset.Clientset, ns, name string) (string, error) { secrets, err := client.Core().Secrets(ns).List(api.ListOptions{}) if err != nil { return "", err } for _, secret := range secrets.Items { if secret.Type == api.SecretTypeDockercfg && secret.Annotations[api.ServiceAccountNameKey] == name { return string(secret.Data[api.DockerConfigKey]), nil } } return "", nil }
func createNamespace(clientset *client.Clientset, namespace string, dryRun bool) (*api.Namespace, error) { ns := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: namespace, }, } if dryRun { return ns, nil } return clientset.Core().Namespaces().Create(ns) }
// WaitForServiceAccounts ensures the service accounts needed by build pods exist in the namespace // The extra controllers tend to starve the service account controller func WaitForServiceAccounts(clientset *kclientset.Clientset, namespace string, accounts []string) error { serviceAccounts := clientset.Core().ServiceAccounts(namespace) return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) { for _, account := range accounts { if sa, err := serviceAccounts.Get(account); err != nil { if !serviceAccountSecretsExist(clientset, namespace, sa) { continue } return false, nil } } return true, nil }) }
// getNodeIP is copied from the upstream proxy config to retrieve the IP of a node. func getNodeIP(client *kclientset.Clientset, hostname string) net.IP { var nodeIP net.IP node, err := client.Core().Nodes().Get(hostname) if err != nil { glog.Warningf("Failed to retrieve node info: %v", err) return nil } nodeIP, err = utilnode.GetNodeHostIP(node) if err != nil { glog.Warningf("Failed to retrieve node IP: %v", err) return nil } return nodeIP }
func (c *MasterConfig) RunServiceServingCertController(client *kclientset.Clientset) { if c.Options.ControllerConfig.ServiceServingCert.Signer == nil { return } ca, err := crypto.GetCA(c.Options.ControllerConfig.ServiceServingCert.Signer.CertFile, c.Options.ControllerConfig.ServiceServingCert.Signer.KeyFile, "") if err != nil { glog.Fatalf("service serving cert controller failed: %v", err) } servingCertController := servingcertcontroller.NewServiceServingCertController(client.Core(), client.Core(), ca, "cluster.local", 2*time.Minute) go servingCertController.Run(1, make(chan struct{})) servingCertUpdateController := servingcertcontroller.NewServiceServingCertUpdateController(client.Core(), client.Core(), ca, "cluster.local", 20*time.Minute) go servingCertUpdateController.Run(5, make(chan struct{})) }
func getAllServices(kubeClient *kclientset.Clientset) ([]kapi.Service, error) { filtered_srvs := []kapi.Service{} serviceList, err := kubeClient.Core().Services(kapi.NamespaceAll).List(kapi.ListOptions{}) if err != nil { return filtered_srvs, err } for _, srv := range serviceList.Items { if len(srv.Spec.ClusterIP) == 0 || srv.Spec.ClusterIP == kapi.ClusterIPNone { continue } filtered_srvs = append(filtered_srvs, srv) } return filtered_srvs, nil }
// CreateHeapsterRESTClient creates new Heapster REST client. When heapsterHost param is empty // string the function assumes that it is running inside a Kubernetes cluster and connects via // service proxy. heapsterHost param is in the format of protocol://address:port, // e.g., http://localhost:8002. func CreateHeapsterRESTClient(heapsterHost string, apiclient *client.Clientset) ( HeapsterClient, error) { if heapsterHost == "" { log.Print("Creating in-cluster Heapster client") return InClusterHeapsterClient{client: apiclient.Core().RESTClient()}, nil } cfg := &restclient.Config{Host: heapsterHost, QPS: defaultQPS, Burst: defaultBurst} restClient, err := client.NewForConfig(cfg) if err != nil { return nil, err } log.Printf("Creating remote Heapster client for %s", heapsterHost) return RemoteHeapsterClient{client: restClient.Core().RESTClient()}, nil }
func createAPIServerCredentialsSecret(clientset *client.Clientset, namespace, credentialsName string, entKeyPairs *entityKeyPairs) (*api.Secret, error) { // Build the secret object with API server credentials. secret := &api.Secret{ ObjectMeta: api.ObjectMeta{ Name: credentialsName, Namespace: namespace, }, Data: map[string][]byte{ "ca.crt": certutil.EncodeCertPEM(entKeyPairs.ca.Cert), "server.crt": certutil.EncodeCertPEM(entKeyPairs.server.Cert), "server.key": certutil.EncodePrivateKeyPEM(entKeyPairs.server.Key), }, } // Boilerplate to create the secret in the host cluster. return clientset.Core().Secrets(namespace).Create(secret) }
func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) { testSecret := &api.Secret{ ObjectMeta: api.ObjectMeta{Name: "testSecret"}, Data: map[string][]byte{"test": []byte("data")}, } readOps := []testOperation{ func() error { _, err := c.Core().Secrets(ns).List(api.ListOptions{}) return err }, func() error { _, err := c.Core().Pods(ns).List(api.ListOptions{}) return err }, } writeOps := []testOperation{ func() error { _, err := c.Core().Secrets(ns).Create(testSecret); return err }, func() error { return c.Core().Secrets(ns).Delete(testSecret.Name, nil) }, } for _, op := range readOps { err := op() unauthorizedError := errors.IsUnauthorized(err) forbiddenError := errors.IsForbidden(err) switch { case !authenticated && !unauthorizedError: t.Fatalf("expected unauthorized error, got %v", err) case authenticated && unauthorizedError: t.Fatalf("unexpected unauthorized error: %v", err) case authenticated && canRead && forbiddenError: t.Fatalf("unexpected forbidden error: %v", err) case authenticated && !canRead && !forbiddenError: t.Fatalf("expected forbidden error, got: %v", err) } } for _, op := range writeOps { err := op() unauthorizedError := errors.IsUnauthorized(err) forbiddenError := errors.IsForbidden(err) switch { case !authenticated && !unauthorizedError: t.Fatalf("expected unauthorized error, got %v", err) case authenticated && unauthorizedError: t.Fatalf("unexpected unauthorized error: %v", err) case authenticated && canWrite && forbiddenError: t.Fatalf("unexpected forbidden error: %v", err) case authenticated && !canWrite && !forbiddenError: t.Fatalf("expected forbidden error, got: %v", err) } } }
func RunBuildDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClientset *kclientset.Clientset) { buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{}) if err != nil { t.Fatalf("Couldn't subscribe to Builds %v", err) } defer buildWatch.Stop() created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild()) if err != nil { t.Fatalf("Couldn't create Build: %v", err) } podWatch, err := clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion}) if err != nil { t.Fatalf("Couldn't subscribe to Pods %v", err) } defer podWatch.Stop() // wait for initial build event from the creation of the imagerepo with tag latest event := waitForWatch(t, "initial build added", buildWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild := event.Object.(*buildapi.Build) // initial pod creation for build event = waitForWatch(t, "build pod created", podWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name) event = waitForWatchType(t, "pod deleted due to build deleted", podWatch, watchapi.Deleted) if e, a := watchapi.Deleted, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } pod := event.Object.(*kapi.Pod) if expected := buildapi.GetBuildPodName(newBuild); pod.Name != expected { t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name) } }
func getServiceAccount(c *clientset.Clientset, ns string, name string, shouldWait bool) (*api.ServiceAccount, error) { if !shouldWait { return c.Core().ServiceAccounts(ns).Get(name) } var user *api.ServiceAccount var err error err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { user, err = c.Core().ServiceAccounts(ns).Get(name) if errors.IsNotFound(err) { return false, nil } if err != nil { return false, err } return true, nil }) return user, err }
//checkKibanaSecret confirms the secret used by kibana matches that configured in the oauth client func checkKibanaSecret(r types.DiagnosticResult, osClient *client.Client, kClient *kclientset.Clientset, project string, oauthclient *oauthapi.OAuthClient) { r.Debug("AGL0100", "Checking oauthclient secrets...") secret, err := kClient.Core().Secrets(project).Get(kibanaProxySecretName) if err != nil { r.Error("AGL0105", err, fmt.Sprintf("Error retrieving the secret '%s': %s", kibanaProxySecretName, err)) return } decoded, err := decodeSecret(secret, oauthSecretKeyName) if err != nil { r.Error("AGL0110", err, fmt.Sprintf("Unable to decode Kibana Secret: %s", err)) return } if decoded != oauthclient.Secret { r.Debug("AGL0120", fmt.Sprintf("OauthClient Secret: '%s'", oauthclient.Secret)) r.Debug("AGL0125", fmt.Sprintf("Decoded Kibana Secret: '%s'", decoded)) message := fmt.Sprintf("The %s OauthClient.Secret does not match the decoded oauth secret in '%s'", kibanaProxyOauthClientName, kibanaProxySecretName) r.Error("AGL0130", errors.New(message), message) } }
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase api.PersistentVolumeClaimPhase) { // Check if the claim is already in requested phase claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName) if err == nil && claim.Status.Phase == phase { return } // Wait for the phase for { event := <-w.ResultChan() claim, ok := event.Object.(*api.PersistentVolumeClaim) if !ok { continue } if claim.Status.Phase == phase && claim.Name == claimName { glog.V(2).Infof("claim %q is %s", claim.Name, phase) break } } }
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase api.PersistentVolumePhase) { // Check if the volume is already in requested phase volume, err := client.Core().PersistentVolumes().Get(pvName) if err == nil && volume.Status.Phase == phase { return } // Wait for the phase for { event := <-w.ResultChan() volume, ok := event.Object.(*api.PersistentVolume) if !ok { continue } if volume.Status.Phase == phase && volume.Name == pvName { glog.V(2).Infof("volume %q is %s", volume.Name, phase) break } } }
// serviceAccountSecretsExist checks whether the given service account has at least a token and a dockercfg // secret associated with it. func serviceAccountSecretsExist(clientset *kclientset.Clientset, namespace string, sa *kapi.ServiceAccount) bool { foundTokenSecret := false foundDockercfgSecret := false for _, secret := range sa.Secrets { ns := namespace if len(secret.Namespace) > 0 { ns = secret.Namespace } secret, err := clientset.Core().Secrets(ns).Get(secret.Name) if err == nil { switch secret.Type { case kapi.SecretTypeServiceAccountToken: foundTokenSecret = true case kapi.SecretTypeDockercfg: foundDockercfgSecret = true } } } return foundTokenSecret && foundDockercfgSecret }
func DeleteAndWaitForNamespaceTermination(c *kclientset.Clientset, name string) error { w, err := c.Core().Namespaces().Watch(kapi.ListOptions{}) if err != nil { return err } if err := c.Core().Namespaces().Delete(name, nil); err != nil { return err } _, err = watch.Until(30*time.Second, w, func(event watch.Event) (bool, error) { if event.Type != watch.Deleted { return false, nil } namespace, ok := event.Object.(*kapi.Namespace) if !ok { return false, nil } return namespace.Name == name, nil }) return err }
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
	// Register client-side rate-limiter metrics when a limiter is configured.
	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
	}
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}
	// Services get a private informer: every add/update/delete enqueues the
	// service for an endpoints sync. The closures capture e, so e.client must
	// already be set above.
	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().Services(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().Services(api.NamespaceAll).Watch(options)
			},
		},
		&api.Service{},
		// TODO: Can we have much longer period here?
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)
	// Pods are observed through the shared informer passed in by the caller
	// rather than a private watch.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    e.addPod,
		UpdateFunc: e.updatePod,
		DeleteFunc: e.deletePod,
	})
	// Reuse the shared informer's indexer, controller, and sync predicate.
	e.podStore.Indexer = podInformer.GetIndexer()
	e.podController = podInformer.GetController()
	e.podStoreSynced = podInformer.HasSynced
	return e
}
func getSDNRunningPods(kubeClient *kclientset.Clientset) ([]kapi.Pod, error) { podList, err := kubeClient.Core().Pods(kapi.NamespaceAll).List(kapi.ListOptions{}) if err != nil { return nil, err } filtered_pods := []kapi.Pod{} for _, pod := range podList.Items { // Skip pods that are not running if pod.Status.Phase != kapi.PodRunning { continue } // Skip pods with hostNetwork enabled if pod.Spec.SecurityContext.HostNetwork { continue } filtered_pods = append(filtered_pods, pod) } return filtered_pods, nil }