Example #1
func waitForQuota(t *testing.T, quota *api.ResourceQuota, clientset *clientset.Clientset) {
	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(api.SingleObject(api.ObjectMeta{Name: quota.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *api.ResourceQuota:
			if len(cast.Status.Hard) > 0 {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
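A call site for this helper might look like the following sketch; the quota spec and namespace are illustrative assumptions (they use the api and resource packages already imported by the example), not part of the original test.
// Hypothetical call site for waitForQuota; the quota spec below is an
// assumption for illustration.
quota := &api.ResourceQuota{
	ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "quota-test"},
	Spec: api.ResourceQuotaSpec{
		Hard: api.ResourceList{
			api.ResourcePods: resource.MustParse("1000"),
		},
	},
}
waitForQuota(t, quota, clientset)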
Example #2
func GetNodes(kubeClient *kclientset.Clientset) ([]kapi.Node, error) {
	nodeList, err := kubeClient.Core().Nodes().List(kapi.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("Listing nodes in the cluster failed. Error: %s", err)
	}
	return nodeList.Items, nil
}
Example #3
func waitForLoadBalancerAddress(clientset *client.Clientset, svc *api.Service) ([]string, []string, error) {
	ips := []string{}
	hostnames := []string{}

	err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
		pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name)
		if err != nil {
			return false, nil
		}
		if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 {
			for _, ing := range ings {
				if len(ing.IP) > 0 {
					ips = append(ips, ing.IP)
				}
				if len(ing.Hostname) > 0 {
					hostnames = append(hostnames, ing.Hostname)
				}
			}
			if len(ips) > 0 || len(hostnames) > 0 {
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		return nil, nil, err
	}

	return ips, hostnames, nil
}
Example #4
// SupportEviction uses the Discovery API to find out if the server supports the eviction subresource.
// If it is supported, the policy group version is returned; otherwise, an empty string is returned.
func SupportEviction(clientset *internalclientset.Clientset) (string, error) {
	discoveryClient := clientset.Discovery()
	groupList, err := discoveryClient.ServerGroups()
	if err != nil {
		return "", err
	}
	foundPolicyGroup := false
	var policyGroupVersion string
	for _, group := range groupList.Groups {
		if group.Name == "policy" {
			foundPolicyGroup = true
			policyGroupVersion = group.PreferredVersion.GroupVersion
			break
		}
	}
	if !foundPolicyGroup {
		return "", nil
	}
	resourceList, err := discoveryClient.ServerResourcesForGroupVersion("v1")
	if err != nil {
		return "", err
	}
	for _, resource := range resourceList.APIResources {
		if resource.Name == EvictionSubresource && resource.Kind == EvictionKind {
			return policyGroupVersion, nil
		}
	}
	return "", nil
}
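A sketch of how the result might be consumed when draining a node; evictPod is a hypothetical helper, not part of the original code.
// deletePodOrEvict is a hypothetical caller: it prefers the eviction
// subresource when the server supports it and falls back to a plain delete.
func deletePodOrEvict(clientset *internalclientset.Clientset, namespace, name string) error {
	policyGroupVersion, err := SupportEviction(clientset)
	if err != nil {
		return err
	}
	if len(policyGroupVersion) == 0 {
		// No eviction support on this server; delete the pod directly.
		return clientset.Core().Pods(namespace).Delete(name, &api.DeleteOptions{})
	}
	// Eviction is supported: POST an Eviction against pods/eviction here.
	return evictPod(clientset, namespace, name, policyGroupVersion) // hypothetical helper
}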
Example #5
// Find the names of all zones and the region in which we have nodes in this cluster.
func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) {
	zoneNames := sets.NewString()
	nodes, err := client.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		glog.Errorf("Failed to list nodes while getting zone names: %v", err)
		return nil, "", err
	}
	for i, node := range nodes.Items {
		// TODO: quinton-hoole make this more efficient.
		//       For non-multi-zone clusters the zone will
		//       be identical for all nodes, so we only need to look at one node
		//       For multi-zone clusters we know at build time
		//       which zones are included.  Rather get this info from there, because it's cheaper.
		zoneName, err := getZoneNameForNode(node)
		if err != nil {
			return nil, "", err
		}
		zoneNames.Insert(zoneName)
		if i == 0 {
			region, err = getRegionNameForNode(node)
			if err != nil {
				return nil, "", err
			}
		}
	}
	return zoneNames.List(), region, nil
}
Example #6
// Update updates an existing node api object
// by looking up the given hostname.
// The updated node merges the given slave attribute labels
// and annotations with the found api object.
func Update(
	client *clientset.Clientset,
	hostname string,
	slaveAttrLabels,
	annotations map[string]string,
) (n *api.Node, err error) {
	for i := 0; i < clientRetryCount; i++ {
		n, err = client.Nodes().Get(hostname)
		if err != nil {
			return nil, fmt.Errorf("error getting node %q: %v", hostname, err)
		}
		if n == nil {
			return nil, fmt.Errorf("no node instance returned for %q", hostname)
		}

		// update labels derived from Mesos slave attributes, keep all other labels
		n.Labels = mergeMaps(
			filterMap(n.Labels, IsNotSlaveAttributeLabel),
			slaveAttrLabels,
		)
		n.Annotations = mergeMaps(n.Annotations, annotations)

		n, err = client.Nodes().Update(n)
		if err == nil {
			return n, nil
		}
		if !errors.IsConflict(err) {
			return nil, err
		}

		log.Infof("retry %d/%d: error updating node %v err %v", i, clientRetryCount, n, err)
		time.Sleep(time.Duration(i) * clientRetryInterval)
	}

	return nil, err
}
Example #7
func recreatePods(client *internalclientset.Clientset, namespace string, selector map[string]string) error {
	pods, err := client.Pods(namespace).List(api.ListOptions{
		FieldSelector: fields.Everything(),
		LabelSelector: labels.Set(selector).AsSelector(),
	})

	if err != nil {
		return err
	}

	// Restart pods
	for _, pod := range pods.Items {
		log.Printf("Restarting pod: %v/%v", pod.Namespace, pod.Name)

		// Delete each pod so it is recreated with the changed spec.
		err := client.Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{
			Preconditions: &api.Preconditions{
				UID: &pod.UID,
			},
		})

		if err != nil {
			return err
		}
	}

	return nil
}
Example #8
// WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists
// and that the cache for the admission control that checks for pod tokens has caught up to allow
// pod creation.
func WaitForPodCreationServiceAccounts(clientset *kclientset.Clientset, namespace string) error {
	if err := WaitForServiceAccounts(clientset, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		return err
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:  "container",
			Image: "openshift/origin-pod:latest",
		},
	}

	return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) {
		pod, err := clientset.Core().Pods(namespace).Create(testPod)
		if err != nil {
			glog.Warningf("Error attempting to create test pod: %v", err)
			return false, nil
		}
		err = clientset.Core().Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return false, err
		}
		return true, nil
	})
}
Example #9
func createService(clientset *client.Clientset, namespace, svcName string, dryRun bool) (*api.Service, error) {
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      svcName,
			Namespace: namespace,
			Labels:    componentLabel,
		},
		Spec: api.ServiceSpec{
			Type:     api.ServiceTypeLoadBalancer,
			Selector: apiserverSvcSelector,
			Ports: []api.ServicePort{
				{
					Name:       "https",
					Protocol:   "TCP",
					Port:       443,
					TargetPort: intstr.FromInt(443),
				},
			},
		},
	}

	if dryRun {
		return svc, nil
	}

	return clientset.Core().Services(namespace).Create(svc)
}
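The dryRun flag makes it possible to assemble the full set of objects without touching the cluster; a minimal sketch, assuming the dry-run-aware createNamespace variant shown in Example #22.
// deployAPIServerService is a hypothetical composition of the two dry-run
// aware helpers; in dry-run mode nothing is sent to the apiserver.
func deployAPIServerService(clientset *client.Clientset, namespace, svcName string, dryRun bool) (*api.Service, error) {
	if _, err := createNamespace(clientset, namespace, dryRun); err != nil {
		return nil, err
	}
	return createService(clientset, namespace, svcName, dryRun)
}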
Example #10
func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
	n, err := findMyself(client)
	if err != nil {
		return err
	}

	n.ObjectMeta.Labels["kubeadm.alpha.kubernetes.io/role"] = "master"

	if !schedulable {
		taintsAnnotation, _ := json.Marshal([]api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
		n.ObjectMeta.Annotations[api.TaintsAnnotationKey] = string(taintsAnnotation)
	}

	if _, err := client.Nodes().Update(n); err != nil {
		if apierrs.IsConflict(err) {
			fmt.Println("<master/apiclient> temporarily unable to update master node metadata due to conflict (will retry)")
			time.Sleep(apiCallRetryInterval)
			return attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
		} else {
			return err
		}
	}

	return nil
}
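The same retry can also be expressed iteratively with wait.PollInfinite, which re-reads the node on every conflict and avoids unbounded recursion; a sketch under the assumption that apiCallRetryInterval and findMyself come from the surrounding package.
// updateMasterRoleLabelsAndTaints is an iterative variant (sketch): it polls
// until the update succeeds, refetching the node after each conflict.
func updateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
	return wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		n, err := findMyself(client)
		if err != nil {
			return false, err
		}
		n.ObjectMeta.Labels["kubeadm.alpha.kubernetes.io/role"] = "master"
		if !schedulable {
			taintsAnnotation, _ := json.Marshal([]api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
			n.ObjectMeta.Annotations[api.TaintsAnnotationKey] = string(taintsAnnotation)
		}
		if _, err := client.Nodes().Update(n); err != nil {
			if apierrs.IsConflict(err) {
				// Stale object; poll again with a fresh copy.
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
}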
Example #11
func createPVC(clientset *client.Clientset, namespace, svcName string) (*api.PersistentVolumeClaim, error) {
	capacity, err := resource.ParseQuantity("10Gi")
	if err != nil {
		return nil, err
	}

	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%s-etcd-claim", svcName),
			Namespace: namespace,
			Labels:    componentLabel,
			Annotations: map[string]string{
				"volume.alpha.kubernetes.io/storage-class": "yes",
			},
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
			},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceStorage: capacity,
				},
			},
		},
	}

	return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc)
}
Example #12
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := 100
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: namespace,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(target),
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "container",
							Image: "busybox",
						},
					},
				},
			},
		},
	}

	w, err := clientset.Core().ReplicationControllers(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: rc.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *api.ReplicationController:
			fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
			if int(cast.Status.Replicas) == target {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		pods, _ := clientset.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()})
		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
	}
}
Example #13
// coreClientSetFromUnversioned adapts just enough of an unversioned.Client to work with the scale RC function.
func coreClientSetFromUnversioned(c *client.Client) internalclientset.Interface {
	var clientset internalclientset.Clientset
	if c != nil {
		clientset.CoreClient = unversionedcore.New(c.RESTClient)
	} else {
		clientset.CoreClient = unversionedcore.New(nil)
	}
	return &clientset
}
Example #14
func createNamespace(clientset *client.Clientset, namespace string) (*api.Namespace, error) {
	ns := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name: namespace,
		},
	}

	return clientset.Core().Namespaces().Create(ns)
}
Example #15
// NewServer creates a server from the provided config and client.
func NewServer(config *server.Config, client *kclientset.Clientset) *Server {
	stop := make(chan struct{})
	return &Server{
		Config:    config,
		Services:  NewCachedServiceAccessor(client.CoreClient.RESTClient(), stop),
		Endpoints: client.Core(),
		Stop:      stop,
	}
}
Example #16
func newProjectAuthorizationCache(authorizer authorizer.Authorizer, kubeClient *kclientset.Clientset, informerFactory shared.InformerFactory) *projectauth.AuthorizationCache {
	return projectauth.NewAuthorizationCache(
		projectauth.NewAuthorizerReviewer(authorizer),
		kubeClient.Core().Namespaces(),
		informerFactory.ClusterPolicies().Lister(),
		informerFactory.ClusterPolicyBindings().Lister(),
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
	)
}
Example #17
// FromUnversionedClient adapts an unversioned.Client to an internalclientset.Clientset.
// This function is temporary. We will remove it when everyone has moved to using
// Clientset. New code should NOT use this function.
func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset {
	var clientset internalclientset.Clientset
	if c != nil {
		clientset.CoreClient = unversionedcore.New(c.RESTClient)
	} else {
		clientset.CoreClient = unversionedcore.New(nil)
	}
	if c != nil && c.ExtensionsClient != nil {
		clientset.ExtensionsClient = unversionedextensions.New(c.ExtensionsClient.RESTClient)
	} else {
		clientset.ExtensionsClient = unversionedextensions.New(nil)
	}
	if c != nil && c.BatchClient != nil {
		clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient)
	} else {
		clientset.BatchClient = unversionedbatch.New(nil)
	}
	if c != nil && c.DiscoveryClient != nil {
		clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient)
	} else {
		clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil)
	}

	return &clientset
}
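A hypothetical call site showing the adapter in use; the namespace parameter is illustrative only.
// listPods adapts a legacy client and then uses the typed Core() group.
func listPods(c *unversioned.Client, namespace string) (*api.PodList, error) {
	clientset := FromUnversionedClient(c)
	return clientset.Core().Pods(namespace).List(api.ListOptions{})
}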
Example #18
// It's safe to do this for alpha, as we don't have HA and there is no way we can get
// more than one node here (TODO(phase1+) use os.Hostname)
func findMyself(client *clientset.Clientset) (*api.Node, error) {
	nodeList, err := client.Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("unable to list nodes [%v]", err)
	}
	if len(nodeList.Items) < 1 {
		return nil, fmt.Errorf("no nodes found")
	}
	node := &nodeList.Items[0]
	return node, nil
}
Example #19
func getServiceAccountPullSecret(client *kclientset.Clientset, ns, name string) (string, error) {
	secrets, err := client.Core().Secrets(ns).List(api.ListOptions{})
	if err != nil {
		return "", err
	}
	for _, secret := range secrets.Items {
		if secret.Type == api.SecretTypeDockercfg && secret.Annotations[api.ServiceAccountNameKey] == name {
			return string(secret.Data[api.DockerConfigKey]), nil
		}
	}
	return "", nil
}
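The returned string is the raw .dockercfg JSON; a sketch of decoding it into a generic map follows (the helper name is an assumption, and encoding/json is assumed imported).
// parsePullSecret is a hypothetical consumer that unmarshals the .dockercfg
// payload; each top-level key is a registry host.
func parsePullSecret(client *kclientset.Clientset, ns, name string) (map[string]interface{}, error) {
	raw, err := getServiceAccountPullSecret(client, ns, name)
	if err != nil || len(raw) == 0 {
		return nil, err
	}
	cfg := map[string]interface{}{}
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}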
Example #20
func getReferencedServiceAccountToken(c *clientset.Clientset, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.Core().ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Core().Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != api.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[api.ServiceAccountNameKey]
			uid := secret.Annotations[api.ServiceAccountUIDKey]
			tokenData := secret.Data[api.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
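One plausible use of the returned token is to build a bearer-token client config; a sketch, with the host value assumed to be supplied by the caller.
// clientConfigForServiceAccount is a hypothetical helper that waits for the
// token and wires it into a restclient.Config.
func clientConfigForServiceAccount(c *clientset.Clientset, host, ns, name string) (*restclient.Config, error) {
	_, token, err := getReferencedServiceAccountToken(c, ns, name, true)
	if err != nil {
		return nil, err
	}
	return &restclient.Config{Host: host, BearerToken: token}, nil
}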
Example #21
// getNode gets the node object from the apiserver.
func getNode(c *clientset.Clientset) (*api.Node, error) {
	nodes, err := c.Nodes().List(api.ListOptions{})
	Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
	if nodes == nil {
		return nil, fmt.Errorf("the node list is nil")
	}
	Expect(len(nodes.Items) > 1).NotTo(BeTrue(), "should not have more than one node")
	if len(nodes.Items) == 0 {
		return nil, fmt.Errorf("empty node list: %+v", nodes)
	}
	return &nodes.Items[0], nil
}
Example #22
func createNamespace(clientset *client.Clientset, namespace string, dryRun bool) (*api.Namespace, error) {
	ns := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name: namespace,
		},
	}

	if dryRun {
		return ns, nil
	}

	return clientset.Core().Namespaces().Create(ns)
}
Example #23
// getNodeIP is copied from the upstream proxy config to retrieve the IP of a node.
func getNodeIP(client *kclientset.Clientset, hostname string) net.IP {
	var nodeIP net.IP
	node, err := client.Core().Nodes().Get(hostname)
	if err != nil {
		glog.Warningf("Failed to retrieve node info: %v", err)
		return nil
	}
	nodeIP, err = utilnode.GetNodeHostIP(node)
	if err != nil {
		glog.Warningf("Failed to retrieve node IP: %v", err)
		return nil
	}
	return nodeIP
}
Example #24
// WaitForServiceAccounts ensures the service accounts needed by build pods exist in the namespace
// The extra controllers tend to starve the service account controller
func WaitForServiceAccounts(clientset *kclientset.Clientset, namespace string, accounts []string) error {
	serviceAccounts := clientset.Core().ServiceAccounts(namespace)
	return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) {
		for _, account := range accounts {
			sa, err := serviceAccounts.Get(account)
			if err != nil {
				// Service account not created yet; keep polling.
				return false, nil
			}
			if !serviceAccountSecretsExist(clientset, namespace, sa) {
				// Token secrets not populated yet; keep polling.
				return false, nil
			}
		}
		return true, nil
	})
}
Example #25
func (c *MasterConfig) RunServiceServingCertController(client *kclientset.Clientset) {
	if c.Options.ControllerConfig.ServiceServingCert.Signer == nil {
		return
	}
	ca, err := crypto.GetCA(c.Options.ControllerConfig.ServiceServingCert.Signer.CertFile, c.Options.ControllerConfig.ServiceServingCert.Signer.KeyFile, "")
	if err != nil {
		glog.Fatalf("service serving cert controller failed: %v", err)
	}

	servingCertController := servingcertcontroller.NewServiceServingCertController(client.Core(), client.Core(), ca, "cluster.local", 2*time.Minute)
	go servingCertController.Run(1, make(chan struct{}))

	servingCertUpdateController := servingcertcontroller.NewServiceServingCertUpdateController(client.Core(), client.Core(), ca, "cluster.local", 20*time.Minute)
	go servingCertUpdateController.Run(5, make(chan struct{}))
}
Example #26
func getAllServices(kubeClient *kclientset.Clientset) ([]kapi.Service, error) {
	filteredSrvs := []kapi.Service{}
	serviceList, err := kubeClient.Core().Services(kapi.NamespaceAll).List(kapi.ListOptions{})
	if err != nil {
		return filteredSrvs, err
	}

	for _, srv := range serviceList.Items {
		if len(srv.Spec.ClusterIP) == 0 || srv.Spec.ClusterIP == kapi.ClusterIPNone {
			continue
		}
		filteredSrvs = append(filteredSrvs, srv)
	}
	return filteredSrvs, nil
}
Example #27
// GetPodLogs returns logs for a particular pod and container. When container
// is empty, logs for the first container are returned.
func GetPodLogs(client *client.Clientset, namespace, podID string, container string,
	logSelector *logs.LogViewSelector) (*logs.Logs, error) {
	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return nil, err
	}

	if len(container) == 0 {
		container = pod.Spec.Containers[0].Name
	}

	logOptions := &api.PodLogOptions{
		Container:  container,
		Follow:     false,
		Previous:   false,
		Timestamps: true,
	}

	rawLogs, err := getRawPodLogs(client, namespace, podID, logOptions)
	if err != nil {
		return nil, err
	}

	return ConstructLogs(podID, rawLogs, container, logSelector), nil
}
Example #28
// GetStatefulSetDetail gets stateful set details.
func GetStatefulSetDetail(client *k8sClient.Clientset, heapsterClient client.HeapsterClient,
	namespace, name string) (*StatefulSetDetail, error) {

	log.Printf("Getting details of %s service in %s namespace", name, namespace)

	// TODO(floreks): Use channels.
	statefulSetData, err := client.Apps().StatefulSets(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	podList, err := GetStatefulSetPods(client, heapsterClient, dataselect.DefaultDataSelectWithMetrics, name, namespace)
	if err != nil {
		return nil, err
	}

	podInfo, err := getStatefulSetPodInfo(client, statefulSetData)
	if err != nil {
		return nil, err
	}

	events, err := GetStatefulSetEvents(client, dataselect.DefaultDataSelect, statefulSetData.Namespace, statefulSetData.Name)
	if err != nil {
		return nil, err
	}

	statefulSet := getStatefulSetDetail(statefulSetData, heapsterClient, *events, *podList, *podInfo)
	return &statefulSet, nil
}
Example #29
// CreateHeapsterRESTClient creates new Heapster REST client. When heapsterHost param is empty
// string the function assumes that it is running inside a Kubernetes cluster and connects via
// service proxy. heapsterHost param is in the format of protocol://address:port,
// e.g., http://localhost:8002.
func CreateHeapsterRESTClient(heapsterHost string, apiclient *client.Clientset) (
	HeapsterClient, error) {

	if heapsterHost == "" {
		log.Print("Creating in-cluster Heapster client")
		return InClusterHeapsterClient{client: apiclient.Core().RESTClient()}, nil
	}

	cfg := &restclient.Config{Host: heapsterHost, QPS: defaultQPS, Burst: defaultBurst}
	restClient, err := client.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	log.Printf("Creating remote Heapster client for %s", heapsterHost)
	return RemoteHeapsterClient{client: restClient.Core().RESTClient()}, nil
}
Example #30
func createAPIServerCredentialsSecret(clientset *client.Clientset, namespace, credentialsName string, entKeyPairs *entityKeyPairs) (*api.Secret, error) {
	// Build the secret object with API server credentials.
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      credentialsName,
			Namespace: namespace,
		},
		Data: map[string][]byte{
			"ca.crt":     certutil.EncodeCertPEM(entKeyPairs.ca.Cert),
			"server.crt": certutil.EncodeCertPEM(entKeyPairs.server.Cert),
			"server.key": certutil.EncodePrivateKeyPEM(entKeyPairs.server.Key),
		},
	}

	// Boilerplate to create the secret in the host cluster.
	return clientset.Core().Secrets(namespace).Create(secret)
}