Example #1
func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {

	addrlist := make([]v1.EndpointAddress, len(hostips))
	for i, v := range hostips {
		addrlist[i].IP = v
	}
	endpoint = &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      epServiceName,
			Labels: map[string]string{
				"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
			},
		},
		Subsets: []v1.EndpointSubset{{
			Addresses: addrlist,
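			// A dummy port is required: endpoint ports must be > 0 to pass
			// API validation, though the GlusterFS mount never uses it.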
			Ports:     []v1.EndpointPort{{Port: 1, Protocol: "TCP"}},
		}},
	}
	kubeClient := p.plugin.host.GetKubeClient()
	if kubeClient == nil {
		return nil, nil, fmt.Errorf("glusterfs: failed to get kube client when creating endpoint service")
	}
	_, err = kubeClient.Core().Endpoints(namespace).Create(endpoint)
	if err != nil && errors.IsAlreadyExists(err) {
		glog.V(1).Infof("glusterfs: endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
		err = nil
	}
	if err != nil {
		glog.Errorf("glusterfs: failed to create endpoint: %v", err)
		return nil, nil, fmt.Errorf("error creating endpoint: %v", err)
	}
	service = &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      epServiceName,
			Namespace: namespace,
			Labels: map[string]string{
				"gluster.kubernetes.io/provisioned-for-pvc": pvcname,
			},
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{Protocol: "TCP", Port: 1},
			},
		},
	}
	_, err = kubeClient.Core().Services(namespace).Create(service)
	if err != nil && errors.IsAlreadyExists(err) {
		glog.V(1).Infof("glusterfs: service [%s] already exist in namespace [%s]", service, namespace)
		err = nil
	}
	if err != nil {
		glog.Errorf("glusterfs: failed to create service: %v", err)
		return nil, nil, fmt.Errorf("error creating service: %v", err)
	}
	return endpoint, service, nil
}
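The flow above, create and then tolerate IsAlreadyExists, recurs throughout these examples. A minimal sketch of the idiom with the client call abstracted behind callbacks, so it compiles independently of any particular client version (ensureCreated and its parameters are illustrative names, not from the source):

// ensureCreated runs a create call and treats "already exists" as success,
// which makes the operation idempotent under races with other creators.
func ensureCreated(create func() error, isAlreadyExists func(error) bool) error {
	err := create()
	if err != nil && isAlreadyExists(err) {
		// Another actor created the object first; that is fine.
		return nil
	}
	return err
}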
Example #2
// CreateOrUpdateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist. If reconcile is true, an existing service whose ports
// or type have drifted from the desired state is updated in place.
func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
	if s, err := c.ServiceClient.Services(api.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil {
		// The service already exists.
		if reconcile {
			if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
				glog.Warningf("Resetting master service %q to %#v", serviceName, svc)
				_, err := c.ServiceClient.Services(api.NamespaceDefault).Update(svc)
				return err
			}
		}
		return nil
	}
	svc := &api.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
		},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
			// maintained by this code, not by the pod selector
			Selector:        nil,
			ClusterIP:       serviceIP.String(),
			SessionAffinity: api.ServiceAffinityClientIP,
			Type:            serviceType,
		},
	}

	_, err := c.ServiceClient.Services(api.NamespaceDefault).Create(svc)
	if errors.IsAlreadyExists(err) {
		return c.CreateOrUpdateMasterServiceIfNeeded(serviceName, serviceIP, servicePorts, serviceType, reconcile)
	}
	return err
}
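Note the recursive call on IsAlreadyExists: if the Create loses a race with another creator, the function re-enters itself and takes the Get path. Written as an explicit loop, the shape is roughly this sketch (names are illustrative, not from the source):

// getOrCreate retries the read path when a concurrent creator wins the race,
// so the call converges instead of failing on AlreadyExists.
func getOrCreate(get func() error, create func() error, isAlreadyExists func(error) bool) error {
	for {
		if err := get(); err == nil {
			return nil // already present (reconcile/update would go here)
		}
		err := create()
		if err == nil || !isAlreadyExists(err) {
			return err
		}
		// AlreadyExists: another creator won between Get and Create; re-read.
	}
}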
Example #3
func (p *provision) Admit(a admission.Attributes) (err error) {
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		return nil
	}
	// we need to wait for our caches to warm
	if !p.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}
	namespace := &api.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := p.namespaceInformer.GetStore().Get(namespace)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if exists {
		return nil
	}
	_, err = p.client.Core().Namespaces().Create(namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return admission.NewForbidden(a, err)
	}
	return nil
}
Example #4
func (t *Tester) testCreateAlreadyExisting(obj runtime.Object, createFn CreateFunc) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, t.namer(1))
	if err := createFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	defer t.delete(ctx, foo)

	_, err := t.storage.(rest.Creater).Create(ctx, foo)
	if !errors.IsAlreadyExists(err) {
		t.Errorf("expected already exists err, got %v", err)
	}
}
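As a cross-check of the predicate this test leans on, apimachinery's error constructors round-trip through the Is* helpers. A small self-contained sketch (the import paths, k8s.io/apimachinery/pkg/api/errors and k8s.io/apimachinery/pkg/runtime/schema, plus fmt, are assumptions matching this code base's vintage):

// demoAlreadyExists shows that an error built by NewAlreadyExists is
// recognized by IsAlreadyExists but not by IsNotFound.
func demoAlreadyExists() {
	gr := schema.GroupResource{Group: "", Resource: "pods"}
	err := errors.NewAlreadyExists(gr, "foo")
	fmt.Println(errors.IsAlreadyExists(err)) // true
	fmt.Println(errors.IsNotFound(err))      // false
}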
Example #5
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
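		// If the existing object is mid-graceful-deletion, annotate the
		// AlreadyExists message so callers understand the name will free
		// up once deletion completes.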
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #6
// CheckGeneratedNameError checks whether an error that occurred creating a resource is due
// to generation being unable to pick a valid name.
func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error {
	if !errors.IsAlreadyExists(err) {
		return err
	}

	objectMeta, kind, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return kerr
	}

	if len(objectMeta.GenerateName) == 0 {
		return err
	}

	return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0)
}
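The effect is that a name collision under GenerateName surfaces as a retryable ServerTimeout rather than a hard AlreadyExists, because retrying the POST re-runs server-side name generation and should pick a fresh suffix. A client-side sketch honoring that contract (the helper name and retry budget are assumptions; errors.IsServerTimeout is the matching apimachinery predicate):

// createWithGeneratedName retries a create while the server reports a
// timeout, which is how generated-name collisions are surfaced above.
func createWithGeneratedName(create func() error, isServerTimeout func(error) bool, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = create(); err == nil || !isServerTimeout(err) {
			return err
		}
		// Each retry re-runs server-side name generation, so the same
		// collision is unlikely to repeat.
	}
	return err
}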
Example #7
// CreateNamespaceIfNeeded will create a namespace if it doesn't already exist
func (c *Controller) CreateNamespaceIfNeeded(ns string) error {
	if _, err := c.NamespaceClient.Namespaces().Get(ns, metav1.GetOptions{}); err == nil {
		// the namespace already exists
		return nil
	}
	newNs := &api.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ns,
			Namespace: "",
		},
	}
	_, err := c.NamespaceClient.Namespaces().Create(newNs)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #8
// recordEvent attempts to write event to a sink. It returns true if the event
// was successfully recorded or discarded, false if it should be retried.
// If updateExistingEvent is false, it creates a new event, otherwise it updates
// existing event.
func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
	var newEvent *v1.Event
	var err error
	if updateExistingEvent {
		newEvent, err = sink.Patch(event, patch)
	}
	// Update can fail because the event may have been removed and it no longer exists.
	if !updateExistingEvent || isKeyNotFoundError(err) {
		// Making sure that ResourceVersion is empty on creation
		event.ResourceVersion = ""
		newEvent, err = sink.Create(event)
	}
	if err == nil {
		// we need to update our event correlator with the server returned state to handle name/resourceversion
		eventCorrelator.UpdateState(newEvent)
		return true
	}

	// If we can't contact the server, then hold everything while we keep trying.
	// Otherwise, something about the event is malformed and we should abandon it.
	switch err.(type) {
	case *rest.RequestConstructionError:
		// We will construct the request the same next time, so don't keep trying.
		glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
		return true
	case *errors.StatusError:
		if errors.IsAlreadyExists(err) {
			glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
		} else {
			glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
		}
		return true
	case *errors.UnexpectedObjectError:
		// We don't expect this; it implies the server's response didn't match a
		// known pattern. Go ahead and retry.
	default:
		// This case includes actual http transport errors. Go ahead and retry.
	}
	glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
	return false
}
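recordEvent's bool contract is worth spelling out: true means the event is settled (recorded or deliberately abandoned), false means the caller should retry. Boiled down to a classifier, the decision looks like this sketch (the predicates stand in for the concrete rest and apimachinery error types used above):

// shouldRetry reports whether a sink error is worth retrying: only
// transport-level failures are; malformed requests and server-side
// rejections would fail identically next time.
func shouldRetry(err error, isConstructionErr, isStatusErr func(error) bool) bool {
	switch {
	case err == nil:
		return false // recorded successfully
	case isConstructionErr(err):
		return false // the request would be rebuilt identically
	case isStatusErr(err):
		return false // the server saw the event and rejected it
	default:
		return true // likely an HTTP transport error
	}
}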
Example #9
func (c *ServiceAccountsController) syncNamespace(key string) error {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
	}()

	ns, err := c.nsLister.Get(key)
	if apierrs.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	if ns.Status.Phase != v1.NamespaceActive {
		// If namespace is not active, we shouldn't try to create anything
		return nil
	}

	createFailures := []error{}
	for i := range c.serviceAccountsToEnsure {
		sa := c.serviceAccountsToEnsure[i]
		switch _, err := c.saLister.ServiceAccounts(ns.Name).Get(sa.Name); {
		case err == nil:
			continue
		case apierrs.IsNotFound(err):
		case err != nil:
			return err
		}
		// this is only safe because we never read it and we always write it
		// TODO eliminate this once the fake client can handle creation without NS
		sa.Namespace = ns.Name

		if _, err := c.client.Core().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
			createFailures = append(createFailures, err)
		}
	}

	return utilerrors.Flatten(utilerrors.NewAggregate(createFailures))
}
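The collect-then-aggregate tail lets every service account be attempted even when earlier creates fail. A sketch of the same pattern, assuming the k8s.io/apimachinery/pkg/util/errors package aliased as utilerrors above:

// ensureAll attempts every item and reports all failures at once.
// NewAggregate returns nil for an empty slice, so success needs no special case.
func ensureAll(items []string, ensure func(string) error) error {
	createFailures := []error{}
	for _, item := range items {
		if err := ensure(item); err != nil {
			createFailures = append(createFailures, err)
		}
	}
	return utilerrors.Flatten(utilerrors.NewAggregate(createFailures))
}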
Example #10
func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error {
	if mc.apiserverClient == nil {
		return nil
	}
	// Make a copy of the pod.
	copyPod := *pod
	copyPod.Annotations = make(map[string]string)

	for k, v := range pod.Annotations {
		copyPod.Annotations[k] = v
	}
	hash := getPodHash(pod)
	copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash
	apiPod, err := mc.apiserverClient.Core().Pods(copyPod.Namespace).Create(&copyPod)
	if err != nil && errors.IsAlreadyExists(err) {
		// Check if the existing pod is the same as the pod we want to create.
		if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash {
			return nil
		}
	}
	return err
}
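The AlreadyExists branch is only benign when the existing mirror pod matches the one being created, which is what the hash annotation comparison decides. getPodHash is not shown in this excerpt; as an illustration of the kind of deterministic fingerprint such an annotation carries, a sketch using the standard hash/fnv package (an assumption, not necessarily the kubelet's actual implementation):

// contentHash returns a stable fingerprint for a serialized spec, so two
// actors creating "the same" object can recognize each other's work.
// Requires "fmt" and "hash/fnv".
func contentHash(serializedSpec []byte) string {
	h := fnv.New32a()
	h.Write(serializedSpec) // fnv's Write never fails
	return fmt.Sprintf("%08x", h.Sum32())
}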
Example #11
// Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet.
// 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
// Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) {
	existingNewRS, err := deploymentutil.FindNewReplicaSet(deployment, rsList)
	if err != nil {
		return nil, err
	}

	// Calculate the max revision number among all old RSes
	maxOldRevision := deploymentutil.MaxRevision(oldRSs)
	// Calculate revision number for this new replica set
	newRevision := strconv.FormatInt(maxOldRevision+1, 10)

	// Latest replica set exists. We need to sync its annotations (includes copying all but
	// annotationsToSkip from the parent deployment, and update revision, desiredReplicas,
	// and maxReplicas) and also update the revision annotation in the deployment with the
	// latest revision.
	if existingNewRS != nil {
		objCopy, err := api.Scheme.Copy(existingNewRS)
		if err != nil {
			return nil, err
		}
		rsCopy := objCopy.(*extensions.ReplicaSet)

		// Set existing new replica set's annotation
		annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(deployment, rsCopy, newRevision, true)
		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != deployment.Spec.MinReadySeconds
		if annotationsUpdated || minReadySecondsNeedsUpdate {
			rsCopy.Spec.MinReadySeconds = deployment.Spec.MinReadySeconds
			return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
		}

		updateConditions := deploymentutil.SetDeploymentRevision(deployment, newRevision)
		// If no other Progressing condition has been recorded and we need to estimate the progress
		// of this deployment then it is likely that old users started caring about progress. In that
		// case we need to take into account the first time we noticed their new replica set.
		cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
		if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil {
			msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
			condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
			deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
			updateConditions = true
		}

		if updateConditions {
			if deployment, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment); err != nil {
				return nil, err
			}
		}
		return rsCopy, nil
	}

	if !createIfNotExisted {
		return nil, nil
	}

	// new ReplicaSet does not exist, create one.
	namespace := deployment.Namespace
	podTemplateSpecHash := fmt.Sprintf("%d", deploymentutil.GetPodTemplateSpecHash(deployment.Spec.Template))
	newRSTemplate := deploymentutil.GetNewReplicaSetTemplate(deployment)
	newRSTemplate.Labels = labelsutil.CloneAndAddLabel(deployment.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
	// Add podTemplateHash label to selector.
	newRSSelector := labelsutil.CloneSelectorAndAddLabel(deployment.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)

	// Create new ReplicaSet
	newRS := extensions.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			// Make the name deterministic, to ensure idempotence
			Name:      deployment.Name + "-" + podTemplateSpecHash,
			Namespace: namespace,
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas:        func(i int32) *int32 { return &i }(0),
			MinReadySeconds: deployment.Spec.MinReadySeconds,
			Selector:        newRSSelector,
			Template:        newRSTemplate,
		},
	}
	var trueVar = true
	controllerRef := &metav1.OwnerReference{
		APIVersion: getDeploymentKind().GroupVersion().String(),
		Kind:       getDeploymentKind().Kind,
		Name:       deployment.Name,
		UID:        deployment.UID,
		Controller: &trueVar,
	}
	newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef)
	allRSs := append(oldRSs, &newRS)
	newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS)
	if err != nil {
		return nil, err
	}

	*(newRS.Spec.Replicas) = newReplicasCount
	// Set new replica set's annotation
	deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false)
	createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS)
	switch {
	// We may end up hitting this due to a slow cache or a fast resync of the deployment.
	// TODO: Restore once https://github.com/kubernetes/kubernetes/issues/29735 is fixed
	// ie. we start using a new hashing algorithm.
	case errors.IsAlreadyExists(err):
		return nil, err
	//	return dc.rsLister.ReplicaSets(namespace).Get(newRS.Name)
	case err != nil:
		msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
		if deployment.Spec.ProgressDeadlineSeconds != nil {
			cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
			deploymentutil.SetDeploymentCondition(&deployment.Status, *cond)
			// We don't really care about this error at this point, since we have a bigger issue to report.
			// TODO: Update the rest of the Deployment status, too. We may need to do this every time we
			// error out in all other places in the controller so that we let users know that their deployments
			// have been noticed by the controller, albeit with errors.
			// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
			// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
			_, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment)
		}
		dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
		return nil, err
	}
	if newReplicasCount > 0 {
		dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
	}

	deploymentutil.SetDeploymentRevision(deployment, newRevision)
	if deployment.Spec.ProgressDeadlineSeconds != nil {
		msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
		condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
		deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
	}
	_, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
	return createdRS, err
}
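Two details make this create race-safe: the ReplicaSet name is derived deterministically from the deployment name and the pod-template hash, so concurrent controllers collide on AlreadyExists instead of producing duplicates, and the commented-out TODO shows the eventual intent to treat that collision as "adopt the existing ReplicaSet". The naming rule in isolation (a sketch mirroring the Name field above; requires "fmt"):

// deterministicRSName reproduces the naming rule used above: identical pod
// templates always target the same ReplicaSet name, making creation idempotent.
func deterministicRSName(deploymentName string, podTemplateSpecHash uint32) string {
	return fmt.Sprintf("%s-%d", deploymentName, podTemplateSpecHash)
}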
Example #12
// tryRegisterWithApiServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful.  If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.  If a node of the same name exists but has
// a different externalID value, it attempts to delete that node so that a
// later attempt can recreate it.
func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
	_, err := kl.kubeClient.Core().Nodes().Create(node)
	if err == nil {
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
		return false
	}

	existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
		return false
	}
	if existingNode == nil {
		glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
		return false
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(existingNode)
	if err != nil {
		glog.Errorf("Unable to clone %q node object %#v: %v", kl.nodeName, existingNode, err)
		return false
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		glog.Errorf("Unable to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
		return false
	}

	if existingNode.Spec.ExternalID == node.Spec.ExternalID {
		glog.Infof("Node %s was previously registered", kl.nodeName)

		// Edge case: the node was previously registered; reconcile
		// the value of the controller-managed attach-detach
		// annotation.
		requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
		if requiresUpdate {
			if _, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName),
				originalNode, existingNode); err != nil {
				glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
				return false
			}
		}

		return true
	}

	glog.Errorf(
		"Previously node %q had externalID %q; now it is %q; will delete and recreate.",
		kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID,
	)
	if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
		glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err)
	} else {
		glog.Info("Deleted old node object %q", kl.nodeName)
	}

	return false
}
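tryRegisterWithApiServer deliberately returns a bare bool so the caller owns the retry policy; even the delete-and-recreate path returns false and leaves the recreate to the next attempt. A sketch of the driving loop (the interval is an assumption, not taken from the kubelet; requires "time"):

// registerWithRetry drives a single-attempt registration function until one
// attempt reports success, sleeping between attempts.
func registerWithRetry(attempt func() bool, interval time.Duration) {
	for !attempt() {
		time.Sleep(interval)
	}
}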
Example #13
func TestServiceAccountTokenAuthentication(t *testing.T) {
	c, config, stopFunc := startServiceAccountTestServer(t)
	defer stopFunc()

	myns := "auth-ns"
	otherns := "other-ns"

	// Create "my" namespace
	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}})
	if err != nil && !errors.IsAlreadyExists(err) {
		t.Fatalf("could not create namespace: %v", err)
	}

	// Create "other" namespace
	_, err = c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}})
	if err != nil && !errors.IsAlreadyExists(err) {
		t.Fatalf("could not create namespace: %v", err)
	}

	// Create "ro" user in myns
	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}})
	if err != nil {
		t.Fatalf("Service Account not created: %v", err)
	}
	roTokenName, roToken, err := getReferencedServiceAccountToken(c, myns, readOnlyServiceAccountName, true)
	if err != nil {
		t.Fatal(err)
	}
	roClientConfig := config
	roClientConfig.BearerToken = roToken
	roClient := clientset.NewForConfigOrDie(&roClientConfig)
	doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
	doServiceAccountAPIRequests(t, roClient, otherns, true, false, false)
	err = c.Core().Secrets(myns).Delete(roTokenName, nil)
	if err != nil {
		t.Fatalf("could not delete token: %v", err)
	}
	doServiceAccountAPIRequests(t, roClient, myns, false, false, false)

	// Create "rw" user in myns
	_, err = c.Core().ServiceAccounts(myns).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}})
	if err != nil {
		t.Fatalf("Service Account not created: %v", err)
	}
	_, rwToken, err := getReferencedServiceAccountToken(c, myns, readWriteServiceAccountName, true)
	if err != nil {
		t.Fatal(err)
	}
	rwClientConfig := config
	rwClientConfig.BearerToken = rwToken
	rwClient := clientset.NewForConfigOrDie(&rwClientConfig)
	doServiceAccountAPIRequests(t, rwClient, myns, true, true, true)
	doServiceAccountAPIRequests(t, rwClient, otherns, true, false, false)

	// Get default user and token which should have been automatically created
	_, defaultToken, err := getReferencedServiceAccountToken(c, myns, "default", true)
	if err != nil {
		t.Fatalf("could not get default user and token: %v", err)
	}
	defaultClientConfig := config
	defaultClientConfig.BearerToken = defaultToken
	defaultClient := clientset.NewForConfigOrDie(&defaultClientConfig)
	doServiceAccountAPIRequests(t, defaultClient, myns, true, false, false)
}
Example #14
func TestServiceAccountTokenAutoMount(t *testing.T) {
	c, _, stopFunc := startServiceAccountTestServer(t)
	defer stopFunc()

	ns := "auto-mount-ns"

	// Create "my" namespace
	_, err := c.Core().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}})
	if err != nil && !errors.IsAlreadyExists(err) {
		t.Fatalf("could not create namespace: %v", err)
	}

	// Get default token
	defaultTokenName, _, err := getReferencedServiceAccountToken(c, ns, serviceaccountadmission.DefaultServiceAccountName, true)
	if err != nil {
		t.Fatal(err)
	}

	// Pod to create
	protoPod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "protopod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "container-1",
					Image: "container-1-image",
				},
				{
					Name:  "container-2",
					Image: "container-2-image",
					VolumeMounts: []v1.VolumeMount{
						{Name: "empty-dir", MountPath: serviceaccountadmission.DefaultAPITokenMountPath},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name:         "empty-dir",
					VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
				},
			},
		},
	}

	// Pod we expect to get created
	defaultMode := int32(0644)
	expectedServiceAccount := serviceaccountadmission.DefaultServiceAccountName
	expectedVolumes := append(protoPod.Spec.Volumes, v1.Volume{
		Name: defaultTokenName,
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName:  defaultTokenName,
				DefaultMode: &defaultMode,
			},
		},
	})
	expectedContainer1VolumeMounts := []v1.VolumeMount{
		{Name: defaultTokenName, MountPath: serviceaccountadmission.DefaultAPITokenMountPath, ReadOnly: true},
	}
	expectedContainer2VolumeMounts := protoPod.Spec.Containers[1].VolumeMounts

	createdPod, err := c.Core().Pods(ns).Create(&protoPod)
	if err != nil {
		t.Fatal(err)
	}
	if createdPod.Spec.ServiceAccountName != expectedServiceAccount {
		t.Fatalf("Expected %s, got %s", expectedServiceAccount, createdPod.Spec.ServiceAccountName)
	}
	if !api.Semantic.DeepEqual(&expectedVolumes, &createdPod.Spec.Volumes) {
		t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedVolumes, createdPod.Spec.Volumes)
	}
	if !api.Semantic.DeepEqual(&expectedContainer1VolumeMounts, &createdPod.Spec.Containers[0].VolumeMounts) {
		t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer1VolumeMounts, createdPod.Spec.Containers[0].VolumeMounts)
	}
	if !api.Semantic.DeepEqual(&expectedContainer2VolumeMounts, &createdPod.Spec.Containers[1].VolumeMounts) {
		t.Fatalf("Expected\n\t%#v\n\tgot\n\t%#v", expectedContainer2VolumeMounts, createdPod.Spec.Containers[1].VolumeMounts)
	}
}
Example #15
func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *v1.Service, client *kubeclientset.Clientset) error {
	var err error
	var needUpdate bool
	for i := 0; i < clientRetryCount; i++ {
		// Assign to the outer err so the final "return err" reports the last
		// failure instead of always returning nil via a shadowed variable.
		var svc *v1.Service
		svc, err = client.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
		if err == nil {
			// service exists
			glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
			// preserve immutable fields
			service.Spec.ClusterIP = svc.Spec.ClusterIP

			// preserve auto-assigned fields
			for i, oldPort := range svc.Spec.Ports {
				for _, port := range service.Spec.Ports {
					if port.NodePort == 0 {
						if !portEqualExcludeNodePort(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					} else {
						if !portEqualForLB(&oldPort, &port) {
							svc.Spec.Ports[i] = port
							needUpdate = true
						}
					}
				}
			}

			if needUpdate {
				// we only apply spec update
				svc.Spec = service.Spec
				_, err = client.Core().Services(svc.Namespace).Update(svc)
				if err == nil {
					glog.V(5).Infof("Service %s/%s successfully updated to cluster %s", svc.Namespace, svc.Name, clusterName)
					return nil
				} else {
					glog.V(4).Infof("Failed to update %+v", err)
				}
			} else {
				glog.V(5).Infof("Service %s/%s is not updated to cluster %s as the spec are identical", svc.Namespace, svc.Name, clusterName)
				return nil
			}
		} else if errors.IsNotFound(err) {
			// Create service if it is not found
			glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create new",
				service.Namespace, service.Name, clusterName)
			service.ResourceVersion = ""
			_, err = client.Core().Services(service.Namespace).Create(service)
			if err == nil {
				glog.V(5).Infof("Service %s/%s successfully created to cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
			glog.V(4).Infof("Failed to create %+v", err)
			if errors.IsAlreadyExists(err) {
				glog.V(5).Infof("service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName)
				return nil
			}
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
		}
		// should we reuse same retry delay for all clusters?
		time.Sleep(cachedService.nextRetryDelay())
	}
	return err
}
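The loop above distinguishes three outcomes per attempt: success and AlreadyExists end the reconcile, Conflict is logged and retried, and anything else is retried up to clientRetryCount before the last error is returned. Condensed into a sketch (names are illustrative; requires "time"):

// retryEnsure runs a reconcile step a bounded number of times, treating
// "already exists" as success and everything else as retryable.
func retryEnsure(step func() error, isAlreadyExists func(error) bool, attempts int, delay time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = step(); err == nil || isAlreadyExists(err) {
			return nil
		}
		time.Sleep(delay) // conflicts and transient errors alike are retried
	}
	return err
}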
Example #16
func TestStoreCreate(t *testing.T) {
	gracefulPeriod := int64(50)
	podA := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test"},
		Spec:       api.PodSpec{NodeName: "machine"},
	}
	podB := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test"},
		Spec:       api.PodSpec{NodeName: "machine2"},
	}

	testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
	destroyFunc, registry := NewTestGenericStoreRegistry(t)
	defer destroyFunc()
	// re-define delete strategy to have graceful delete capability
	registry.DeleteStrategy = pod.Strategy

	// create the object
	objA, err := registry.Create(testContext, podA)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// get the object
	checkobj, err := registry.Get(testContext, podA.Name, &metav1.GetOptions{})
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	// verify objects are equal
	if e, a := objA, checkobj; !reflect.DeepEqual(e, a) {
		t.Errorf("Expected %#v, got %#v", e, a)
	}

	// now try to create the second pod
	_, err = registry.Create(testContext, podB)
	if !errors.IsAlreadyExists(err) {
		t.Errorf("Unexpected error: %v", err)
	}

	// verify graceful delete capability is defined
	_, ok := registry.DeleteStrategy.(rest.RESTGracefulDeleteStrategy)
	if !ok {
		t.Fatalf("No graceful capability set.")
	}

	// now delete pod with graceful period set
	delOpts := &api.DeleteOptions{GracePeriodSeconds: &gracefulPeriod}
	_, err = registry.Delete(testContext, podA.Name, delOpts)
	if err != nil {
		t.Fatalf("Failed to delete pod gracefully. Unexpected error: %v", err)
	}

	// try to create before graceful deletion period is over
	_, err = registry.Create(testContext, podA)
	if err == nil || !errors.IsAlreadyExists(err) {
		t.Fatalf("Expected 'already exists' error from storage, but got %v", err)
	}

	// check that the 'already exists' msg was edited
	msg := &err.(*errors.StatusError).ErrStatus.Message
	if !strings.Contains(*msg, "object is being deleted:") {
		t.Errorf("Unexpected error without the 'object is being deleted:' in message: %v", err)
	}
}
Example #17
// internalRecycleVolumeByWatchingPodUntilCompletion recycles a volume by
// running a recycler pod and watching it until it completes; 'recyclerClient'
// is a narrow pod API interface to ease testing.
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
	glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)

	// Generate unique name for the recycler pod - we need to get "already
	// exists" error when a previous controller has already started recycling
	// the volume. Here we assume that pv.Name is already unique.
	pod.Name = "recycler-for-" + pvName
	pod.GenerateName = ""

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
	if err != nil {
		glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}

	// Start the pod
	_, err = recyclerClient.CreatePod(pod)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
		} else {
			return fmt.Errorf("unexpected error creating recycler pod:  %+v\n", err)
		}
	}
	defer func(pod *v1.Pod) {
		glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
		if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
			glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
		}
	}(pod)

	// Now only the old pod or the new pod run. Watch it until it finishes
	// and send all events on the pod to the PV
	for {
		event := <-podCh
		switch event.Object.(type) {
		case *v1.Pod:
			// POD changed
			pod := event.Object.(*v1.Pod)
			glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
			switch event.Type {
			case watch.Added, watch.Modified:
				if pod.Status.Phase == v1.PodSucceeded {
					// Recycle succeeded.
					return nil
				}
				if pod.Status.Phase == v1.PodFailed {
					if pod.Status.Message != "" {
						// format with "%s" so any '%' in the message is kept verbatim
						return fmt.Errorf("%s", pod.Status.Message)
					}
					return fmt.Errorf("pod failed; pod.Status.Message unknown")
				}

			case watch.Deleted:
				return fmt.Errorf("recycler pod was deleted")

			case watch.Error:
				return fmt.Errorf("recycler pod watcher failed")
			}

		case *v1.Event:
			// Event received
			podEvent := event.Object.(*v1.Event)
			glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
			if event.Type == watch.Added {
				recyclerClient.Event(podEvent.Type, podEvent.Message)
			}
		}
	}
}
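Here the deterministic pod name serializes recyclers across controllers: a second controller's Create fails with AlreadyExists and it simply watches the first controller's pod instead. The terminal-phase loop reduces to this sketch (the phase strings stand in for the v1.PodPhase values; requires "fmt"):

// waitForTerminalPhase drains pod-phase updates until a terminal phase
// arrives, mirroring the event loop above.
func waitForTerminalPhase(phases <-chan string) error {
	for phase := range phases {
		switch phase {
		case "Succeeded":
			return nil // recycle succeeded
		case "Failed":
			return fmt.Errorf("recycler pod failed")
		}
		// any other phase: keep watching
	}
	return fmt.Errorf("watch closed before a terminal phase")
}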
Example #18
// Config returns a complete clientConfig for constructing clients. This is
// separate in anticipation of composition, which means that not all clientsets
// are known here.
func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) {
	clientConfig := restclient.AnonymousClientConfig(b.ClientConfig)

	// we need the SA UID to find a matching SA token
	sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, err
	} else if apierrors.IsNotFound(err) {
		// check to see if the namespace exists.  If it isn't a NotFound, just try to create the SA.
		// It'll probably fail, but perhaps that will have a better message.
		if _, err := b.CoreClient.Namespaces().Get(b.Namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
			_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: b.Namespace}})
			if err != nil && !apierrors.IsAlreadyExists(err) {
				return nil, err
			}
		}

		sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
			&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: b.Namespace, Name: name}})
		if err != nil {
			return nil, err
		}
	}

	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
			return b.CoreClient.Secrets(b.Namespace).List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
			return b.CoreClient.Secrets(b.Namespace).Watch(options)
		},
	}
	_, err = cache.ListWatchUntil(30*time.Second, lw,
		func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Deleted:
				return false, nil
			case watch.Error:
				return false, fmt.Errorf("error watching")

			case watch.Added, watch.Modified:
				secret := event.Object.(*v1.Secret)
				if !serviceaccount.IsServiceAccountToken(secret, sa) ||
					len(secret.Data[v1.ServiceAccountTokenKey]) == 0 {
					return false, nil
				}
				// TODO maybe verify the token is valid
				clientConfig.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
				restclient.AddUserAgent(clientConfig, apiserverserviceaccount.MakeUsername(b.Namespace, name))
				return true, nil

			default:
				return false, fmt.Errorf("unexpected event type: %v", event.Type)
			}
		})
	if err != nil {
		return nil, fmt.Errorf("unable to get token for service account: %v", err)
	}

	return clientConfig, nil
}
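ListWatchUntil bounds the wait for the token secret to appear. Stripped of the list/watch plumbing, the control flow is a select over events and a timer, as in this sketch (a generic approximation; the real helper also relists, which this omits; requires "fmt" and "time"):

// waitUntil consumes events until the predicate accepts one or the timeout
// expires, approximating what ListWatchUntil does above.
func waitUntil(events <-chan string, accept func(string) bool, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case e, ok := <-events:
			if !ok {
				return fmt.Errorf("event stream closed")
			}
			if accept(e) {
				return nil
			}
		case <-timer.C:
			return fmt.Errorf("timed out waiting for event")
		}
	}
}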