Example #1
// Handle processes change triggers for the given config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	if !deployutil.HasChangeTrigger(config) {
		glog.V(5).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.Status.LatestVersion == 0 {
		_, _, abort, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			glog.V(4).Infof("Couldn't create initial deployment for deploymentConfig %q: %v", deployutil.LabelForDeploymentConfig(config), err)
			return nil
		}
		if !abort {
			glog.V(4).Infof("Created initial deployment for deploymentConfig %q", deployutil.LabelForDeploymentConfig(config))
		}
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(5).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	// Detect template diffs, and return early if there aren't any changes.
	if kapi.Semantic.DeepEqual(config.Spec.Template, deployedConfig.Spec.Template) {
		glog.V(5).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.Status.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	// There was a template diff, so generate a new config version.
	fromVersion, toVersion, abort, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	if !abort {
		glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	}
	return nil
}
Example #2
//  This tests the fast-fail path: the precondition is verified again before
//  the object is actually deleted in the tests under pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, t.namer(1))
	objectMeta := t.getObjectMetaOrFail(foo)
	objectMeta.UID = types.UID("UID0000")
	if err := createFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111"))
	if err == nil || !errors.IsConflict(err) {
		t.Errorf("unexpected error: %v", err)
	}

	obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if !t.returnDeletedObject {
		if status, ok := obj.(*unversioned.Status); !ok {
			t.Errorf("expected status of delete, got %v", status)
		} else if status.Status != unversioned.StatusSuccess {
			t.Errorf("expected success, got: %v", status.Status)
		}
	}

	_, err = getFn(ctx, foo)
	if err == nil || !isNotFoundFn(err) {
		t.Errorf("unexpected error: %v", err)
	}
}
Example #3
// RemoveLabelOffNode cleans up labels that were temporarily added to a node;
// it does not fail if a target label no longer exists or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
	var node *v1.Node
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			return nil
		}
		for _, labelKey := range labelKeys {
			if len(node.Labels[labelKey]) == 0 {
				// The label is already absent; check the next key.
				continue
			}
			delete(node.Labels, labelKey)
		}
		_, err = c.Core().Nodes().Update(node)
		if err == nil {
			break
		}
		if !apierrs.IsConflict(err) {
			return err
		}
		glog.V(2).Infof("Conflict when trying to remove labels %v from %v; retrying", labelKeys, nodeName)
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
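Nearly every snippet in this collection follows the same get/mutate/update loop, retrying only when the apiserver reports a version conflict. As a point of reference, here is a minimal, dependency-free sketch of that pattern; the errConflict sentinel and the get/update callbacks are illustrative stand-ins, not part of any example above.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errConflict stands in for an apiserver 409 Conflict; real callers detect it
// with the IsConflict helper from the apimachinery errors package instead.
var errConflict = errors.New("conflict: object was modified")

// updateWithRetry re-reads an object and re-applies a mutation until the write
// lands without a conflict or the attempt budget is exhausted.
func updateWithRetry(attempts int, get func() (string, error), update func(string) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		var obj string
		if obj, err = get(); err != nil {
			return err // failing to read the object is not retriable here
		}
		if err = update(obj); err == nil {
			return nil
		}
		if !errors.Is(err, errConflict) {
			return err // only conflicts are worth retrying
		}
		time.Sleep(100 * time.Millisecond) // brief pause before re-reading
	}
	return err
}

func main() {
	writes := 0
	err := updateWithRetry(3,
		func() (string, error) { return "node-1", nil },
		func(string) error {
			writes++
			if writes == 1 {
				return errConflict // the first write loses the optimistic-concurrency race
			}
			return nil
		})
	fmt.Println(err, writes) // <nil> 2
}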
Example #4
func TestUpdateWithMismatchedResourceVersion(t *testing.T) {
	// Starting conditions
	associatedUser1, associatedIdentity1User1 := makeAssociated()
	unassociatedUser2 := makeUser()
	// Finishing conditions
	_, unassociatedIdentity1 := disassociate(associatedUser1, associatedIdentity1User1)

	expectedActions := []test.Action{
		// Existing mapping lookup
		{"GetIdentity", associatedIdentity1User1.Name},
		{"GetUser", associatedUser1.Name},
	}

	mapping := &api.UserIdentityMapping{
		ObjectMeta: kapi.ObjectMeta{ResourceVersion: "123"},
		Identity:   kapi.ObjectReference{Name: unassociatedIdentity1.Name},
		User:       kapi.ObjectReference{Name: unassociatedUser2.Name},
	}

	actions, _, _, rest := setupRegistries(associatedIdentity1User1, associatedUser1, unassociatedUser2)
	_, _, err := rest.Update(kapi.NewContext(), mapping.Name, kapirest.DefaultUpdatedObjectInfo(mapping, kapi.Scheme))

	if err == nil || !kerrs.IsConflict(err) {
		t.Errorf("expected conflict error, got: %v", err)
	}
	verifyActions(expectedActions, *actions, t)
}
Example #5
func updateSecretOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var err error
	var newSecret *v1.Secret
	secret := &v1.Secret{
		ObjectMeta: v1.ObjectMeta{
			Name: UpdatedFederatedSecretName,
		},
	}

	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
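		// The Get only verifies that the secret still exists; the object assembled above carries the update.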
		_, err = clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example #6
// addVolume is a callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := storeObjectUpdate(ctrl.volumes.store, obj, "volume")
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	if !ctrl.isFullySynced() {
		return
	}

	pv, ok := obj.(*api.PersistentVolume)
	if !ok {
		glog.Errorf("expected PersistentVolume but handler received %+v", obj)
		return
	}
	if err := ctrl.syncVolume(pv); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		} else {
			glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		}
	}
}
Example #7
func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.DaemonSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateDaemonSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var newDaemonSet *v1beta1.DaemonSet
	for retryCount := 0; retryCount < FederatedDaemonSetMaxRetries; retryCount++ {
		daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName)
		if err != nil {
			framework.Failf("failed to get daemonset %q: %v", FederatedDaemonSetName, err)
		}

		// Update one of the annotations in the daemonset.
		daemonset.Annotations = map[string]string{"ccc": "ddd"}
		newDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Update(daemonset)
		if err == nil {
			return newDaemonSet
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update daemonset %q: %v", FederatedDaemonSetName, err)
		}
	}
	framework.Failf("too many retries updating daemonset %q", FederatedDaemonSetName)
	return newDaemonSet
}
Example #8
func updateSecretOrFail(clientset *fedclientset.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	var newSecret *v1.Secret
	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
		secret, err := clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}

		// Update one of the data entries in the secret.
		secret.Data = map[string][]byte{
			"key": []byte("value"),
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example #9
func TestConflictingUpdate(t *testing.T) {
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})

	storage := makeTestStorage()
	obj, err := storage.Create(ctx, &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-roleBinding"},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	original := obj.(*authorizationapi.RoleBinding)

	roleBinding := &authorizationapi.RoleBinding{
		ObjectMeta: original.ObjectMeta,
		RoleRef:    kapi.ObjectReference{Name: "admin"},
		Subjects:   []kapi.ObjectReference{{Name: "bob", Kind: "User"}},
	}
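	// Perturb the resource version so it no longer matches the stored object; the update below must then fail with a conflict.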
	roleBinding.ResourceVersion = roleBinding.ResourceVersion + "1"

	_, _, err = storage.Update(ctx, roleBinding.Name, rest.DefaultUpdatedObjectInfo(roleBinding, kapi.Scheme))
	if err == nil || !kapierrors.IsConflict(err) {
		t.Errorf("Expected conflict error, got: %#v", err)
	}
}
Example #10
// secretDeleted reacts to a Secret being deleted by checking whether it is a dockercfg secret for a service account; if so,
// it removes the references from the service account and deletes the token secret created to back the dockercfg secret.
func (e *DockercfgDeletedController) secretDeleted(obj interface{}) {
	dockercfgSecret, ok := obj.(*api.Secret)
	if !ok {
		return
	}
	if _, exists := dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]; !exists {
		return
	}

	for i := 1; i <= NumServiceAccountUpdateRetries; i++ {
		if err := e.removeDockercfgSecretReference(dockercfgSecret); err != nil {
			if kapierrors.IsConflict(err) && i < NumServiceAccountUpdateRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}

			glog.Error(err)
			break
		}

		break
	}

	// Remove the token secret that was created to back the dockercfg secret.
	if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]); (err != nil) && !kapierrors.IsNotFound(err) {
		util.HandleError(err)
	}
}
Example #11
func (reaper *DeploymentConfigReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateConfigFunc) (*deployapi.DeploymentConfig, error) {
	var (
		config *deployapi.DeploymentConfig
		err    error
	)
	deploymentConfigs := reaper.oc.DeploymentConfigs(namespace)
	resultErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		config, err = deploymentConfigs.Get(name)
		if err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(config)
		config, err = deploymentConfigs.Update(config)
		if err != nil {
			// Retry only on update conflict
			if kerrors.IsConflict(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
	return config, resultErr
}
Example #12
func (p *provisioningIdentityMapper) userForWithRetries(info authapi.UserIdentityInfo, allowedRetries int) (kuser.Info, error) {
	ctx := kapi.NewContext()

	identity, err := p.identity.GetIdentity(ctx, info.GetIdentityName())

	if kerrs.IsNotFound(err) {
		user, err := p.createIdentityAndMapping(ctx, info)
		// Only retry for the following types of errors:
		// AlreadyExists errors:
		// * The same user was created by another identity provider with the same preferred username
		// * The same user was created by another instance of this identity provider (e.g. double-clicked login button)
		// * The same identity was created by another instance of this identity provider (e.g. double-clicked login button)
		// Conflict errors:
		// * The same user was updated by another identity provider to add identity info
		if (kerrs.IsAlreadyExists(err) || kerrs.IsConflict(err)) && allowedRetries > 0 {
			return p.userForWithRetries(info, allowedRetries-1)
		}
		return user, err
	}

	if err != nil {
		return nil, err
	}

	return p.getMapping(ctx, identity)
}
Example #13
// handleComplete represents the default OnComplete handler. This handler
// determines which build should run next and updates the StartTimestamp field
// for that build, which triggers HandleBuild() to process it so the build is
// executed immediately.
func handleComplete(lister buildclient.BuildLister, updater buildclient.BuildUpdater, build *buildapi.Build) error {
	bcName := buildutil.ConfigNameForBuild(build)
	if len(bcName) == 0 {
		return nil
	}
	nextBuilds, hasRunningBuilds, err := GetNextConfigBuild(lister, build.Namespace, bcName)
	if err != nil {
		return fmt.Errorf("unable to get the next build for %s/%s: %v", build.Namespace, build.Name, err)
	}
	if hasRunningBuilds || len(nextBuilds) == 0 {
		return nil
	}
	now := unversioned.Now()
	for _, build := range nextBuilds {
		build.Status.StartTimestamp = &now
		err := wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
			err := updater.Update(build.Namespace, build)
			if err != nil && errors.IsConflict(err) {
				glog.V(5).Infof("Error updating build %s/%s: %v (will retry)", build.Namespace, build.Name, err)
				return false, nil
			}
			return true, err
		})
		if err != nil {
			return err
		}
	}
	return nil
}
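The two wait.Poll-based helpers above (Examples #11 and #13) lean on the ConditionFunc contract: return (false, nil) to keep polling, (true, nil) to stop successfully, and a non-nil error to abort. Below is a stripped-down stand-in for illustration only; the real implementation lives in the Kubernetes wait package.

package main

import (
	"fmt"
	"time"
)

// pollUntil mimics the wait.Poll shape used above: cond is invoked every
// interval until it reports done or returns an error, or timeout elapses.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err // a non-nil error aborts polling immediately
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v", timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	tries := 0
	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		tries++
		return tries == 3, nil // pretend the update succeeds on the third try
	})
	fmt.Println(err, tries) // <nil> 3
}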
Example #14
func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, routeCreated bool) error {
	var err error
	for i := 0; i < updateNodeStatusMaxRetries; i++ {
		// Patch could also fail, even though the chance is very slim. So we still do
		// patch in the retry loop.
		currentTime := metav1.Now()
		if routeCreated {
			err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
				Type:               v1.NodeNetworkUnavailable,
				Status:             v1.ConditionFalse,
				Reason:             "RouteCreated",
				Message:            "RouteController created a route",
				LastTransitionTime: currentTime,
			})
		} else {
			err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
				Type:               v1.NodeNetworkUnavailable,
				Status:             v1.ConditionTrue,
				Reason:             "NoRouteCreated",
				Message:            "RouteController failed to create a route",
				LastTransitionTime: currentTime,
			})
		}
		if err == nil {
			return nil
		}
		if i == updateNodeStatusMaxRetries-1 || !errors.IsConflict(err) {
			glog.Errorf("Error updating node %s: %v", nodeName, err)
			return err
		}
		glog.Errorf("Error updating node %s, retrying: %v", nodeName, err)
	}
	return err
}
Example #15
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient federation_release_1_3.Interface) error {
	service := cachedService.lastState
	glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = fedClient.Core().Services(service.Namespace).Get(service.Name)
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
		if err == nil {
			glog.V(2).Infof("Successfully update service %s/%s to federation apiserver", service.Namespace, service.Name)
			return nil
		}
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		if errors.IsConflict(err) {
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return err
		}
		time.Sleep(cachedService.nextFedUpdateDelay())
	}
	return err
}
Example #16
func TestImageStreamImportUnsupported(t *testing.T) {
	testCases := []struct {
		status unversioned.Status
		errFn  func(err error) bool
	}{
		{
			status: errors.NewNotFound("", "").(kclient.APIStatus).Status(),
			errFn:  func(err error) bool { return err == ErrImageStreamImportUnsupported },
		},
		{
			status: errors.NewNotFound("Other", "").(kclient.APIStatus).Status(),
			errFn:  func(err error) bool { return err != ErrImageStreamImportUnsupported && errors.IsNotFound(err) },
		},
		{
			status: errors.NewConflict("Other", "", nil).(kclient.APIStatus).Status(),
			errFn:  func(err error) bool { return err != ErrImageStreamImportUnsupported && errors.IsConflict(err) },
		},
	}
	for i, test := range testCases {
		c, err := New(&kclient.Config{
			Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
				buf := bytes.NewBuffer([]byte(runtime.EncodeOrDie(latest.GroupOrDie("").Codec, &test.status)))
				return &http.Response{StatusCode: http.StatusNotFound, Body: ioutil.NopCloser(buf)}, nil
			}),
		})
		if err != nil {
			t.Fatal(err)
		}
		if _, err := c.ImageStreams("test").Import(&api.ImageStreamImport{}); !test.errFn(err) {
			t.Errorf("%d: error: %v", i, err)
		}
	}
}
Example #17
// updateVolume is a callback from framework.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
	newVolume, ok := newObj.(*api.PersistentVolume)
	if !ok {
		glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
		return
	}

	if ctrl.upgradeVolumeFrom1_2(newVolume) {
		// volume deleted
		return
	}

	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := storeObjectUpdate(ctrl.volumes.store, newObj, "volume")
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	if err := ctrl.syncVolume(newVolume); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
		} else {
			glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
		}
	}
}
Example #18
// updateClaim is a callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
	// Store the new claim version in the cache and do not process it if this is
	// an old version.
	new, err := storeObjectUpdate(ctrl.claims, newObj, "claim")
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	newClaim, ok := newObj.(*api.PersistentVolumeClaim)
	if !ok {
		glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj)
		return
	}
	if err := ctrl.syncClaim(newClaim); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
		} else {
			glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
		}
	}
}
Example #19
func (s *ServiceController) persistUpdate(service *api.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)

		switch {
		case err == nil:
			return nil
		case errors.IsNotFound(err):
			// If the object no longer exists, we don't want to recreate it. Just bail
			// out so that we can process the delete, which we should soon be receiving
			// if we haven't already.
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		case errors.IsConflict(err):
			// TODO: Try to resolve the conflict if the change was unrelated to load
			// balancer status. For now, just rely on the fact that we'll
			// also process the update that caused the resource version to change.
			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
			return nil
		}

		glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
			service.Namespace, service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}
Example #20
// addVolume is a callback from cache.Controller watching PersistentVolume
// events.
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
	pv, ok := obj.(*v1.PersistentVolume)
	if !ok {
		glog.Errorf("expected PersistentVolume but handler received %#v", obj)
		return
	}

	if ctrl.upgradeVolumeFrom1_2(pv) {
		// volume deleted
		return
	}

	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := ctrl.storeVolumeUpdate(pv)
	if err != nil {
		glog.Errorf("%v", err)
	}
	if !new {
		return
	}

	if err := ctrl.syncVolume(pv); err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		} else {
			glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
		}
	}
}
Example #21
func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
	n, err := findMyself(client)
	if err != nil {
		return err
	}

	n.ObjectMeta.Labels["kubeadm.alpha.kubernetes.io/role"] = "master"

	if !schedulable {
		taintsAnnotation, _ := json.Marshal([]api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
		if n.ObjectMeta.Annotations == nil {
			n.ObjectMeta.Annotations = map[string]string{}
		}
		n.ObjectMeta.Annotations[api.TaintsAnnotationKey] = string(taintsAnnotation)
	}

	if _, err := client.Nodes().Update(n); err != nil {
		if apierrs.IsConflict(err) {
			fmt.Println("<master/apiclient> temporarily unable to update master node metadata due to conflict (will retry)")
			time.Sleep(apiCallRetryInterval)
			return attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
		} else {
			return err
		}
	}

	return nil
}
Example #22
func TestConflictingUpdate(t *testing.T) {
	storage := makeLocalTestStorage()
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})
	realizedRoleObj, err := storage.Create(ctx, &authorizationapi.Role{
		ObjectMeta: kapi.ObjectMeta{Name: "my-role"},
		Rules: []authorizationapi.PolicyRule{
			{Verbs: sets.NewString(authorizationapi.VerbAll)},
		},
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	realizedRole := realizedRoleObj.(*authorizationapi.Role)

	role := &authorizationapi.Role{
		ObjectMeta: realizedRole.ObjectMeta,
		Rules: []authorizationapi.PolicyRule{
			{Verbs: sets.NewString("list", "update")},
		},
	}
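	// Appending to the resource version guarantees a mismatch with the stored object, so the update below must fail with a conflict.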
	role.ResourceVersion += "1"

	_, _, err = storage.Update(ctx, role.Name, rest.DefaultUpdatedObjectInfo(role, kapi.Scheme))
	if err == nil || !kapierrors.IsConflict(err) {
		t.Errorf("Expected conflict error, got: %#v", err)
	}
}
Example #23
func updateIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string) (newIng *v1beta1.Ingress) {
	var err error
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	ingress := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name: FederatedIngressName,
		},
		Spec: v1beta1.IngressSpec{
			Backend: &v1beta1.IngressBackend{
				ServiceName: "updated-testingress-service",
				ServicePort: intstr.FromInt(80),
			},
		},
	}

	// Retry a bounded number of times against the federated apiserver.
	for retryCount := 0; retryCount < 3; retryCount++ {
		_, err = clientset.Extensions().Ingresses(namespace).Get(FederatedIngressName)
		if err != nil {
			framework.Failf("failed to get ingress %q: %v", FederatedIngressName, err)
		}
		newIng, err = clientset.Extensions().Ingresses(namespace).Update(ingress)
		if err == nil {
			describeIng(namespace)
			return
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update ingress %q: %v", FederatedIngressName, err)
		}
	}
	framework.Failf("too many retries updating ingress %q", FederatedIngressName)
	return newIng
}
Example #24
func TestEtcdCreateWithConflict(t *testing.T) {
	storage, bindingStorage, _, server := newStorage(t)
	defer server.Terminate(t)
	ctx := api.NewDefaultContext()

	_, err := storage.Create(ctx, validNewPod())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Suddenly, a wild scheduler appears:
	binding := api.Binding{
		ObjectMeta: api.ObjectMeta{
			Namespace:   api.NamespaceDefault,
			Name:        "foo",
			Annotations: map[string]string{"label1": "value1"},
		},
		Target: api.ObjectReference{Name: "machine"},
	}
	_, err = bindingStorage.Create(ctx, &binding)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = bindingStorage.Create(ctx, &binding)
	if err == nil || !errors.IsConflict(err) {
		t.Fatalf("expected resource conflict error, not: %v", err)
	}
}
Example #25
func TestUpdateImageStreamOK(t *testing.T) {
	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.Data["/imagestreams/default/bar"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{
					ObjectMeta: kapi.ObjectMeta{Name: "bar", Namespace: "default"},
				}),
				ModifiedIndex: 2,
			},
		},
	}
	storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{})

	ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{})
	obj, created, err := storage.Update(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar", ResourceVersion: "1"}})
	if !errors.IsConflict(err) {
		t.Fatalf("unexpected non-error: %v", err)
	}
	obj, created, err = storage.Update(ctx, &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "bar", ResourceVersion: "2"}})
	if err != nil || created {
		t.Fatalf("Unexpected non-nil error: %#v", err)
	}
	stream, ok := obj.(*api.ImageStream)
	if !ok {
		t.Errorf("Expected image stream, got %#v", obj)
	}
	if stream.Name != "bar" {
		t.Errorf("Unexpected stream returned: %#v", stream)
	}
}
Example #26
func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, setFn SetFunc, getFn GetFunc) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	t.setObjectMeta(foo, "foo3")

	if err := setFn(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	storedFoo, err := getFn(ctx, foo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	older := copyOrDie(storedFoo)
	olderMeta := t.getObjectMetaOrFail(older)
	olderMeta.ResourceVersion = "1"

	_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), older)
	if err == nil {
		t.Errorf("Expected an error, but we didn't get one")
	} else if !errors.IsConflict(err) {
		t.Errorf("Expected Conflict error, got '%v'", err)
	}
}
Example #27
// Update updates an existing node api object
// by looking up the given hostname.
// The updated node merges the given slave attribute labels
// and annotations with the found api object.
func Update(
	client unversionedcore.NodesGetter,
	hostname string,
	slaveAttrLabels,
	annotations map[string]string,
) (n *api.Node, err error) {
	for i := 0; i < clientRetryCount; i++ {
		n, err = client.Nodes().Get(hostname)
		if err != nil {
			return nil, fmt.Errorf("error getting node %q: %v", hostname, err)
		}
		if n == nil {
			return nil, fmt.Errorf("no node instance returned for %q", hostname)
		}

		// update labels derived from Mesos slave attributes, keep all other labels
		n.Labels = mergeMaps(
			filterMap(n.Labels, IsNotSlaveAttributeLabel),
			slaveAttrLabels,
		)
		n.Annotations = mergeMaps(n.Annotations, annotations)

		n, err = client.Nodes().Update(n)
		if err == nil && !errors.IsConflict(err) {
			return n, nil
		}

		log.Infof("retry %d/%d: error updating node %v err %v", i, clientRetryCount, n, err)
		time.Sleep(time.Duration(i) * clientRetryInterval)
	}

	return nil, err
}
Example #28
// secretDeleted reacts to a Secret being deleted by removing a reference from the corresponding ServiceAccount if needed
func (e *TokensController) secretDeleted(obj interface{}) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		// Unknown type. If we missed a Secret deletion, the corresponding ServiceAccount (if it exists)
		// will get a secret recreated (if needed) during the ServiceAccount re-list
		return
	}

	serviceAccount, err := e.getServiceAccount(secret, false)
	if err != nil {
		glog.Error(err)
		return
	}
	if serviceAccount == nil {
		return
	}

	for i := 1; i <= NumServiceAccountRemoveReferenceRetries; i++ {
		if _, err := e.removeSecretReferenceIfNeeded(serviceAccount, secret.Name); err != nil {
			if apierrors.IsConflict(err) && i < NumServiceAccountRemoveReferenceRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}
			glog.Error(err)
			break
		}
		break
	}
}
Example #29
func (t *Tester) testUpdateFailsOnVersion(older runtime.Object) {
	_, _, err := t.storage.(rest.Updater).Update(t.TestContext(), older)
	if err == nil {
		t.Errorf("Expected an error, but we didn't get one")
	} else if !errors.IsConflict(err) {
		t.Errorf("Expected Conflict error, got '%v'", err)
	}
}
Example #30
// createTokenSecret creates a token secret for a given service account. It returns the secret, whether the secret is populated with token data, and any error.
func (e *DockercfgController) createTokenSecret(serviceAccount *api.ServiceAccount) (*api.Secret, bool, error) {
	pendingTokenName := serviceAccount.Annotations[PendingTokenAnnotation]

	// If this service account has no record of a pending token name, record one
	if len(pendingTokenName) == 0 {
		pendingTokenName = secret.Strategy.GenerateName(osautil.GetTokenSecretNamePrefix(serviceAccount))
		if serviceAccount.Annotations == nil {
			serviceAccount.Annotations = map[string]string{}
		}
		serviceAccount.Annotations[PendingTokenAnnotation] = pendingTokenName
		updatedServiceAccount, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
		// Conflicts mean we'll get called to sync this service account again
		if kapierrors.IsConflict(err) {
			return nil, false, nil
		}
		if err != nil {
			return nil, false, err
		}
		serviceAccount = updatedServiceAccount
	}

	// Return the token from cache
	existingTokenSecretObj, exists, err := e.secretCache.GetByKey(serviceAccount.Namespace + "/" + pendingTokenName)
	if err != nil {
		return nil, false, err
	}
	if exists {
		existingTokenSecret := existingTokenSecretObj.(*api.Secret)
		return existingTokenSecret, len(existingTokenSecret.Data[api.ServiceAccountTokenKey]) > 0, nil
	}

	// Try to create the named pending token
	tokenSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name:      pendingTokenName,
			Namespace: serviceAccount.Namespace,
			Annotations: map[string]string{
				api.ServiceAccountNameKey: serviceAccount.Name,
				api.ServiceAccountUIDKey:  string(serviceAccount.UID),
				api.CreatedByAnnotation:   CreateDockercfgSecretsController,
			},
		},
		Type: api.SecretTypeServiceAccountToken,
		Data: map[string][]byte{},
	}

	glog.V(4).Infof("Creating token secret %q for service account %s/%s", tokenSecret.Name, serviceAccount.Namespace, serviceAccount.Name)
	token, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(tokenSecret)
	// Already exists but not in cache means we'll get an add watch event and resync
	if kapierrors.IsAlreadyExists(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return token, len(token.Data[api.ServiceAccountTokenKey]) > 0, nil
}
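Every example above ultimately keys off one predicate: does an error represent an HTTP 409 Conflict from the apiserver? The toy stand-in below makes those semantics concrete; the statusError type is hypothetical, and the real IsConflict helpers inspect a typed apiserver status error rather than this illustrative struct.

package main

import "fmt"

// statusError is a minimal, hypothetical stand-in for an apiserver error.
type statusError struct{ code int }

func (e *statusError) Error() string { return fmt.Sprintf("server returned %d", e.code) }

// isConflict reports whether err carries HTTP status 409, mirroring the
// behavior (not the implementation) of the IsConflict helpers used above.
func isConflict(err error) bool {
	se, ok := err.(*statusError)
	return ok && se.code == 409
}

func main() {
	fmt.Println(isConflict(&statusError{code: 409})) // true
	fmt.Println(isConflict(&statusError{code: 404})) // false
	fmt.Println(isConflict(nil))                     // false
}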