Example #1
// CreateOrUpdate attempts to update the current etcd state with the provided
// allocation.
func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	last := ""
	err := e.storage.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true,
		storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) {
			existing := input.(*api.RangeAllocation)
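			// Reject the update if the caller supplied a stale resource version,
			// or if the resource was already initialized and no version was given.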
			switch {
			case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
				if snapshot.ResourceVersion != existing.ResourceVersion {
					return nil, k8serr.NewConflict(e.kind, "", fmt.Errorf("the provided resource version does not match"))
				}
			case len(existing.ResourceVersion) != 0:
				return nil, k8serr.NewConflict(e.kind, "", fmt.Errorf("another caller has already initialized the resource"))
			}
			last = snapshot.ResourceVersion
			return snapshot, nil
		}),
	)
	if err != nil {
		return etcderr.InterpretUpdateError(err, e.kind, "")
	}
	err = e.alloc.Restore(snapshot.Range, snapshot.Data)
	if err == nil {
		e.last = last
	}
	return err
}
Example #2
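// Update applies the replica count from the provided Scale object to the
// named deployment and returns the resulting Scale.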
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest("nil update passed to Scale")
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}
	deployment, err := (*r.registry).GetDeployment(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound("scale", scale.Name)
	}
	deployment.Spec.Replicas = scale.Spec.Replicas
	deployment, err = (*r.registry).UpdateDeployment(ctx, deployment)
	if err != nil {
		return nil, false, errors.NewConflict("scale", scale.Name, err)
	}
	return &extensions.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:              deployment.Name,
			Namespace:         deployment.Namespace,
			CreationTimestamp: deployment.CreationTimestamp,
		},
		Spec: extensions.ScaleSpec{
			Replicas: deployment.Spec.Replicas,
		},
		Status: extensions.ScaleStatus{
			Replicas: deployment.Status.Replicas,
			Selector: deployment.Spec.Selector,
		},
	}, false, nil
}
Example #3
// InterpretUpdateError converts a generic etcd error on a update
// operation into the appropriate API error.
func InterpretUpdateError(err error, kind, name string) error {
	switch {
	case tools.IsEtcdTestFailed(err), tools.IsEtcdNodeExist(err):
		return errors.NewConflict(kind, name, err)
	default:
		return err
	}
}
Example #4
// assignPod assigns the given pod to the given machine.
func (r *BindingREST) assignPod(ctx api.Context, podID string, machine string, annotations map[string]string) (err error) {
	if _, err = r.setPodHostAndAnnotations(ctx, podID, "", machine, annotations); err != nil {
		err = etcderr.InterpretGetError(err, "pod", podID)
		err = etcderr.InterpretUpdateError(err, "pod", podID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict("binding", podID, err)
		}
	}
	return
}
Example #5
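// TestRetryOnConflictError verifies that retryOnConflictError retries the
// wrapped function after a conflict error and succeeds on the second attempt.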
func TestRetryOnConflictError(t *testing.T) {
	mockClient := &testclient.Fake{}
	numTries := 0
	retryOnce := func(kubeClient client.Interface, namespace *api.Namespace) (*api.Namespace, error) {
		numTries++
		if numTries <= 1 {
			return namespace, errors.NewConflict(namespace.Kind, namespace.Name, fmt.Errorf("ERROR!"))
		}
		return namespace, nil
	}
	namespace := &api.Namespace{}
	_, err := retryOnConflictError(mockClient, namespace, retryOnce)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	if numTries != 2 {
		t.Errorf("Expected %v, but got %v", 2, numTries)
	}
}
Example #6
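// TestErrorsToAPIStatus checks that API errors are converted into the
// expected unversioned.Status responses.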
func TestErrorsToAPIStatus(t *testing.T) {
	cases := map[error]unversioned.Status{
		errors.NewNotFound("foo", "bar"): {
			Status:  unversioned.StatusFailure,
			Code:    http.StatusNotFound,
			Reason:  unversioned.StatusReasonNotFound,
			Message: "foo \"bar\" not found",
			Details: &unversioned.StatusDetails{
				Kind: "foo",
				Name: "bar",
			},
		},
		errors.NewAlreadyExists("foo", "bar"): {
			Status:  unversioned.StatusFailure,
			Code:    http.StatusConflict,
			Reason:  "AlreadyExists",
			Message: "foo \"bar\" already exists",
			Details: &unversioned.StatusDetails{
				Kind: "foo",
				Name: "bar",
			},
		},
		errors.NewConflict("foo", "bar", stderrs.New("failure")): {
			Status:  unversioned.StatusFailure,
			Code:    http.StatusConflict,
			Reason:  "Conflict",
			Message: "foo \"bar\" cannot be updated: failure",
			Details: &unversioned.StatusDetails{
				Kind: "foo",
				Name: "bar",
			},
		},
	}
	for k, v := range cases {
		actual := errToAPIStatus(k)
		if !reflect.DeepEqual(actual, &v) {
			t.Errorf("%s: Expected %#v, Got %#v", k, v, actual)
		}
	}
}
Example #7
// Delete enforces life-cycle rules for namespace termination
func (r *REST) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	nsObj, err := r.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	namespace := nsObj.(*api.Namespace)

	// upon first request to delete, we switch the phase to start namespace termination
	if namespace.DeletionTimestamp.IsZero() {
		now := unversioned.Now()
		namespace.DeletionTimestamp = &now
		namespace.Status.Phase = api.NamespaceTerminating
		result, _, err := r.status.Update(ctx, namespace)
		return result, err
	}

	// prior to final deletion, we must ensure that finalizers is empty
	if len(namespace.Spec.Finalizers) != 0 {
		err = apierrors.NewConflict("Namespace", namespace.Name, fmt.Errorf("The system is ensuring all content is removed from this namespace.  Upon completion, this namespace will automatically be purged by the system."))
		return nil, err
	}
	return r.Etcd.Delete(ctx, name, nil)
}
Example #8
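// Update replaces an existing Service, allocating any newly requested node
// ports and deferring the release of ports that are no longer in use.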
func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	service := obj.(*api.Service)
	if !api.ValidNamespace(ctx, &service.ObjectMeta) {
		return nil, false, errors.NewConflict("service", service.Namespace, fmt.Errorf("Service.Namespace does not match the provided context"))
	}

	oldService, err := rs.registry.GetService(ctx, service.Name)
	if err != nil {
		return nil, false, err
	}

	// Copy over non-user fields
	// TODO: make this a merge function
	if errs := validation.ValidateServiceUpdate(oldService, service); len(errs) > 0 {
		return nil, false, errors.NewInvalid("service", service.Name, errs)
	}

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	assignNodePorts := shouldAssignNodePorts(service)

	oldNodePorts := CollectServiceNodePorts(oldService)

	newNodePorts := []int{}
	if assignNodePorts {
		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]
			nodePort := servicePort.NodePort
			if nodePort != 0 {
				if !contains(oldNodePorts, nodePort) {
					err := nodePortOp.Allocate(nodePort)
					if err != nil {
						el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", nodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
						return nil, false, errors.NewInvalid("Service", service.Name, el)
					}
				}
			} else {
				nodePort, err = nodePortOp.AllocateNext()
				if err != nil {
					el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", nodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
					return nil, false, errors.NewInvalid("Service", service.Name, el)
				}
				servicePort.NodePort = nodePort
			}
			// Detect duplicate node ports; this should have been caught by validation, so we panic
			if contains(newNodePorts, nodePort) {
				panic("duplicate node port")
			}
			newNodePorts = append(newNodePorts, nodePort)
		}
	} else {
		// Validate should have validated that nodePort == 0
	}

	// The comparison loops are O(N^2), but we don't expect N to be huge
	// (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot)
	for _, oldNodePort := range oldNodePorts {
		if !contains(newNodePorts, oldNodePort) {
			continue
		}
		nodePortOp.ReleaseDeferred(oldNodePort)
	}

	// Remove any LoadBalancerStatus now if Type != LoadBalancer;
	// although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity.
	if service.Spec.Type != api.ServiceTypeLoadBalancer {
		service.Status.LoadBalancer = api.LoadBalancerStatus{}
	}

	out, err := rs.registry.UpdateService(ctx, service)

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// problems should be fixed by an eventual reconciliation / restart
			glog.Errorf("error(s) committing NodePorts changes: %v", el)
		}
	}

	return out, false, err
}
Example #9
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	trace := util.NewTrace("Update " + reflect.TypeOf(obj).String())
	defer trace.LogIfLong(time.Second)
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, false, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	// If AllowUnconditionalUpdate() is true and the object specified by the user does not have a resource version,
	// then we populate it with the latest version.
	// Else, we check that the version specified by the user matches the version of latest etcd object.
	resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
	if err != nil {
		return nil, false, err
	}
	doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
	// TODO: expose TTL
	creating := false
	out := e.NewFunc()
	err = e.Storage.GuaranteedUpdate(key, out, true, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.EndpointName, name)
			}
			creating = true
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest etcd object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.Expiration, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.EndpointName, name, fmt.Errorf("the object has been modified; please apply your changes to the latest version and try again"))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
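		// Only return an explicit TTL when it differs from the node's current TTL.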
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		if creating {
			err = etcderr.InterpretCreateError(err, e.EndpointName, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		} else {
			err = etcderr.InterpretUpdateError(err, e.EndpointName, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}