Example #1
// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error.
func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*metav1.ObjectMeta, schema.GroupVersionKind, error) {
	objectMeta, err := metav1.ObjectMetaFor(obj)
	if err != nil {
		return nil, schema.GroupVersionKind{}, errors.NewInternalError(err)
	}
	kinds, _, err := typer.ObjectKinds(obj)
	if err != nil {
		return nil, schema.GroupVersionKind{}, errors.NewInternalError(err)
	}
	return objectMeta, kinds[0], nil
}
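
A minimal sketch of how a caller can recognize the internal-error wrapping used above; doLookup is a hypothetical stand-in for a helper like objectMetaAndKind, and only the public k8s.io/apimachinery error helpers are assumed:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
)

// doLookup is a hypothetical stand-in for a helper like objectMetaAndKind
// that wraps unexpected failures with NewInternalError.
func doLookup() error {
	return errors.NewInternalError(fmt.Errorf("no kind registered for type"))
}

func main() {
	if err := doLookup(); err != nil {
		// IsInternalError lets callers recognize the wrapped 500-class error.
		fmt.Println(errors.IsInternalError(err)) // true
		fmt.Println(err)                         // Internal error occurred: ...
	}
}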
Example #2
// NewNotFound is a utility function to return a well-formatted admission control error response
func NewNotFound(a Attributes) error {
	name, resource, err := extractResourceName(a)
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	return apierrors.NewNotFound(resource, name)
}
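
A sketch of the structured error NewNotFound ultimately produces, assuming only the public apimachinery helpers (the pods resource and name are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build the same kind of structured error the helper above returns.
	resource := schema.GroupResource{Group: "", Resource: "pods"}
	err := errors.NewNotFound(resource, "mypod")

	fmt.Println(errors.IsNotFound(err)) // true
	fmt.Println(err)                    // pods "mypod" not found
}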
Example #3
// transformResponse converts an API response into a structured API object
func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
	var body []byte
	// best-effort read of the response body; a failed read leaves body nil
	if resp.Body != nil {
		if data, err := ioutil.ReadAll(resp.Body); err == nil {
			body = data
		}
	}

	glogBody("Response Body", body)

	// verify the content type is accurate
	contentType := resp.Header.Get("Content-Type")
	decoder := r.serializers.Decoder
	if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
		mediaType, params, err := mime.ParseMediaType(contentType)
		if err != nil {
			return Result{err: errors.NewInternalError(err)}
		}
		decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
		if err != nil {
			// if we fail to negotiate a decoder, treat this as an unstructured error
			switch {
			case resp.StatusCode == http.StatusSwitchingProtocols:
				// no-op, we've been upgraded
			case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
				return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
			}
			return Result{
				body:        body,
				contentType: contentType,
				statusCode:  resp.StatusCode,
			}
		}
	}

	switch {
	case resp.StatusCode == http.StatusSwitchingProtocols:
		// no-op, we've been upgraded
	case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
		// calculate an unstructured error from the response which the Result object may use if the caller
		// did not return a structured error.
		retryAfter, _ := retryAfterSeconds(resp)
		err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
		return Result{
			body:        body,
			contentType: contentType,
			statusCode:  resp.StatusCode,
			decoder:     decoder,
			err:         err,
		}
	}

	return Result{
		body:        body,
		contentType: contentType,
		statusCode:  resp.StatusCode,
		decoder:     decoder,
	}
}
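
The content-type renegotiation above starts with mime.ParseMediaType; a standard-library-only sketch of that first step:

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Split a Content-Type header into its media type and parameters,
	// as transformResponse does before renegotiating a decoder.
	mediaType, params, err := mime.ParseMediaType("application/json; charset=utf-8")
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType)         // application/json
	fmt.Println(params["charset"]) // utf-8
}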
Example #4
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
	// do not double wrap an error of same type
	if apierrors.IsForbidden(internalError) {
		return internalError
	}
	name, resource, err := extractResourceName(a)
	if err != nil {
		return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err}))
	}
	return apierrors.NewForbidden(resource, name, internalError)
}
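
A sketch of the same double-wrap guard in isolation; wrapForbidden is a hypothetical helper built only on the public apimachinery API:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// wrapForbidden mirrors the guard above: an error that is already a
// Forbidden status is returned unchanged instead of being wrapped again.
func wrapForbidden(resource schema.GroupResource, name string, err error) error {
	if errors.IsForbidden(err) {
		return err
	}
	return errors.NewForbidden(resource, name, err)
}

func main() {
	res := schema.GroupResource{Resource: "pods"}
	inner := errors.NewForbidden(res, "mypod", fmt.Errorf("denied"))
	// Wrapping twice yields the same error value, not a nested one.
	fmt.Println(wrapForbidden(res, "mypod", inner) == error(inner)) // true
}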
Example #5
func (r *RollbackREST) rollbackDeployment(ctx genericapirequest.Context, deploymentID string, config *extensions.RollbackConfig, annotations map[string]string) error {
	if _, err := r.setDeploymentRollback(ctx, deploymentID, config, annotations); err != nil {
		err = storeerr.InterpretGetError(err, extensions.Resource("deployments"), deploymentID)
		err = storeerr.InterpretUpdateError(err, extensions.Resource("deployments"), deploymentID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewInternalError(err)
		}
		return err
	}
	return nil
}
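
This example (and Example #18 below) uses the same fallback: anything that is not already a *errors.StatusError gets wrapped as an internal error. A stand-alone sketch of that pattern, where ensureStatusError is a hypothetical helper:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// ensureStatusError mirrors the fallback above: errors that are not
// already structured StatusErrors are wrapped as internal errors.
func ensureStatusError(err error) error {
	if _, ok := err.(*apierrors.StatusError); ok {
		return err
	}
	return apierrors.NewInternalError(err)
}

func main() {
	plain := fmt.Errorf("connection reset")
	fmt.Println(ensureStatusError(plain)) // Internal error occurred: connection reset
}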
Example #6
// Check converts a non-success HTTP response into a structured API error,
// reading at most maxReadLength bytes of the body for the message.
func (checker GenericHttpResponseChecker) Check(resp *http.Response) error {
	if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
		defer resp.Body.Close()
		bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxReadLength))
		if err != nil {
			return errors.NewInternalError(err)
		}
		bodyText := string(bodyBytes)

		switch {
		case resp.StatusCode == http.StatusInternalServerError:
			return errors.NewInternalError(fmt.Errorf("%s", bodyText))
		case resp.StatusCode == http.StatusBadRequest:
			return errors.NewBadRequest(bodyText)
		case resp.StatusCode == http.StatusNotFound:
			return errors.NewGenericServerResponse(resp.StatusCode, "", checker.QualifiedResource, checker.Name, bodyText, 0, false)
		}
		return errors.NewGenericServerResponse(resp.StatusCode, "", checker.QualifiedResource, checker.Name, bodyText, 0, false)
	}
	return nil
}
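
A standard-library-only sketch of the same status-code triage; classify is a hypothetical helper standing in for the structured API errors used above:

package main

import (
	"fmt"
	"net/http"
)

// classify mirrors Check: 2xx is success; 500 and 400 get specific
// treatment; everything else maps to a generic server response error.
func classify(statusCode int, body string) error {
	if statusCode >= http.StatusOK && statusCode <= http.StatusPartialContent {
		return nil
	}
	switch statusCode {
	case http.StatusInternalServerError:
		return fmt.Errorf("internal error: %s", body)
	case http.StatusBadRequest:
		return fmt.Errorf("bad request: %s", body)
	default:
		return fmt.Errorf("server responded %d: %s", statusCode, body)
	}
}

func main() {
	fmt.Println(classify(http.StatusOK, "Success"))                  // <nil>
	fmt.Println(classify(http.StatusBadRequest, "Invalid request.")) // bad request: ...
}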
Example #7
func hasUID(obj runtime.Object) (bool, error) {
	if obj == nil {
		return false, nil
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return false, errors.NewInternalError(err)
	}
	if len(accessor.GetUID()) == 0 {
		return false, nil
	}
	return true, nil
}
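
A runnable sketch of what hasUID inspects, using an unstructured object so no typed scheme is required; it assumes the public apimachinery meta and unstructured packages:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(accessor.GetUID()) == 0) // true: no UID set yet
	accessor.SetUID("0a1b2c3d")
	fmt.Println(len(accessor.GetUID()) == 0) // false
}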
Example #8
// getDefaultClass returns the default StorageClass from the store, or nil.
func getDefaultClass(store cache.Store) (*storage.StorageClass, error) {
	defaultClasses := []*storage.StorageClass{}
	for _, c := range store.List() {
		class, ok := c.(*storage.StorageClass)
		if !ok {
			return nil, errors.NewInternalError(fmt.Errorf("error converting stored object to StorageClass: %v", c))
		}
		if storageutil.IsDefaultAnnotation(class.ObjectMeta) {
			defaultClasses = append(defaultClasses, class)
			glog.V(4).Infof("getDefaultClass added: %s", class.Name)
		}
	}

	if len(defaultClasses) == 0 {
		return nil, nil
	}
	if len(defaultClasses) > 1 {
		glog.V(4).Infof("getDefaultClass %s defaults found", len(defaultClasses))
		return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
	}
	return defaultClasses[0], nil
}
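
The zero/one/many logic above is easy to follow in isolation; a toy sketch with plain strings standing in for StorageClass objects:

package main

import "fmt"

// pickDefault mirrors getDefaultClass: zero defaults is not an error,
// exactly one wins, and more than one is reported as a conflict.
func pickDefault(names []string, isDefault map[string]bool) (string, error) {
	defaults := []string{}
	for _, n := range names {
		if isDefault[n] {
			defaults = append(defaults, n)
		}
	}
	switch len(defaults) {
	case 0:
		return "", nil
	case 1:
		return defaults[0], nil
	default:
		return "", fmt.Errorf("%d default StorageClasses were found", len(defaults))
	}
}

func main() {
	got, err := pickDefault([]string{"fast", "slow"}, map[string]bool{"fast": true})
	fmt.Println(got, err) // fast <nil>
}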
Example #9
func (e *exists) Admit(a admission.Attributes) (err error) {
	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		return nil
	}

	// we need to wait for our caches to warm
	if !e.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}
	namespace := &api.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := e.namespaceInformer.GetStore().Get(namespace)
	if err != nil {
		return errors.NewInternalError(err)
	}
	if exists {
		return nil
	}

	// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
	_, err = e.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return err
		}
		return errors.NewInternalError(err)
	}

	return nil
}
Example #10
// InterpretDeleteError converts a generic error on a delete
// operation into the appropriate API error.
func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, name string) error {
	switch {
	case storage.IsNotFound(err):
		return errors.NewNotFound(qualifiedResource, name)
	case storage.IsUnreachable(err):
		return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level
	case storage.IsConflict(err), storage.IsNodeExist(err), storage.IsInvalidObj(err):
		return errors.NewConflict(qualifiedResource, name, err)
	case storage.IsInternalError(err):
		return errors.NewInternalError(err)
	default:
		return err
	}
}
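
A self-contained sketch of the interpret pattern, using standard-library sentinel errors in place of the storage package's classifiers (errNotFound and errConflict are made up):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for storage.IsNotFound / IsConflict.
var (
	errNotFound = errors.New("key not found")
	errConflict = errors.New("resource version conflict")
)

// interpretDeleteError mirrors the shape of InterpretDeleteError:
// classify the low-level error, then re-express it in API terms.
func interpretDeleteError(err error, resource, name string) error {
	switch {
	case errors.Is(err, errNotFound):
		return fmt.Errorf("%s %q not found", resource, name)
	case errors.Is(err, errConflict):
		return fmt.Errorf("conflict deleting %s %q: %v", resource, name, err)
	default:
		return err
	}
}

func main() {
	fmt.Println(interpretDeleteError(errNotFound, "pods", "foo")) // pods "foo" not found
}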
Example #11
func TestGenericHttpResponseChecker(t *testing.T) {
	responseChecker := NewGenericHttpResponseChecker(api.Resource("pods"), "foo")
	tests := []struct {
		resp        *http.Response
		expectError bool
		expected    error
		name        string
	}{
		{
			resp: &http.Response{
				Body:       ioutil.NopCloser(bytes.NewBufferString("Success")),
				StatusCode: http.StatusOK,
			},
			expectError: false,
			name:        "ok",
		},
		{
			resp: &http.Response{
				Body:       ioutil.NopCloser(bytes.NewBufferString("Invalid request.")),
				StatusCode: http.StatusBadRequest,
			},
			expectError: true,
			expected:    errors.NewBadRequest("Invalid request."),
			name:        "bad request",
		},
		{
			resp: &http.Response{
				Body:       ioutil.NopCloser(bytes.NewBufferString("Pod does not exist.")),
				StatusCode: http.StatusInternalServerError,
			},
			expectError: true,
			expected:    errors.NewInternalError(fmt.Errorf("%s", "Pod does not exist.")),
			name:        "internal server error",
		},
	}
	for _, test := range tests {
		err := responseChecker.Check(test.resp)
		if test.expectError && err == nil {
			t.Errorf("%s: unexpected non-error", test.name)
		}
		if !test.expectError && err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
		}
		if test.expectError && !reflect.DeepEqual(err, test.expected) {
			t.Errorf("%s: expected: %s, saw: %s", test.name, test.expected, err)
		}
	}
}
Example #12
// getMatchingPolicies returns policies from the store.  For now this returns everything
// in the future it can filter based on UserInfo and permissions.
//
// TODO: this will likely need optimization since the initial implementation will
// always query for authorization.  Needs scale testing and possibly checking against
// a cache.
func getMatchingPolicies(store cache.Store, user user.Info, sa user.Info, authz authorizer.Authorizer) ([]*extensions.PodSecurityPolicy, error) {
	matchedPolicies := make([]*extensions.PodSecurityPolicy, 0)

	for _, c := range store.List() {
		constraint, ok := c.(*extensions.PodSecurityPolicy)
		if !ok {
			return nil, errors.NewInternalError(fmt.Errorf("error converting object from store to a pod security policy: %v", c))
		}

		if authorizedForPolicy(user, constraint, authz) || authorizedForPolicy(sa, constraint, authz) {
			matchedPolicies = append(matchedPolicies, constraint)
		}
	}

	return matchedPolicies, nil
}
Example #13
// ServeAttach handles requests to attach to a container. After creating/receiving the required
// streams, it delegates the actual attaching to attacher.
func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, podName string, uid types.UID, container string, streamOpts *Options, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) {
	ctx, ok := createStreams(req, w, streamOpts, supportedProtocols, idleTimeout, streamCreationTimeout)
	if !ok {
		// error is handled by createStreams
		return
	}
	defer ctx.conn.Close()

	err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan)
	if err != nil {
		err = fmt.Errorf("error attaching to container: %v", err)
		runtime.HandleError(err)
		ctx.writeStatus(apierrors.NewInternalError(err))
	} else {
		ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
			Status: metav1.StatusSuccess,
		}})
	}
}
Example #14
func TestInputStreamInternalServerErrorTransport(t *testing.T) {
	message := "Pod is in PodPending"
	location, _ := url.Parse("http://www.example.com")
	streamer := &LocationStreamer{
		Location:        location,
		Transport:       fakeInternalServerErrorTransport("text/plain", message),
		ResponseChecker: NewGenericHttpResponseChecker(api.Resource(""), ""),
	}
	expectedError := errors.NewInternalError(fmt.Errorf("%s", message))

	_, _, _, err := streamer.InputStream("", "")
	if err == nil {
		t.Errorf("unexpected non-error")
		return
	}

	if !reflect.DeepEqual(err, expectedError) {
		t.Errorf("StreamInternalServerError does not match. Got: %s. Expected: %s.", err, expectedError)
	}
}
Example #15
// UpdateREST registers the REST handlers for this APIGroupVersion to an existing web service
// in the restful Container.  It will use the prefix (root/version) to find the existing
// web service.  If a web service does not exist within the container to support the prefix
// this method will return an error.
func (g *APIGroupVersion) UpdateREST(container *restful.Container) error {
	installer := g.newInstaller()
	var ws *restful.WebService

	for i, s := range container.RegisteredWebServices() {
		if s.RootPath() == installer.prefix {
			ws = container.RegisteredWebServices()[i]
			break
		}
	}

	if ws == nil {
		return apierrors.NewInternalError(fmt.Errorf("unable to find an existing webservice for prefix %s", installer.prefix))
	}
	apiResources, registrationErrors := installer.Install(ws)
	lister := g.ResourceLister
	if lister == nil {
		lister = staticLister{apiResources}
	}
	AddSupportedResourcesWebService(g.Serializer, ws, g.GroupVersion, lister)
	return utilerrors.NewAggregate(registrationErrors)
}
Example #16
// BeforeUpdate ensures that common operations for all resources are performed on update. It only returns
// errors that can be converted to api.Status. It will invoke update validation with the provided existing
// and updated objects.
func BeforeUpdate(strategy RESTUpdateStrategy, ctx genericapirequest.Context, obj, old runtime.Object) error {
	objectMeta, kind, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return kerr
	}
	if strategy.NamespaceScoped() {
		if !ValidNamespace(ctx, objectMeta) {
			return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request")
		}
	} else {
		objectMeta.Namespace = api.NamespaceNone
	}
	// Ensure requests cannot update generation
	oldMeta, err := metav1.ObjectMetaFor(old)
	if err != nil {
		return err
	}
	objectMeta.Generation = oldMeta.Generation

	strategy.PrepareForUpdate(ctx, obj, old)

	// ClusterName is ignored and should not be saved
	objectMeta.ClusterName = ""

	// Ensure some common fields, like UID, are validated for all resources.
	errs, err := validateCommonFields(obj, old)
	if err != nil {
		return errors.NewInternalError(err)
	}

	errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...)
	if len(errs) > 0 {
		return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs)
	}

	strategy.Canonicalize(obj)

	return nil
}
Example #17
// ServeExec handles requests to execute a command in a container. After
// creating/receiving the required streams, it delegates the actual execution
// to the executor.
func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podName string, uid types.UID, container string, cmd []string, streamOpts *Options, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) {
	ctx, ok := createStreams(req, w, streamOpts, supportedProtocols, idleTimeout, streamCreationTimeout)
	if !ok {
		// error is handled by createStreams
		return
	}
	defer ctx.conn.Close()

	err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0)
	if err != nil {
		if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() {
			rc := exitErr.ExitStatus()
			ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
				Status: metav1.StatusFailure,
				Reason: NonZeroExitCodeReason,
				Details: &metav1.StatusDetails{
					Causes: []metav1.StatusCause{
						{
							Type:    ExitCodeCauseType,
							Message: fmt.Sprintf("%d", rc),
						},
					},
				},
				Message: fmt.Sprintf("command terminated with non-zero exit code: %v", exitErr),
			}})
		} else {
			err = fmt.Errorf("error executing command in container: %v", err)
			runtime.HandleError(err)
			ctx.writeStatus(apierrors.NewInternalError(err))
		}
	} else {
		ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
			Status: metav1.StatusSuccess,
		}})
	}
}
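
The exit-code branch above distinguishes "command ran and failed" from "could not run the command". A standard-library sketch of the same distinction using os/exec (not the utilexec interface used above); it assumes a POSIX shell is available:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Run a command that exits non-zero; report its code rather than
	// treating the failure as an internal error.
	err := exec.Command("sh", "-c", "exit 3").Run()
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		fmt.Printf("command terminated with non-zero exit code: %d\n", exitErr.ExitCode())
	} else if err != nil {
		fmt.Println("error executing command:", err)
	}
}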
Example #18
// Delete enforces life-cycle rules for namespace termination
func (r *REST) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	nsObj, err := r.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	namespace := nsObj.(*api.Namespace)

	// Ensure we have a UID precondition
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	if options.Preconditions == nil {
		options.Preconditions = &api.Preconditions{}
	}
	if options.Preconditions.UID == nil {
		options.Preconditions.UID = &namespace.UID
	} else if *options.Preconditions.UID != namespace.UID {
		err = apierrors.NewConflict(
			api.Resource("namespaces"),
			name,
			fmt.Errorf("Precondition failed: UID in precondition: %v, UID in object meta: %v", *options.Preconditions.UID, namespace.UID),
		)
		return nil, err
	}

	// upon first request to delete, we switch the phase to start namespace termination
	// TODO: enhance graceful deletion's calls to DeleteStrategy to allow phase change and finalizer patterns
	if namespace.DeletionTimestamp.IsZero() {
		key, err := r.Store.KeyFunc(ctx, name)
		if err != nil {
			return nil, err
		}

		preconditions := storage.Preconditions{UID: options.Preconditions.UID}

		out := r.Store.NewFunc()
		err = r.Store.Storage.GuaranteedUpdate(
			ctx, key, out, false, &preconditions,
			storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
				existingNamespace, ok := existing.(*api.Namespace)
				if !ok {
					// wrong type
					return nil, fmt.Errorf("expected *api.Namespace, got %v", existing)
				}
				// Set the deletion timestamp if needed
				if existingNamespace.DeletionTimestamp.IsZero() {
					now := metav1.Now()
					existingNamespace.DeletionTimestamp = &now
				}
				// Set the namespace phase to terminating, if needed
				if existingNamespace.Status.Phase != api.NamespaceTerminating {
					existingNamespace.Status.Phase = api.NamespaceTerminating
				}

				// Remove orphan finalizer if options.OrphanDependents = false.
				if options.OrphanDependents != nil && !*options.OrphanDependents {
					// remove Orphan finalizer.
					newFinalizers := []string{}
					for i := range existingNamespace.ObjectMeta.Finalizers {
						finalizer := existingNamespace.ObjectMeta.Finalizers[i]
						if string(finalizer) != api.FinalizerOrphan {
							newFinalizers = append(newFinalizers, finalizer)
						}
					}
					existingNamespace.ObjectMeta.Finalizers = newFinalizers
				}
				return existingNamespace, nil
			}),
		)

		if err != nil {
			err = storageerr.InterpretGetError(err, api.Resource("namespaces"), name)
			err = storageerr.InterpretUpdateError(err, api.Resource("namespaces"), name)
			if _, ok := err.(*apierrors.StatusError); !ok {
				err = apierrors.NewInternalError(err)
			}
			return nil, err
		}

		return out, nil
	}

	// prior to final deletion, we must ensure that finalizers is empty
	if len(namespace.Spec.Finalizers) != 0 {
		err = apierrors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("The system is ensuring all content is removed from this namespace.  Upon completion, this namespace will automatically be purged by the system."))
		return nil, err
	}
	return r.Store.Delete(ctx, name, options)
}
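
The orphan-finalizer removal above is a plain filter over the finalizer list; a toy sketch of the same loop:

package main

import "fmt"

// removeFinalizer mirrors the loop above: copy every finalizer except
// the one being dropped.
func removeFinalizer(finalizers []string, drop string) []string {
	kept := []string{}
	for _, f := range finalizers {
		if f != drop {
			kept = append(kept, f)
		}
	}
	return kept
}

func main() {
	fmt.Println(removeFinalizer([]string{"orphan", "kubernetes"}, "orphan")) // [kubernetes]
}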
Example #19
func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service) (bool, error) {
	// Health Check Node Port handling during updates
	//
	// Case 1. Transition from globalTraffic to OnlyLocal for the ESIPP annotation
	//
	//   Allocate a health check node port or attempt to reserve the user-specified one, if provided.
	//   Insert health check node port as an annotation into the service's annotations
	//
	// Case 2. Transition from OnlyLocal to Global for the ESIPP annotation
	//
	//   Free the existing healthCheckNodePort and clear the health check nodePort annotation
	//
	// Case 3. No change (Global ---stays--> Global) but prevent invalid annotation manipulations
	//
	//   Reject insertion of the "service.alpha.kubernetes.io/healthcheck-nodeport" annotation
	//
	// Case 4. No change (OnlyLocal ---stays--> OnlyLocal) but prevent invalid annotation manipulations
	//
	//   Reject deletion of the "service.alpha.kubernetes.io/healthcheck-nodeport" annotation
	//   Reject changing the value of the healthCheckNodePort annotation
	//
	oldServiceHasHealthCheckNodePort := shouldCheckOrAssignHealthCheckNodePort(oldService)
	oldHealthCheckNodePort := apiservice.GetServiceHealthCheckNodePort(oldService)

	assignHealthCheckNodePort := shouldCheckOrAssignHealthCheckNodePort(service)
	requestedHealthCheckNodePort := apiservice.GetServiceHealthCheckNodePort(service)

	switch {
	case !oldServiceHasHealthCheckNodePort && assignHealthCheckNodePort:
		glog.Infof("Transition from Global LB service to OnlyLocal service")
		if requestedHealthCheckNodePort > 0 {
			// If the request has a health check nodePort in mind, attempt to reserve it
			err := rs.serviceNodePorts.Allocate(int(requestedHealthCheckNodePort))
			if err != nil {
				errmsg := fmt.Sprintf("Failed to allocate requested HealthCheck nodePort %v:%v",
					requestedHealthCheckNodePort, err)
				el := field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"),
					apiservice.BetaAnnotationHealthCheckNodePort, errmsg)}
				return false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
			}
			glog.Infof("Reserved user requested nodePort: %d", requestedHealthCheckNodePort)
		} else {
			// If the request has no health check nodePort specified, allocate any
			healthCheckNodePort, err := rs.serviceNodePorts.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here?  It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			// Insert the newly allocated health check port as an annotation (plan of record for Alpha)
			service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort] = fmt.Sprintf("%d", healthCheckNodePort)
			glog.Infof("Reserved health check nodePort: %d", healthCheckNodePort)
		}

	case oldServiceHasHealthCheckNodePort && !assignHealthCheckNodePort:
		glog.Infof("Transition from OnlyLocal LB service to Global service")
		err := rs.serviceNodePorts.Release(int(oldHealthCheckNodePort))
		if err != nil {
			glog.Warningf("Error releasing service health check %s node port %d: %v", service.Name, oldHealthCheckNodePort, err)
			return false, errors.NewInternalError(fmt.Errorf("failed to free health check nodePort: %v", err))
		}
		delete(service.Annotations, apiservice.BetaAnnotationHealthCheckNodePort)
		delete(service.Annotations, apiservice.AlphaAnnotationHealthCheckNodePort)
		glog.Infof("Freed health check nodePort: %d", oldHealthCheckNodePort)

	case !oldServiceHasHealthCheckNodePort && !assignHealthCheckNodePort:
		if _, ok := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort]; ok {
			glog.Warningf("Attempt to insert health check node port annotation DENIED")
			el := field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"),
				apiservice.BetaAnnotationHealthCheckNodePort, "Cannot insert healthcheck nodePort annotation")}
			return false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}

	case oldServiceHasHealthCheckNodePort && assignHealthCheckNodePort:
		if _, ok := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort]; !ok {
			glog.Warningf("Attempt to delete health check node port annotation DENIED")
			el := field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"),
				apiservice.BetaAnnotationHealthCheckNodePort, "Cannot delete healthcheck nodePort annotation")}
			return false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
		if oldHealthCheckNodePort != requestedHealthCheckNodePort {
			glog.Warningf("Attempt to change value of health check node port annotation DENIED")
			el := field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"),
				apiservice.BetaAnnotationHealthCheckNodePort, "Cannot change healthcheck nodePort during update")}
			return false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
	}
	return true, nil
}
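
A toy allocator makes the four transitions above easy to follow; portSet is a made-up stand-in for the real node-port allocator:

package main

import "fmt"

// portSet is a toy stand-in for the node-port allocator used above.
type portSet map[int]bool

// Allocate reserves a specific port, failing if it is already taken.
func (p portSet) Allocate(port int) error {
	if p[port] {
		return fmt.Errorf("port %d is already allocated", port)
	}
	p[port] = true
	return nil
}

// Release frees a previously reserved port.
func (p portSet) Release(port int) { delete(p, port) }

func main() {
	ports := portSet{}
	fmt.Println(ports.Allocate(30100)) // <nil>
	fmt.Println(ports.Allocate(30100)) // port 30100 is already allocated
	ports.Release(30100)
	fmt.Println(ports.Allocate(30100)) // <nil> again
}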
Example #20
func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	oldService, err := rs.registry.GetService(ctx, name, &metav1.GetOptions{})
	if err != nil {
		return nil, false, err
	}

	obj, err := objInfo.UpdatedObject(ctx, oldService)
	if err != nil {
		return nil, false, err
	}

	service := obj.(*api.Service)
	if !rest.ValidNamespace(ctx, &service.ObjectMeta) {
		return nil, false, errors.NewConflict(api.Resource("services"), service.Namespace, fmt.Errorf("Service.Namespace does not match the provided context"))
	}

	// Copy over non-user fields
	// TODO: make this a merge function
	if errs := validation.ValidateServiceUpdate(service, oldService); len(errs) > 0 {
		return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs)
	}

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	assignNodePorts := shouldAssignNodePorts(service)

	oldNodePorts := CollectServiceNodePorts(oldService)

	newNodePorts := []int{}
	if assignNodePorts {
		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]
			nodePort := int(servicePort.NodePort)
			if nodePort != 0 {
				if !contains(oldNodePorts, nodePort) {
					err := nodePortOp.Allocate(nodePort)
					if err != nil {
						el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort, err.Error())}
						return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
					}
				}
			} else {
				nodePort, err = nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here?  It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return nil, false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
			}
			// Detect duplicate node ports; this should have been caught by validation, so we panic
			if contains(newNodePorts, nodePort) {
				panic("duplicate node port")
			}
			newNodePorts = append(newNodePorts, nodePort)
		}
	} else {
		// Validate should have validated that nodePort == 0
	}

	// The comparison loops are O(N^2), but we don't expect N to be huge
	// (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot)
	for _, oldNodePort := range oldNodePorts {
		if !contains(newNodePorts, oldNodePort) {
			continue
		}
		nodePortOp.ReleaseDeferred(oldNodePort)
	}

	// Remove any LoadBalancerStatus now if Type != LoadBalancer;
	// although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity.
	if service.Spec.Type != api.ServiceTypeLoadBalancer {
		service.Status.LoadBalancer = api.LoadBalancerStatus{}
	}

	success, err := rs.healthCheckNodePortUpdate(oldService, service)
	if !success {
		return nil, false, err
	}

	out, err := rs.registry.UpdateService(ctx, service)

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// problems should be fixed by an eventual reconciliation / restart
			glog.Errorf("error(s) committing NodePorts changes: %v", el)
		}
	}

	return out, false, err
}
Example #21
// Delete removes the item from storage.
func (e *Store) Delete(ctx genericapirequest.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}

	obj := e.NewFunc()
	if err := e.Storage.Get(ctx, key, "", obj, false); err != nil {
		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
	}
	// support older consumers of delete by treating "nil" as delete immediately
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	var preconditions storage.Preconditions
	if options.Preconditions != nil {
		preconditions.UID = options.Preconditions.UID
	}
	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
	if err != nil {
		return nil, err
	}
	// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending
	if pendingGraceful {
		return e.finalizeDelete(obj, false)
	}
	// check if obj has pending finalizers
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, kubeerr.NewInternalError(err)
	}
	pendingFinalizers := len(accessor.GetFinalizers()) != 0
	var ignoreNotFound bool
	var deleteImmediately bool = true
	var lastExisting, out runtime.Object
	if !e.EnableGarbageCollection {
		// TODO: remove the check on graceful, because we support no-op updates now.
		if graceful {
			err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletion(ctx, name, key, options, preconditions, obj)
		}
	} else {
		shouldUpdateFinalizers, _ := shouldUpdateFinalizers(e, accessor, options)
		// TODO: remove the check, because we support no-op updates now.
		if graceful || pendingFinalizers || shouldUpdateFinalizers {
			err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, obj)
		}
	}
	// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.
	if !deleteImmediately || err != nil {
		return out, err
	}

	// delete immediately, or no graceful deletion supported
	glog.V(6).Infof("going to delete %s from regitry: ", name)
	out = e.NewFunc()
	if err := e.Storage.Delete(ctx, key, out, &preconditions); err != nil {
		// Please refer to the place where we set ignoreNotFound for the reason
		// why we ignore the NotFound error.
		if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {
			// The lastExisting object may not be the last state of the object
			// before its deletion, but it's the best approximation.
			return e.finalizeDelete(lastExisting, true)
		}
		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
	}
	return e.finalizeDelete(out, true)
}
Example #22
func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	// TODO: this should probably move to strategy.PrepareForCreate()
	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			// TODO: what error should be returned here?  It's not a
			// field-level validation failure (the field is valid), and it's
			// not really an internal error.
			return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			// TODO: when validation becomes versioned, this gets more complicated.
			el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	svcPortToNodePort := map[int]int{}
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		allocatedNodePort := svcPortToNodePort[int(servicePort.Port)]
		if allocatedNodePort == 0 {
			// This will only scan forward in the service.Spec.Ports list because any matches
			// before the current port would have been found in svcPortToNodePort. This is really
			// looking for any user provided values.
			np := findRequestedNodePort(int(servicePort.Port), service.Spec.Ports)
			if np != 0 {
				err := nodePortOp.Allocate(np)
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), np, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
				servicePort.NodePort = int32(np)
				svcPortToNodePort[int(servicePort.Port)] = np
			} else if assignNodePorts {
				nodePort, err := nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here?  It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
				svcPortToNodePort[int(servicePort.Port)] = nodePort
			}
		} else if int(servicePort.NodePort) != allocatedNodePort {
			if servicePort.NodePort == 0 {
				servicePort.NodePort = int32(allocatedNodePort)
			} else {
				err := nodePortOp.Allocate(int(servicePort.NodePort))
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
			}
		}
	}

	if shouldCheckOrAssignHealthCheckNodePort(service) {
		var healthCheckNodePort int
		var err error
		if l, ok := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort]; ok {
			healthCheckNodePort, err = strconv.Atoi(l)
			if err != nil || healthCheckNodePort <= 0 {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to parse annotation %v: %v", apiservice.BetaAnnotationHealthCheckNodePort, err))
			}
		}
		if healthCheckNodePort > 0 {
			// If the request has a health check nodePort in mind, attempt to reserve it
			err := nodePortOp.Allocate(healthCheckNodePort)
			if err != nil {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to allocate requested HealthCheck nodePort %v: %v", healthCheckNodePort, err))
			}
		} else {
			// If the request has no health check nodePort specified, allocate any
			healthCheckNodePort, err = nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here?  It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			// Insert the newly allocated health check port as an annotation (plan of record for Alpha)
			service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort] = fmt.Sprintf("%d", healthCheckNodePort)
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
Example #23
// ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked
// or over a websocket connection.
func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	w = httplog.Unlogged(w)

	if wsstream.IsWebSocketRequest(req) {
		w.Header().Set("Content-Type", s.MediaType)
		websocket.Handler(s.HandleWS).ServeHTTP(w, req)
		return
	}

	cn, ok := w.(http.CloseNotifier)
	if !ok {
		err := fmt.Errorf("unable to start watch - can't get http.CloseNotifier: %#v", w)
		utilruntime.HandleError(err)
		s.Scope.err(errors.NewInternalError(err), w, req)
		return
	}
	flusher, ok := w.(http.Flusher)
	if !ok {
		err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w)
		utilruntime.HandleError(err)
		s.Scope.err(errors.NewInternalError(err), w, req)
		return
	}

	framer := s.Framer.NewFrameWriter(w)
	if framer == nil {
		// programmer error
		err := fmt.Errorf("no stream framing support is available for media type %q", s.MediaType)
		utilruntime.HandleError(err)
		s.Scope.err(errors.NewBadRequest(err.Error()), w, req)
		return
	}
	e := streaming.NewEncoder(framer, s.Encoder)

	// ensure the connection times out
	timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()
	defer cleanup()
	defer s.Watching.Stop()

	// begin the stream
	w.Header().Set("Content-Type", s.MediaType)
	w.Header().Set("Transfer-Encoding", "chunked")
	w.WriteHeader(http.StatusOK)
	flusher.Flush()

	var unknown runtime.Unknown
	internalEvent := &metav1.InternalEvent{}
	buf := &bytes.Buffer{}
	ch := s.Watching.ResultChan()
	for {
		select {
		case <-cn.CloseNotify():
			return
		case <-timeoutCh:
			return
		case event, ok := <-ch:
			if !ok {
				// End of results.
				return
			}

			obj := event.Object
			s.Fixup(obj)
			if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil {
				// unexpected error
				utilruntime.HandleError(fmt.Errorf("unable to encode watch object: %v", err))
				return
			}

			// ContentType is not required here because we are defaulting to the serializer
			// type
			unknown.Raw = buf.Bytes()
			event.Object = &unknown

			// the internal event will be versioned by the encoder
			*internalEvent = metav1.InternalEvent(event)
			if err := e.Encode(internalEvent); err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to encode watch object: %v (%#v)", err, e))
				// client disconnect.
				return
			}
			if len(ch) == 0 {
				flusher.Flush()
			}

			buf.Reset()
		}
	}
}
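
A minimal, standard-library sketch of the flush-per-event pattern the watch server relies on; the /watch route and the JSON frames are made up:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// stream writes one frame per event and flushes after each, so clients
// see events as they happen rather than when the response ends.
func stream(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "{\"event\":%d}\n", i)
		flusher.Flush()
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/watch", stream)
	http.ListenAndServe(":8080", nil)
}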
Example #24
func (l *lifecycle) Admit(a admission.Attributes) error {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination.  to reduce incidences where the cache
		// is slow to update, we add the namespace into a force live lookup list to ensure
		// we are not looking at stale state.
		if a.GetOperation() == admission.Delete {
			l.forceLiveLookupCache.Add(a.GetName(), true, forceLiveLookupTTL)
		}
		return nil
	}

	// we need to wait for our caches to warm
	if !l.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}

	var (
		namespaceObj interface{}
		exists       bool
		err          error
	)

	key := makeNamespaceKey(a.GetNamespace())
	namespaceObj, exists, err = l.namespaceInformer.GetStore().Get(key)
	if err != nil {
		return errors.NewInternalError(err)
	}

	if !exists && a.GetOperation() == admission.Create {
		// give the cache time to observe the namespace before rejecting a create.
		// this helps when creating a namespace and immediately creating objects within it.
		time.Sleep(missingNamespaceWait)
		namespaceObj, exists, err = l.namespaceInformer.GetStore().Get(key)
		if err != nil {
			return errors.NewInternalError(err)
		}
		if exists {
			glog.V(4).Infof("found %s in cache after waiting", a.GetNamespace())
		}
	}

	// forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server.
	forceLiveLookup := false
	if _, ok := l.forceLiveLookupCache.Get(a.GetNamespace()); ok {
		// we think the namespace was marked for deletion, but if our local cache still reports it
		// as active, force a live lookup to avoid acting on stale state.
		forceLiveLookup = exists && namespaceObj.(*api.Namespace).Status.Phase == api.NamespaceActive
	}

	// refuse to operate on non-existent namespaces
	if !exists || forceLiveLookup {
		// as a last resort, make a call directly to storage
		namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{})
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
		glog.V(4).Infof("found %s via storage lookup", a.GetNamespace())
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
Example #25
// Admit enforces that a pod's node selector does not conflict with its namespace's node selector
// and stays within the node selector whitelist configured for that namespace; the merged result
// is written back to the pod.
func (p *podNodeSelector) Admit(a admission.Attributes) error {
	resource := a.GetResource().GroupResource()
	if resource != api.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "" {
		// only run the checks below on pods proper and not subresources
		return nil
	}

	obj := a.GetObject()
	pod, ok := obj.(*api.Pod)
	if !ok {
		glog.Errorf("expected pod but got %s", a.GetKind().Kind)
		return nil
	}

	if !p.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}

	name := pod.Name
	nsName := a.GetNamespace()
	var namespace *api.Namespace

	namespaceObj, exists, err := p.namespaceInformer.GetStore().Get(&api.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:      nsName,
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}
	if exists {
		namespace = namespaceObj.(*api.Namespace)
	} else {
		namespace, err = p.defaultGetNamespace(nsName)
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}
	namespaceNodeSelector, err := p.getNodeSelectorMap(namespace)
	if err != nil {
		return err
	}

	if labels.Conflicts(namespaceNodeSelector, labels.Set(pod.Spec.NodeSelector)) {
		return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its namespace node label selector"))
	}

	whitelist, err := labels.ConvertSelectorToLabelsMap(p.clusterNodeSelectors[namespace.Name])
	if err != nil {
		return err
	}

	// Merge pod node selector = namespace node selector + current pod node selector
	podNodeSelectorLabels := labels.Merge(namespaceNodeSelector, pod.Spec.NodeSelector)

	// whitelist verification
	if !labels.AreLabelsInWhiteList(podNodeSelectorLabels, whitelist) {
		return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector labels conflict with its namespace whitelist"))
	}

	// Updated pod node selector = namespace node selector + current pod node selector
	pod.Spec.NodeSelector = map[string]string(podNodeSelectorLabels)
	return nil
}
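
The conflict and merge checks above come from the apimachinery labels package; a small sketch with made-up label sets:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	nsSelector := labels.Set{"region": "us-east"}
	podSelector := labels.Set{"disk": "ssd"}

	// Conflicts is true only if the same key carries different values.
	fmt.Println(labels.Conflicts(nsSelector, podSelector)) // false

	// Merge is what the admission plugin writes back onto the pod spec.
	fmt.Println(labels.Merge(nsSelector, podSelector)) // disk=ssd,region=us-east
}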