Example #1
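// Create validates the incoming object, casts it to a Role, and stores it in
// the namespace's Policy, failing if a role with the same name already exists.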
func (m *VirtualStorage) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)

	policy, err := m.EnsurePolicy(ctx)
	if err != nil {
		return nil, err
	}
	if _, exists := policy.Roles[role.Name]; exists {
		return nil, kapierrors.NewAlreadyExists("Role", role.Name)
	}

	role.ResourceVersion = policy.ResourceVersion
	policy.Roles[role.Name] = role
	policy.LastModified = util.Now()

	if err := m.PolicyStorage.UpdatePolicy(ctx, policy); err != nil {
		return nil, err
	}

	return role, nil
}
Example #2
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #3
// Create instantiates a new build from a build configuration
func (s *InstantiateREST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	return s.generator.Instantiate(ctx, obj.(*buildapi.BuildRequest))
}
Example #4
// CreateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePort, nodePort int) error {
	ctx := api.NewDefaultContext()
	if _, err := c.ServiceRegistry.GetService(ctx, serviceName); err == nil {
		// The service already exists.
		return nil
	}

	ports, serviceType := createPortAndServiceSpec(servicePort, nodePort)
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
		},
		Spec: api.ServiceSpec{
			Ports: ports,
			// maintained by this code, not by the pod selector
			Selector:        nil,
			ClusterIP:       serviceIP.String(),
			SessionAffinity: api.ServiceAffinityNone,
			Type:            serviceType,
		},
	}
	if err := rest.BeforeCreate(rest.Services, ctx, svc); err != nil {
		return err
	}

	_, err := c.ServiceRegistry.CreateService(ctx, svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #5
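// createRole validates the object and, unless allowEscalation is set, confirms
// the request does not grant permissions beyond those held by the caller before
// persisting the Role into the namespace's Policy.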
func (m *VirtualStorage) createRole(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.Role, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)
	if !allowEscalation {
		if err := rulevalidation.ConfirmNoEscalation(ctx, authorizationapi.Resource("role"), role.Name, m.RuleResolver, authorizationinterfaces.NewLocalRoleAdapter(role)); err != nil {
			return nil, err
		}
	}

	policy, err := m.EnsurePolicy(ctx)
	if err != nil {
		return nil, err
	}
	if _, exists := policy.Roles[role.Name]; exists {
		return nil, kapierrors.NewAlreadyExists(authorizationapi.Resource("role"), role.Name)
	}

	role.ResourceVersion = policy.ResourceVersion
	policy.Roles[role.Name] = role
	policy.LastModified = unversioned.Now()

	if err := m.PolicyStorage.UpdatePolicy(ctx, policy); err != nil {
		return nil, err
	}

	return role, nil
}
Example #6
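// Create validates an ImageStreamTag of the form <stream_name>:<tag>, loads
// (or prepares to create) the target image stream, and records the new spec
// tag, refusing to overwrite an existing one.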
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	istag, ok := obj.(*imageapi.ImageStreamTag)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("obj is not an ImageStreamTag: %#v", obj))
	}
	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}
	namespace, ok := kapi.NamespaceFrom(ctx)
	if !ok {
		return nil, kapierrors.NewBadRequest("a namespace must be specified to import images")
	}

	imageStreamName, imageTag, ok := imageapi.SplitImageStreamTag(istag.Name)
	if !ok {
		return nil, fmt.Errorf("%q must be of the form <stream_name>:<tag>", istag.Name)
	}

	target, err := r.imageStreamRegistry.GetImageStream(ctx, imageStreamName)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}

		// try to create the target if it doesn't exist
		target = &imageapi.ImageStream{
			ObjectMeta: kapi.ObjectMeta{
				Name:      imageStreamName,
				Namespace: namespace,
			},
		}
	}

	if target.Spec.Tags == nil {
		target.Spec.Tags = make(map[string]imageapi.TagReference)
	}

	// The user wants to symlink a tag.
	_, exists := target.Spec.Tags[imageTag]
	if exists {
		return nil, kapierrors.NewAlreadyExists(imageapi.Resource("imagestreamtag"), istag.Name)
	}
	target.Spec.Tags[imageTag] = *istag.Tag

	// Check the stream creation timestamp and make sure we will not
	// create a new image stream while the old one is being deleted.
	if target.CreationTimestamp.IsZero() {
		_, err = r.imageStreamRegistry.CreateImageStream(ctx, target)
	} else {
		_, err = r.imageStreamRegistry.UpdateImageStream(ctx, target)
	}
	if err != nil {
		return nil, err
	}

	return istag, nil
}
Example #7
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		if errGet := e.Storage.Get(ctx, key, out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
			// TODO: remove this block after 1.6
			userAgent, _ := api.UserAgentFrom(ctx)
			if !isOldKubectl(userAgent) {
				return nil, err
			}
			if e.QualifiedResource.Resource != "replicationcontrollers" {
				return nil, err
			}
			*msg = fmt.Sprintf("%s: if you're using \"kubectl rolling-update\" with kubectl version older than v1.4.0, your rolling update has failed, though the pods are correctly updated. Please see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#kubectl-rolling-update for a workaround", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #8
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		if errGet := e.Storage.Get(ctx, key, out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #9
// Create instantiates a new build from a build configuration
func (s *InstantiateREST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	request := obj.(*buildapi.BuildRequest)
	if request.TriggeredBy == nil {
		buildTriggerCauses := []buildapi.BuildTriggerCause{}
		request.TriggeredBy = append(buildTriggerCauses,
			buildapi.BuildTriggerCause{
				Message: buildapi.BuildTriggerCauseManualMsg,
			},
		)
	}
	return s.generator.Instantiate(ctx, request)
}
Example #10
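// createRoleBinding deep-copies the object before validation (BeforeCreate
// mutates it), optionally checks for privilege escalation, and retries the
// PolicyBinding update when it hits a conflict.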
func (m *VirtualStorage) createRoleBinding(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.RoleBinding, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	roleBinding := obj.(*authorizationapi.RoleBinding)

	if !allowEscalation {
		if err := m.confirmNoEscalation(ctx, roleBinding); err != nil {
			return nil, err
		}
	}

	// Retry if we hit a conflict on the underlying PolicyBinding object
	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation)
		if err != nil {
			return err
		}

		_, exists := policyBinding.RoleBindings[roleBinding.Name]
		if exists {
			return kapierrors.NewAlreadyExists(authorizationapi.Resource("rolebinding"), roleBinding.Name)
		}

		roleBinding.ResourceVersion = policyBinding.ResourceVersion
		policyBinding.RoleBindings[roleBinding.Name] = roleBinding
		policyBinding.LastModified = unversioned.Now()

		return m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding)
	}); err != nil {
		return nil, err
	}

	return roleBinding, nil
}
Example #11
// Create registers a new image (if it doesn't exist) and updates the specified ImageStream's tags.
func (s *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	mapping := obj.(*api.ImageStreamMapping)

	stream, err := s.findStreamForMapping(ctx, mapping)
	if err != nil {
		return nil, err
	}

	image := mapping.Image
	tag := mapping.Tag
	if len(tag) == 0 {
		tag = api.DefaultImageTag
	}

	if err := s.imageRegistry.CreateImage(ctx, &image); err != nil && !errors.IsAlreadyExists(err) {
		return nil, err
	}

	next := api.TagEvent{
		Created:              util.Now(),
		DockerImageReference: image.DockerImageReference,
		Image:                image.Name,
	}

	if !api.AddTagEventToImageStream(stream, tag, next) {
		// nothing actually changed
		return &kapi.Status{Status: kapi.StatusSuccess}, nil
	}

	api.UpdateTrackingTags(stream, tag, next)

	if _, err := s.imageStreamRegistry.UpdateImageStreamStatus(ctx, stream); err != nil {
		return nil, err
	}

	return &kapi.Status{Status: kapi.StatusSuccess}, nil
}
Example #12
// CreateWithName inserts a new item with the provided name
// DEPRECATED: use Create instead
func (e *Etcd) CreateWithName(ctx api.Context, name string, obj runtime.Object) error {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return err
	}
	if e.CreateStrategy != nil {
		if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
			return err
		}
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return err
	}
	err = e.Storage.Create(key, obj, nil, ttl)
	err = etcderr.InterpretCreateError(err, e.EndpointName, name)
	if err == nil && e.Decorator != nil {
		err = e.Decorator(obj)
	}
	return err
}
Example #13
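// createRole deep-copies the object before validation, optionally confirms the
// request does not escalate the caller's privileges, and retries the Policy
// update on conflict.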
func (m *VirtualStorage) createRole(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.Role, error) {
	// Copy object before passing to BeforeCreate, since it mutates
	objCopy, err := kapi.Scheme.DeepCopy(obj)
	if err != nil {
		return nil, err
	}
	obj = objCopy.(runtime.Object)

	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)
	if !allowEscalation {
		if err := rulevalidation.ConfirmNoEscalation(ctx, m.Resource, role.Name, m.RuleResolver, m.CachedRuleResolver, authorizationinterfaces.NewLocalRoleAdapter(role)); err != nil {
			return nil, err
		}
	}

	if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		policy, err := m.EnsurePolicy(ctx)
		if err != nil {
			return err
		}
		if _, exists := policy.Roles[role.Name]; exists {
			return kapierrors.NewAlreadyExists(m.Resource, role.Name)
		}

		role.ResourceVersion = policy.ResourceVersion
		policy.Roles[role.Name] = role
		policy.LastModified = unversioned.Now()

		return m.PolicyStorage.UpdatePolicy(ctx, policy)
	}); err != nil {
		return nil, err
	}

	return role, nil
}
Example #14
// CreateOrUpdateMasterServiceIfNeeded will create the specified service if it
// doesn't already exist, and will reconcile its ports and type if it does
// exist and reconcile is true.
func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
	ctx := api.NewDefaultContext()
	if s, err := c.ServiceRegistry.GetService(ctx, serviceName); err == nil {
		// The service already exists.
		if reconcile {
			if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
				glog.Warningf("Resetting master service %q to %#v", serviceName, svc)
				_, err := c.ServiceRegistry.UpdateService(ctx, svc)
				return err
			}
		}
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
		},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
			// maintained by this code, not by the pod selector
			Selector:        nil,
			ClusterIP:       serviceIP.String(),
			SessionAffinity: api.ServiceAffinityNone,
			Type:            serviceType,
		},
	}

	if err := rest.BeforeCreate(rest.Services, ctx, svc); err != nil {
		return err
	}

	_, err := c.ServiceRegistry.CreateService(ctx, svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
Example #15
// Create inserts a new item according to the unique key from the object.
func (e *Etcd) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	trace := util.NewTrace("Create " + reflect.TypeOf(obj).String())
	defer trace.LogIfLong(time.Second)
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	trace.Step("About to create object")
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = etcderr.InterpretCreateError(err, e.EndpointName, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		return nil, err
	}
	trace.Step("Object created")
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return out, nil
}
Example #16
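// createRoleBinding validates the object and its role reference, optionally
// checks for privilege escalation, and stores the binding in the PolicyBinding
// for the referenced role's namespace.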
func (m *VirtualStorage) createRoleBinding(ctx kapi.Context, obj runtime.Object, allowEscalation bool) (*authorizationapi.RoleBinding, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	roleBinding := obj.(*authorizationapi.RoleBinding)

	if err := m.validateReferentialIntegrity(ctx, roleBinding); err != nil {
		return nil, err
	}
	if !allowEscalation {
		if err := m.confirmNoEscalation(ctx, roleBinding); err != nil {
			return nil, err
		}
	}

	policyBinding, err := m.getPolicyBindingForPolicy(ctx, roleBinding.RoleRef.Namespace, allowEscalation)
	if err != nil {
		return nil, err
	}

	_, exists := policyBinding.RoleBindings[roleBinding.Name]
	if exists {
		return nil, kapierrors.NewAlreadyExists("RoleBinding", roleBinding.Name)
	}

	roleBinding.ResourceVersion = policyBinding.ResourceVersion
	policyBinding.RoleBindings[roleBinding.Name] = roleBinding
	policyBinding.LastModified = util.Now()

	if err := m.BindingRegistry.UpdatePolicyBinding(ctx, policyBinding); err != nil {
		return nil, err
	}

	return roleBinding, nil
}
Example #17
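// Create attaches an ImageSignature to its image, rejecting signatures that
// already exist by name or by type:content, and returns the stored copy.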
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	signature := obj.(*imageapi.ImageSignature)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	imageName, _, err := imageapi.SplitImageSignatureName(signature.Name)
	if err != nil {
		return nil, kapierrors.NewBadRequest(err.Error())
	}

	image, err := r.imageClient.Get(imageName)
	if err != nil {
		return nil, err
	}

	// ensure the given signature doesn't already exist - either by its name or by type:content
	if byName, byContent := imageapi.IndexOfImageSignatureByName(image.Signatures, signature.Name), imageapi.IndexOfImageSignature(image.Signatures, signature.Type, signature.Content); byName >= 0 || byContent >= 0 {
		return nil, kapierrors.NewAlreadyExists(imageapi.Resource("imageSignatures"), signature.Name)
	}

	image.Signatures = append(image.Signatures, *signature)

	image, err = r.imageClient.Update(image)
	if err != nil {
		return nil, err
	}

	byName := imageapi.IndexOfImageSignatureByName(image.Signatures, signature.Name)
	if byName < 0 {
		return nil, kapierrors.NewInternalError(errors.New("failed to store given signature"))
	}

	return &image.Signatures[byName], nil
}
Example #18
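// Create allocates a cluster IP and any requested node ports for the service,
// rolling the allocations back if the service cannot be persisted.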
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid("Service", service.Name, el)
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid("Service", service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		if servicePort.NodePort != 0 {
			err := nodePortOp.Allocate(servicePort.NodePort)
			if err != nil {
				el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", servicePort.NodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
				return nil, errors.NewInvalid("Service", service.Name, el)
			}
		} else if assignNodePorts {
			nodePort, err := nodePortOp.AllocateNext()
			if err != nil {
				el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", servicePort.NodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
				return nil, errors.NewInvalid("Service", service.Name, el)
			}
			servicePort.NodePort = nodePort
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
Example #19
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, false, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	// If AllowUnconditionalUpdate() is true and the object specified by the user does not have a resource version,
	// then we populate it with the latest version.
	// Else, we check that the version specified by the user matches the version of latest etcd object.
	resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
	if err != nil {
		return nil, false, err
	}
	doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
	// TODO: expose TTL
	creating := false
	out := e.NewFunc()
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Since we return 'obj' from this function and it can be modified outside this
		// function, we are resetting resourceVersion to the initial value here.
		//
		// TODO: In fact, we should probably return a DeepCopy of obj in all places.
		err := e.Storage.Versioner().UpdateObject(obj, nil, resourceVersion)
		if err != nil {
			return nil, nil, err
		}

		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
			}
			creating = true
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest etcd object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.Expiration, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := unversioned.GroupKind{e.QualifiedResource.Group, e.QualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf("the object has been modified; please apply your changes to the latest version and try again"))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		if creating {
			err = etcderr.InterpretCreateError(err, e.QualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		} else {
			err = etcderr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
Example #20
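// handle validates the binary build options, instantiates a build, waits for
// its pod to start running, and then streams the reader's content into the
// build container over an attach connection.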
func (h *binaryInstantiateHandler) handle(r io.Reader) (runtime.Object, error) {
	h.options.Name = h.name
	if err := rest.BeforeCreate(BinaryStrategy, h.ctx, h.options); err != nil {
		glog.Infof("failed to validate binary: %#v", h.options)
		return nil, err
	}

	request := &buildapi.BuildRequest{}
	request.Name = h.name
	if len(h.options.Commit) > 0 {
		request.Revision = &buildapi.SourceRevision{
			Git: &buildapi.GitSourceRevision{
				Committer: buildapi.SourceControlUser{
					Name:  h.options.CommitterName,
					Email: h.options.CommitterEmail,
				},
				Author: buildapi.SourceControlUser{
					Name:  h.options.AuthorName,
					Email: h.options.AuthorEmail,
				},
				Message: h.options.Message,
				Commit:  h.options.Commit,
			},
		}
	}
	request.Binary = &buildapi.BinaryBuildSource{
		AsFile: h.options.AsFile,
	}
	build, err := h.r.Generator.Instantiate(h.ctx, request)
	if err != nil {
		glog.Infof("failed to instantiate: %#v", request)
		return nil, err
	}

	latest, ok, err := registry.WaitForRunningBuild(h.r.Watcher, h.ctx, build, h.r.Timeout)
	if err != nil {
		switch latest.Status.Phase {
		case buildapi.BuildPhaseError:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage))
		case buildapi.BuildPhaseCancelled:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage))
		}
		return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", build.Name, err))
	}
	if !ok {
		return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, h.r.Timeout), 0)
	}
	if latest.Status.Phase != buildapi.BuildPhaseRunning {
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s is no longer running, cannot upload file: %s", build.Name, build.Status.Phase))
	}

	// The container should be the default build container, so leave the container name blank
	buildPodName := buildutil.GetBuildPodName(build)
	opts := &kapi.PodAttachOptions{
		Stdin: true,
	}
	location, transport, err := pod.AttachLocation(h.r.PodGetter, h.r.ConnectionInfo, h.ctx, buildPodName, opts)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName)
		}
		return nil, errors.NewBadRequest(err.Error())
	}
	rawTransport, ok := transport.(*http.Transport)
	if !ok {
		return nil, errors.NewInternalError(fmt.Errorf("unable to connect to node, unrecognized type: %v", reflect.TypeOf(transport)))
	}
	upgrader := spdy.NewRoundTripper(rawTransport.TLSClientConfig)
	exec, err := remotecommand.NewStreamExecutor(upgrader, nil, "POST", location)
	if err != nil {
		return nil, errors.NewInternalError(fmt.Errorf("unable to connect to server: %v", err))
	}
	if err := exec.Stream(r, nil, nil, false); err != nil {
		return nil, errors.NewInternalError(err)
	}
	return latest, nil
}
Example #21
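// Create allocates a cluster IP, node ports (reusing one node port per service
// port number), and, where required, a health-check node port, rolling the
// allocations back if the service cannot be persisted.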
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	// TODO: this should probably move to strategy.PrepareForCreate()
	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			// TODO: what error should be returned here?  It's not a
			// field-level validation failure (the field is valid), and it's
			// not really an internal error.
			return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			// TODO: when validation becomes versioned, this gets more complicated.
			el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	svcPortToNodePort := map[int]int{}
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		allocatedNodePort := svcPortToNodePort[int(servicePort.Port)]
		if allocatedNodePort == 0 {
			// This will only scan forward in the service.Spec.Ports list because any matches
			// before the current port would have been found in svcPortToNodePort. This is really
			// looking for any user provided values.
			np := findRequestedNodePort(int(servicePort.Port), service.Spec.Ports)
			if np != 0 {
				err := nodePortOp.Allocate(np)
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), np, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
				servicePort.NodePort = int32(np)
				svcPortToNodePort[int(servicePort.Port)] = np
			} else if assignNodePorts {
				nodePort, err := nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here?  It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
				svcPortToNodePort[int(servicePort.Port)] = nodePort
			}
		} else if int(servicePort.NodePort) != allocatedNodePort {
			if servicePort.NodePort == 0 {
				servicePort.NodePort = int32(allocatedNodePort)
			} else {
				err := nodePortOp.Allocate(int(servicePort.NodePort))
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
			}
		}
	}

	if shouldCheckOrAssignHealthCheckNodePort(service) {
		var healthCheckNodePort int
		var err error
		if l, ok := service.Annotations[apiservice.AnnotationHealthCheckNodePort]; ok {
			healthCheckNodePort, err = strconv.Atoi(l)
			if err != nil || healthCheckNodePort <= 0 {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to parse annotation %v: %v", apiservice.AnnotationHealthCheckNodePort, err))
			}
		}
		if healthCheckNodePort > 0 {
			// If the request has a health check nodePort in mind, attempt to reserve it
			err := nodePortOp.Allocate(int(healthCheckNodePort))
			if err != nil {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to allocate requested HealthCheck nodePort %v: %v", healthCheckNodePort, err))
			}
		} else {
			// If the request has no health check nodePort specified, allocate any
			healthCheckNodePort, err = nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here?  It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			// Insert the newly allocated health check port as an annotation (plan of record for Alpha)
			service.Annotations[apiservice.AnnotationHealthCheckNodePort] = fmt.Sprintf("%d", healthCheckNodePort)
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
Example #22
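// createOrUpdate maps an identity to a user: it tolerates NotFound errors on
// the related objects, creates the mapping when it is missing (or forceCreate
// is set) and updates it otherwise, repointing the identity and both users.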
func (s *REST) createOrUpdate(ctx kapi.Context, obj runtime.Object, forceCreate bool) (runtime.Object, bool, error) {
	mapping := obj.(*api.UserIdentityMapping)
	identity, identityErr, oldUser, oldUserErr, oldMapping, oldMappingErr := s.getRelatedObjects(ctx, mapping.Name)

	// Ensure we didn't get any errors other than NotFound errors
	if !(oldMappingErr == nil || kerrs.IsNotFound(oldMappingErr)) {
		return nil, false, oldMappingErr
	}
	if !(identityErr == nil || kerrs.IsNotFound(identityErr)) {
		return nil, false, identityErr
	}
	if !(oldUserErr == nil || kerrs.IsNotFound(oldUserErr)) {
		return nil, false, oldUserErr
	}

	// If we expect to be creating, fail if the mapping already existed
	if forceCreate && oldMappingErr == nil {
		return nil, false, kerrs.NewAlreadyExists(api.Resource("useridentitymapping"), oldMapping.Name)
	}

	// Allow update to create if missing
	creating := forceCreate || kerrs.IsNotFound(oldMappingErr)
	if creating {
		// Pre-create checks with no access to oldMapping
		if err := rest.BeforeCreate(Strategy, ctx, mapping); err != nil {
			return nil, false, err
		}

		// Ensure resource version is not specified
		if len(mapping.ResourceVersion) > 0 {
			return nil, false, kerrs.NewNotFound(api.Resource("useridentitymapping"), mapping.Name)
		}
	} else {
		// Pre-update checks with access to oldMapping
		if err := rest.BeforeUpdate(Strategy, ctx, mapping, oldMapping); err != nil {
			return nil, false, err
		}

		// Ensure resource versions match
		if len(mapping.ResourceVersion) > 0 && mapping.ResourceVersion != oldMapping.ResourceVersion {
			return nil, false, kerrs.NewConflict(api.Resource("useridentitymapping"), mapping.Name, fmt.Errorf("the resource was updated to %s", oldMapping.ResourceVersion))
		}

		// If we're "updating" to the user we're already pointing to, we're already done
		if mapping.User.Name == oldMapping.User.Name {
			return oldMapping, false, nil
		}
	}

	// Validate identity
	if kerrs.IsNotFound(identityErr) {
		errs := field.ErrorList{field.Invalid(field.NewPath("identity", "name"), mapping.Identity.Name, "referenced identity does not exist")}
		return nil, false, kerrs.NewInvalid(api.Kind("UserIdentityMapping"), mapping.Name, errs)
	}

	// Get new user
	newUser, err := s.userRegistry.GetUser(ctx, mapping.User.Name)
	if kerrs.IsNotFound(err) {
		errs := field.ErrorList{field.Invalid(field.NewPath("user", "name"), mapping.User.Name, "referenced user does not exist")}
		return nil, false, kerrs.NewInvalid(api.Kind("UserIdentityMapping"), mapping.Name, errs)
	}
	if err != nil {
		return nil, false, err
	}

	// Update the new user to point at the identity. If this fails, Update is re-entrant
	if addIdentityToUser(identity, newUser) {
		if _, err := s.userRegistry.UpdateUser(ctx, newUser); err != nil {
			return nil, false, err
		}
	}

	// Update the identity to point at the new user. If this fails, Update is re-entrant
	if setIdentityUser(identity, newUser) {
		if updatedIdentity, err := s.identityRegistry.UpdateIdentity(ctx, identity); err != nil {
			return nil, false, err
		} else {
			identity = updatedIdentity
		}
	}

	// At this point, the mapping for the identity has been updated to the new user
	// Everything past this point is cleanup

	// Update the old user to no longer point at the identity.
	// If this fails, log the error, but continue, because Update is no longer re-entrant
	if oldUser != nil && removeIdentityFromUser(identity, oldUser) {
		if _, err := s.userRegistry.UpdateUser(ctx, oldUser); err != nil {
			utilruntime.HandleError(fmt.Errorf("error removing identity reference %s from user %s: %v", identity.Name, oldUser.Name, err))
		}
	}

	updatedMapping, err := mappingFor(newUser, identity)
	return updatedMapping, creating, err
}
Example #23
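// Create allocates a cluster IP and node ports for the service, rolling the
// allocations back if persisting the service fails.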
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	// TODO: this should probably move to strategy.PrepareForCreate()
	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			// TODO: what error should be returned here?  It's not a
			// field-level validation failure (the field is valid), and it's
			// not really an internal error.
			return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			// TODO: when validation becomes versioned, this gets more complicated.
			el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid("Service", service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		if servicePort.NodePort != 0 {
			err := nodePortOp.Allocate(servicePort.NodePort)
			if err != nil {
				// TODO: when validation becomes versioned, this gets more complicated.
				el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
				return nil, errors.NewInvalid("Service", service.Name, el)
			}
		} else if assignNodePorts {
			nodePort, err := nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here?  It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			servicePort.NodePort = nodePort
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
Example #24
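// Create runs the import described by an ImageStreamImport: it imports the
// requested images, annotates credential-related failures, and, when
// isi.Spec.Import is set, writes the resulting tags and images back to the
// image stream, creating the stream if needed.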
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	isi, ok := obj.(*api.ImageStreamImport)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("obj is not an ImageStreamImport: %#v", obj))
	}

	inputMeta := isi.ObjectMeta

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	namespace, ok := kapi.NamespaceFrom(ctx)
	if !ok {
		return nil, kapierrors.NewBadRequest("a namespace must be specified to import images")
	}

	if r.clientFn != nil {
		if client := r.clientFn(); client != nil {
			ctx = kapi.WithValue(ctx, importer.ContextKeyV1RegistryClient, client)
		}
	}

	// only load secrets if we need them
	credentials := importer.NewLazyCredentialsForSecrets(func() ([]kapi.Secret, error) {
		secrets, err := r.secrets.ImageStreamSecrets(namespace).Secrets(isi.Name, kapi.ListOptions{})
		if err != nil {
			return nil, err
		}
		return secrets.Items, nil
	})
	importCtx := importer.NewContext(r.transport, r.insecureTransport).WithCredentials(credentials)
	imports := r.importFn(importCtx)
	if err := imports.Import(ctx.(gocontext.Context), isi); err != nil {
		return nil, kapierrors.NewInternalError(err)
	}

	// if we encountered an error loading credentials and any images could not be retrieved with an access
	// related error, modify the message.
	// TODO: set a status cause
	if err := credentials.Err(); err != nil {
		for i, image := range isi.Status.Images {
			switch image.Status.Reason {
			case unversioned.StatusReasonUnauthorized, unversioned.StatusReasonForbidden:
				isi.Status.Images[i].Status.Message = fmt.Sprintf("Unable to load secrets for this image: %v; (%s)", err, image.Status.Message)
			}
		}
		if r := isi.Status.Repository; r != nil {
			switch r.Status.Reason {
			case unversioned.StatusReasonUnauthorized, unversioned.StatusReasonForbidden:
				r.Status.Message = fmt.Sprintf("Unable to load secrets for this repository: %v; (%s)", err, r.Status.Message)
			}
		}
	}

	// TODO: perform the transformation of the image stream and return it with the ISI if import is false
	//   so that clients can see what the resulting object would look like.
	if !isi.Spec.Import {
		clearManifests(isi)
		return isi, nil
	}

	create := false
	stream, err := r.streams.GetImageStream(ctx, isi.Name)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}
		// consistency check, stream must exist
		if len(inputMeta.ResourceVersion) > 0 || len(inputMeta.UID) > 0 {
			return nil, err
		}
		create = true
		stream = &api.ImageStream{
			ObjectMeta: kapi.ObjectMeta{
				Name:       isi.Name,
				Namespace:  namespace,
				Generation: 0,
			},
		}
	} else {
		if len(inputMeta.ResourceVersion) > 0 && inputMeta.ResourceVersion != stream.ResourceVersion {
			glog.V(4).Infof("DEBUG: mismatch between requested ResourceVersion %s and located ResourceVersion %s", inputMeta.ResourceVersion, stream.ResourceVersion)
			return nil, kapierrors.NewConflict(api.Resource("imagestream"), inputMeta.Name, fmt.Errorf("the image stream was updated from %q to %q", inputMeta.ResourceVersion, stream.ResourceVersion))
		}
		if len(inputMeta.UID) > 0 && inputMeta.UID != stream.UID {
			glog.V(4).Infof("DEBUG: mismatch between requested UID %s and located UID %s", inputMeta.UID, stream.UID)
			return nil, kapierrors.NewNotFound(api.Resource("imagestream"), inputMeta.Name)
		}
	}

	if stream.Annotations == nil {
		stream.Annotations = make(map[string]string)
	}
	now := unversioned.Now()
	_, hasAnnotation := stream.Annotations[api.DockerImageRepositoryCheckAnnotation]
	nextGeneration := stream.Generation + 1

	original, err := kapi.Scheme.DeepCopy(stream)
	if err != nil {
		return nil, err
	}

	// walk the retrieved images, ensuring each one exists in etcd
	importedImages := make(map[string]error)
	updatedImages := make(map[string]*api.Image)

	if spec := isi.Spec.Repository; spec != nil {
		for i, status := range isi.Status.Repository.Images {
			if checkImportFailure(status, stream, status.Tag, nextGeneration, now) {
				continue
			}

			image := status.Image
			ref, err := api.ParseDockerImageReference(image.DockerImageReference)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to parse image reference during import: %v", err))
				continue
			}
			from, err := api.ParseDockerImageReference(spec.From.Name)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to parse from reference during import: %v", err))
				continue
			}

			tag := ref.Tag
			if len(status.Tag) > 0 {
				tag = status.Tag
			}
			// we've imported a set of tags, ensure spec tag will point to this for later imports
			from.ID, from.Tag = "", tag

			if updated, ok := r.importSuccessful(ctx, image, stream, tag, from.Exact(), nextGeneration, now, spec.ImportPolicy, importedImages, updatedImages); ok {
				isi.Status.Repository.Images[i].Image = updated
			}
		}
	}

	for i, spec := range isi.Spec.Images {
		if spec.To == nil {
			continue
		}
		tag := spec.To.Name

		// record a failure condition
		status := isi.Status.Images[i]
		if checkImportFailure(status, stream, tag, nextGeneration, now) {
			// ensure that we have a spec tag set
			ensureSpecTag(stream, tag, spec.From.Name, spec.ImportPolicy, false)
			continue
		}

		// record success
		image := status.Image
		if updated, ok := r.importSuccessful(ctx, image, stream, tag, spec.From.Name, nextGeneration, now, spec.ImportPolicy, importedImages, updatedImages); ok {
			isi.Status.Images[i].Image = updated
		}
	}

	// TODO: should we allow partial failure?
	for _, err := range importedImages {
		if err != nil {
			return nil, err
		}
	}

	clearManifests(isi)

	hasChanges := !kapi.Semantic.DeepEqual(original, stream)
	if create {
		stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = now.UTC().Format(time.RFC3339)
		glog.V(4).Infof("create new stream: %#v", stream)
		obj, err = r.internalStreams.Create(ctx, stream)
	} else {
		if hasAnnotation && !hasChanges {
			glog.V(4).Infof("stream did not change: %#v", stream)
			obj, err = original.(*api.ImageStream), nil
		} else {
			if glog.V(4) {
				glog.V(4).Infof("updated stream %s", diff.ObjectDiff(original, stream))
			}
			stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = now.UTC().Format(time.RFC3339)
			obj, _, err = r.internalStreams.Update(ctx, stream.Name, rest.DefaultUpdatedObjectInfo(stream, kapi.Scheme))
		}
	}

	if err != nil {
		return nil, err
	}
	isi.Status.Import = obj.(*api.ImageStream)
	return isi, nil
}
Example #25
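// Update rewrites the spec tag behind an ImageStreamTag, creating the image
// stream when it does not yet exist; the returned bool reports whether the
// tag was newly created.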
func (r *REST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) {
	istag, ok := obj.(*api.ImageStreamTag)
	if !ok {
		return nil, false, kapierrors.NewBadRequest(fmt.Sprintf("obj is not an ImageStreamTag: %#v", obj))
	}

	name, tag, err := nameAndTag(istag.Name)
	if err != nil {
		return nil, false, err
	}

	imageStream, err := r.imageStreamRegistry.GetImageStream(ctx, name)
	if err != nil {
		if !kapierrors.IsNotFound(err) || len(istag.ResourceVersion) != 0 {
			return nil, false, err
		}
		namespace, ok := kapi.NamespaceFrom(ctx)
		if !ok {
			return nil, false, kapierrors.NewBadRequest("namespace is required on ImageStreamTags")
		}
		imageStream = &api.ImageStream{
			ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: name},
		}
	}

	// check for conflict
	if len(istag.ResourceVersion) == 0 {
		istag.ResourceVersion = imageStream.ResourceVersion
	}
	if imageStream.ResourceVersion != istag.ResourceVersion {
		return nil, false, kapierrors.NewConflict(api.Resource("imagestreamtags"), istag.Name, fmt.Errorf("another caller has updated the resource version to %s", imageStream.ResourceVersion))
	}

	// create the synthetic old istag
	old, err := newISTag(tag, imageStream, nil, true)
	if err != nil {
		return nil, false, err
	}

	if len(istag.ResourceVersion) == 0 {
		if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
			return nil, false, err
		}
	} else {
		if err := rest.BeforeUpdate(Strategy, ctx, obj, old); err != nil {
			return nil, false, err
		}
	}

	// update the spec tag
	if imageStream.Spec.Tags == nil {
		imageStream.Spec.Tags = map[string]api.TagReference{}
	}
	tagRef, exists := imageStream.Spec.Tags[tag]
	// if the caller set tag, override the spec tag
	if istag.Tag != nil {
		tagRef = *istag.Tag
		tagRef.Name = tag
	}
	tagRef.Annotations = istag.Annotations
	imageStream.Spec.Tags[tag] = tagRef

	// mutate the image stream
	var newImageStream *api.ImageStream
	if imageStream.CreationTimestamp.IsZero() {
		newImageStream, err = r.imageStreamRegistry.CreateImageStream(ctx, imageStream)
	} else {
		newImageStream, err = r.imageStreamRegistry.UpdateImageStream(ctx, imageStream)
	}
	if err != nil {
		return nil, false, err
	}

	image, err := r.imageFor(ctx, tag, newImageStream)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, false, err
		}
	}

	newISTag, err := newISTag(tag, newImageStream, image, true)
	return newISTag, !exists, err
}
Example #26
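// Create provisions a project from the project template: it substitutes the
// request's parameters, creates the Project first (so only one of several
// racers wins), then bulk-creates the remaining template objects within it.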
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(projectrequestregistry.Strategy, ctx, obj); err != nil {
		return nil, err
	}

	projectRequest := obj.(*projectapi.ProjectRequest)

	if _, err := r.openshiftClient.Projects().Get(projectRequest.Name); err == nil {
		return nil, kapierror.NewAlreadyExists("project", projectRequest.Name)
	}

	projectName := projectRequest.Name
	projectAdmin := ""
	if userInfo, exists := kapi.UserFrom(ctx); exists {
		projectAdmin = userInfo.GetName()
	}

	template, err := r.getTemplate()
	if err != nil {
		return nil, err
	}

	for i := range template.Parameters {
		switch template.Parameters[i].Name {
		case ProjectAdminUserParam:
			template.Parameters[i].Value = projectAdmin
		case ProjectDescriptionParam:
			template.Parameters[i].Value = projectRequest.Description
		case ProjectDisplayNameParam:
			template.Parameters[i].Value = projectRequest.DisplayName
		case ProjectNameParam:
			template.Parameters[i].Value = projectName
		}
	}

	list, err := r.openshiftClient.TemplateConfigs(kapi.NamespaceDefault).Create(template)
	if err != nil {
		return nil, err
	}
	if err := utilerrors.NewAggregate(runtime.DecodeList(list.Objects, kapi.Scheme)); err != nil {
		return nil, kapierror.NewInternalError(err)
	}

	// one of the items in this list should be the project. We locate it, remove it from the list, and create it separately
	var projectFromTemplate *projectapi.Project
	objectsToCreate := &kapi.List{}
	for i := range list.Objects {
		if templateProject, ok := list.Objects[i].(*projectapi.Project); ok {
			projectFromTemplate = templateProject

			if len(list.Objects) > (i + 1) {
				objectsToCreate.Items = append(objectsToCreate.Items, list.Objects[i+1:]...)
			}
			break
		}

		objectsToCreate.Items = append(objectsToCreate.Items, list.Objects[i])
	}
	if projectFromTemplate == nil {
		return nil, kapierror.NewInternalError(fmt.Errorf("the project template (%s/%s) is not correctly configured: must contain a project resource", r.templateNamespace, r.templateName))
	}

	// we split out project creation separately so that if racers compete for the same project, only one wins and gets to create the rest of its template objects
	if _, err := r.openshiftClient.Projects().Create(projectFromTemplate); err != nil {
		return nil, err
	}

	bulk := configcmd.Bulk{
		Mapper: latest.RESTMapper,
		Typer:  kapi.Scheme,
		RESTClientFactory: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			if latest.OriginKind(mapping.Kind, mapping.APIVersion) {
				return r.openshiftClient, nil
			}
			return r.kubeClient, nil
		},
	}
	if err := utilerrors.NewAggregate(bulk.Create(objectsToCreate, projectName)); err != nil {
		return nil, kapierror.NewInternalError(err)
	}

	return r.openshiftClient.Projects().Get(projectName)
}
Example #27
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	trace := util.NewTrace("Update " + reflect.TypeOf(obj).String())
	defer trace.LogIfLong(time.Second)
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, false, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	// If AllowUnconditionalUpdate() is true and the object specified by the user does not have a resource version,
	// then we populate it with the latest version.
	// Else, we check that the version specified by the user matches the version of latest etcd object.
	resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
	if err != nil {
		return nil, false, err
	}
	doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
	// TODO: expose TTL
	creating := false
	out := e.NewFunc()
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.EndpointName, name)
			}
			creating = true
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest etcd object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.Expiration, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.EndpointName, name, fmt.Errorf("the object has been modified; please apply your changes to the latest version and try again"))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		if creating {
			err = etcderr.InterpretCreateError(err, e.EndpointName, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		} else {
			err = etcderr.InterpretUpdateError(err, e.EndpointName, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
Example #28
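// Create is a variant of the ImageStreamImport flow in Example #24: it imports
// the requested images, tags them into the stream, and persists each image,
// reusing results for images already handled in this request.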
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	isi, ok := obj.(*api.ImageStreamImport)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("obj is not an ImageStreamImport: %#v", obj))
	}

	inputMeta := isi.ObjectMeta

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	namespace, ok := kapi.NamespaceFrom(ctx)
	if !ok {
		return nil, kapierrors.NewBadRequest("a namespace must be specified to import images")
	}

	secrets, err := r.secrets.ImageStreamSecrets(namespace).Secrets(isi.Name, kapi.ListOptions{})
	if err != nil {
		util.HandleError(fmt.Errorf("unable to load secrets for namespace %q: %v", namespace, err))
		secrets = &kapi.SecretList{}
	}

	if r.clientFn != nil {
		if client := r.clientFn(); client != nil {
			ctx = kapi.WithValue(ctx, importer.ContextKeyV1RegistryClient, client)
		}
	}
	credentials := importer.NewCredentialsForSecrets(secrets.Items)
	importCtx := importer.NewContext(r.transport).WithCredentials(credentials)

	imports := r.importFn(importCtx)
	if err := imports.Import(ctx.(gocontext.Context), isi); err != nil {
		return nil, kapierrors.NewInternalError(err)
	}

	// TODO: perform the transformation of the image stream and return it with the ISI if import is false
	//   so that clients can see what the resulting object would look like.
	if !isi.Spec.Import {
		clearManifests(isi)
		return isi, nil
	}

	create := false
	stream, err := r.streams.GetImageStream(ctx, isi.Name)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}
		// consistency check: if the request pinned a resourceVersion or UID, the stream was expected to exist
		if len(inputMeta.ResourceVersion) > 0 || len(inputMeta.UID) > 0 {
			return nil, err
		}
		create = true
		stream = &api.ImageStream{
			ObjectMeta: kapi.ObjectMeta{
				Name:       isi.Name,
				Namespace:  namespace,
				Generation: 0,
			},
		}
	} else {
		if len(inputMeta.ResourceVersion) > 0 && inputMeta.ResourceVersion != stream.ResourceVersion {
			glog.V(4).Infof("DEBUG: mismatch between requested UID %s and located UID %s", inputMeta.UID, stream.UID)
			return nil, kapierrors.NewConflict("imageStream", inputMeta.Name, fmt.Errorf("the image stream was updated from %q to %q", inputMeta.ResourceVersion, stream.ResourceVersion))
		}
		if len(inputMeta.UID) > 0 && inputMeta.UID != stream.UID {
			glog.V(4).Infof("DEBUG: mismatch between requested UID %s and located UID %s", inputMeta.UID, stream.UID)
			return nil, kapierrors.NewNotFound("imageStream", inputMeta.Name)
		}
	}

	if stream.Annotations == nil {
		stream.Annotations = make(map[string]string)
	}
	now := unversioned.Now()
	stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = now.UTC().Format(time.RFC3339)
	gen := stream.Generation + 1
	zero := int64(0)

	importedImages := make(map[string]error)
	updatedImages := make(map[string]*api.Image)
	if spec := isi.Spec.Repository; spec != nil {
		for i, imageStatus := range isi.Status.Repository.Images {
			image := imageStatus.Image
			if image == nil {
				continue
			}

			// update the spec tag
			ref, err := api.ParseDockerImageReference(image.DockerImageReference)
			if err != nil {
				// skip images whose Docker reference cannot be parsed; there is no tag to update
				continue
			}
			tag := ref.Tag
			if len(imageStatus.Tag) > 0 {
				tag = imageStatus.Tag
			}
			if _, ok := stream.Spec.Tags[tag]; !ok {
				if stream.Spec.Tags == nil {
					stream.Spec.Tags = make(map[string]api.TagReference)
				}
				stream.Spec.Tags[tag] = api.TagReference{
					From: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: image.DockerImageReference,
					},
					Generation:   &gen,
					ImportPolicy: api.TagImportPolicy{Insecure: spec.ImportPolicy.Insecure},
				}
			}

			// import or reuse the image
			importErr, imported := importedImages[image.Name]
			if importErr != nil {
				api.SetTagConditions(stream, tag, newImportFailedCondition(importErr, gen, now))
			}

			pullSpec, _ := api.MostAccuratePullSpec(image.DockerImageReference, image.Name, "")
			api.AddTagEventToImageStream(stream, tag, api.TagEvent{
				Created:              now,
				DockerImageReference: pullSpec,
				Image:                image.Name,
				Generation:           gen,
			})

			if imported {
				if updatedImage, ok := updatedImages[image.Name]; ok {
					isi.Status.Repository.Images[i].Image = updatedImage
				}
				continue
			}

			// establish the image into the store
			updated, err := r.images.Create(ctx, image)
			switch {
			case kapierrors.IsAlreadyExists(err):
				if err := api.ImageWithMetadata(image); err != nil {
					glog.V(4).Infof("Unable to update image metadata during image import when image already exists %q: err", image.Name, err)
				}
				updated = image
				fallthrough
			case err == nil:
				updatedImage := updated.(*api.Image)
				updatedImages[image.Name] = updatedImage
				isi.Status.Repository.Images[i].Image = updatedImage
				importedImages[image.Name] = nil
			default:
				importedImages[image.Name] = err
			}
		}
	}

	for i, spec := range isi.Spec.Images {
		if spec.To == nil {
			continue
		}
		tag := spec.To.Name

		if stream.Spec.Tags == nil {
			stream.Spec.Tags = make(map[string]api.TagReference)
		}
		specTag := stream.Spec.Tags[tag]
		from := spec.From
		specTag.From = &from
		specTag.Generation = &zero
		specTag.ImportPolicy.Insecure = spec.ImportPolicy.Insecure
		stream.Spec.Tags[tag] = specTag

		status := isi.Status.Images[i]
		if status.Image == nil || status.Status.Status == unversioned.StatusFailure {
			message := status.Status.Message
			if len(message) == 0 {
				message = "unknown error prevented import"
			}
			api.SetTagConditions(stream, tag, api.TagEventCondition{
				Type:       api.ImportSuccess,
				Status:     kapi.ConditionFalse,
				Message:    message,
				Reason:     string(status.Status.Reason),
				Generation: gen,

				LastTransitionTime: now,
			})
			continue
		}
		image := status.Image

		importErr, imported := importedImages[image.Name]
		if importErr != nil {
			api.SetTagConditions(stream, tag, newImportFailedCondition(importErr, gen, now))
		}

		pullSpec, _ := api.MostAccuratePullSpec(image.DockerImageReference, image.Name, "")
		api.AddTagEventToImageStream(stream, tag, api.TagEvent{
			Created:              now,
			DockerImageReference: pullSpec,
			Image:                image.Name,
			Generation:           gen,
		})

		if imported {
			continue
		}
		_, err = r.images.Create(ctx, image)
		if kapierrors.IsAlreadyExists(err) {
			err = nil
		}
		importedImages[image.Name] = err
	}

	// TODO: should we allow partial failure?
	for _, err := range importedImages {
		if err != nil {
			return nil, err
		}
	}

	clearManifests(isi)

	if create {
		obj, err = r.internalStreams.Create(ctx, stream)
	} else {
		obj, _, err = r.internalStreams.Update(ctx, stream)
	}
	if err != nil {
		return nil, err
	}
	isi.Status.Import = obj.(*api.ImageStream)
	return isi, nil
}
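Both import loops in this example share a memoization idiom: importedImages records, per image name, the error of the first persistence attempt (nil meaning success), and updatedImages caches the stored object so later tags of the same image reuse it instead of calling Create again. A stripped-down sketch of the idiom follows; the image type and the injected create function are hypothetical.

package main

import (
	"errors"
	"fmt"
)

type image struct{ Name string }

var errAlreadyExists = errors.New("already exists")

// createOnce persists each distinct image a single time, remembering the
// outcome. seen maps image name -> error of the first attempt (nil = success).
func createOnce(imgs []image, create func(image) error) map[string]error {
	seen := make(map[string]error)
	for _, img := range imgs {
		if _, done := seen[img.Name]; done {
			continue // already attempted; reuse the recorded outcome
		}
		err := create(img)
		if errors.Is(err, errAlreadyExists) {
			err = nil // an existing image counts as success, mirroring the IsAlreadyExists handling above
		}
		seen[img.Name] = err
	}
	return seen
}

func main() {
	imgs := []image{{"sha256:a"}, {"sha256:a"}, {"sha256:b"}}
	calls := 0
	out := createOnce(imgs, func(image) error { calls++; return nil })
	fmt.Println(calls, out) // 2 map[sha256:a:<nil> sha256:b:<nil>]
}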
Example #29
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}

	var (
		creatingObj runtime.Object
		creating    = false
	)

	storagePreconditions := &storage.Preconditions{}
	if preconditions := objInfo.Preconditions(); preconditions != nil {
		storagePreconditions.UID = preconditions.UID
	}

	out := e.NewFunc()
	// deleteObj is only used in case a deletion is carried out
	var deleteObj runtime.Object
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Given the existing object, get the new object
		obj, err := objInfo.UpdatedObject(ctx, existing)
		if err != nil {
			return nil, nil, err
		}

		// If AllowUnconditionalUpdate() is true and the object specified by the user does not have a resource version,
		// then we populate it with the latest version.
		// Else, we check that the version specified by the user matches the version of latest storage object.
		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
		if err != nil {
			return nil, nil, err
		}
		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()

		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
			}
			creating = true
			creatingObj = obj
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		creatingObj = nil
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := unversioned.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		if e.shouldDelete(ctx, key, obj, existing) {
			deleteObj = obj
			return nil, nil, errEmptiedFinalizers
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		// delete the object
		if err == errEmptiedFinalizers {
			return e.deleteForEmptyFinalizers(ctx, name, key, deleteObj, storagePreconditions)
		}
		if creating {
			err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)
		} else {
			err = storeerr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
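A detail worth isolating in this variant is errEmptiedFinalizers: the tryUpdate closure cannot perform a deletion itself, so when shouldDelete fires it aborts the write with a sentinel error that Update intercepts and converts into deleteForEmptyFinalizers. A self-contained sketch of that sentinel-error handoff, with a hypothetical object type in place of the real API machinery:

package main

import (
	"errors"
	"fmt"
)

// errSwitchToDelete plays the role of errEmptiedFinalizers above: the update
// closure uses it to abort the write so the caller runs a delete instead.
var errSwitchToDelete = errors.New("object should be deleted, not updated")

type object struct {
	Finalizers []string
	Deleting   bool
}

func update(obj *object, mutate func(*object) error) (string, error) {
	if err := mutate(obj); err != nil {
		if errors.Is(err, errSwitchToDelete) {
			return "deleted", nil // intercept the sentinel and delete instead of writing
		}
		return "", err
	}
	return "updated", nil
}

func main() {
	obj := &object{Deleting: true, Finalizers: nil}
	result, err := update(obj, func(o *object) error {
		// mirrors shouldDelete: a terminating object with no finalizers left
		if o.Deleting && len(o.Finalizers) == 0 {
			return errSwitchToDelete
		}
		return nil
	})
	fmt.Println(result, err) // deleted <nil>
}

Keeping the closure free of side effects and signaling through a sentinel means the surrounding retry loop can safely re-run it any number of times.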
Example #30
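// Create handles a ProjectRequest: it instantiates the project template,
// creates the project itself first so concurrent requests race on
// AlreadyExists, then creates the remaining template objects within it.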
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(projectrequestregistry.Strategy, ctx, obj); err != nil {
		return nil, err
	}

	projectRequest := obj.(*projectapi.ProjectRequest)

	if _, err := r.openshiftClient.Projects().Get(projectRequest.Name); err == nil {
		return nil, kapierror.NewAlreadyExists(projectapi.Resource("project"), projectRequest.Name)
	}

	projectName := projectRequest.Name
	projectAdmin := ""
	projectRequester := ""
	if userInfo, exists := kapi.UserFrom(ctx); exists {
		projectAdmin = userInfo.GetName()
		projectRequester = userInfo.GetName()
	}

	template, err := r.getTemplate()
	if err != nil {
		return nil, err
	}

	for i := range template.Parameters {
		switch template.Parameters[i].Name {
		case ProjectAdminUserParam:
			template.Parameters[i].Value = projectAdmin
		case ProjectDescriptionParam:
			template.Parameters[i].Value = projectRequest.Description
		case ProjectDisplayNameParam:
			template.Parameters[i].Value = projectRequest.DisplayName
		case ProjectNameParam:
			template.Parameters[i].Value = projectName
		case ProjectRequesterParam:
			template.Parameters[i].Value = projectRequester
		}
	}

	list, err := r.openshiftClient.TemplateConfigs(kapi.NamespaceDefault).Create(template)
	if err != nil {
		return nil, err
	}
	if err := utilerrors.NewAggregate(runtime.DecodeList(list.Objects, kapi.Codecs.UniversalDecoder())); err != nil {
		return nil, kapierror.NewInternalError(err)
	}

	// one of the items in this list should be the project. We are going to locate it, remove it from the list, and create it separately.
	var projectFromTemplate *projectapi.Project
	var lastRoleBinding *authorizationapi.RoleBinding
	objectsToCreate := &kapi.List{}
	for i := range list.Objects {
		if templateProject, ok := list.Objects[i].(*projectapi.Project); ok {
			projectFromTemplate = templateProject
			// don't add this to the list to create.  We'll create the project separately.
			continue
		}

		if roleBinding, ok := list.Objects[i].(*authorizationapi.RoleBinding); ok {
			// keep track of the rolebinding, but still add it to the list
			lastRoleBinding = roleBinding
		}

		objectsToCreate.Items = append(objectsToCreate.Items, list.Objects[i])
	}
	if projectFromTemplate == nil {
		return nil, kapierror.NewInternalError(fmt.Errorf("the project template (%s/%s) is not correctly configured: must contain a project resource", r.templateNamespace, r.templateName))
	}

	// we split out project creation so that when racing requests contend for the same project, only one wins and goes on to create the rest of its template objects
	createdProject, err := r.openshiftClient.Projects().Create(projectFromTemplate)
	if err != nil {
		// log errors other than AlreadyExists and Forbidden
		if !kapierror.IsAlreadyExists(err) && !kapierror.IsForbidden(err) {
			utilruntime.HandleError(fmt.Errorf("error creating requested project %#v: %v", projectFromTemplate, err))
		}
		return nil, err
	}

	// Stop on the first error, since we have to delete the whole project if any item in the template fails
	stopOnErr := configcmd.AfterFunc(func(_ *resource.Info, err error) bool {
		return err != nil
	})

	bulk := configcmd.Bulk{
		Mapper: &resource.Mapper{
			RESTMapper:  client.DefaultMultiRESTMapper(),
			ObjectTyper: kapi.Scheme,
			ClientMapper: resource.ClientMapperFunc(func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
				if latest.OriginKind(mapping.GroupVersionKind) {
					return r.openshiftClient, nil
				}
				return r.kubeClient, nil
			}),
		},
		After: stopOnErr,
		Op:    configcmd.Create,
	}
	if err := utilerrors.NewAggregate(bulk.Run(objectsToCreate, projectName)); err != nil {
		utilruntime.HandleError(fmt.Errorf("error creating items in requested project %q: %v", createdProject.Name, err))
		// We have to clean up the project if any part of the project request template fails
		if deleteErr := r.openshiftClient.Projects().Delete(createdProject.Name); deleteErr != nil {
			utilruntime.HandleError(fmt.Errorf("error cleaning up requested project %q: %v", createdProject.Name, deleteErr))
		}
		return nil, kapierror.NewInternalError(err)
	}

	// wait for a rolebinding if we created one
	if lastRoleBinding != nil {
		r.waitForRoleBinding(projectName, lastRoleBinding.Name)
	}

	return r.openshiftClient.Projects().Get(projectName)
}
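The ordering in this last example generalizes beyond projects: create the uniquely named, contended resource first so that racing requests serialize on its AlreadyExists error, then create the dependent objects, and roll everything back on the first failure. A minimal sketch of that create-then-clean-up shape; the closures are hypothetical stand-ins for the API calls:

package main

import (
	"errors"
	"fmt"
)

// createAll creates the contended resource first, then the dependent items,
// deleting the resource again if any dependent creation fails.
func createAll(createProject func() error, createItems []func() error, deleteProject func() error) error {
	if err := createProject(); err != nil {
		return err // lost the race (AlreadyExists) or forbidden; nothing to clean up
	}
	for _, create := range createItems {
		if err := create(); err != nil {
			// best-effort cleanup: the half-built resource must not survive
			if derr := deleteProject(); derr != nil {
				fmt.Println("cleanup failed:", derr)
			}
			return err
		}
	}
	return nil
}

func main() {
	boom := errors.New("boom")
	err := createAll(
		func() error { return nil },
		[]func() error{func() error { return nil }, func() error { return boom }},
		func() error { return nil },
	)
	fmt.Println(err) // boom
}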