Example #1
func TestResourceRestrictionsWithWeirdWork(t *testing.T) {
	test1 := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Rachel"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "get",
			Resource: "BUILDCONFIGS",
		},
		expectedAllowed: true,
		expectedReason:  "allowed by rule in adze",
	}
	test1.clusterPolicies = newDefaultClusterPolicies()
	test1.policies = newAdzePolicies()
	test1.clusterBindings = newDefaultClusterPolicyBindings()
	test1.bindings = newAdzeBindings()
	test1.test(t)

	test2 := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Rachel"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "get",
			Resource: "buildconfigs",
		},
		expectedAllowed: true,
		expectedReason:  "allowed by rule in adze",
	}
	test2.clusterPolicies = newDefaultClusterPolicies()
	test2.policies = newAdzePolicies()
	test2.clusterBindings = newDefaultClusterPolicyBindings()
	test2.bindings = newAdzeBindings()
	test2.test(t)
}
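Note: both sub-tests expect the same "allowed by rule in adze" result even though the resource is spelled "BUILDCONFIGS" in one and "buildconfigs" in the other, i.e. the authorizer appears to match resource names case-insensitively. A minimal standalone sketch of that normalization step (the rule set and helper below are hypothetical, not the actual OpenShift matcher):

package main

import (
	"fmt"
	"strings"
)

// allowedResources is a hypothetical rule set keyed by lower-cased resource name.
var allowedResources = map[string]bool{
	"buildconfigs": true,
}

// resourceAllowed normalizes the resource name before the lookup, so
// "BUILDCONFIGS", "BuildConfigs", and "buildconfigs" all hit the same rule.
func resourceAllowed(resource string) bool {
	return allowedResources[strings.ToLower(resource)]
}

func main() {
	fmt.Println(resourceAllowed("BUILDCONFIGS")) // true
	fmt.Println(resourceAllowed("buildconfigs")) // true
	fmt.Println(resourceAllowed("secrets"))      // false
}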
Example #2
func TestVerbRestrictionsWork(t *testing.T) {
	test1 := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Valerie"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "get",
			Resource: "buildConfigs",
		},
		expectedAllowed: true,
		expectedReason:  "allowed by rule in adze",
	}
	test1.clusterPolicies = newDefaultClusterPolicies()
	test1.policies = newAdzePolicies()
	test1.clusterBindings = newDefaultClusterPolicyBindings()
	test1.bindings = newAdzeBindings()
	test1.test(t)

	test2 := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Valerie"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "create",
			Resource: "buildConfigs",
		},
		expectedAllowed: false,
		expectedReason:  `User "Valerie" cannot create buildConfigs in project "adze"`,
	}
	test2.clusterPolicies = newDefaultClusterPolicies()
	test2.policies = newAdzePolicies()
	test2.clusterBindings = newDefaultClusterPolicyBindings()
	test2.bindings = newAdzeBindings()
	test2.test(t)
}
Example #3
// resolveImageStreamReference looks up the ImageStream[Tag/Image] and converts it to a
// docker pull spec that can be used in an Image field.
func (g *BuildGenerator) resolveImageStreamReference(ctx kapi.Context, from kapi.ObjectReference, defaultNamespace string) (string, error) {
	var namespace string
	if len(from.Namespace) != 0 {
		namespace = from.Namespace
	} else {
		namespace = defaultNamespace
	}

	glog.V(4).Infof("Resolving ImageStreamReference %s of Kind %s in namespace %s", from.Name, from.Kind, namespace)
	switch from.Kind {
	case "ImageStream":
		// NOTE: The 'ImageStream' reference should be used only for the 'output' image
		is, err := g.Client.GetImageStream(kapi.WithNamespace(ctx, namespace), from.Name)
		if err != nil {
			glog.V(2).Infof("Error getting ImageStream %s/%s: %v", namespace, from.Name, err)
			return "", err
		}
		image, err := imageapi.DockerImageReferenceForStream(is)
		if err != nil {
			glog.V(2).Infof("Error resolving Docker image reference for %s/%s: %v", namespace, from.Name, err)
			return "", err
		}
		return image.String(), nil
	case "ImageStreamImage":
		imageStreamImage, err := g.Client.GetImageStreamImage(kapi.WithNamespace(ctx, namespace), from.Name)
		if err != nil {
			glog.V(2).Infof("Error ImageStreamReference %s in namespace %s: %v", from.Name, namespace, err)
			if errors.IsNotFound(err) {
				return "", err
			}
			return "", fatalError{err}
		}
		image := imageStreamImage.Image
		glog.V(4).Infof("Resolved ImageStreamReference %s to image %s with reference %s in namespace %s", from.Name, image.Name, image.DockerImageReference, namespace)
		return image.DockerImageReference, nil
	case "ImageStreamTag":
		imageStreamTag, err := g.Client.GetImageStreamTag(kapi.WithNamespace(ctx, namespace), from.Name)
		if err != nil {
			glog.V(2).Infof("Error resolving ImageStreamTag reference %s in namespace %s: %v", from.Name, namespace, err)
			if errors.IsNotFound(err) {
				return "", err
			}
			return "", fatalError{err}
		}
		image := imageStreamTag.Image
		glog.V(4).Infof("Resolved ImageStreamTag %s to image %s with reference %s in namespace %s", from.Name, image.Name, image.DockerImageReference, namespace)
		return image.DockerImageReference, nil
	case "DockerImage":
		return from.Name, nil
	default:
		return "", fatalError{fmt.Errorf("Unknown From Kind %s", from.Kind)}
	}
}
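resolveImageStreamReference is a straight dispatch on from.Kind: the stream-backed kinds require a registry lookup, "DockerImage" is already a pull spec and is returned verbatim, and anything else is a fatal error. A simplified, self-contained model of that dispatch (the types and lookup below are stand-ins, not the real OpenShift client API):

package main

import (
	"errors"
	"fmt"
)

// objectReference is a pared-down stand-in for kapi.ObjectReference.
type objectReference struct {
	Kind, Namespace, Name string
}

// lookup is a stand-in for the client calls made for stream-backed kinds;
// here every stream resolves to a fixed, fabricated pull spec.
func lookup(namespace, name string) (string, error) {
	return fmt.Sprintf("registry.local/%s/%s@sha256:abc", namespace, name), nil
}

// resolvePullSpec mirrors the kind dispatch in resolveImageStreamReference.
func resolvePullSpec(from objectReference, defaultNamespace string) (string, error) {
	namespace := defaultNamespace
	if from.Namespace != "" {
		namespace = from.Namespace
	}
	switch from.Kind {
	case "ImageStream", "ImageStreamImage", "ImageStreamTag":
		return lookup(namespace, from.Name)
	case "DockerImage":
		// Docker image references are already pull specs.
		return from.Name, nil
	default:
		return "", errors.New("unknown From kind: " + from.Kind)
	}
}

func main() {
	spec, _ := resolvePullSpec(objectReference{Kind: "DockerImage", Name: "busybox:latest"}, "default")
	fmt.Println(spec) // busybox:latest
}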
Example #4
func TestEtcdListRoutesInDifferentNamespaces(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	namespaceAlfa := kapi.WithNamespace(kapi.NewContext(), "alfa")
	namespaceBravo := kapi.WithNamespace(kapi.NewContext(), "bravo")
	fakeClient.Data["/routes/alfa"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.Route{ObjectMeta: kapi.ObjectMeta{Name: "foo1"}}),
					},
				},
			},
		},
		E: nil,
	}
	fakeClient.Data["/routes/bravo"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.Route{ObjectMeta: kapi.ObjectMeta{Name: "foo2"}}),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.Route{ObjectMeta: kapi.ObjectMeta{Name: "bar2"}}),
					},
				},
			},
		},
		E: nil,
	}
	registry := NewTestEtcd(fakeClient)

	routesAlfa, err := registry.ListRoutes(namespaceAlfa, labels.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(routesAlfa.Items) != 1 || routesAlfa.Items[0].Name != "foo1" {
		t.Errorf("Unexpected builds list: %#v", routesAlfa)
	}

	routesBravo, err := registry.ListRoutes(namespaceBravo, labels.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(routesBravo.Items) != 2 || routesBravo.Items[0].Name != "foo2" || routesBravo.Items[1].Name != "bar2" {
		t.Errorf("Unexpected builds list: %#v", routesBravo)
	}
}
Example #5
// ListResource returns a function that handles retrieving a list of resources from a RESTStorage object.
func ListResource(r RESTLister, ctxFn ContextFunc, namer ScopeNamer, codec runtime.Codec, version, apiResource string) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter

		namespace, err := namer.Namespace(req)
		if err != nil {
			errorJSON(err, codec, w)
			return
		}
		ctx := ctxFn(req)
		ctx = api.WithNamespace(ctx, namespace)

		label, field, err := parseSelectorQueryParams(req.Request.URL.Query(), version, apiResource)
		if err != nil {
			errorJSON(err, codec, w)
			return
		}

		result, err := r.List(ctx, label, field)
		if err != nil {
			errorJSON(err, codec, w)
			return
		}
		if err := setListSelfLink(result, req, namer); err != nil {
			errorJSON(err, codec, w)
			return
		}
		writeJSON(http.StatusOK, codec, result, w)
	}
}
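ListResource shows the handler convention used throughout this API server code: each step either succeeds or writes an error response and returns immediately. A stdlib-only sketch of the same short-circuit shape (the store interface and handler below are invented for illustration):

package main

import (
	"encoding/json"
	"net/http"
)

// store is a hypothetical lister keyed by namespace.
type store interface {
	List(namespace string) ([]string, error)
}

// memStore is a trivial in-memory store used only for the example.
type memStore map[string][]string

func (m memStore) List(namespace string) ([]string, error) { return m[namespace], nil }

// listHandler mirrors ListResource's shape: resolve the scope, run the query,
// and bail out with an error payload as soon as any step fails.
func listHandler(s store) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		namespace := req.URL.Query().Get("namespace")
		if namespace == "" {
			http.Error(w, `{"error":"namespace is required"}`, http.StatusBadRequest)
			return
		}
		items, err := s.List(namespace)
		if err != nil {
			http.Error(w, `{"error":"list failed"}`, http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(items)
	}
}

func main() {
	http.HandleFunc("/routes", listHandler(memStore{"alfa": {"foo1"}}))
	// http.ListenAndServe(":8080", nil) // uncomment to actually serve
}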
Example #6
// Bind just does a POST binding RPC.
func (b *binder) Bind(binding *api.Binding) error {
	glog.V(2).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
	ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
	return b.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
	// TODO: use Pods interface for binding once clusters are upgraded
	// return b.Pods(binding.Namespace).Bind(binding)
}
Example #7
// getServiceEnvironmentVariables populates a list of environment variables that are used
// in the container environment to get access to services.
func (b *BasicBoundPodFactory) getServiceEnvironmentVariables(ctx api.Context, registry service.Registry, machine string) ([]api.EnvVar, error) {
	var result []api.EnvVar
	servicesInNs, err := registry.ListServices(ctx)
	if err != nil {
		return result, err
	}

	masterServices, err := registry.ListServices(api.WithNamespace(api.NewContext(), b.MasterServiceNamespace))
	if err != nil {
		return result, err
	}

	projection := map[string]api.Service{}
	services := []api.Service{}
	for _, service := range masterServices.Items {
		if masterServiceNames.Has(service.Name) {
			projection[service.Name] = service
		}
	}
	for _, service := range servicesInNs.Items {
		projection[service.Name] = service
	}
	for _, service := range projection {
		services = append(services, service)
	}

	return envvars.FromServices(&api.ServiceList{Items: services}), nil
}
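The projection map in getServiceEnvironmentVariables merges the two service lists so that namespace-local services take precedence: they are written into the map last and overwrite master services of the same name. A tiny illustration of that merge order (the service names and IPs are made up):

package main

import "fmt"

func main() {
	// Hypothetical master and namespace-local services (name -> IP).
	master := map[string]string{"kubernetes": "10.0.0.1", "dns": "10.0.0.10"}
	local := map[string]string{"dns": "10.1.0.10"}

	projection := map[string]string{}
	for name, ip := range master {
		projection[name] = ip
	}
	// Namespace-local entries are written last, so they override master ones.
	for name, ip := range local {
		projection[name] = ip
	}
	fmt.Println(projection["dns"]) // 10.1.0.10
}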
Example #8
func (m *VirtualStorage) confirmNoEscalation(ctx kapi.Context, roleBinding *authorizationapi.RoleBinding) error {
	modifyingRole, err := m.getReferencedRole(roleBinding.RoleRef)
	if err != nil {
		return err
	}

	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		m.PolicyRegistry,
		m.BindingRegistry,
		m.ClusterPolicyRegistry,
		m.ClusterPolicyBindingRegistry,
	)
	ownerLocalRules, err := ruleResolver.GetEffectivePolicyRules(ctx)
	if err != nil {
		return err
	}
	masterContext := kapi.WithNamespace(ctx, "")
	ownerGlobalRules, err := ruleResolver.GetEffectivePolicyRules(masterContext)
	if err != nil {
		return err
	}

	ownerRules := make([]authorizationapi.PolicyRule, 0, len(ownerGlobalRules)+len(ownerLocalRules))
	ownerRules = append(ownerRules, ownerLocalRules...)
	ownerRules = append(ownerRules, ownerGlobalRules...)

	ownerRightsCover, missingRights := rulevalidation.Covers(ownerRules, modifyingRole.Rules)
	if !ownerRightsCover {
		user, _ := kapi.UserFrom(ctx)
		return fmt.Errorf("attempt to grant extra privileges: %v\nuser=%v\nownerrules%v\n", missingRights, user, ownerRules)
	}

	return nil
}
Example #9
// ConnectResource returns a function that handles a connect request on a rest.Storage object.
func ConnectResource(connecter rest.Connecter, scope RequestScope, connectOptionsKind string, subpath bool, subpathKey string) restful.RouteFunction {
	return func(req *restful.Request, res *restful.Response) {
		w := res.ResponseWriter
		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			errorJSON(err, scope.Codec, w)
			return
		}
		ctx := scope.ContextFunc(req)
		ctx = api.WithNamespace(ctx, namespace)
		opts, err := getRequestOptions(req, scope, connectOptionsKind, subpath, subpathKey)
		if err != nil {
			errorJSON(err, scope.Codec, w)
			return
		}
		handler, err := connecter.Connect(ctx, name, opts)
		if err != nil {
			errorJSON(err, scope.Codec, w)
			return
		}
		handler.ServeHTTP(w, req.Request)
		err = handler.RequestError()
		if err != nil {
			errorJSON(err, scope.Codec, w)
			return
		}
	}
}
Example #10
func TestGet(t *testing.T) {
	expect := validNewPod()
	expect.Status.Host = "machine"

	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.Data["/registry/pods/test/foo"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, expect),
			},
		},
	}
	storage, _, _ := NewREST(helper)
	cache := &fakeCache{statusToReturn: &api.PodStatus{Phase: api.PodRunning}}
	storage = storage.WithPodStatus(cache)

	obj, err := storage.Get(api.WithNamespace(api.NewContext(), "test"), "foo")
	pod := obj.(*api.Pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	expect.Status.Phase = api.PodRunning
	if e, a := expect, pod; !api.Semantic.DeepEqual(e, a) {
		t.Errorf("Unexpected pod: %s", util.ObjectDiff(e, a))
	}
}
Example #11
func TestUpdateCannotChangeRoleRefError(t *testing.T) {
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})

	storage := makeTestStorage()
	obj, err := storage.Create(ctx, &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-different"},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	original := obj.(*authorizationapi.RoleBinding)

	roleBinding := &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-different", ResourceVersion: original.ResourceVersion},
		RoleRef:    kapi.ObjectReference{Name: "cluster-admin"},
	}

	_, _, err = storage.Update(ctx, roleBinding)
	if err == nil {
		t.Errorf("Missing expected error")
		return
	}
	expectedErr := "cannot change roleRef"
	if !strings.Contains(err.Error(), expectedErr) {
		t.Errorf("Expected %v, got %v", expectedErr, err.Error())
	}
}
Example #12
// TestGetClusterPolicy tests that a ReadOnlyPolicyClient GetPolicy() call correctly retrieves a cluster policy
// when the namespace given is equal to the empty string
func TestGetClusterPolicy(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicy *authorizationapi.Policy
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)
	name := "uniqueClusterPolicyName"

	util.Until(func() {
		clusterPolicy, err = testClient.GetPolicy(context, name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) &&
			(clusterPolicy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy using GetPolicy(): %v", err)
	case clusterPolicy == nil:
		t.Error("Policy is nil")
	case clusterPolicy.Name != name:
		t.Errorf("Expected policy.Name to be '%s', but got '%s'", name, clusterPolicy.Name)
	case clusterPolicy.Namespace != "":
		t.Errorf("Expected policy.Namespace to be '%s', but got '%s'", namespace, clusterPolicy.Namespace)
	}
}
Example #13
func TestUpdateError(t *testing.T) {
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})

	storage := makeTestStorage()
	obj, err := storage.Create(ctx, &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-different"},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	original := obj.(*authorizationapi.RoleBinding)

	roleBinding := &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{Name: "my-roleBinding", ResourceVersion: original.ResourceVersion},
		RoleRef:    kapi.ObjectReference{Name: "admin"},
	}

	_, _, err = storage.Update(ctx, roleBinding)
	if err == nil {
		t.Errorf("Missing expected error")
		return
	}
	if !kapierrors.IsNotFound(err) {
		t.Errorf("Unexpected error %v", err)
	}
}
Example #14
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})

	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&authorizationapi.ClusterPolicy{},
		indexer,
		2*time.Minute,
	)

	return readOnlyClusterPolicyCache{
		registry:  registry,
		indexer:   indexer,
		reflector: *reflector,

		keyFunc: cache.MetaNamespaceKeyFunc,
	}
}
Example #15
func TestUpdate(t *testing.T) {
	storage := makeLocalTestStorage()
	ctx := kapi.WithNamespace(kapi.NewContext(), "unittest")
	realizedRoleObj, _ := storage.Create(ctx, &authorizationapi.Role{
		ObjectMeta: kapi.ObjectMeta{Name: "my-role"},
	})
	realizedRole := realizedRoleObj.(*authorizationapi.Role)

	role := &authorizationapi.Role{
		ObjectMeta: kapi.ObjectMeta{Name: "my-role", ResourceVersion: realizedRole.ResourceVersion},
	}

	obj, created, err := storage.Update(ctx, role)
	if err != nil || created {
		t.Errorf("Unexpected error %v", err)
	}

	switch obj.(type) {
	case *kapi.Status:
		t.Errorf("Unexpected operation error: %v", obj)

	case *authorizationapi.Role:
		if !reflect.DeepEqual(role, obj) {
			t.Errorf("Updated role does not match input role."+
				" Expected: %v, Got: %v", role, obj)
		}
	default:
		t.Errorf("Unexpected result type: %v", obj)
	}
}
Example #16
func TestAllowedWithMissingBinding(t *testing.T) {
	test := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Anna"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "update",
			Resource: "roles",
		},
		expectedAllowed: true,
		expectedReason:  "allowed by rule in adze",
	}
	test.clusterPolicies = newDefaultClusterPolicies()
	test.policies = append(test.policies, newAdzePolicies()...)
	test.clusterBindings = newDefaultClusterPolicyBindings()
	test.bindings = append(test.bindings, newAdzeBindings()...)
	test.bindings[0].RoleBindings["missing"] = &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{
			Name: "missing",
		},
		RoleRef: kapi.ObjectReference{
			Name: "not-a-real-binding",
		},
		Users: util.NewStringSet("Anna"),
	}

	test.test(t)
}
Example #17
func (m *VirtualStorage) getReferencedRole(roleRef kapi.ObjectReference) (*authorizationapi.Role, error) {
	ctx := kapi.WithNamespace(kapi.NewContext(), roleRef.Namespace)

	var policy *authorizationapi.Policy
	var err error
	switch {
	case len(roleRef.Namespace) == 0:
		var clusterPolicy *authorizationapi.ClusterPolicy
		clusterPolicy, err = m.ClusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName)
		policy = authorizationapi.ToPolicy(clusterPolicy)
	default:
		policy, err = m.PolicyRegistry.GetPolicy(ctx, authorizationapi.PolicyName)
	}

	if err != nil {
		return nil, err
	}

	role, exists := policy.Roles[roleRef.Name]
	if !exists {
		return nil, kapierrors.NewNotFound("Role", roleRef.Name)
	}

	return role, nil
}
Example #18
func TestDeniedWithError(t *testing.T) {
	test := &authorizeTest{
		context: kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "adze"), &user.DefaultInfo{Name: "Anna"}),
		attributes: &DefaultAuthorizationAttributes{
			Verb:     "update",
			Resource: "roles",
		},
		expectedAllowed: false,
		expectedError:   "my special error",
	}
	test.clusterPolicies = newDefaultClusterPolicies()
	test.policies = append(test.policies, newAdzePolicies()...)
	test.clusterBindings = newDefaultClusterPolicyBindings()
	test.bindings = append(test.bindings, newAdzeBindings()...)
	test.bindings[0].RoleBindings["missing"] = &authorizationapi.RoleBinding{
		ObjectMeta: kapi.ObjectMeta{
			Name: "missing",
		},
		RoleRef: kapi.ObjectReference{
			Name: "not-a-real-binding",
		},
		Users: util.NewStringSet("Anna"),
	}
	test.policyRetrievalError = errors.New("my special error")

	test.test(t)
}
Example #19
func TestUpdateImageStreamConflictingNamespace(t *testing.T) {
	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.Data["/imagestreams/legal-name/bar"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.ImageStream{
					ObjectMeta: kapi.ObjectMeta{Name: "bar", Namespace: "default"},
				}),
				ModifiedIndex: 2,
			},
		},
	}
	storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{})

	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "legal-name"), &fakeUser{})
	obj, created, err := storage.Update(ctx, &api.ImageStream{
		ObjectMeta: kapi.ObjectMeta{Name: "bar", Namespace: "some-value", ResourceVersion: "2"},
	})

	if obj != nil || created {
		t.Error("Expected a nil obj, but we got a value")
	}

	checkExpectedNamespaceError(t, err)
}
Example #20
// namespacingFilter adds a filter that adds the namespace of the request to the context.  Not all requests will have namespaces,
// but any that do will have the appropriate value added.
func namespacingFilter(handler http.Handler, contextMapper kapi.RequestContextMapper) http.Handler {
	infoResolver := &apiserver.APIRequestInfoResolver{util.NewStringSet("api", "osapi", "oapi"), latest.RESTMapper}

	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx, ok := contextMapper.Get(req)
		if !ok {
			http.Error(w, "Unable to find request context", http.StatusInternalServerError)
			return
		}

		if _, exists := kapi.NamespaceFrom(ctx); !exists {
			if requestInfo, err := infoResolver.GetAPIRequestInfo(req); err == nil {
				// only set the namespace if the apiRequestInfo was resolved
				// keep in mind that GetAPIRequestInfo will fail on non-api requests, so don't fail the entire http request on that
				// kind of failure.

				// TODO reconsider special casing this.  Having the special case here allows us to fully share the kube
				// APIRequestInfoResolver without any modification or customization.
				namespace := requestInfo.Namespace
				if (requestInfo.Resource == "projects") && (len(requestInfo.Name) > 0) {
					namespace = requestInfo.Name
				}

				ctx = kapi.WithNamespace(ctx, namespace)
				contextMapper.Update(req, ctx)
			}
		}

		handler.ServeHTTP(w, req)
	})
}
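namespacingFilter is ordinary middleware: derive a value from the request, store it in the context, delegate to the wrapped handler. This codebase predates the standard library's context package, but the same shape expressed with net/http and context would look roughly like this (the key type and resolver below are hypothetical):

package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey int

const namespaceKey ctxKey = iota

// withNamespace stores the namespace in the request context, mirroring
// kapi.WithNamespace + contextMapper.Update in the filter above.
func withNamespace(handler http.Handler, resolve func(*http.Request) (string, bool)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if ns, ok := resolve(req); ok {
			req = req.WithContext(context.WithValue(req.Context(), namespaceKey, ns))
		}
		handler.ServeHTTP(w, req)
	})
}

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ns, _ := req.Context().Value(namespaceKey).(string)
		fmt.Fprintf(w, "namespace=%q\n", ns)
	})
	h := withNamespace(inner, func(req *http.Request) (string, bool) {
		ns := req.URL.Query().Get("namespace")
		return ns, ns != ""
	})
	http.Handle("/", h)
	// http.ListenAndServe(":8080", nil) // uncomment to actually serve
}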
Example #21
func TestGet(t *testing.T) {
	expect := validNewPod()
	expect.Status.Phase = api.PodRunning
	expect.Spec.NodeName = "machine"

	fakeEtcdClient, etcdStorage := newEtcdStorage(t)
	key := etcdtest.AddPrefix("/pods/test/foo")
	fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, expect),
			},
		},
	}
	storage := NewStorage(etcdStorage, nil).Pod

	obj, err := storage.Get(api.WithNamespace(api.NewContext(), "test"), "foo")
	pod := obj.(*api.Pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if e, a := expect, pod; !api.Semantic.DeepEqual(e, a) {
		t.Errorf("Unexpected pod: %s", util.ObjectDiff(e, a))
	}
}
Example #22
func (k *deleter) deleteOne(pod *Pod) error {
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		return err
	}

	log.V(2).Infof("pod deleted: %v", podKey)

	// order is important here: we want to make sure we have the lock before
	// removing the pod from the scheduling queue. this makes the concurrent
	// execution of scheduler-error-handling and delete-handling easier to
	// reason about.
	k.api.Lock()
	defer k.api.Unlock()

	// prevent the scheduler from attempting to pop this; it's also possible that
	// it's concurrently being scheduled (somewhere between pod scheduling and
	// binding) - if so, then we'll end up removing it from taskRegistry which
	// will abort Bind()ing
	k.qr.dequeue(pod.GetUID())

	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
		return noSuchPodErr

	// determine if the task has already been launched to mesos, if not then
	// cleanup is easier (unregister) since there's no state to sync
	case podtask.StatePending:
		if !task.Has(podtask.Launched) {
			// we've been invoked in between Schedule() and Bind()
			if task.HasAcceptedOffer() {
				task.Offer.Release()
				task.Reset()
				task.Set(podtask.Deleted)
				//TODO(jdef) probably want better handling here
				if err := k.api.tasks().Update(task); err != nil {
					return err
				}
			}
			k.api.tasks().Unregister(task)
			return nil
		}
		fallthrough

	case podtask.StateRunning:
		// signal to watchers that the related pod is going down
		task.Set(podtask.Deleted)
		if err := k.api.tasks().Update(task); err != nil {
			log.Errorf("failed to update task w/ Deleted status: %v", err)
		}
		return k.api.killTask(task.ID)

	default:
		log.Infof("cannot kill pod '%s': non-terminal task not found %v", podKey, task.ID)
		return noSuchTaskErr
	}
}
Example #23
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no IP is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had IPs allocated but not yet been created
	// See #8295

	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	r := ipallocator.NewCIDRRange(c.network)
	for _, svc := range list.Items {
		if !api.IsServiceIPSet(&svc) {
			continue
		}
		ip := net.ParseIP(svc.Spec.PortalIP)
		if ip == nil {
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace))
			continue
		}
		switch err := r.Allocate(ip); err {
		case nil:
		case ipallocator.ErrAllocated:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
		case ipallocator.ErrNotInRange:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
		case ipallocator.ErrFull:
			// TODO: send event
			return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services")
		default:
			return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
		}
	}

	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}
	return nil
}
Example #24
// RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no port is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had ports allocated but not yet been created
	// See #8295

	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	r := portallocator.NewPortAllocator(c.portRange)
	for i := range list.Items {
		svc := &list.Items[i]
		ports := service.CollectServiceNodePorts(svc)
		if len(ports) == 0 {
			continue
		}

		for _, port := range ports {
			switch err := r.Allocate(port); err {
			case nil:
			case portallocator.ErrAllocated:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace))
			case portallocator.ErrNotInRange:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %v; please recreate", port, svc.Name, svc.Namespace, c.portRange))
			case portallocator.ErrFull:
				// TODO: send event
				return fmt.Errorf("the port range %v is full; you must widen the port range in order to create new services", c.portRange)
			default:
				return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err)
			}
		}
	}

	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}
	return nil
}
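Both RunOnce variants (Examples #23 and #24) follow the same repair pattern: rebuild an allocator from the authoritative service list and classify each existing claim as valid, double-booked, out of range, or fatally unknown. A self-contained sketch of that classification loop (the allocator below is a toy, not the real portallocator):

package main

import (
	"errors"
	"fmt"
)

var (
	errAllocated  = errors.New("already allocated")
	errNotInRange = errors.New("not in range")
)

// portAllocator is a toy stand-in for portallocator.PortAllocator.
type portAllocator struct {
	lo, hi int
	used   map[int]bool
}

func (a *portAllocator) Allocate(p int) error {
	if p < a.lo || p > a.hi {
		return errNotInRange
	}
	if a.used[p] {
		return errAllocated
	}
	a.used[p] = true
	return nil
}

func main() {
	a := &portAllocator{lo: 30000, hi: 32767, used: map[int]bool{}}
	for _, port := range []int{30080, 30080, 99999} {
		// The same switch shape as RunOnce: tolerate repairable conditions,
		// fail hard only on unknown errors.
		switch err := a.Allocate(port); err {
		case nil:
			fmt.Printf("port %d claimed\n", port)
		case errAllocated:
			fmt.Printf("port %d double-booked; needs repair\n", port)
		case errNotInRange:
			fmt.Printf("port %d outside range; needs repair\n", port)
		default:
			fmt.Printf("fatal: %v\n", err)
		}
	}
}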
Example #25
// createController is a helper function that returns a controller with the updated resource version.
func createController(storage *REST, rc api.ReplicationController, t *testing.T) (api.ReplicationController, error) {
	ctx := api.WithNamespace(api.NewContext(), rc.Namespace)
	obj, err := storage.Create(ctx, &rc)
	if err != nil {
		t.Errorf("Failed to create controller, %v", err)
	}
	newRc := obj.(*api.ReplicationController)
	return *newRc, nil
}
Example #26
// this pod may be out of sync with respect to the API server registry:
//      this pod   |  apiserver registry
//    -------------|----------------------
//      host=.*    |  404           ; pod was deleted
//      host=.*    |  5xx           ; failed to sync, try again later?
//      host=""    |  host=""       ; perhaps no updates to process?
//      host=""    |  host="..."    ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?)
//      host="..." |  host=""       ; pod is no longer scheduled, does it need to be re-queued?
//      host="..." |  host="..."    ; perhaps no updates to process?
//
// TODO(jdef) this needs an integration test
func (s *schedulingPlugin) reconcilePod(oldPod api.Pod) {
	log.V(1).Infof("reconcile pod %v", oldPod.Name)
	ctx := api.WithNamespace(api.NewDefaultContext(), oldPod.Namespace)
	pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(oldPod.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// attempt to delete
			if err = s.deleter.deleteOne(&Pod{Pod: &oldPod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr {
				log.Errorf("failed to delete pod: %v: %v", oldPod.Name, err)
			}
		} else {
			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
			//For now, drop the pod on the floor
			log.Warning("aborting reconciliation for pod %v: %v", oldPod.Name, err)
		}
		return
	}
	if oldPod.Spec.NodeName != pod.Spec.NodeName {
		if pod.Spec.NodeName == "" {
			// pod is unscheduled.
			// it's possible that we dropped the pod in the scheduler error handler
			// because of task misalignment with the pod (task.Has(podtask.Launched) == true)

			podKey, err := podtask.MakePodKey(ctx, pod.Name)
			if err != nil {
				log.Error(err)
				return
			}

			s.api.Lock()
			defer s.api.Unlock()

			if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown {
				//TODO(jdef) reconcile the task
				log.Errorf("task already registered for pod %v", pod.Name)
				return
			}

			now := time.Now()
			log.V(3).Infof("reoffering pod %v", podKey)
			s.qr.reoffer(&Pod{
				Pod:      pod,
				deadline: &now,
			})
		} else {
			// pod is scheduled.
			// not sure how this happened behind our backs. attempt to reconstruct
			// at least a partial podtask.T record.
			//TODO(jdef) reconcile the task
			log.Errorf("pod already scheduled: %v", pod.Name)
		}
	} else {
		//TODO(jdef) for now, ignore the fact that the rest of the spec may be different
		//and assume that our knowledge of the pod aligns with that of the apiserver
		log.Error("pod reconciliation does not support updates; not yet implemented")
	}
}
Example #27
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {

	if schedulingErr == noSuchPodErr {
		log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
		return
	}

	log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
	defer util.HandleCrash()

	// default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
		return
	}

	k.backoff.GC()
	k.api.Lock()
	defer k.api.Unlock()

	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// if we don't have a mapping here any more then someone deleted the pod
		log.V(2).Infof("Could not resolve pod to task, aborting pod reschdule: %s", podKey)
		return

	case podtask.StatePending:
		if task.Has(podtask.Launched) {
			log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
			return
		}
		breakoutEarly := queue.BreakChan(nil)
		if schedulingErr == noSuitableOffersErr {
			log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
			breakoutEarly = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
				k.api.Lock()
				defer k.api.Unlock()
				switch task, state := k.api.tasks().Get(task.ID); state {
				case podtask.StatePending:
					return !task.Has(podtask.Launched) && task.AcceptOffer(offer)
				default:
					// no point in continuing to check for matching offers
					return true
				}
			}))
		}
		delay := k.backoff.Get(podKey)
		log.V(3).Infof("requeuing pod %v with delay %v", podKey, delay)
		k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: breakoutEarly})

	default:
		log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
	}
}
Example #28
func (r *RedirectHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	var verb string
	var apiResource string
	var httpCode int
	reqStart := time.Now()
	defer monitor("redirect", &verb, &apiResource, &httpCode, reqStart)

	requestInfo, err := r.apiRequestInfoResolver.GetAPIRequestInfo(req)
	if err != nil {
		notFound(w, req)
		httpCode = http.StatusNotFound
		return
	}
	verb = requestInfo.Verb
	resource, parts := requestInfo.Resource, requestInfo.Parts
	ctx, ok := r.context.Get(req)
	if !ok {
		ctx = api.NewContext()
	}
	ctx = api.WithNamespace(ctx, requestInfo.Namespace)

	// redirection requires /resource/resourceName path parts
	if len(parts) != 2 || req.Method != "GET" {
		notFound(w, req)
		httpCode = http.StatusNotFound
		return
	}
	id := parts[1]
	storage, ok := r.storage[resource]
	if !ok {
		httplog.LogOf(req, w).Addf("'%v' has no storage object", resource)
		notFound(w, req)
		httpCode = http.StatusNotFound
		return
	}
	apiResource = resource

	redirector, ok := storage.(Redirector)
	if !ok {
		httplog.LogOf(req, w).Addf("'%v' is not a redirector", resource)
		httpCode = errorJSON(errors.NewMethodNotSupported(resource, "redirect"), r.codec, w)
		return
	}

	location, err := redirector.ResourceLocation(ctx, id)
	if err != nil {
		status := errToAPIStatus(err)
		writeJSON(status.Code, r.codec, status, w)
		httpCode = status.Code
		return
	}

	w.Header().Set("Location", fmt.Sprintf("http://%s", location))
	w.WriteHeader(http.StatusTemporaryRedirect)
	httpCode = http.StatusTemporaryRedirect
}
Example #29
// resolveImageStreamDockerRepository looks up the ImageStream[Tag/Image] and converts it to
// the docker repository reference with no tag information
func (g *BuildGenerator) resolveImageStreamDockerRepository(ctx kapi.Context, from kapi.ObjectReference, defaultNamespace string) (string, error) {
	namespace := defaultNamespace
	if len(from.Namespace) > 0 {
		namespace = from.Namespace
	}

	glog.V(4).Infof("Resolving ImageStreamReference %s of Kind %s in namespace %s", from.Name, from.Kind, namespace)
	switch from.Kind {
	case "ImageStreamImage":
		imageStreamImage, err := g.Client.GetImageStreamImage(kapi.WithNamespace(ctx, namespace), from.Name)
		if err != nil {
			glog.V(2).Infof("Error ImageStreamReference %s in namespace %s: %v", from.Name, namespace, err)
			if errors.IsNotFound(err) {
				return "", err
			}
			return "", fatalError{err}
		}
		image := imageStreamImage.Image
		glog.V(4).Infof("Resolved ImageStreamReference %s to image %s with reference %s in namespace %s", from.Name, image.Name, image.DockerImageReference, namespace)
		return image.DockerImageReference, nil
	case "ImageStreamTag":
		name := strings.Split(from.Name, ":")[0]
		is, err := g.Client.GetImageStream(kapi.WithNamespace(ctx, namespace), name)
		if err != nil {
			glog.V(2).Infof("Error getting ImageStream %s/%s: %v", namespace, name, err)
			if errors.IsNotFound(err) {
				return "", err
			}
			return "", fatalError{err}
		}
		image, err := imageapi.DockerImageReferenceForStream(is)
		if err != nil {
			glog.V(2).Infof("Error resolving Docker image reference for %s/%s: %v", namespace, name, err)
			return "", err
		}
		glog.V(4).Infof("Resolved ImageStreamTag %s/%s to repository %s", namespace, from.Name, image)
		return image.String(), nil
	case "DockerImage":
		return from.Name, nil
	default:
		return "", fatalError{fmt.Errorf("Unknown From Kind %s", from.Kind)}
	}
}
Example #30
func TestCreateValidationError(t *testing.T) {
	storage := makeTestStorage()
	roleBinding := &authorizationapi.RoleBinding{}

	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})
	_, err := storage.Create(ctx, roleBinding)
	if err == nil {
		t.Errorf("Expected validation error")
	}
}