func TestRESTUpdate(t *testing.T) { _, rest := NewTestREST() eventA := testEvent("foo") _, err := rest.Create(api.NewDefaultContext(), eventA) if err != nil { t.Fatalf("Unexpected error %v", err) } got, err := rest.Get(api.NewDefaultContext(), eventA.Name) if err != nil { t.Fatalf("Unexpected error %v", err) } if e, a := eventA, got; !reflect.DeepEqual(e, a) { t.Errorf("diff: %s", util.ObjectDiff(e, a)) } eventB := testEvent("bar") _, _, err = rest.Update(api.NewDefaultContext(), eventB) if err != nil { t.Fatalf("Unexpected error %v", err) } got2, err := rest.Get(api.NewDefaultContext(), eventB.Name) if err != nil { t.Fatalf("Unexpected error %v", err) } if e, a := eventB, got2; !reflect.DeepEqual(e, a) { t.Errorf("diff: %s", util.ObjectDiff(e, a)) } }
func TestDeletePod(t *testing.T) { storage, _, _, fakeClient := newStorage(t) fakeClient.ChangeIndex = 1 ctx := api.NewDefaultContext() key, _ := storage.Etcd.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, Spec: api.PodSpec{NodeName: "machine"}, }), ModifiedIndex: 1, CreatedIndex: 1, }, }, } _, err := storage.Delete(api.NewDefaultContext(), "foo", nil) if err != nil { t.Fatalf("unexpected error: %v", err) } }
// TestGetNodeAddresses verifies that proper results are returned // when requesting node addresses. func TestGetNodeAddresses(t *testing.T) { master, etcdserver, _, assert := setUp(t) defer etcdserver.Terminate(t) // Fail case (no addresses associated with nodes) nodes, _ := master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil) addrs, err := master.getNodeAddresses() assert.Error(err, "getNodeAddresses should have caused an error as there are no addresses.") assert.Equal([]string(nil), addrs) // Pass case with External type IP nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeExternalIP, Address: "127.0.0.1"}} } addrs, err = master.getNodeAddresses() assert.NoError(err, "getNodeAddresses should not have returned an error.") assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs) // Pass case with LegacyHost type IP nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil) for index := range nodes.Items { nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.2"}} } addrs, err = master.getNodeAddresses() assert.NoError(err, "getNodeAddresses failback should not have returned an error.") assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs) }
func TestInstantiateWithLastVersion(t *testing.T) { g := mockBuildGenerator() c := g.Client.(Client) c.GetBuildConfigFunc = func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) { bc := mocks.MockBuildConfig(mocks.MockSource(), mocks.MockSourceStrategyForImageRepository(), mocks.MockOutput()) bc.Status.LastVersion = 1 return bc, nil } g.Client = c // Version not specified _, err := g.Instantiate(kapi.NewDefaultContext(), &buildapi.BuildRequest{}) if err != nil { t.Errorf("Unexpected error %v", err) } // Version specified and it matches lastVersion := 1 _, err = g.Instantiate(kapi.NewDefaultContext(), &buildapi.BuildRequest{LastVersion: &lastVersion}) if err != nil { t.Errorf("Unexpected error %v", err) } // Version specified, but doesn't match lastVersion = 0 _, err = g.Instantiate(kapi.NewDefaultContext(), &buildapi.BuildRequest{LastVersion: &lastVersion}) if err == nil { t.Errorf("Expected an error and did not get one") } }
func TestInstantiateDeletingError(t *testing.T) { generator := BuildGenerator{Client: Client{ GetBuildConfigFunc: func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) { bc := &buildapi.BuildConfig{ ObjectMeta: kapi.ObjectMeta{ Annotations: map[string]string{ buildapi.BuildConfigPausedAnnotation: "true", }, }, } return bc, nil }, GetBuildFunc: func(ctx kapi.Context, name string) (*buildapi.Build, error) { build := &buildapi.Build{ Status: buildapi.BuildStatus{ Config: &kapi.ObjectReference{ Name: "buildconfig", }, }, } return build, nil }, }} _, err := generator.Instantiate(kapi.NewDefaultContext(), &buildapi.BuildRequest{}) if err == nil || !strings.Contains(err.Error(), "BuildConfig is paused") { t.Errorf("Expected error, got different %v", err) } _, err = generator.Clone(kapi.NewDefaultContext(), &buildapi.BuildRequest{}) if err == nil || !strings.Contains(err.Error(), "BuildConfig is paused") { t.Errorf("Expected error, got different %v", err) } }
func TestUpdateWithConflictingNamespace(t *testing.T) { storage, _, _, fakeClient := newStorage(t) ctx := api.NewDefaultContext() key, _ := storage.Etcd.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "default"}, Spec: api.PodSpec{NodeName: "machine"}, }), ModifiedIndex: 1, }, }, } pod := validChangedPod() pod.Namespace = "not-default" obj, created, err := storage.Update(api.NewDefaultContext(), pod) if obj != nil || created { t.Error("Expected a nil channel, but we got a value or created") } if err == nil { t.Errorf("Expected an error, but we didn't get one") } else if strings.Index(err.Error(), "the namespace of the provided object does not match the namespace sent on the request") == -1 { t.Errorf("Expected 'Pod.Namespace does not match the provided context' error, got '%v'", err.Error()) } }
func TestCreateImageStreamOK(t *testing.T) { _, helper := newHelper(t) storage, _ := NewREST(helper, noDefaultRegistry, &fakeSubjectAccessReviewRegistry{}) stream := &api.ImageStream{ObjectMeta: kapi.ObjectMeta{Name: "foo"}} ctx := kapi.WithUser(kapi.NewDefaultContext(), &fakeUser{}) _, err := storage.Create(ctx, stream) if err != nil { t.Fatalf("Unexpected non-nil error: %#v", err) } actual := &api.ImageStream{} if err := helper.Get(kapi.NewDefaultContext(), "/imagestreams/default/foo", actual, false); err != nil { t.Fatalf("unexpected extraction error: %v", err) } if actual.Name != stream.Name { t.Errorf("unexpected stream: %#v", actual) } if len(actual.UID) == 0 { t.Errorf("expected stream UID to be set: %#v", actual) } if stream.CreationTimestamp.IsZero() { t.Error("Unexpected zero CreationTimestamp") } if stream.Spec.DockerImageRepository != "" { t.Errorf("unexpected stream: %#v", stream) } }
func TestDeleteResourceQuota(t *testing.T) { fakeEtcdClient, etcdStorage := newEtcdStorage(t) fakeEtcdClient.ChangeIndex = 1 storage, _ := NewStorage(etcdStorage) ctx := api.NewDefaultContext() key, _ := storage.Etcd.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, Status: api.ResourceQuotaStatus{}, }), ModifiedIndex: 1, CreatedIndex: 1, }, }, } _, err := storage.Delete(api.NewDefaultContext(), "foo", nil) if err != nil { t.Fatalf("unexpected error: %v", err) } }
func TestNoOpUpdates(t *testing.T) { server, registry := NewTestGenericStoreRegistry(t) defer server.Terminate(t) newPod := func() *api.Pod { return &api.Pod{ ObjectMeta: api.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "foo", Labels: map[string]string{"prepare_create": "true"}, }, Spec: api.PodSpec{NodeName: "machine"}, } } var err error var createResult runtime.Object if createResult, err = registry.Create(api.NewDefaultContext(), newPod()); err != nil { t.Fatalf("Unexpected error: %v", err) } createdPod, err := registry.Get(api.NewDefaultContext(), "foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } var updateResult runtime.Object p := newPod() if updateResult, _, err = registry.Update(api.NewDefaultContext(), p.Name, rest.DefaultUpdatedObjectInfo(p, api.Scheme)); err != nil { t.Fatalf("Unexpected error: %v", err) } // Check whether we do not return empty result on no-op update. if !reflect.DeepEqual(createResult, updateResult) { t.Errorf("no-op update should return a correct value, got: %#v", updateResult) } updatedPod, err := registry.Get(api.NewDefaultContext(), "foo") if err != nil { t.Fatalf("Unexpected error: %v", err) } createdMeta, err := meta.Accessor(createdPod) if err != nil { t.Fatalf("Unexpected error: %v", err) } updatedMeta, err := meta.Accessor(updatedPod) if err != nil { t.Fatalf("Unexpected error: %v", err) } if createdMeta.GetResourceVersion() != updatedMeta.GetResourceVersion() { t.Errorf("no-op update should be ignored and not written to etcd") } }
func TestServiceRegistryIPReallocation(t *testing.T) { storage, _ := NewTestREST(t, nil) svc1 := &api.Service{ ObjectMeta: api.ObjectMeta{Name: "foo"}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, SessionAffinity: api.ServiceAffinityNone, Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{{ Port: 6502, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(6502), }}, }, } ctx := api.NewDefaultContext() created_svc1, _ := storage.Create(ctx, svc1) created_service_1 := created_svc1.(*api.Service) if created_service_1.Name != "foo" { t.Errorf("Expected foo, but got %v", created_service_1.Name) } if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) { t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP) } _, err := storage.Delete(ctx, created_service_1.Name) if err != nil { t.Errorf("Unexpected error deleting service: %v", err) } svc2 := &api.Service{ ObjectMeta: api.ObjectMeta{Name: "bar"}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, SessionAffinity: api.ServiceAffinityNone, Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{{ Port: 6502, Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(6502), }}, }, } ctx = api.NewDefaultContext() created_svc2, _ := storage.Create(ctx, svc2) created_service_2 := created_svc2.(*api.Service) if created_service_2.Name != "bar" { t.Errorf("Expected bar, but got %v", created_service_2.Name) } if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) { t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP) } }
func TestRESTDelete(t *testing.T) { _, rest := NewTestREST() eventA := testEvent("foo") _, err := rest.Create(api.NewDefaultContext(), eventA) if err != nil { t.Fatalf("Unexpected error %v", err) } c, err := rest.Delete(api.NewDefaultContext(), eventA.Name) if err != nil { t.Fatalf("Unexpected error %v", err) } if stat := c.(*api.Status); stat.Status != api.StatusSuccess { t.Errorf("unexpected status: %v", stat) } }
func TestNamespaceStatusStrategy(t *testing.T) { ctx := api.NewDefaultContext() if StatusStrategy.NamespaceScoped() { t.Errorf("Namespaces should not be namespace scoped") } if StatusStrategy.AllowCreateOnUpdate() { t.Errorf("Namespaces should not allow create on update") } now := unversioned.Now() oldNamespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10", DeletionTimestamp: &now}, Spec: api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes"}}, Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } namespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now}, Status: api.NamespaceStatus{Phase: api.NamespaceTerminating}, } StatusStrategy.PrepareForUpdate(ctx, namespace, oldNamespace) if namespace.Status.Phase != api.NamespaceTerminating { t.Errorf("Namespace status updates should allow change of phase: %v", namespace.Status.Phase) } if len(namespace.Spec.Finalizers) != 1 || namespace.Spec.Finalizers[0] != api.FinalizerKubernetes { t.Errorf("PrepareForUpdate should have preserved old finalizers") } errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace) if len(errs) != 0 { t.Errorf("Unexpected error %v", errs) } if namespace.ResourceVersion != "9" { t.Errorf("Incoming resource version on update should not be mutated") } }
func TestEtcdCreateWithExistingContainers(t *testing.T) { storage, bindingStorage, _, server := newStorage(t) defer server.Terminate(t) ctx := api.NewDefaultContext() key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) } // Suddenly, a wild scheduler appears: _, err = bindingStorage.Create(ctx, &api.Binding{ ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine"}, }) if err != nil { t.Fatalf("unexpected error: %v", err) } _, err = storage.Get(ctx, "foo") if err != nil { t.Fatalf("Unexpected error %v", err) } }
func TestEtcdCreateWithContainersNotFound(t *testing.T) { storage, bindingStorage, _, server := newStorage(t) defer server.Terminate(t) ctx := api.NewDefaultContext() key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) } // Suddenly, a wild scheduler appears: _, err = bindingStorage.Create(ctx, &api.Binding{ ObjectMeta: api.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "foo", Annotations: map[string]string{"label1": "value1"}, }, Target: api.ObjectReference{Name: "machine"}, }) if err != nil { t.Fatalf("unexpected error: %v", err) } obj, err := storage.Get(ctx, "foo") if err != nil { t.Fatalf("Unexpected error %v", err) } pod := obj.(*api.Pod) if !(pod.Annotations != nil && pod.Annotations["label1"] == "value1") { t.Fatalf("Pod annotations don't match the expected: %v", pod.Annotations) } }
func TestDelete(t *testing.T) { ctx := api.NewDefaultContext() storage, fakeEtcdClient, _ := newStorage(t) test := resttest.New(t, storage, fakeEtcdClient.SetError) rsrc := validNewThirdPartyResource("foo2") key, _ := storage.KeyFunc(ctx, "foo2") key = etcdtest.AddPrefix(key) createFn := func() runtime.Object { fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(testapi.Codec(), rsrc), ModifiedIndex: 1, }, }, } return rsrc } gracefulSetFn := func() bool { if fakeEtcdClient.Data[key].R.Node == nil { return false } return fakeEtcdClient.Data[key].R.Node.TTL == 30 } test.TestDeleteNoGraceful(createFn, gracefulSetFn) }
func TestDelete(t *testing.T) { ctx := api.NewDefaultContext() storage, fakeClient := newStorage(t) test := resttest.New(t, storage, fakeClient.SetError) key, _ := storage.KeyFunc(ctx, validDeployment.Name) key = etcdtest.AddPrefix(key) createFn := func() runtime.Object { dc := validNewDeployment() dc.ResourceVersion = "1" fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(testapi.Codec(), dc), ModifiedIndex: 1, }, }, } return dc } gracefulSetFn := func() bool { // If the deployment is still around after trying to delete either the delete // failed, or we're deleting it gracefully. if fakeClient.Data[key].R.Node != nil { return true } return false } test.TestDelete(createFn, gracefulSetFn) }
// Ensure that when scheduler creates a binding for a pod that has already been deleted // by the API server, API server returns not-found error. func TestEtcdCreateBindingNoPod(t *testing.T) { storage, bindingStorage, _, server := newStorage(t) defer server.Terminate(t) ctx := api.NewDefaultContext() key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) // Assume that a pod has undergone the following: // - Create (apiserver) // - Schedule (scheduler) // - Delete (apiserver) _, err := bindingStorage.Create(ctx, &api.Binding{ ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "foo"}, Target: api.ObjectReference{Name: "machine"}, }) if err == nil { t.Fatalf("Expected not-found-error but got nothing") } if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) { t.Fatalf("Unexpected error returned: %#v", err) } _, err = storage.Get(ctx, "foo") if err == nil { t.Fatalf("Expected not-found-error but got nothing") } if !errors.IsNotFound(etcderrors.InterpretGetError(err, "Pod", "foo")) { t.Fatalf("Unexpected error: %v", err) } }
// reconciler action factory, performs explicit task reconciliation for non-terminal // tasks identified by annotations in the Kubernetes pod registry. func (k *KubernetesScheduler) makePodRegistryReconciler() ReconcilerAction { return ReconcilerAction(func(drv bindings.SchedulerDriver, cancel <-chan struct{}) <-chan error { ctx := api.NewDefaultContext() podList, err := k.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything()) if err != nil { return proc.ErrorChanf("failed to reconcile pod registry: %v", err) } taskToSlave := make(map[string]string) for _, pod := range podList.Items { if len(pod.Annotations) == 0 { continue } taskId, found := pod.Annotations[meta.TaskIdKey] if !found { continue } slaveId, found := pod.Annotations[meta.SlaveIdKey] if !found { continue } taskToSlave[taskId] = slaveId } return proc.ErrorChan(k.explicitlyReconcileTasks(drv, taskToSlave, cancel)) }) }
func TestEtcdCreateWithConflict(t *testing.T) { storage, bindingStorage, _, server := newStorage(t) defer server.Terminate(t) ctx := api.NewDefaultContext() _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) } // Suddenly, a wild scheduler appears: binding := api.Binding{ ObjectMeta: api.ObjectMeta{ Namespace: api.NamespaceDefault, Name: "foo", Annotations: map[string]string{"label1": "value1"}, }, Target: api.ObjectReference{Name: "machine"}, } _, err = bindingStorage.Create(ctx, &binding) if err != nil { t.Fatalf("unexpected error: %v", err) } _, err = bindingStorage.Create(ctx, &binding) if err == nil || !errors.IsConflict(err) { t.Fatalf("expected resource conflict error, not: %v", err) } }
// recoverTasks rebuilds the scheduler's in-memory task registry from the pod
// registry (e.g. after a failover). Pods that yield a recoverable task are
// re-registered along with their slave; pods whose task state cannot be
// recovered are deleted from the apiserver.
func (ks *KubernetesScheduler) recoverTasks() error {
	ctx := api.NewDefaultContext()
	// List all pods in the default namespace; each may embed task state.
	podList, err := ks.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	// recoverSlave re-associates the task's slave with the scheduler's
	// slave bookkeeping.
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaves.checkAndAdd(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			// Unrecoverable task state: delete the orphaned pod. Deletion
			// errors are logged but do not abort the overall recovery.
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			// Recovered: register the task (no finished callback) and its slave.
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
		// ok == false with a nil error means the pod carries no task state;
		// it is skipped silently.
	}
	return nil
}
func TestEtcdUpdateNotScheduled(t *testing.T) { storage, _, _, server := newStorage(t) defer server.Terminate(t) ctx := api.NewDefaultContext() key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) if _, err := storage.Create(ctx, validNewPod()); err != nil { t.Fatalf("unexpected error: %v", err) } podIn := validChangedPod() _, _, err := storage.Update(ctx, podIn) if err != nil { t.Errorf("Unexpected error: %v", err) } obj, err := storage.Get(ctx, validNewPod().ObjectMeta.Name) if err != nil { t.Errorf("unexpected error: %v", err) } podOut := obj.(*api.Pod) // validChangedPod only changes the Labels, so were checking the update was valid if !api.Semantic.DeepEqual(podIn.Labels, podOut.Labels) { t.Errorf("objects differ: %v", util.ObjectDiff(podOut, podIn)) } }
func TestListPopulatedList(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.ChangeIndex = 1 fakeEtcdClient.Data["/images"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Nodes: []*etcd.Node{ {Value: runtime.EncodeOrDie(latest.Codec, &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "foo"}})}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Image{ObjectMeta: kapi.ObjectMeta{Name: "bar"}})}, }, }, }, } storage := NewREST(helper) list, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything()) if err != nil { t.Errorf("Unexpected non-nil error: %#v", err) } images := list.(*api.ImageList) if e, a := 2, len(images.Items); e != a { t.Errorf("Expected %v, got %v", e, a) } }
func TestCreateInstantiateValidationError(t *testing.T) { rest := InstantiateREST{&generator.BuildGenerator{}} _, err := rest.Create(kapi.NewDefaultContext(), &buildapi.BuildRequest{}) if err == nil { t.Error("Expected object got none!") } }
func TestGetOK(t *testing.T) { fakeEtcdClient, helper := newHelper(t) expectedImage := &api.Image{ ObjectMeta: kapi.ObjectMeta{Name: "foo"}, DockerImageReference: "openshift/ruby-19-centos", } fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, expectedImage), }, }, } storage := NewREST(helper) image, err := storage.Get(kapi.NewDefaultContext(), "foo") if image == nil { t.Fatal("Unexpected nil image") } if err != nil { t.Fatal("Unexpected non-nil error", err) } if image.(*api.Image).Name != "foo" { t.Errorf("Unexpected image: %#v", image) } }
func TestEtcdWatchResourceQuotas(t *testing.T) { storage, _, fakeClient := newStorage(t) ctx := api.NewDefaultContext() watching, err := storage.Watch(ctx, labels.Everything(), fields.Everything(), "1", ) if err != nil { t.Fatalf("unexpected error: %v", err) } fakeClient.WaitForWatchCompletion() select { case _, ok := <-watching.ResultChan(): if !ok { t.Errorf("watching channel should be open") } default: } fakeClient.WatchInjectError <- nil if _, ok := <-watching.ResultChan(); ok { t.Errorf("watching channel should be closed") } watching.Stop() }
func TestDelete(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Image{}), }, }, } storage := NewREST(helper) obj, err := storage.Delete(kapi.NewDefaultContext(), "foo", nil) if obj == nil { t.Error("Unexpected nil obj") } if err != nil { t.Errorf("Unexpected non-nil error: %#v", err) } status, ok := obj.(*kapi.Status) if !ok { t.Fatalf("Expected status type, got: %#v", obj) } if status.Status != kapi.StatusSuccess { t.Errorf("Expected status=success, got: %#v", status) } if len(fakeEtcdClient.DeletedKeys) != 1 { t.Errorf("Expected 1 delete, found %#v", fakeEtcdClient.DeletedKeys) } else if key := "/images/foo"; fakeEtcdClient.DeletedKeys[0] != key { t.Errorf("Unexpected key: %s, expected %s", fakeEtcdClient.DeletedKeys[0], key) } }
func TestNamespaceFinalizeStrategy(t *testing.T) { ctx := api.NewDefaultContext() if FinalizeStrategy.NamespaceScoped() { t.Errorf("Namespaces should not be namespace scoped") } if FinalizeStrategy.AllowCreateOnUpdate() { t.Errorf("Namespaces should not allow create on update") } oldNamespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}, Spec: api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes", "example.com/org"}}, Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } namespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9"}, Spec: api.NamespaceSpec{Finalizers: []api.FinalizerName{"example.com/foo"}}, Status: api.NamespaceStatus{Phase: api.NamespaceTerminating}, } FinalizeStrategy.PrepareForUpdate(ctx, namespace, oldNamespace) if namespace.Status.Phase != api.NamespaceActive { t.Errorf("finalize updates should not allow change of phase: %v", namespace.Status.Phase) } if len(namespace.Spec.Finalizers) != 1 || string(namespace.Spec.Finalizers[0]) != "example.com/foo" { t.Errorf("PrepareForUpdate should have modified finalizers") } errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace) if len(errs) != 0 { t.Errorf("Unexpected error %v", errs) } if namespace.ResourceVersion != "9" { t.Errorf("Incoming resource version on update should not be mutated") } }
// createSchedulerServiceIfNeeded will create the specified service if it // doesn't already exist. func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error { ctx := api.NewDefaultContext() if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil { // The service already exists. return nil } svc := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, Namespace: api.NamespaceDefault, Labels: map[string]string{"provider": "k8sm", "component": "scheduler"}, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, // maintained by this code, not by the pod selector Selector: nil, SessionAffinity: api.ServiceAffinityNone, }, } if m.ServiceAddress != nil { svc.Spec.ClusterIP = m.ServiceAddress.String() } _, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
// TestRegistryResourceLocation tests if proper resource location URL is returner // for different build states. // Note: For this test, the mocked pod is set to "Running" phase, so the test // is evaluating the outcome based only on build state. func TestRegistryResourceLocation(t *testing.T) { expectedLocations := map[api.BuildPhase]string{ api.BuildPhaseComplete: fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault), api.BuildPhaseFailed: fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault), api.BuildPhaseRunning: fmt.Sprintf("https://foo-host:12345/containerLogs/%s/running-build/foo-container", kapi.NamespaceDefault), api.BuildPhaseNew: "", api.BuildPhasePending: "", api.BuildPhaseError: "", api.BuildPhaseCancelled: "", } ctx := kapi.NewDefaultContext() for BuildPhase, expectedLocation := range expectedLocations { location, err := resourceLocationHelper(BuildPhase, "running", ctx) switch BuildPhase { case api.BuildPhaseError, api.BuildPhaseCancelled: if err == nil { t.Errorf("Expected error when Build is in %s state, got nothing", BuildPhase) } default: if err != nil { t.Errorf("Unexpected error: %v", err) } } if location != expectedLocation { t.Errorf("Status: %s Expected Location: %s, Got %s", BuildPhase, expectedLocation, location) } } }
// setEndpoints sets the endpoints for the given service.
// in a multi-master scenario only the master will be publishing an endpoint.
// see SchedulerServer.bootstrap.
func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) error {
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
	}}
	ctx := api.NewDefaultContext()
	// Fetch the current endpoints object first so we can decide between
	// Update (object exists) and Create (not found) before writing.
	e, err := m.client.Endpoints(api.NamespaceValue(ctx)).Get(serviceName)
	createOrUpdate := m.client.Endpoints(api.NamespaceValue(ctx)).Update
	if err != nil {
		if errors.IsNotFound(err) {
			createOrUpdate = m.client.Endpoints(api.NamespaceValue(ctx)).Create
		}
		// NOTE(review): a non-NotFound Get error also falls through here and
		// is masked by retrying with a fresh object via Update — confirm this
		// is intended.
		e = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:      serviceName,
				Namespace: api.NamespaceDefault,
			},
		}
	}
	// Only write when the stored subsets differ from the desired ones.
	if !reflect.DeepEqual(e.Subsets, want) {
		e.Subsets = want
		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
		_, err = createOrUpdate(e)
		return err
	}
	// We didn't make any changes, no need to actually call update.
	return nil
}