// Next imports tags for the given image stream if it needs an import.
// Transient (retry-able) errors are returned so the RetryController requeues
// the stream; on full success the stream is marked done.
func (c *ImportController) Next(stream *api.ImageStream) error {
	if !needsImport(stream) {
		return nil
	}
	// The insecure annotation allows connecting to registries without TLS verification.
	insecure := stream.Annotations[api.InsecureRepositoryAnnotation] == "true"
	client := c.client
	if client == nil {
		// No injected client (production path): create a default registry client.
		client = dockerregistry.NewClient()
	}
	toImport, err := getTags(stream, client, insecure)
	// return here, only if there is an error and nothing to import
	if err != nil && len(toImport) == 0 {
		return err
	}
	errs := c.importTags(stream, toImport, client, insecure)
	// one of retry-able error happened, we need to inform the RetryController
	// the import should be retried by returning error
	if len(errs) > 0 {
		return kerrors.NewAggregate(errs)
	}
	// getTags produced an error but some tags were still importable; now that
	// the imports succeeded, surface the original error for a retry.
	if err != nil {
		return err
	}
	// Everything imported cleanly: record success on the stream.
	return c.done(stream, "", retryCount)
}
func TestRegistryClientConnect(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect("docker.io", false) if err != nil { t.Fatal(err) } for _, s := range []string{"index.docker.io", "https://docker.io", "https://index.docker.io"} { otherConn, err := c.Connect(s, false) if err != nil { t.Errorf("%s: can't connect: %v", s, err) continue } if !reflect.DeepEqual(otherConn, conn) { t.Errorf("%s: did not reuse connection: %#v %#v", s, conn, otherConn) } } otherConn, err := c.Connect("index.docker.io:443", false) if err != nil || reflect.DeepEqual(otherConn, conn) { t.Errorf("should not have reused index.docker.io:443: %v", err) } if _, err := c.Connect("http://ba%3/", false); err == nil { t.Error("Unexpected non-error") } }
// PrepareAppConfig sets fields in config appropriate for running tests. It // returns two buffers bound to stdout and stderr. func PrepareAppConfig(config *cmd.AppConfig) (stdout, stderr *bytes.Buffer) { config.ExpectToBuild = true stdout, stderr = new(bytes.Buffer), new(bytes.Buffer) config.Out, config.ErrOut = stdout, stderr config.Detector = app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), } config.DockerSearcher = app.DockerRegistrySearcher{ Client: dockerregistry.NewClient(10*time.Second, true), } config.ImageStreamByAnnotationSearcher = fakeImageStreamSearcher() config.ImageStreamSearcher = fakeImageStreamSearcher() config.OriginNamespace = "default" config.OSClient = &client.Fake{} config.RefBuilder = &app.ReferenceBuilder{} config.TemplateSearcher = app.TemplateSearcher{ Client: &client.Fake{}, TemplateConfigsNamespacer: &client.Fake{}, Namespaces: []string{"openshift", "default"}, } config.Typer = kapi.Scheme return }
func TestRegistryClientImage(t *testing.T) { for _, v2 := range []bool{true, false} { host := "index.docker.io" if !v2 { host = "registry.hub.docker.com" } conn, err := dockerregistry.NewClient().Connect(host, false) if err != nil { t.Fatal(err) } if _, err := conn.ImageByTag("openshift", "origin-not-found", "latest"); !dockerregistry.IsRepositoryNotFound(err) && !dockerregistry.IsTagNotFound(err) { t.Errorf("V2=%t: unexpected error: %v", v2, err) } image, err := conn.ImageByTag("openshift", "origin", "latest") if err != nil { t.Fatalf("V2=%t: %v", v2, err) } if len(image.ContainerConfig.Entrypoint) == 0 { t.Errorf("V2=%t: unexpected image: %#v", v2, image) } if v2 && !image.PullByID { t.Errorf("V2=%t: should be able to pull by ID %s", v2, image.ID) } other, err := conn.ImageByID("openshift", "origin", image.ID) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(other.ContainerConfig.Entrypoint, image.ContainerConfig.Entrypoint) { t.Errorf("V2=%t: unexpected image: %#v", v2, other) } } }
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	// Watch image streams in all namespaces so any stream needing an import
	// gets queued.
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	// The reflector re-lists every 2 minutes, feeding the FIFO the controller
	// drains below.
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()
	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}
	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			// Retry a failed stream at most 5 times before dropping it.
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			// Rate-limit retries to 1/sec with a burst of 10.
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
func TestRegistryClientV2DockerHub(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect("index.docker.io", false) if err != nil { t.Fatal(err) } image, err := conn.ImageByTag("kubernetes", "guestbook", "latest") // The V2 docker hub registry seems to have a bug for this repo, should eventually get fixed if !dockerregistry.IsTagNotFound(err) { t.Fatalf("unexpected error: %v", err) } // a v1 only path conn, err = c.Connect("registry.hub.docker.com", false) if err != nil { t.Fatal(err) } image, err = conn.ImageByTag("kubernetes", "guestbook", "latest") if err != nil { t.Fatalf("unable to retrieve image info: %v", err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
func TestNewBuildEnvVars(t *testing.T) { dockerSearcher := app.DockerRegistrySearcher{ Client: dockerregistry.NewClient(), } tests := []struct { name string config *AppConfig expected []kapi.EnvVar expectedErr error }{ { name: "explicit environment variables for buildConfig and deploymentConfig", config: &AppConfig{ AddEnvironmentToBuild: true, SourceRepositories: util.StringList([]string{"https://github.com/openshift/ruby-hello-world"}), DockerImages: util.StringList([]string{"openshift/ruby-20-centos7", "openshift/mongodb-24-centos7"}), OutputDocker: true, Environment: util.StringList([]string{"BUILD_ENV_1=env_value_1", "BUILD_ENV_2=env_value_2"}), dockerSearcher: dockerSearcher, detector: app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), }, typer: kapi.Scheme, osclient: &client.Fake{}, originNamespace: "default", }, expected: []kapi.EnvVar{ {Name: "BUILD_ENV_1", Value: "env_value_1"}, {Name: "BUILD_ENV_2", Value: "env_value_2"}, }, expectedErr: nil, }, } for _, test := range tests { test.config.refBuilder = &app.ReferenceBuilder{} test.config.Out, test.config.ErrOut = os.Stdout, os.Stderr res, err := test.config.RunBuilds() if err != test.expectedErr { t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err) continue } got := []kapi.EnvVar{} for _, obj := range res.List.Items { switch tp := obj.(type) { case *buildapi.BuildConfig: got = tp.Spec.Strategy.SourceStrategy.Env break } } if !reflect.DeepEqual(test.expected, got) { t.Errorf("%s: unexpected output. Expected: %#v, Got: %#v", test.name, test.expected, got) continue } } }
func TestRegistryClientRegistryNotFound(t *testing.T) { conn, err := dockerregistry.NewClient().Connect("localhost:65000", false) if err != nil { t.Fatal(err) } if _, err := conn.ImageByID("foo", "bar", "baz"); !dockerregistry.IsRegistryNotFound(err) { t.Error(err) } }
func TestRegistryClientQuayIOImage(t *testing.T) { conn, err := dockerregistry.NewClient().Connect("quay.io", false) if err != nil { t.Fatal(err) } if _, err := conn.ImageByTag("coreos", "etcd", "latest"); err != nil { t.Errorf("unexpected error: %v", err) } }
func TestNewAppDetectSource(t *testing.T) { skipExternalGit(t) gitLocalDir := createLocalGitDirectory(t) defer os.RemoveAll(gitLocalDir) dockerSearcher := app.DockerRegistrySearcher{ Client: dockerregistry.NewClient(10*time.Second, true), } mocks := MockSourceRepositories(t, gitLocalDir) tests := []struct { name string cfg *cmd.AppConfig repositories []*app.SourceRepository expectedLang string expectedErr string }{ { name: "detect source - ruby", cfg: &cmd.AppConfig{ Detector: app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), }, DockerSearcher: dockerSearcher, }, repositories: []*app.SourceRepository{mocks[0]}, expectedLang: "ruby", expectedErr: "", }, } for _, test := range tests { err := test.cfg.DetectSource(test.repositories) if err != nil { if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s: Invalid error: Expected %s, got %v", test.name, test.expectedErr, err) } } else if len(test.expectedErr) != 0 { t.Errorf("%s: Expected %s error but got none", test.name, test.expectedErr) } for _, repo := range test.repositories { info := repo.Info() if info == nil { t.Errorf("%s: expected repository info to be populated; it is nil", test.name) continue } if term := strings.Join(info.Terms(), ","); term != test.expectedLang { t.Errorf("%s: expected repository info term to be %s; got %s\n", test.name, test.expectedLang, term) } } } }
// NewAppConfig returns a new AppConfig, but you must set your typer, mapper, and clientMapper after the command has been run // and flags have been parsed. func NewAppConfig() *AppConfig { dockerSearcher := app.DockerRegistrySearcher{ Client: dockerregistry.NewClient(), } return &AppConfig{ detector: app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), }, dockerSearcher: dockerSearcher, refBuilder: &app.ReferenceBuilder{}, } }
func TestRegistryClientQuayIOImage(t *testing.T) { for _, v2 := range []bool{true, false} { conn, err := dockerregistry.NewClient().Connect("quay.io", false, v2) if err != nil { t.Fatal(err) } _, err = conn.ImageByTag("coreos", "etcd", "latest") if err != nil { t.Errorf("v2=%t: unexpected error: %v", v2, err) } } }
func TestRegistryClientQuayIOImage(t *testing.T) { conn, err := dockerregistry.NewClient().Connect("quay.io", false) if err != nil { t.Fatal(err) } err = retryWhenUnreachable(t, func() error { _, err := conn.ImageByTag("coreos", "etcd", "latest") return err }) if err != nil { t.Skip("SKIPPING: unexpected error from quay.io: %v", err) } }
func TestRegistryClientConnectPulpRegistry(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect("registry.access.redhat.com", false) if err != nil { t.Fatal(err) } image, err := conn.ImageByTag("library", "rhel", "latest") if err != nil { t.Fatalf("unable to retrieve image info: %v", err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
func TestDetectSource(t *testing.T) { dockerResolver := app.DockerRegistryResolver{ Client: dockerregistry.NewClient(), } mocks := app.MockSourceRepositories() tests := []struct { name string cfg *AppConfig repositories []*app.SourceRepository expectedLang string expectedErr string }{ { name: "detect source - ruby", cfg: &AppConfig{ detector: app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), }, dockerResolver: dockerResolver, }, repositories: []*app.SourceRepository{mocks[1]}, expectedLang: "ruby", expectedErr: "", }, } for _, test := range tests { err := test.cfg.detectSource(test.repositories) if err != nil { if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s: Invalid error: Expected %s, got %v", test.name, test.expectedErr, err) } } else if len(test.expectedErr) != 0 { t.Errorf("%s: Expected %s error but got none", test.name, test.expectedErr) } for _, repo := range test.repositories { info := repo.Info() if info == nil { t.Errorf("%s: expected repository info to be populated; it is nil", test.name) continue } if term := strings.Join(info.Terms(), ","); term != test.expectedLang { t.Errorf("%s: expected repository info term to be %s; got %s\n", test.name, test.expectedLang, term) } } } }
// NewAppConfig returns a new AppConfig func NewAppConfig(typer runtime.ObjectTyper, mapper meta.RESTMapper, clientMapper resource.ClientMapper) *AppConfig { dockerSearcher := app.DockerRegistrySearcher{ Client: dockerregistry.NewClient(), } return &AppConfig{ detector: app.SourceRepositoryEnumerator{ Detectors: source.DefaultDetectors, Tester: dockerfile.NewTester(), }, dockerSearcher: dockerSearcher, typer: typer, mapper: mapper, clientMapper: clientMapper, refBuilder: &app.ReferenceBuilder{}, } }
func TestDockerV1Fallback(t *testing.T) { var uri *url.URL server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Docker-Endpoints", uri.Host) // get all tags if strings.HasSuffix(r.URL.Path, "/tags") { fmt.Fprintln(w, `{"tag1":"image1", "test":"image2"}`) w.WriteHeader(http.StatusOK) return } if strings.HasSuffix(r.URL.Path, "/images") { fmt.Fprintln(w, `{"tag1":"image1", "test":"image2"}`) w.WriteHeader(http.StatusOK) return } if strings.HasSuffix(r.URL.Path, "/json") { fmt.Fprintln(w, `{"ID":"image2"}`) w.WriteHeader(http.StatusOK) return } t.Logf("tried to access %s", r.URL.Path) w.WriteHeader(http.StatusNotFound) })) client := dockerregistry.NewClient(10*time.Second, false) ctx := gocontext.WithValue(gocontext.Background(), ContextKeyV1RegistryClient, client) uri, _ = url.Parse(server.URL) isi := &api.ImageStreamImport{ Spec: api.ImageStreamImportSpec{ Repository: &api.RepositoryImportSpec{ From: kapi.ObjectReference{Kind: "DockerImage", Name: uri.Host + "/test:test"}, ImportPolicy: api.TagImportPolicy{Insecure: true}, }, }, } retriever := &mockRetriever{err: fmt.Errorf("does not support v2 API")} im := NewImageStreamImporter(retriever, 5, nil) if err := im.Import(ctx, isi); err != nil { t.Fatal(err) } if images := isi.Status.Repository.Images; len(images) != 2 || images[0].Tag != "tag1" || images[1].Tag != "test" { t.Errorf("unexpected images: %#v", images) } }
func doTestRegistryClientImage(t *testing.T, registry, reponame, version string) { conn, err := dockerregistry.NewClient(10*time.Second, version == "v2").Connect(registry, false) if err != nil { t.Fatal(err) } err = retryWhenUnreachable(t, func() error { _, err := conn.ImageByTag("openshift", "origin-not-found", "latest") return err }) if err == nil || (!dockerregistry.IsRepositoryNotFound(err) && !dockerregistry.IsTagNotFound(err)) { t.Errorf("%s: unexpected error: %v", version, err) } var image *dockerregistry.Image err = retryWhenUnreachable(t, func() error { image, err = conn.ImageByTag("openshift", reponame, "latest") return err }) if err != nil { t.Fatal(err) } if image.Comment != "Imported from -" { t.Errorf("%s: unexpected image comment", version) } if image.Architecture != "amd64" { t.Errorf("%s: unexpected image architecture", version) } if version == "v2" && !image.PullByID { t.Errorf("%s: should be able to pull by ID %s", version, image.ID) } var other *dockerregistry.Image err = retryWhenUnreachable(t, func() error { other, err = conn.ImageByID("openshift", reponame, image.ID) return err }) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(other.ContainerConfig.Entrypoint, image.ContainerConfig.Entrypoint) { t.Errorf("%s: unexpected image: %#v", version, other) } }
func TestRegistryClientConnectPulpRegistry(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect(pulpRegistryName, false) if err != nil { t.Fatal(err) } var image *dockerregistry.Image err = retryWhenUnreachable(t, func() error { image, err = conn.ImageByTag("library", "rhel", "latest") return err }) if err != nil { t.Fatal(err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
func TestRegistryClientDockerHubV2(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect(dockerHubV2RegistryName, false) if err != nil { t.Fatal(err) } var image *dockerregistry.Image err = retryWhenUnreachable(t, func() error { image, err = conn.ImageByTag("kubernetes", "guestbook", "latest") return err }) if err != nil { t.Fatal(err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
func TestRegistryClientConnectPulpRegistry(t *testing.T) { c := dockerregistry.NewClient(10 * time.Second) conn, err := c.Connect(pulpRegistryName, false) if err != nil { t.Fatal(err) } var image *dockerregistry.Image err = retryWhenUnreachable(t, func() error { image, err = conn.ImageByTag("library", "rhel", "latest") return err }, imageNotFoundErrorPatterns...) if err != nil { if strings.Contains(err.Error(), "x509: certificate has expired or is not yet valid") { t.Skip("SKIPPING: due to expired certificate of %s: %v", pulpRegistryName, err) } t.Fatal(err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
// Next processes the given image stream, looking for streams that have DockerImageRepository
// set but have not yet been marked as "ready". If transient errors occur, err is returned but
// the image stream is not modified (so it will be tried again later). If a permanent
// failure occurs the image is marked with an annotation. The tags of the original spec image
// are left as is (those are updated through status).
// There are 3 use cases here:
// 1. spec.DockerImageRepository defined without any tags results in all tags being imported
//    from upstream image repository
// 2. spec.DockerImageRepository + tags defined - import all tags from upstream image repository,
//    and all the specified which (if name matches) will overwrite the default ones.
//    Additionally:
//    for kind == DockerImage import or reference underlying image, iow. exact tag (not provided means latest),
//    for kind != DockerImage reference tag from the same or other ImageStream
// 3. spec.DockerImageRepository not defined - import tags per its definition.
// Current behavior of the controller is to process import as far as possible, but
// we still want to keep backwards compatibility and retries, for that we'll return
// error in the following cases:
// 1. connection failure to upstream image repository
// 2. reading tags when error is different from RepositoryNotFound or RegistryNotFound
// 3. image retrieving when error is different from RepositoryNotFound, RegistryNotFound or ImageNotFound
// 4. ImageStreamMapping save error
// 5. error when marking ImageStream as imported
func (c *ImportController) Next(stream *api.ImageStream) error {
	if !needsImport(stream) {
		return nil
	}
	glog.V(4).Infof("Importing stream %s/%s...", stream.Namespace, stream.Name)
	// The insecure annotation allows connecting to registries without TLS verification.
	insecure := stream.Annotations[api.InsecureRepositoryAnnotation] == "true"
	client := c.client
	if client == nil {
		// No injected client (production path): create one with a short timeout.
		client = dockerregistry.NewClient(5 * time.Second)
	}
	// errlist accumulates permanent (non-retryable) errors recorded on the stream.
	var errlist []error
	toImport, retry, err := getTags(stream, client, insecure)
	// return here, only if there is an error and nothing to import
	if err != nil && len(toImport) == 0 {
		if retry {
			// transient: let the RetryController requeue the stream
			return err
		}
		// permanent: record the failure on the stream and stop retrying
		return c.done(stream, err.Error(), retryCount)
	}
	if err != nil {
		errlist = append(errlist, err)
	}
	retry, err = c.importTags(stream, toImport, client, insecure)
	if err != nil {
		if retry {
			return err
		}
		errlist = append(errlist, err)
	}
	if len(errlist) > 0 {
		// record all permanent errors on the stream in one aggregate message
		return c.done(stream, kerrors.NewAggregate(errlist).Error(), retryCount)
	}
	return c.done(stream, "", retryCount)
}
func TestRegistryClientV2DockerHub(t *testing.T) { c := dockerregistry.NewClient() conn, err := c.Connect("index.docker.io", false) if err != nil { t.Fatal(err) } image, err := conn.ImageByTag("kubernetes", "guestbook", "latest") if err != nil { t.Fatalf("unexpected error: %v", err) } // a v1 only path conn, err = c.Connect("registry.hub.docker.com", false) if err != nil { t.Fatal(err) } image, err = conn.ImageByTag("kubernetes", "guestbook", "latest") if err != nil { t.Fatalf("unable to retrieve image info: %v", err) } if len(image.ID) == 0 { t.Fatalf("image had no ID: %#v", image) } }
// dockerRegistrySearcher builds a Searcher backed by the Docker registry
// client, honoring the config's insecure-registry setting.
func (c *AppConfig) dockerRegistrySearcher() app.Searcher {
	searcher := app.DockerRegistrySearcher{
		Client:        dockerregistry.NewClient(),
		AllowInsecure: c.InsecureRegistry,
	}
	return searcher
}
// Next processes the given image stream, looking for streams that have DockerImageRepository
// set but have not yet been marked as "ready". If transient errors occur, err is returned but
// the image stream is not modified (so it will be tried again later). If a permanent
// failure occurs the image is marked with an annotation. The tags of the original spec image
// are left as is (those are updated through status).
func (c *ImportController) Next(stream *api.ImageStream) error {
	if !needsImport(stream) {
		return nil
	}
	name := stream.Spec.DockerImageRepository
	ref, err := api.ParseDockerImageReference(name)
	if err != nil {
		// malformed repository reference: permanent failure, record and stop
		err = fmt.Errorf("invalid docker image repository, cannot import data: %v", err)
		util.HandleError(err)
		return c.done(stream, err.Error(), retryCount)
	}
	// The insecure annotation allows connecting to registries without TLS verification.
	insecure := stream.Annotations != nil && stream.Annotations[api.InsecureRepositoryAnnotation] == "true"
	client := c.client
	if client == nil {
		// No injected client (production path): create a default registry client.
		client = dockerregistry.NewClient()
	}
	conn, err := client.Connect(ref.Registry, insecure, false)
	if err != nil {
		// connection failure is transient: return error so the import is retried
		return err
	}
	tags, err := conn.ImageTags(ref.Namespace, ref.Name)
	switch {
	case dockerregistry.IsRepositoryNotFound(err), dockerregistry.IsRegistryNotFound(err):
		// permanent: record the failure on the stream and stop retrying
		return c.done(stream, err.Error(), retryCount)
	case err != nil:
		// any other error is treated as transient
		return err
	}
	// Invert tag->image into image->tags so each image is fetched only once.
	imageToTag := make(map[string][]string)
	for tag, image := range tags {
		if specTag, ok := stream.Spec.Tags[tag]; ok && specTag.From != nil {
			// spec tag is set to track another tag - do not import
			continue
		}
		imageToTag[image] = append(imageToTag[image], tag)
	}
	// no tags to import
	if len(imageToTag) == 0 {
		return c.done(stream, "", retryCount)
	}
	for id, tags := range imageToTag {
		dockerImage, err := conn.ImageByID(ref.Namespace, ref.Name, id)
		switch {
		case dockerregistry.IsRepositoryNotFound(err), dockerregistry.IsRegistryNotFound(err):
			return c.done(stream, err.Error(), retryCount)
		case dockerregistry.IsImageNotFound(err):
			// a single missing image should not abort the whole import
			continue
		case err != nil:
			return err
		}
		var image api.DockerImage
		if err := kapi.Scheme.Convert(&dockerImage.Image, &image); err != nil {
			err = fmt.Errorf("could not convert image: %#v", err)
			util.HandleError(err)
			return c.done(stream, err.Error(), retryCount)
		}
		idTagPresent := false
		if len(tags) > 1 && hasTag(tags, id) {
			// only set to true if we have at least 1 tag that isn't the image id
			idTagPresent = true
		}
		for _, tag := range tags {
			if idTagPresent && id == tag {
				continue
			}
			pullRef := api.DockerImageReference{
				Registry:  ref.Registry,
				Namespace: ref.Namespace,
				Name:      ref.Name,
				Tag:       tag,
			}
			// prefer to pull by ID always
			if dockerImage.PullByID {
				// if the registry indicates the image is pullable by ID, clear the tag
				pullRef.Tag = ""
				pullRef.ID = dockerImage.ID
			} else if idTagPresent {
				// if there is a tag for the image by its id (tag=tag), we can pull by id
				pullRef.Tag = id
			}
			mapping := &api.ImageStreamMapping{
				ObjectMeta: kapi.ObjectMeta{
					Name:      stream.Name,
					Namespace: stream.Namespace,
				},
				Tag: tag,
				Image: api.Image{
					ObjectMeta: kapi.ObjectMeta{
						Name: dockerImage.ID,
					},
					DockerImageReference: pullRef.String(),
					DockerImageMetadata:  image,
				},
			}
			if err := c.mappings.ImageStreamMappings(stream.Namespace).Create(mapping); err != nil {
				if errors.IsNotFound(err) {
					// stream was deleted underneath us: permanent, record and stop
					return c.done(stream, err.Error(), retryCount)
				}
				return err
			}
		}
	}
	// we've completed our updates
	return c.done(stream, "", retryCount)
}
// DockerRegistrySearcher builds a Searcher backed by the Docker registry
// client, honoring the config's insecure-registry setting.
func (c *AppConfig) DockerRegistrySearcher() app.Searcher {
	searcher := app.DockerRegistrySearcher{
		Client:        dockerregistry.NewClient(30*time.Second, true),
		AllowInsecure: c.InsecureRegistry,
	}
	return searcher
}
// TestRunBuild verifies that RunBuilds generates the expected BuildConfig and
// ImageStream names for a source repository plus explicit builder images.
func TestRunBuild(t *testing.T) {
	dockerResolver := app.DockerRegistryResolver{
		Client: dockerregistry.NewClient(),
	}
	tests := []struct {
		name        string
		config      *AppConfig
		expected    map[string][]string // resource kind -> expected object names
		expectedErr error
	}{
		{
			name: "successful ruby app generation",
			config: &AppConfig{
				SourceRepositories: util.StringList{"https://github.com/openshift/ruby-hello-world"},
				DockerImages:       util.StringList{"openshift/ruby-20-centos7", "openshift/mongodb-24-centos7"},
				OutputDocker:       true,
				dockerResolver:     dockerResolver,
				imageStreamResolver: app.ImageStreamResolver{
					Client:            &client.Fake{},
					ImageStreamImages: &client.Fake{},
					Namespaces:        []string{"default"},
				},
				imageStreamByAnnotationResolver: &app.ImageStreamByAnnotationResolver{
					Client:            &client.Fake{},
					ImageStreamImages: &client.Fake{},
					Namespaces:        []string{"default"},
				},
				templateResolver: app.TemplateResolver{
					Client:                    &client.Fake{},
					TemplateConfigsNamespacer: &client.Fake{},
					Namespaces:                []string{"openshift", "default"},
				},
				detector: app.SourceRepositoryEnumerator{
					Detectors: source.DefaultDetectors,
					Tester:    dockerfile.NewTester(),
				},
				typer:           kapi.Scheme,
				osclient:        &client.Fake{},
				originNamespace: "default",
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-hello-world"},
				"imageStream": {"ruby-20-centos7"},
			},
			expectedErr: nil,
		},
	}
	for _, test := range tests {
		test.config.refBuilder = &app.ReferenceBuilder{}
		res, err := test.config.RunBuilds(os.Stdout)
		if err != test.expectedErr {
			t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err)
			continue
		}
		// Bucket the generated objects by kind so names can be compared per kind.
		got := map[string][]string{}
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got["buildConfig"] = append(got["buildConfig"], tp.Name)
			case *imageapi.ImageStream:
				got["imageStream"] = append(got["imageStream"], tp.Name)
			}
		}
		if len(test.expected) != len(got) {
			t.Errorf("%s: Resource kind size mismatch! Expected %d, got %d", test.name, len(test.expected), len(got))
			continue
		}
		for k, exp := range test.expected {
			g, ok := got[k]
			if !ok {
				t.Errorf("%s: Didn't find expected kind %s", test.name, k)
			}
			// Sort both sides: generation order is not guaranteed.
			sort.Strings(g)
			sort.Strings(exp)
			if !reflect.DeepEqual(g, exp) {
				t.Errorf("%s: Resource names mismatch! Expected %v, got %v", test.name, exp, g)
				continue
			}
		}
	}
}
// TestRunAll verifies end-to-end new-app generation: for each config it checks
// the generated object names per kind and, where requested, that insecure
// registries produce the insecure-repository annotation on image streams.
func TestRunAll(t *testing.T) {
	dockerResolver := app.DockerRegistryResolver{
		Client: dockerregistry.NewClient(),
	}
	tests := []struct {
		name           string
		config         *AppConfig
		expected       map[string][]string // resource kind -> expected object names
		expectedErr    error
		expectInsecure util.StringSet // stream names that must carry the insecure annotation (nil = skip check)
	}{
		{
			name: "successful ruby app generation",
			config: &AppConfig{
				SourceRepositories: util.StringList{"https://github.com/openshift/ruby-hello-world"},
				dockerResolver:     fakeDockerResolver(),
				imageStreamResolver: app.ImageStreamResolver{
					Client:            &client.Fake{},
					ImageStreamImages: &client.Fake{},
					Namespaces:        []string{"default"},
				},
				Strategy:                        "source",
				imageStreamByAnnotationResolver: app.NewImageStreamByAnnotationResolver(&client.Fake{}, &client.Fake{}, []string{"default"}),
				templateResolver: app.TemplateResolver{
					Client:                    &client.Fake{},
					TemplateConfigsNamespacer: &client.Fake{},
					Namespaces:                []string{"openshift", "default"},
				},
				detector: app.SourceRepositoryEnumerator{
					Detectors: source.DefaultDetectors,
					Tester:    dockerfile.NewTester(),
				},
				typer:           kapi.Scheme,
				osclient:        &client.Fake{},
				originNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedErr: nil,
		},
		{
			name: "app generation using context dir",
			config: &AppConfig{
				SourceRepositories:              util.StringList{"https://github.com/openshift/sti-ruby"},
				ContextDir:                      "2.0/test/rack-test-app",
				dockerResolver:                  dockerResolver,
				imageStreamResolver:             fakeImageStreamResolver(),
				imageStreamByAnnotationResolver: app.NewImageStreamByAnnotationResolver(&client.Fake{}, &client.Fake{}, []string{"default"}),
				templateResolver: app.TemplateResolver{
					Client:                    &client.Fake{},
					TemplateConfigsNamespacer: &client.Fake{},
					Namespaces:                []string{"openshift", "default"},
				},
				detector: app.SourceRepositoryEnumerator{
					Detectors: source.DefaultDetectors,
					Tester:    dockerfile.NewTester(),
				},
				typer:           kapi.Scheme,
				osclient:        &client.Fake{},
				originNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"sti-ruby"},
				"buildConfig":      {"sti-ruby"},
				"deploymentConfig": {"sti-ruby"},
				"service":          {"sti-ruby"},
			},
			expectedErr: nil,
		},
		{
			name: "insecure registry generation",
			config: &AppConfig{
				Components:         util.StringList{"myrepo:5000/myco/example"},
				SourceRepositories: util.StringList{"https://github.com/openshift/ruby-hello-world"},
				Strategy:           "source",
				dockerResolver: app.DockerClientResolver{
					Client: &dockertools.FakeDockerClient{
						Images: []docker.APIImages{{RepoTags: []string{"myrepo:5000/myco/example"}}},
						Image:  dockerBuilderImage(),
					},
					Insecure: true,
				},
				imageStreamResolver: app.ImageStreamResolver{
					Client:            &client.Fake{},
					ImageStreamImages: &client.Fake{},
					Namespaces:        []string{"default"},
				},
				templateResolver: app.TemplateResolver{
					Client:                    &client.Fake{},
					TemplateConfigsNamespacer: &client.Fake{},
					Namespaces:                []string{},
				},
				templateFileResolver: &app.TemplateFileResolver{},
				detector: app.SourceRepositoryEnumerator{
					Detectors: source.DefaultDetectors,
					Tester:    dockerfile.NewTester(),
				},
				typer:            kapi.Scheme,
				osclient:         &client.Fake{},
				originNamespace:  "default",
				InsecureRegistry: true,
			},
			expected: map[string][]string{
				"imageStream":      {"example", "ruby-hello-world"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedErr:    nil,
			expectInsecure: util.NewStringSet("example"),
		},
		{
			name: "docker build",
			config: &AppConfig{
				SourceRepositories: util.StringList{"https://github.com/openshift/ruby-hello-world"},
				dockerResolver: app.DockerClientResolver{
					Client: &dockertools.FakeDockerClient{
						Images: []docker.APIImages{{RepoTags: []string{"openshift/ruby-20-centos7"}}},
						Image:  dockerBuilderImage(),
					},
					Insecure: true,
				},
				imageStreamResolver: app.ImageStreamResolver{
					Client:            &client.Fake{},
					ImageStreamImages: &client.Fake{},
					Namespaces:        []string{"default"},
				},
				imageStreamByAnnotationResolver: app.NewImageStreamByAnnotationResolver(&client.Fake{}, &client.Fake{}, []string{"default"}),
				templateResolver: app.TemplateResolver{
					Client:                    &client.Fake{},
					TemplateConfigsNamespacer: &client.Fake{},
					Namespaces:                []string{"openshift", "default"},
				},
				detector: app.SourceRepositoryEnumerator{
					Detectors: source.DefaultDetectors,
					Tester:    dockerfile.NewTester(),
				},
				typer:           kapi.Scheme,
				osclient:        &client.Fake{},
				originNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby-20-centos7"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedErr: nil,
		},
	}
	for _, test := range tests {
		test.config.refBuilder = &app.ReferenceBuilder{}
		res, err := test.config.RunAll(os.Stdout)
		if err != test.expectedErr {
			t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err)
			continue
		}
		// Bucket generated objects by kind; keep the image streams around for
		// the insecure-annotation check below.
		imageStreams := []*imageapi.ImageStream{}
		got := map[string][]string{}
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got["buildConfig"] = append(got["buildConfig"], tp.Name)
			case *kapi.Service:
				got["service"] = append(got["service"], tp.Name)
			case *imageapi.ImageStream:
				got["imageStream"] = append(got["imageStream"], tp.Name)
				imageStreams = append(imageStreams, tp)
			case *deploy.DeploymentConfig:
				got["deploymentConfig"] = append(got["deploymentConfig"], tp.Name)
			}
		}
		if len(test.expected) != len(got) {
			t.Errorf("%s: Resource kind size mismatch! Expected %d, got %d", test.name, len(test.expected), len(got))
			continue
		}
		for k, exp := range test.expected {
			g, ok := got[k]
			if !ok {
				t.Errorf("%s: Didn't find expected kind %s", test.name, k)
			}
			// Sort both sides: generation order is not guaranteed.
			sort.Strings(g)
			sort.Strings(exp)
			if !reflect.DeepEqual(g, exp) {
				t.Errorf("%s: %s resource names mismatch! Expected %v, got %v", test.name, k, exp, g)
				continue
			}
		}
		if test.expectInsecure == nil {
			continue
		}
		// Every stream in expectInsecure must carry the annotation; no other
		// stream may.
		for _, stream := range imageStreams {
			_, hasAnnotation := stream.Annotations[imageapi.InsecureRepositoryAnnotation]
			if test.expectInsecure.Has(stream.Name) && !hasAnnotation {
				t.Errorf("%s: Expected insecure annotation for stream: %s, but did not get one.", test.name, stream.Name)
			}
			if !test.expectInsecure.Has(stream.Name) && hasAnnotation {
				t.Errorf("%s: Got insecure annotation for stream: %s, and was not expecting one.", test.name, stream.Name)
			}
		}
	}
}
func TestResolve(t *testing.T) { tests := []struct { name string cfg AppConfig components app.ComponentReferences expectedErr string }{ { name: "Resolver error", components: app.ComponentReferences{ app.ComponentReference(&app.ComponentInput{ Value: "mysql:invalid", Resolver: app.DockerRegistryResolver{ Client: dockerregistry.NewClient(), }, })}, expectedErr: `tag "invalid" has not been set`, }, { name: "Successful mysql builder", components: app.ComponentReferences{ app.ComponentReference(&app.ComponentInput{ Value: "mysql", Match: &app.ComponentMatch{ Builder: true, }, })}, expectedErr: "", }, { name: "Unable to build source code", components: app.ComponentReferences{ app.ComponentReference(&app.ComponentInput{ Value: "mysql", ExpectToBuild: true, })}, expectedErr: "no resolver", }, { name: "Successful docker build", cfg: AppConfig{ Strategy: "docker", }, components: app.ComponentReferences{ app.ComponentReference(&app.ComponentInput{ Value: "mysql", ExpectToBuild: true, })}, expectedErr: "", }, } for _, test := range tests { err := test.cfg.resolve(test.components) if err != nil { if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s: Invalid error: Expected %s, got %v", test.name, test.expectedErr, err) } } else if len(test.expectedErr) != 0 { t.Errorf("%s: Expected %s error but got none", test.name, test.expectedErr) } } }
func (c *MasterConfig) GetRestStorage() map[string]rest.Storage { kubeletClient, err := kubeletclient.NewStaticKubeletClient(c.KubeletClientConfig) if err != nil { glog.Fatalf("Unable to configure Kubelet client: %v", err) } // TODO: allow the system CAs and the local CAs to be joined together. importTransport, err := restclient.TransportFor(&restclient.Config{}) if err != nil { glog.Fatalf("Unable to configure a default transport for importing: %v", err) } insecureImportTransport, err := restclient.TransportFor(&restclient.Config{Insecure: true}) if err != nil { glog.Fatalf("Unable to configure a default transport for importing: %v", err) } buildStorage, buildDetailsStorage, err := buildetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) buildRegistry := buildregistry.NewRegistry(buildStorage) buildConfigStorage, err := buildconfigetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) buildConfigRegistry := buildconfigregistry.NewRegistry(buildConfigStorage) deployConfigStorage, deployConfigStatusStorage, deployConfigScaleStorage, err := deployconfigetcd.NewREST(c.RESTOptionsGetter) dcInstantiateOriginClient, dcInstantiateKubeClient := c.DeploymentConfigInstantiateClients() dcInstantiateStorage := deployconfiginstantiate.NewREST( *deployConfigStorage.Store, dcInstantiateOriginClient, dcInstantiateKubeClient, c.ExternalVersionCodec, c.AdmissionControl, ) checkStorageErr(err) deployConfigRegistry := deployconfigregistry.NewRegistry(deployConfigStorage) routeAllocator := c.RouteAllocator() routeStorage, routeStatusStorage, err := routeetcd.NewREST(c.RESTOptionsGetter, routeAllocator) checkStorageErr(err) hostSubnetStorage, err := hostsubnetetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) netNamespaceStorage, err := netnamespaceetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) clusterNetworkStorage, err := clusternetworketcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) egressNetworkPolicyStorage, err := 
egressnetworkpolicyetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) userStorage, err := useretcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) userRegistry := userregistry.NewRegistry(userStorage) identityStorage, err := identityetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) identityRegistry := identityregistry.NewRegistry(identityStorage) userIdentityMappingStorage := useridentitymapping.NewREST(userRegistry, identityRegistry) groupStorage, err := groupetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) policyStorage, err := policyetcd.NewStorage(c.RESTOptionsGetter) checkStorageErr(err) policyRegistry := policyregistry.NewRegistry(policyStorage) policyBindingStorage, err := policybindingetcd.NewStorage(c.RESTOptionsGetter) checkStorageErr(err) policyBindingRegistry := policybindingregistry.NewRegistry(policyBindingStorage) clusterPolicyStorage, err := clusterpolicystorage.NewStorage(c.RESTOptionsGetter) checkStorageErr(err) clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterPolicyStorage) clusterPolicyBindingStorage, err := clusterpolicybindingstorage.NewStorage(c.RESTOptionsGetter) checkStorageErr(err) clusterPolicyBindingRegistry := clusterpolicybindingregistry.NewRegistry(clusterPolicyBindingStorage) selfSubjectRulesReviewStorage := selfsubjectrulesreview.NewREST(c.RuleResolver, c.Informers.ClusterPolicies().Lister().ClusterPolicies()) subjectRulesReviewStorage := subjectrulesreview.NewREST(c.RuleResolver, c.Informers.ClusterPolicies().Lister().ClusterPolicies()) roleStorage := rolestorage.NewVirtualStorage(policyRegistry, c.RuleResolver) roleBindingStorage := rolebindingstorage.NewVirtualStorage(policyBindingRegistry, c.RuleResolver) clusterRoleStorage := clusterrolestorage.NewClusterRoleStorage(clusterPolicyRegistry, clusterPolicyBindingRegistry) clusterRoleBindingStorage := clusterrolebindingstorage.NewClusterRoleBindingStorage(clusterPolicyRegistry, clusterPolicyBindingRegistry) subjectAccessReviewStorage := 
subjectaccessreview.NewREST(c.Authorizer) subjectAccessReviewRegistry := subjectaccessreview.NewRegistry(subjectAccessReviewStorage) localSubjectAccessReviewStorage := localsubjectaccessreview.NewREST(subjectAccessReviewRegistry) resourceAccessReviewStorage := resourceaccessreview.NewREST(c.Authorizer) resourceAccessReviewRegistry := resourceaccessreview.NewRegistry(resourceAccessReviewStorage) localResourceAccessReviewStorage := localresourceaccessreview.NewREST(resourceAccessReviewRegistry) podSecurityPolicyReviewStorage := podsecuritypolicyreview.NewREST(oscc.NewDefaultSCCMatcher(c.Informers.SecurityContextConstraints().Lister()), clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient)) podSecurityPolicySubjectStorage := podsecuritypolicysubjectreview.NewREST(oscc.NewDefaultSCCMatcher(c.Informers.SecurityContextConstraints().Lister()), clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient)) podSecurityPolicySelfSubjectReviewStorage := podsecuritypolicyselfsubjectreview.NewREST(oscc.NewDefaultSCCMatcher(c.Informers.SecurityContextConstraints().Lister()), clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient)) imageStorage, err := imageetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) imageRegistry := image.NewRegistry(imageStorage) imageSignatureStorage := imagesignature.NewREST(c.PrivilegedLoopbackOpenShiftClient.Images()) imageStreamSecretsStorage := imagesecret.NewREST(c.ImageStreamSecretClient()) imageStreamStorage, imageStreamStatusStorage, internalImageStreamStorage, err := imagestreametcd.NewREST(c.RESTOptionsGetter, c.RegistryNameFn, subjectAccessReviewRegistry, c.LimitVerifier) checkStorageErr(err) imageStreamRegistry := imagestream.NewRegistry(imageStreamStorage, imageStreamStatusStorage, internalImageStreamStorage) imageStreamMappingStorage := imagestreammapping.NewREST(imageRegistry, imageStreamRegistry, c.RegistryNameFn) imageStreamTagStorage := imagestreamtag.NewREST(imageRegistry, 
imageStreamRegistry) imageStreamTagRegistry := imagestreamtag.NewRegistry(imageStreamTagStorage) importerFn := func(r importer.RepositoryRetriever) imageimporter.Interface { return imageimporter.NewImageStreamImporter(r, c.Options.ImagePolicyConfig.MaxImagesBulkImportedPerRepository, flowcontrol.NewTokenBucketRateLimiter(2.0, 3)) } importerDockerClientFn := func() dockerregistry.Client { return dockerregistry.NewClient(20*time.Second, false) } imageStreamImportStorage := imagestreamimport.NewREST(importerFn, imageStreamRegistry, internalImageStreamStorage, imageStorage, c.ImageStreamImportSecretClient(), importTransport, insecureImportTransport, importerDockerClientFn) imageStreamImageStorage := imagestreamimage.NewREST(imageRegistry, imageStreamRegistry) imageStreamImageRegistry := imagestreamimage.NewRegistry(imageStreamImageStorage) buildGenerator := &buildgenerator.BuildGenerator{ Client: buildgenerator.Client{ GetBuildConfigFunc: buildConfigRegistry.GetBuildConfig, UpdateBuildConfigFunc: buildConfigRegistry.UpdateBuildConfig, GetBuildFunc: buildRegistry.GetBuild, CreateBuildFunc: buildRegistry.CreateBuild, GetImageStreamFunc: imageStreamRegistry.GetImageStream, GetImageStreamImageFunc: imageStreamImageRegistry.GetImageStreamImage, GetImageStreamTagFunc: imageStreamTagRegistry.GetImageStreamTag, }, ServiceAccounts: c.KubeClient(), Secrets: c.KubeClient(), } // TODO: with sharding, this needs to be changed deployConfigGenerator := &deployconfiggenerator.DeploymentConfigGenerator{ Client: deployconfiggenerator.Client{ DCFn: deployConfigRegistry.GetDeploymentConfig, ISFn: imageStreamRegistry.GetImageStream, LISFn2: imageStreamRegistry.ListImageStreams, }, } configClient, kclient := c.DeploymentConfigClients() deployRollbackClient := deployrollback.Client{ DCFn: deployConfigRegistry.GetDeploymentConfig, RCFn: clientDeploymentInterface{kclient}.GetDeployment, GRFn: deployrollback.NewRollbackGenerator().GenerateRollback, } deployConfigRollbackStorage := 
deployrollback.NewREST(configClient, kclient, c.ExternalVersionCodec) projectStorage := projectproxy.NewREST(c.PrivilegedLoopbackKubernetesClient.Namespaces(), c.ProjectAuthorizationCache, c.ProjectAuthorizationCache, c.ProjectCache) namespace, templateName, err := configapi.ParseNamespaceAndName(c.Options.ProjectConfig.ProjectRequestTemplate) if err != nil { glog.Errorf("Error parsing project request template value: %v", err) // we can continue on, the storage that gets created will be valid, it simply won't work properly. There's no reason to kill the master } projectRequestStorage := projectrequeststorage.NewREST(c.Options.ProjectConfig.ProjectRequestMessage, namespace, templateName, c.PrivilegedLoopbackOpenShiftClient, c.PrivilegedLoopbackKubernetesClient, c.Informers.PolicyBindings().Lister()) bcClient := c.BuildConfigWebHookClient() buildConfigWebHooks := buildconfigregistry.NewWebHookREST( buildConfigRegistry, buildclient.NewOSClientBuildConfigInstantiatorClient(bcClient), map[string]webhook.Plugin{ "generic": generic.New(), "github": github.New(), }, ) clientStorage, err := clientetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) clientRegistry := clientregistry.NewRegistry(clientStorage) // If OAuth is disabled, set the strategy to Deny saAccountGrantMethod := oauthapi.GrantHandlerDeny if c.Options.OAuthConfig != nil { // Otherwise, take the value provided in master-config.yaml saAccountGrantMethod = oauthapi.GrantHandlerType(c.Options.OAuthConfig.GrantConfig.ServiceAccountMethod) } combinedOAuthClientGetter := saoauth.NewServiceAccountOAuthClientGetter(c.KubeClient(), c.KubeClient(), clientRegistry, saAccountGrantMethod) authorizeTokenStorage, err := authorizetokenetcd.NewREST(c.RESTOptionsGetter, combinedOAuthClientGetter) checkStorageErr(err) accessTokenStorage, err := accesstokenetcd.NewREST(c.RESTOptionsGetter, combinedOAuthClientGetter) checkStorageErr(err) clientAuthorizationStorage, err := clientauthetcd.NewREST(c.RESTOptionsGetter, 
combinedOAuthClientGetter) checkStorageErr(err) templateStorage, err := templateetcd.NewREST(c.RESTOptionsGetter) checkStorageErr(err) storage := map[string]rest.Storage{ "images": imageStorage, "imagesignatures": imageSignatureStorage, "imageStreams/secrets": imageStreamSecretsStorage, "imageStreams": imageStreamStorage, "imageStreams/status": imageStreamStatusStorage, "imageStreamImports": imageStreamImportStorage, "imageStreamImages": imageStreamImageStorage, "imageStreamMappings": imageStreamMappingStorage, "imageStreamTags": imageStreamTagStorage, "deploymentConfigs": deployConfigStorage, "deploymentConfigs/scale": deployConfigScaleStorage, "deploymentConfigs/status": deployConfigStatusStorage, "deploymentConfigs/rollback": deployConfigRollbackStorage, "deploymentConfigs/log": deploylogregistry.NewREST(configClient, kclient, c.DeploymentLogClient(), kubeletClient), "deploymentConfigs/instantiate": dcInstantiateStorage, // TODO: Deprecate these "generateDeploymentConfigs": deployconfiggenerator.NewREST(deployConfigGenerator, c.ExternalVersionCodec), "deploymentConfigRollbacks": deployrollback.NewDeprecatedREST(deployRollbackClient, c.ExternalVersionCodec), "processedTemplates": templateregistry.NewREST(), "templates": templateStorage, "routes": routeStorage, "routes/status": routeStatusStorage, "projects": projectStorage, "projectRequests": projectRequestStorage, "hostSubnets": hostSubnetStorage, "netNamespaces": netNamespaceStorage, "clusterNetworks": clusterNetworkStorage, "egressNetworkPolicies": egressNetworkPolicyStorage, "users": userStorage, "groups": groupStorage, "identities": identityStorage, "userIdentityMappings": userIdentityMappingStorage, "oAuthAuthorizeTokens": authorizeTokenStorage, "oAuthAccessTokens": accessTokenStorage, "oAuthClients": clientStorage, "oAuthClientAuthorizations": clientAuthorizationStorage, "resourceAccessReviews": resourceAccessReviewStorage, "subjectAccessReviews": subjectAccessReviewStorage, "localSubjectAccessReviews": 
localSubjectAccessReviewStorage, "localResourceAccessReviews": localResourceAccessReviewStorage, "selfSubjectRulesReviews": selfSubjectRulesReviewStorage, "subjectRulesReviews": subjectRulesReviewStorage, "podSecurityPolicyReviews": podSecurityPolicyReviewStorage, "podSecurityPolicySubjectReviews": podSecurityPolicySubjectStorage, "podSecurityPolicySelfSubjectReviews": podSecurityPolicySelfSubjectReviewStorage, "policies": policyStorage, "policyBindings": policyBindingStorage, "roles": roleStorage, "roleBindings": roleBindingStorage, "clusterPolicies": clusterPolicyStorage, "clusterPolicyBindings": clusterPolicyBindingStorage, "clusterRoleBindings": clusterRoleBindingStorage, "clusterRoles": clusterRoleStorage, "clusterResourceQuotas": restInPeace(clusterresourcequotaregistry.NewStorage(c.RESTOptionsGetter)), "clusterResourceQuotas/status": updateInPeace(clusterresourcequotaregistry.NewStatusStorage(c.RESTOptionsGetter)), "appliedClusterResourceQuotas": appliedclusterresourcequotaregistry.NewREST( c.ClusterQuotaMappingController.GetClusterQuotaMapper(), c.Informers.ClusterResourceQuotas().Lister(), c.Informers.Namespaces().Lister()), } if configapi.IsBuildEnabled(&c.Options) { storage["builds"] = buildStorage storage["buildConfigs"] = buildConfigStorage storage["buildConfigs/webhooks"] = buildConfigWebHooks storage["builds/clone"] = buildclone.NewStorage(buildGenerator) storage["buildConfigs/instantiate"] = buildconfiginstantiate.NewStorage(buildGenerator) storage["buildConfigs/instantiatebinary"] = buildconfiginstantiate.NewBinaryStorage(buildGenerator, buildStorage, c.BuildLogClient(), kubeletClient) storage["builds/log"] = buildlogregistry.NewREST(buildStorage, buildStorage, c.BuildLogClient(), kubeletClient) storage["builds/details"] = buildDetailsStorage } return storage }