// getFactoryFromCluster returns an OpenShift Factory
// using the config that is made available when we are running in a cluster
// (using environment variables and the token secret file),
// or an error if those are not available (meaning we are not running in a cluster).
func getFactoryFromCluster() (*clientcmd.Factory, error) {
	clusterConfig, err := k8client.InClusterConfig()
	if err != nil {
		return nil, err
	}

	// keep only what we need to initialize a factory
	overrides := &kclientcmd.ConfigOverrides{
		ClusterInfo: kclientcmdapi.Cluster{
			Server:     clusterConfig.Host,
			APIVersion: clusterConfig.Version,
		},
		AuthInfo: kclientcmdapi.AuthInfo{
			Token: clusterConfig.BearerToken,
		},
		Context: kclientcmdapi.Context{},
	}

	if len(clusterConfig.TLSClientConfig.CAFile) > 0 {
		// FIXME "x509: cannot validate certificate for x.x.x.x because it doesn't contain any IP SANs"
		// overrides.ClusterInfo.CertificateAuthority = clusterConfig.TLSClientConfig.CAFile
		overrides.ClusterInfo.InsecureSkipTLSVerify = true
	} else {
		overrides.ClusterInfo.InsecureSkipTLSVerify = true
	}

	config := kclientcmd.NewDefaultClientConfig(*kclientcmdapi.NewConfig(), overrides)

	factory := clientcmd.NewFactory(config)

	return factory, nil
}
// makeClusterClients creates clients from the specified context and determines whether it has cluster-admin access.
func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*client.Client, *kclientset.Clientset, bool, string, error) {
	overrides := &clientcmd.ConfigOverrides{Context: *context}
	clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, overrides)
	serverUrl := rawConfig.Clusters[context.Cluster].Server
	factory := osclientcmd.NewFactory(clientConfig)
	o.Logger.Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
	if osClient, _, kubeClient, err := factory.Clients(); err != nil {
		o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
		return nil, nil, false, "", nil
	} else {
		subjectAccessReview := authorizationapi.SubjectAccessReview{Action: authorizationapi.Action{
			// if you can do everything, you're the cluster admin.
			Verb:     "*",
			Group:    "*",
			Resource: "*",
		}}
		if resp, err := osClient.SubjectAccessReviews().Create(&subjectAccessReview); err != nil {
			if regexp.MustCompile(`User "[\w:]+" cannot create \w+ at the cluster scope`).MatchString(err.Error()) {
				o.Logger.Debug("CED1007", fmt.Sprintf("Context '%s' does not have cluster-admin access:\n%v", contextName, err))
				return nil, nil, false, "", nil
			} else {
				o.Logger.Error("CED1008", fmt.Sprintf("Unknown error testing cluster-admin access for context '%s':\n%v", contextName, err))
				return nil, nil, false, "", err
			}
		} else if resp.Allowed {
			o.Logger.Info("CED1009", fmt.Sprintf("Using context for cluster-admin access: '%s'", contextName))
			return osClient, kubeClient, true, serverUrl, nil
		}
	}
	o.Logger.Debug("CED1010", fmt.Sprintf("Context does not have cluster-admin access: '%s'", contextName))
	return nil, nil, false, "", nil
}
func TestLocalFlag(t *testing.T) {
	in := &bytes.Buffer{}
	out := &bytes.Buffer{}
	errout := &bytes.Buffer{}
	f := clientcmd.NewFactory(nil)
	setCmd := NewCmdSet("", f, in, out, errout)
	ensureLocalFlagOnChildren(t, setCmd, "")
}
func loggedInUserFactory() (*clientcmd.Factory, error) {
	cfg, err := kclientcmd.LoadFromFile(config.RecommendedHomeFile)
	if err != nil {
		return nil, err
	}
	defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{})
	return clientcmd.NewFactory(defaultCfg), nil
}
func loggedInUserFactory() (*clientcmd.Factory, error) {
	cfg, err := config.NewOpenShiftClientConfigLoadingRules().Load()
	if err != nil {
		return nil, err
	}
	defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{})
	return clientcmd.NewFactory(defaultCfg), nil
}
// Factory returns a command factory that works with the OpenShift server's admin credentials
func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) {
	if c.factory == nil {
		cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig"))
		if err != nil {
			return nil, err
		}
		defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{})
		c.factory = clientcmd.NewFactory(defaultCfg)
	}
	return c.factory, nil
}
// Factory returns a command factory that works with the OpenShift server's admin credentials
func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) {
	if c.factory == nil {
		cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig"))
		if err != nil {
			return nil, err
		}
		overrides := &kclientcmd.ConfigOverrides{}
		if c.PortForwarding {
			overrides.ClusterInfo.Server = fmt.Sprintf("https://%s:8443", c.ServerIP)
		}
		defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, overrides)
		c.factory = clientcmd.NewFactory(defaultCfg)
	}
	return c.factory, nil
}
// TestLogsFlagParity makes sure that our copied flags don't slip during rebases
func TestLogsFlagParity(t *testing.T) {
	kubeCmd := kcmd.NewCmdLogs(nil, ioutil.Discard)
	f := clientcmd.NewFactory(nil)
	originCmd := NewCmdLogs("oc", "logs", f, ioutil.Discard)

	kubeCmd.LocalFlags().VisitAll(func(kubeFlag *pflag.Flag) {
		originFlag := originCmd.LocalFlags().Lookup(kubeFlag.Name)
		if originFlag == nil {
			t.Errorf("missing %v flag", kubeFlag.Name)
			return
		}

		if !reflect.DeepEqual(originFlag, kubeFlag) {
			t.Errorf("flag %v %v does not match %v", kubeFlag.Name, kubeFlag, originFlag)
		}
	})
}
func getFakeInfo(podInfo *api.Pod) ([]*resource.Info, *VolumeOptions) {
	f := clientcmd.NewFactory(nil)
	fakeMapping := getFakeMapping()
	info := &resource.Info{
		Client:    fake.NewSimpleClientset().Core().RESTClient(),
		Mapping:   fakeMapping,
		Namespace: "default",
		Name:      "fakepod",
		Object:    podInfo,
	}
	infos := []*resource.Info{info}
	vOptions := &VolumeOptions{}
	vOptions.Name = "fake-mount"
	vOptions.Encoder = api.Codecs.LegacyCodec(registered.EnabledVersions()...)
	vOptions.Containers = "*"
	vOptions.UpdatePodSpecForObject = f.UpdatePodSpecForObject
	return infos, vOptions
}
func TestStartBuildWebHookHTTPS(t *testing.T) {
	invoked := make(chan struct{}, 1)
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		invoked <- struct{}{}
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	testErr := errors.New("not enabled")
	cfg := &FakeClientConfig{
		Err: testErr,
	}
	f := clientcmd.NewFactory(cfg)
	buf := &bytes.Buffer{}
	if err := RunStartBuildWebHook(f, buf, server.URL+"/webhook", "", "", nil); err == nil || !strings.Contains(err.Error(), "certificate signed by unknown authority") {
		t.Fatalf("expected a 'certificate signed by unknown authority' error, got: %v", err)
	}
}
// NewFactory builds a new OpenShift client factory from the given config
func NewFactory(config *kclient.Config) *clientcmd.Factory {
	// keep only what we need to initialize a factory
	clientConfig := kclientcmd.NewDefaultClientConfig(
		*kclientcmdapi.NewConfig(),
		&kclientcmd.ConfigOverrides{
			ClusterInfo: kclientcmdapi.Cluster{
				Server:                config.Host,
				APIVersion:            config.Version,
				InsecureSkipTLSVerify: config.Insecure,
			},
			AuthInfo: kclientcmdapi.AuthInfo{
				Token: config.BearerToken,
			},
			Context: kclientcmdapi.Context{},
		})

	factory := clientcmd.NewFactory(clientConfig)

	return factory
}
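// Hypothetical usage sketch (not part of the original sources): a minimal example of
// consuming the factory returned by NewFactory above, using the same factory.Clients()
// and osClient.Projects().List() calls that appear in the diagnostics snippets in this
// collection. The function name, error handling, and imports are illustrative assumptions.
func listProjectsWithFactory(config *kclient.Config) error {
	factory := NewFactory(config)
	// Clients() returns the OpenShift client alongside the Kubernetes clients, as used
	// in makeClusterClients and ConfigContext.Check above.
	osClient, _, _, err := factory.Clients()
	if err != nil {
		return err
	}
	// e.g. list the projects the configured token can see
	_, err = osClient.Projects().List(kapi.ListOptions{})
	return err
}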
func TestStartBuildWebHook(t *testing.T) {
	invoked := make(chan struct{}, 1)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		invoked <- struct{}{}
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	cfg := &FakeClientConfig{}
	f := clientcmd.NewFactory(cfg)
	buf := &bytes.Buffer{}
	if err := RunStartBuildWebHook(f, buf, server.URL+"/webhook", "", "", nil); err != nil {
		t.Fatalf("unable to start hook: %v", err)
	}
	<-invoked

	if err := RunStartBuildWebHook(f, buf, server.URL+"/webhook", "", "unknownpath", nil); err == nil {
		t.Fatalf("unexpected non-error: %v", err)
	}
}
func main() {
	cmds := &cobra.Command{
		Use:   "qtz",
		Short: "Quantezza CLI.",
		Long:  `Quantezza Data Foundry client.`,
		// Run: runHelp,
	}

	in, out := os.Stdin, os.Stdout

	loadingRules := config.NewOpenShiftClientConfigLoadingRules()
	cmds.PersistentFlags().StringVar(&loadingRules.ExplicitPath, config.OpenShiftConfigFlagName, "", "Path to the config file to use for CLI requests.")
	overrides := &clientcmd.ConfigOverrides{}
	// overrideFlags := clientcmd.RecommendedConfigOverrideFlags("")
	//
	// overrideFlags.ContextOverrideFlags.Namespace.ShortName = "n"
	// overrideFlags.ClusterOverrideFlags.APIVersion.LongName = ""
	// overrideFlags.ClusterOverrideFlags.CertificateAuthority.LongName = ""
	// overrideFlags.ContextOverrideFlags.ClusterName.LongName = ""
	// overrideFlags.ContextOverrideFlags.Namespace.LongName = ""
	// overrideFlags.AuthOverrideFlags.ClientCertificate.LongName = ""
	//
	// clientcmd.BindOverrideFlags(overrides, cmds.PersistentFlags(), overrideFlags)
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)

	f := occlientcmd.NewFactory(clientConfig)

	fullName := "qtz"

	loginCmd := occmd.NewCmdLogin(fullName, f, in, out)
	cmds.AddCommand(loginCmd)

	whoamiCmd := occmd.NewCmdWhoAmI(occmd.WhoAmIRecommendedCommandName, fullName+" "+occmd.WhoAmIRecommendedCommandName, f, out)
	cmds.AddCommand(whoamiCmd)

	cmds.AddCommand(occmd.NewCmdProject(fullName+" project", f, out))

	cmds.Execute()
}
func getFakeInfo(podInfo *api.Pod) ([]*resource.Info, *VolumeOptions) {
	ns := testapi.Default.NegotiatedSerializer()
	f := clientcmd.NewFactory(nil)
	client := &fake.RESTClient{
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			return nil, nil
		}),
	}
	fakeMapping := getFakeMapping()
	info := &resource.Info{
		Client:    client,
		Mapping:   fakeMapping,
		Namespace: "default",
		Name:      "fakepod",
		Object:    podInfo,
	}
	infos := []*resource.Info{info}
	vOptions := &VolumeOptions{}
	vOptions.Name = "fake-mount"
	vOptions.Encoder = api.Codecs.LegacyCodec(registered.EnabledVersions()...)
	vOptions.Containers = "*"
	vOptions.UpdatePodSpecForObject = f.UpdatePodSpecForObject
	return infos, vOptions
}
func TestStartBuildHookPostReceive(t *testing.T) {
	invoked := make(chan *buildapi.GenericWebHookEvent, 1)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		event := buildapi.GenericWebHookEvent{}
		decoder := json.NewDecoder(r.Body)
		if err := decoder.Decode(&event); err != nil {
			t.Errorf("unmarshal failed: %v", err)
		}
		invoked <- &event
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	// write a fake git post-receive payload: one new ref (master) and one unchanged ref (stage)
	f, _ := ioutil.TempFile("", "test")
	defer os.Remove(f.Name())
	fmt.Fprintf(f, `0000 2384 refs/heads/master
2548 2548 refs/heads/stage`)
	f.Close()

	testErr := errors.New("not enabled")
	cfg := &FakeClientConfig{
		Err: testErr,
	}
	factory := clientcmd.NewFactory(cfg)
	buf := &bytes.Buffer{}
	if err := RunStartBuildWebHook(factory, buf, server.URL+"/webhook", "", f.Name(), nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	event := <-invoked
	if event == nil || event.Git == nil || len(event.Git.Refs) != 1 {
		t.Fatalf("unexpected event: %#v", event)
	}
	if event.Git.Refs[0].Commit != "2384" {
		t.Fatalf("unexpected ref: %#v", event.Git.Refs[0])
	}
}
// Check is part of the Diagnostic interface; it runs the actual diagnostic logic
func (d ConfigContext) Check() types.DiagnosticResult {
	r := types.NewDiagnosticResult(ConfigContextsName)

	isDefaultContext := d.RawConfig.CurrentContext == d.ContextName

	// prepare bad news message
	errorKey := "DCli0001"
	unusableLine := fmt.Sprintf("The client config context '%s' is unusable", d.ContextName)
	if isDefaultContext {
		errorKey = "DCli0002"
		unusableLine = fmt.Sprintf("The current client config context '%s' is unusable", d.ContextName)
	}

	// check that the context and its constituents are defined in the kubeconfig
	context, exists := d.RawConfig.Contexts[d.ContextName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' is not defined.", unusableLine, d.ContextName))
		return r
	}
	clusterName := context.Cluster
	cluster, exists := d.RawConfig.Clusters[clusterName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a cluster '%s' which is not defined.", unusableLine, d.ContextName, clusterName))
		return r
	}
	authName := context.AuthInfo
	if _, exists := d.RawConfig.AuthInfos[authName]; !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a user '%s' which is not defined.", unusableLine, d.ContextName, authName))
		return r
	}

	// we found a fully-defined context
	project := context.Namespace
	if project == "" {
		project = kapi.NamespaceDefault // k8s fills this in anyway if missing from the context
	}
	msgText := contextDesc
	if isDefaultContext {
		msgText = currContextDesc
	}
	msgText = fmt.Sprintf(msgText, d.ContextName, cluster.Server, authName, project)

	// Actually send a request to see if the context has connectivity.
	// Note: we cannot reuse factories as they cache the clients, so build a new factory for each context.
	osClient, _, _, err := osclientcmd.NewFactory(kclientcmd.NewDefaultClientConfig(*d.RawConfig, &kclientcmd.ConfigOverrides{Context: *context})).Clients()
	// client creation now *fails* if it cannot connect to the server; so, address connectivity errors below
	if err == nil {
		if projects, projerr := osClient.Projects().List(kapi.ListOptions{}); projerr != nil {
			err = projerr
		} else { // success!
			list := []string{}
			for i, project := range projects.Items {
				if i > 9 {
					list = append(list, "...")
					break
				}
				list = append(list, project.Name)
			}
			if len(list) == 0 {
				r.Info("DCli0003", msgText+"Successfully requested project list, but it is empty, so user has no access to anything.")
			} else {
				r.Info("DCli0004", msgText+fmt.Sprintf("Successfully requested project list; has access to project(s):\n %v", list))
			}
			return r
		}
	}

	// something went wrong; we couldn't create a client or get the project list.
	// interpret the terse error messages with helpful info.
	errMsg := err.Error()
	errFull := fmt.Sprintf("(%T) %[1]v\n", err)
	var reason, errId string
	switch {
	case regexp.MustCompile("dial tcp: lookup (\\S+): no such host").MatchString(errMsg):
		errId, reason = "DCli0005", clientNoResolve
	case strings.Contains(errMsg, "x509: certificate signed by unknown authority"):
		errId, reason = "DCli0006", clientUnknownCa
	case strings.Contains(errMsg, "specifying a root certificates file with the insecure flag is not allowed"):
		errId, reason = "DCli0007", clientUnneededCa
	case invalidCertNameRx.MatchString(errMsg):
		match := invalidCertNameRx.FindStringSubmatch(errMsg)
		serverHost := match[len(match)-1]
		errId, reason = "DCli0008", fmt.Sprintf(clientInvCertName, serverHost)
	case regexp.MustCompile("dial tcp (\\S+): connection refused").MatchString(errMsg):
		errId, reason = "DCli0009", clientConnRefused
	case regexp.MustCompile("dial tcp (\\S+): (?:connection timed out|i/o timeout|no route to host)").MatchString(errMsg):
		errId, reason = "DCli0010", clientConnTimeout
	case strings.Contains(errMsg, "malformed HTTP response"):
		errId, reason = "DCli0011", clientMalformedHTTP
	case strings.Contains(errMsg, "tls: oversized record received with length"):
		errId, reason = "DCli0012", clientMalformedTLS
	case strings.Contains(errMsg, `User "system:anonymous" cannot`):
		errId, reason = "DCli0013", clientUnauthn
	case strings.Contains(errMsg, "provide credentials"):
		errId, reason = "DCli0014", clientUnauthz
	default:
		errId, reason = "DCli0015", `Diagnostics does not have an explanation for what this means. Please report this error so one can be added.`
	}
	r.Error(errId, err, msgText+errFull+reason)
	return r
}
func TestTransform(t *testing.T) {
	type variant struct {
		changed       bool
		nilReporter   bool
		err           bool
		obj, expected runtime.Object
	}
	testCases := []struct {
		mappings ImageReferenceMappings
		variants []variant
	}{
		{
			mappings: ImageReferenceMappings{{FromRegistry: "docker.io", ToRegistry: "index.docker.io"}},
			variants: []variant{
				{
					obj: &kapi.Pod{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}},
					changed: true,
					expected: &kapi.Pod{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}},
				},
				{
					obj: &kapi.ReplicationController{Spec: kapi.ReplicationControllerSpec{Template: &kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &kapi.ReplicationController{Spec: kapi.ReplicationControllerSpec{Template: &kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj: &kextensions.Deployment{Spec: kextensions.DeploymentSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &kextensions.Deployment{Spec: kextensions.DeploymentSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj: &deployapi.DeploymentConfig{Spec: deployapi.DeploymentConfigSpec{Template: &kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &deployapi.DeploymentConfig{Spec: deployapi.DeploymentConfigSpec{Template: &kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj: &kextensions.DaemonSet{Spec: kextensions.DaemonSetSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &kextensions.DaemonSet{Spec: kextensions.DaemonSetSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj: &kextensions.ReplicaSet{Spec: kextensions.ReplicaSetSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &kextensions.ReplicaSet{Spec: kextensions.ReplicaSetSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj: &kbatch.Job{Spec: kbatch.JobSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "docker.io/foo/bar"},
						{Image: "foo/bar"},
					}}}}},
					changed: true,
					expected: &kbatch.Job{Spec: kbatch.JobSpec{Template: kapi.PodTemplateSpec{Spec: kapi.PodSpec{Containers: []kapi.Container{
						{Image: "index.docker.io/foo/bar"},
						{Image: "index.docker.io/foo/bar"},
					}}}}},
				},
				{
					obj:         &kapi.Node{},
					nilReporter: true,
				},
				{
					obj: &buildapi.BuildConfig{Spec: buildapi.BuildConfigSpec{CommonSpec: buildapi.CommonSpec{
						Output: buildapi.BuildOutput{To: &kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
						Source: buildapi.BuildSource{
							Images: []buildapi.ImageSource{
								{From: kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
								{From: kapi.ObjectReference{Kind: "DockerImage", Name: "foo/bar"}},
							},
						},
						Strategy: buildapi.BuildStrategy{
							DockerStrategy: &buildapi.DockerBuildStrategy{From: &kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
							SourceStrategy: &buildapi.SourceBuildStrategy{From: kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
							CustomStrategy: &buildapi.CustomBuildStrategy{From: kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
						},
					}}},
					changed: true,
					expected: &buildapi.BuildConfig{Spec: buildapi.BuildConfigSpec{CommonSpec: buildapi.CommonSpec{
						Output: buildapi.BuildOutput{To: &kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
						Source: buildapi.BuildSource{
							Images: []buildapi.ImageSource{
								{From: kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
								{From: kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
							},
						},
						Strategy: buildapi.BuildStrategy{
							DockerStrategy: &buildapi.DockerBuildStrategy{From: &kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
							SourceStrategy: &buildapi.SourceBuildStrategy{From: kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
							CustomStrategy: &buildapi.CustomBuildStrategy{From: kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
						},
					}}},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					changed: true,
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"index.docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"myserver.com":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"myserver.com":{"auth":"Og=="},"other.server":{"auth":"Og=="}}`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}}`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					changed: true,
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"index.docker.io":{"auth":"Og=="},"other.server":{"auth":"Og=="}}}`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"myserver.com":{},"other.server":{}}}`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"myserver.com":{},"other.server":{}}}`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"auths":{`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					err: true,
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockercfg,
						Data: map[string][]byte{
							kapi.DockerConfigKey: []byte(`{"auths":{`),
							"another":            []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					err: true,
					expected: &kapi.Secret{
						Type: kapi.SecretTypeDockerConfigJson,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{`),
							"another":                []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &kapi.Secret{
						Type: kapi.SecretTypeOpaque,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
					expected: &kapi.Secret{
						Type: kapi.SecretTypeOpaque,
						Data: map[string][]byte{
							kapi.DockerConfigJsonKey: []byte(`{"auths":{"docker.io":{},"other.server":{}}}`),
						},
					},
				},
				{
					obj: &imageapi.Image{
						DockerImageReference: "docker.io/foo/bar",
					},
					changed: true,
					expected: &imageapi.Image{
						DockerImageReference: "index.docker.io/foo/bar",
					},
				},
				{
					obj: &imageapi.Image{
						DockerImageReference: "other.docker.io/foo/bar",
					},
					expected: &imageapi.Image{
						DockerImageReference: "other.docker.io/foo/bar",
					},
				},
				{
					obj: &imageapi.ImageStream{
						Spec: imageapi.ImageStreamSpec{
							Tags: map[string]imageapi.TagReference{
								"foo": {From: &kapi.ObjectReference{Kind: "DockerImage", Name: "docker.io/foo/bar"}},
								"bar": {From: &kapi.ObjectReference{Kind: "ImageStream", Name: "docker.io/foo/bar"}},
								"baz": {},
							},
							DockerImageRepository: "docker.io/foo/bar",
						},
						Status: imageapi.ImageStreamStatus{
							DockerImageRepository: "docker.io/foo/bar",
							Tags: map[string]imageapi.TagEventList{
								"bar": {Items: []imageapi.TagEvent{
									{DockerImageReference: "docker.io/foo/bar"},
									{DockerImageReference: "docker.io/foo/bar"},
								}},
								"baz": {Items: []imageapi.TagEvent{
									{DockerImageReference: "some.other/reference"},
									{DockerImageReference: "docker.io/foo/bar"},
								}},
							},
						},
					},
					changed: true,
					expected: &imageapi.ImageStream{
						Spec: imageapi.ImageStreamSpec{
							Tags: map[string]imageapi.TagReference{
								"foo": {From: &kapi.ObjectReference{Kind: "DockerImage", Name: "index.docker.io/foo/bar"}},
								"bar": {From: &kapi.ObjectReference{Kind: "ImageStream", Name: "docker.io/foo/bar"}},
								"baz": {},
							},
							DockerImageRepository: "index.docker.io/foo/bar",
						},
						Status: imageapi.ImageStreamStatus{
							DockerImageRepository: "docker.io/foo/bar",
							Tags: map[string]imageapi.TagEventList{
								"bar": {Items: []imageapi.TagEvent{
									{DockerImageReference: "index.docker.io/foo/bar"},
									{DockerImageReference: "index.docker.io/foo/bar"},
								}},
								"baz": {Items: []imageapi.TagEvent{
									{DockerImageReference: "some.other/reference"},
									{DockerImageReference: "index.docker.io/foo/bar"},
								}},
							},
						},
					},
				},
			},
		},
		{
			mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry"}},
		},
		{
			mappings: ImageReferenceMappings{{FromRegistry: "index.docker.io", ToRegistry: "another.registry", ToName: "extra"}},
		},
	}

	for _, test := range testCases {
		for i, v := range test.variants {
			o := MigrateImageReferenceOptions{
				Mappings:        test.mappings,
				UpdatePodSpecFn: clientcmd.NewFactory(nil).UpdatePodSpecForObject,
			}
			reporter, err := o.transform(v.obj)
			if (err != nil) != v.err {
				t.Errorf("%d: %v %t", i, err, v.err)
				continue
			}
			if err != nil {
				continue
			}
			if (reporter == nil) != v.nilReporter {
				t.Errorf("%d: reporter %#v %t", i, reporter, v.nilReporter)
				continue
			}
			if reporter == nil {
				continue
			}
			if reporter.Changed() != v.changed {
				t.Errorf("%d: changed %#v %t", i, reporter, v.changed)
				continue
			}
			if !kapi.Semantic.DeepEqual(v.expected, v.obj) {
				t.Errorf("%d: object: %s", i, diff.ObjectDiff(v.expected, v.obj))
				continue
			}
		}
	}
}