Example #1
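// lifecycleHook builds a LifecycleHook from the command-line options, warning
// (but not failing) when the referenced container or volumes do not exist in
// the deployment config, and defaulting the hook container to the first one
// in the pod template.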
func (o *DeploymentHookOptions) lifecycleHook(dc *deployapi.DeploymentConfig) (*deployapi.LifecycleHook, error) {
	hook := &deployapi.LifecycleHook{
		FailurePolicy: o.FailurePolicy,
		ExecNewPod: &deployapi.ExecNewPodHook{
			Command: o.Command,
		},
	}
	if len(o.Container) > 0 {
		found := false
		for _, c := range dc.Spec.Template.Spec.Containers {
			if c.Name == o.Container {
				found = true
				break
			}
		}
		if !found {
			fmt.Fprintf(o.Err, "warning: deployment config %q does not have a container named %q\n", dc.Name, o.Container)
		}
		hook.ExecNewPod.ContainerName = o.Container
	}
	if len(hook.ExecNewPod.ContainerName) == 0 {
		hook.ExecNewPod.ContainerName = dc.Spec.Template.Spec.Containers[0].Name
	}
	if len(o.Environment) > 0 {
		env, _, err := cmdutil.ParseEnv(o.Environment, nil)
		if err != nil {
			return nil, err
		}
		hook.ExecNewPod.Env = env
	}
	if len(o.Volumes) > 0 {
		for _, v := range o.Volumes {
			found := false
			for _, podVolume := range dc.Spec.Template.Spec.Volumes {
				if podVolume.Name == v {
					found = true
					break
				}
			}
			if !found {
				fmt.Fprintf(o.Err, "warning: deployment config %q does not have a volume named %q\n", dc.Name, v)
			}
		}
		hook.ExecNewPod.Volumes = o.Volumes
	}
	return hook, nil
}
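The lookup-and-warn pattern above (a missing container or volume produces a warning rather than an error) can be isolated into a small helper. A minimal, self-contained sketch; warnMissing is a hypothetical helper, not part of the origin code:

package main

import (
	"fmt"
	"io"
	"os"
)

// warnMissing writes a warning for every name in refs that does not appear in
// available; like lifecycleHook above, it never fails the operation.
func warnMissing(w io.Writer, kind string, available, refs []string) {
	known := make(map[string]struct{}, len(available))
	for _, name := range available {
		known[name] = struct{}{}
	}
	for _, ref := range refs {
		if _, ok := known[ref]; !ok {
			fmt.Fprintf(w, "warning: no %s named %q\n", kind, ref)
		}
	}
}

func main() {
	// "cache" is not a known volume, so a warning is printed; "data" passes silently.
	warnMissing(os.Stderr, "volume", []string{"data", "config"}, []string{"data", "cache"})
}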
Example #2
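// Complete resolves the debug options: it splits the command from the resource
// arguments, decides TTY and stdin behavior, locates the pod template of the
// targeted object, and wires up the client configuration used to attach.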
func (o *DebugOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args []string, in io.Reader, out, errout io.Writer) error {
	if i := cmd.ArgsLenAtDash(); i != -1 && i < len(args) {
		o.Command = args[i:]
		args = args[:i]
	}
	resources, envArgs, ok := cmdutil.SplitEnvironmentFromResources(args)
	if !ok {
		return kcmdutil.UsageError(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
	}

	switch {
	case o.ForceTTY && o.NoStdin:
		return kcmdutil.UsageError(cmd, "you may not specify -I and -t together")
	case o.ForceTTY && o.DisableTTY:
		return kcmdutil.UsageError(cmd, "you may not specify -t and -T together")
	case o.ForceTTY:
		o.Attach.TTY = true
	case o.DisableTTY:
		o.Attach.TTY = false
	// don't default TTY to true if a command is passed
	case len(o.Command) > 0:
		o.Attach.TTY = false
		o.Attach.Stdin = false
	default:
		o.Attach.TTY = term.IsTerminal(in)
		glog.V(4).Infof("Defaulting TTY to %t", o.Attach.TTY)
	}
	if o.NoStdin {
		o.Attach.TTY = false
		o.Attach.Stdin = false
	}

	if o.Annotations == nil {
		o.Annotations = make(map[string]string)
	}

	if len(o.Command) == 0 {
		o.Command = []string{"/bin/sh"}
	}

	cmdNamespace, explicit, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object(false)
	b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).
		NamespaceParam(cmdNamespace).DefaultNamespace().
		SingleResourceType().
		ResourceNames("pods", resources...).
		Flatten()
	if len(o.Filename) > 0 {
		b.FilenameParam(explicit, false, o.Filename)
	}

	o.AddEnv, o.RemoveEnv, err = cmdutil.ParseEnv(envArgs, nil)
	if err != nil {
		return err
	}

	one := false
	infos, err := b.Do().IntoSingular(&one).Infos()
	if err != nil {
		return err
	}
	if !one {
		return fmt.Errorf("you must identify a resource with a pod template to debug")
	}

	template, err := f.ApproximatePodTemplateForObject(infos[0].Object)
	if err != nil && template == nil {
		return fmt.Errorf("cannot debug %s: %v", infos[0].Name, err)
	}
	if err != nil {
		glog.V(4).Infof("Unable to get exact template, but continuing with fallback: %v", err)
	}
	pod := &kapi.Pod{
		ObjectMeta: template.ObjectMeta,
		Spec:       template.Spec,
	}
	pod.Name, pod.Namespace = infos[0].Name, infos[0].Namespace
	o.Attach.Pod = pod

	o.AsNonRoot = !o.AsRoot && cmd.Flag("as-root").Changed

	if len(o.Attach.ContainerName) == 0 && len(pod.Spec.Containers) > 0 {
		glog.V(4).Infof("Defaulting container name to %s", pod.Spec.Containers[0].Name)
		o.Attach.ContainerName = pod.Spec.Containers[0].Name
	}

	o.Annotations[debugPodAnnotationSourceResource] = fmt.Sprintf("%s/%s", infos[0].Mapping.Resource, infos[0].Name)
	o.Annotations[debugPodAnnotationSourceContainer] = o.Attach.ContainerName

	output := kcmdutil.GetFlagString(cmd, "output")
	if len(output) != 0 {
		o.Print = func(pod *kapi.Pod, out io.Writer) error {
			return f.PrintObject(cmd, mapper, pod, out)
		}
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}
	o.Attach.Config = config

	_, kc, err := f.Clients()
	if err != nil {
		return err
	}
	o.Attach.Client = kc
	return nil
}
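The TTY/stdin defaulting in the switch above follows a strict precedence. A simplified, self-contained sketch of that precedence (resolveTTY and attachIO are hypothetical names, and the usage-error cases for conflicting flags are omitted):

package main

import "fmt"

// attachIO is a simplified stand-in for the attach options mutated in Complete.
type attachIO struct {
	TTY   bool
	Stdin bool
}

// resolveTTY mirrors the precedence used above: an explicit -t or -T wins,
// passing a command disables interactivity, otherwise the terminal state of
// stdin decides; -I always forces non-interactive afterwards.
func resolveTTY(forceTTY, disableTTY, noStdin, hasCommand, stdinIsTerminal bool) attachIO {
	a := attachIO{Stdin: true}
	switch {
	case forceTTY:
		a.TTY = true
	case disableTTY:
		a.TTY = false
	case hasCommand:
		a.TTY, a.Stdin = false, false
	default:
		a.TTY = stdinIsTerminal
	}
	if noStdin {
		a.TTY, a.Stdin = false, false
	}
	return a
}

func main() {
	fmt.Printf("%+v\n", resolveTTY(false, false, false, true, true)) // {TTY:false Stdin:false}
}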
Example #3
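// Complete resolves the start-build options: it validates flag combinations,
// resolves the target build config (or build, when listing webhooks), and
// parses the environment variables to pass to the build.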
func (o *StartBuildOptions) Complete(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, cmdFullName string, args []string) error {
	o.In = in
	o.Out = out
	o.ErrOut = cmd.OutOrStderr()
	o.Git = git.NewRepository()
	o.ClientConfig = f.OpenShiftClientConfig
	o.Mapper, _ = f.Object(false)

	webhook := o.FromWebhook
	buildName := o.FromBuild
	fromFile := o.FromFile
	fromDir := o.FromDir
	fromRepo := o.FromRepo
	buildLogLevel := o.LogLevel

	outputFormat := kcmdutil.GetFlagString(cmd, "output")
	if outputFormat != "name" && outputFormat != "" {
		return kcmdutil.UsageError(cmd, "Unsupported output format: %s", outputFormat)
	}
	o.ShortOutput = outputFormat == "name"

	switch {
	case len(webhook) > 0:
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return kcmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		return nil

	case len(args) != 1 && len(buildName) == 0:
		return kcmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag.\nUse \"%s get bc\" to list all available build configs.", cmdFullName)
	}

	if len(buildName) != 0 && (len(fromFile) != 0 || len(fromDir) != 0 || len(fromRepo) != 0) {
		// TODO: we should support this; it should be possible to clone a build to run again with new uploaded artifacts.
		// Doing so requires introducing a new clonebinary endpoint.
		return kcmdutil.UsageError(cmd, "Cannot use '--from-build' flag with binary builds")
	}
	o.AsBinary = len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	o.Client = client

	var (
		name     = buildName
		resource = buildapi.Resource("builds")
	)

	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object(false)
		resource, name, err = cmdutil.ResolveResource(buildapi.Resource("buildconfigs"), args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case buildapi.Resource("buildconfigs"):
			// no special handling required
		case buildapi.Resource("builds"):
			if len(o.ListWebhooks) == 0 {
				return fmt.Errorf("use --from-build to rerun your builds")
			}
		default:
			return fmt.Errorf("invalid resource provided: %v", resource)
		}
	}
	// when listing webhooks, allow --from-build to look up a build config
	if resource == buildapi.Resource("builds") && len(o.ListWebhooks) > 0 {
		build, err := client.Builds(namespace).Get(name)
		if err != nil {
			return err
		}
		ref := build.Status.Config
		if ref == nil {
			return fmt.Errorf("the provided Build %q was not created from a BuildConfig and cannot have webhooks", name)
		}
		if len(ref.Namespace) > 0 {
			namespace = ref.Namespace
		}
		name = ref.Name
	}

	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}

	o.Namespace = namespace
	o.Name = name

	env, _, err := cmdutil.ParseEnv(o.Env, in)
	if err != nil {
		return err
	}
	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}
	o.EnvVar = env

	return nil
}
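cmdutil.ParseEnv is used in each of these Complete/Run functions to turn KEY=VALUE arguments into environment variables. A rough, self-contained approximation of that behavior (splitEnvArgs and envVar are hypothetical names; the real parser also supports reading values from stdin):

package main

import (
	"fmt"
	"strings"
)

type envVar struct {
	Name, Value string
}

// splitEnvArgs approximates cmdutil.ParseEnv: KEY=VALUE entries become
// variables to set, entries ending in "-" become names to remove, and
// anything else is rejected.
func splitEnvArgs(args []string) (set []envVar, remove []string, err error) {
	for _, arg := range args {
		switch {
		case strings.Contains(arg, "="):
			parts := strings.SplitN(arg, "=", 2)
			set = append(set, envVar{Name: parts[0], Value: parts[1]})
		case strings.HasSuffix(arg, "-"):
			remove = append(remove, strings.TrimSuffix(arg, "-"))
		default:
			return nil, nil, fmt.Errorf("invalid env argument %q, expected KEY=VALUE or KEY-", arg)
		}
	}
	return set, remove, nil
}

func main() {
	set, remove, _ := splitEnvArgs([]string{"BUILD_LOGLEVEL=5", "DEBUG-"})
	fmt.Println(set, remove) // [{BUILD_LOGLEVEL 5}] [DEBUG]
}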
Example #4
File: env.go Project: richm/origin
// RunEnv contains all the necessary functionality for the OpenShift cli env command
// TODO: refactor to share the common "patch resource" pattern of probe
func RunEnv(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, args []string, envParams, filenames []string) error {
	resources, envArgs, ok := cmdutil.SplitEnvironmentFromResources(args)
	if !ok {
		return kcmdutil.UsageError(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
	}
	if len(filenames) == 0 && len(resources) < 1 {
		return kcmdutil.UsageError(cmd, "one or more resources must be specified as <resource> <name> or <resource>/<name>")
	}

	containerMatch := kcmdutil.GetFlagString(cmd, "containers")
	list := kcmdutil.GetFlagBool(cmd, "list")
	selector := kcmdutil.GetFlagString(cmd, "selector")
	all := kcmdutil.GetFlagBool(cmd, "all")
	//overwrite := kcmdutil.GetFlagBool(cmd, "overwrite")
	resourceVersion := kcmdutil.GetFlagString(cmd, "resource-version")
	outputFormat := kcmdutil.GetFlagString(cmd, "output")

	if list && len(outputFormat) > 0 {
		return kcmdutil.UsageError(cmd, "--list and --output may not be specified together")
	}

	clientConfig, err := f.ClientConfig()
	if err != nil {
		return err
	}

	cmdNamespace, explicit, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	env, remove, err := cmdutil.ParseEnv(append(envParams, envArgs...), in)
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(explicit, filenames...).
		SelectorParam(selector).
		ResourceTypeOrNameArgs(all, resources...).
		Flatten()

	one := false
	infos, err := b.Do().IntoSingular(&one).Infos()
	if err != nil {
		return err
	}

	// only apply resource version locking on a single resource
	if !one && len(resourceVersion) > 0 {
		return kcmdutil.UsageError(cmd, "--resource-version may only be used with a single resource")
	}
	// Keep a copy of the original objects prior to updating their environment.
	// Used in constructing the patch(es) that will be applied on the server.
	oldObjects, err := resource.AsVersionedObjects(infos, clientConfig.GroupVersion.String(), kapi.Codecs.LegacyCodec(*clientConfig.GroupVersion))
	if err != nil {
		return err
	}
	if len(oldObjects) != len(infos) {
		return fmt.Errorf("could not convert all objects to API version %q", clientConfig.GroupVersion)
	}
	oldData := make([][]byte, len(infos))
	for i := range oldObjects {
		old, err := json.Marshal(oldObjects[i])
		if err != nil {
			return err
		}
		oldData[i] = old
	}

	skipped := 0
	for _, info := range infos {
		ok, err := f.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error {
			containers, _ := selectContainers(spec.Containers, containerMatch)
			if len(containers) == 0 {
				fmt.Fprintf(cmd.Out(), "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, containerMatch)
				return nil
			}
			for _, c := range containers {
				c.Env = updateEnv(c.Env, env, remove)

				if list {
					fmt.Fprintf(out, "# %s %s, container %s\n", info.Mapping.Resource, info.Name, c.Name)
					for _, env := range c.Env {
						// if env.ValueFrom != nil && env.ValueFrom.FieldRef != nil {
						// 	fmt.Fprintf(cmd.Out(), "%s= # calculated from pod %s %s\n", env.Name, env.ValueFrom.FieldRef.FieldPath, env.ValueFrom.FieldRef.APIVersion)
						// 	continue
						// }
						fmt.Fprintf(out, "%s=%s\n", env.Name, env.Value)

					}
				}
			}
			return nil
		})
		if !ok {
			skipped++
			continue
		}
		if err != nil {
			fmt.Fprintf(cmd.Out(), "error: %s/%s %v\n", info.Mapping.Resource, info.Name, err)
			continue
		}
	}
	if one && skipped == len(infos) {
		return fmt.Errorf("%s/%s is not a pod or does not have a pod template", infos[0].Mapping.Resource, infos[0].Name)
	}

	if list {
		return nil
	}

	if len(outputFormat) != 0 {
		outputVersion, err := kcmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}
		objects, err := resource.AsVersionedObjects(infos, outputVersion.String(), kapi.Codecs.LegacyCodec(outputVersion))
		if err != nil {
			return err
		}
		if len(objects) != len(infos) {
			return fmt.Errorf("could not convert all objects to API version %q", outputVersion)
		}
		p, _, err := kubectl.GetPrinter(outputFormat, "")
		if err != nil {
			return err
		}
		for _, object := range objects {
			if err := p.PrintObj(object, out); err != nil {
				return err
			}
		}
		return nil
	}

	objects, err := resource.AsVersionedObjects(infos, clientConfig.GroupVersion.String(), kapi.Codecs.LegacyCodec(*clientConfig.GroupVersion))
	if err != nil {
		return err
	}
	if len(objects) != len(infos) {
		return fmt.Errorf("could not convert all objects to API version %q", clientConfig.GroupVersion)
	}

	failed := false
	for i, info := range infos {
		newData, err := json.Marshal(objects[i])
		if err != nil {
			return err
		}
		patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData[i], newData, objects[i])
		if err != nil {
			return err
		}
		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, kapi.StrategicMergePatchType, patchBytes)
		if err != nil {
			handlePodUpdateError(cmd.Out(), err, "environment variables")
			failed = true
			continue
		}
		info.Refresh(obj, true)

		shortOutput := kcmdutil.GetFlagString(cmd, "output") == "name"
		kcmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "updated")
	}
	if failed {
		return cmdutil.ErrExit
	}
	return nil
}
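updateEnv is called above but not shown in these excerpts. A minimal sketch of the merge semantics it appears to implement (mergeEnv and envVar are hypothetical names; assumed behavior: new variables overwrite entries with the same name or are appended, and removed names are dropped):

package main

import "fmt"

type envVar struct {
	Name, Value string
}

// mergeEnv is a hypothetical stand-in for updateEnv: variables in set replace
// existing entries with the same name (or are appended at the end), and names
// listed in remove are dropped.
func mergeEnv(existing, set []envVar, remove []string) []envVar {
	removed := make(map[string]struct{}, len(remove))
	for _, name := range remove {
		removed[name] = struct{}{}
	}
	setByName := make(map[string]string, len(set))
	for _, v := range set {
		setByName[v.Name] = v.Value
	}
	var out []envVar
	seen := map[string]struct{}{}
	for _, v := range existing {
		if _, drop := removed[v.Name]; drop {
			continue
		}
		if value, ok := setByName[v.Name]; ok {
			v.Value = value
		}
		seen[v.Name] = struct{}{}
		out = append(out, v)
	}
	for _, v := range set {
		if _, ok := seen[v.Name]; !ok {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	existing := []envVar{{Name: "A", Value: "1"}, {Name: "B", Value: "2"}}
	fmt.Println(mergeEnv(existing, []envVar{{Name: "B", Value: "3"}, {Name: "C", Value: "4"}}, []string{"A"}))
	// [{B 3} {C 4}]
}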
Example #5
// RunStartBuild contains all the necessary functionality for the OpenShift cli start-build command
func RunStartBuild(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, envParams []string, args []string, webhooks util.StringFlag) error {
	webhook := kcmdutil.GetFlagString(cmd, "from-webhook")
	buildName := kcmdutil.GetFlagString(cmd, "from-build")
	follow := kcmdutil.GetFlagBool(cmd, "follow")
	commit := kcmdutil.GetFlagString(cmd, "commit")
	waitForComplete := kcmdutil.GetFlagBool(cmd, "wait")
	fromFile := kcmdutil.GetFlagString(cmd, "from-file")
	fromDir := kcmdutil.GetFlagString(cmd, "from-dir")
	fromRepo := kcmdutil.GetFlagString(cmd, "from-repo")
	buildLogLevel := kcmdutil.GetFlagString(cmd, "build-loglevel")

	switch {
	case len(webhook) > 0:
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return kcmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		path := kcmdutil.GetFlagString(cmd, "git-repository")
		postReceivePath := kcmdutil.GetFlagString(cmd, "git-post-receive")
		repo := git.NewRepository()
		return RunStartBuildWebHook(f, out, webhook, path, postReceivePath, repo)
	case len(args) != 1 && len(buildName) == 0:
		return kcmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	var (
		name     = buildName
		resource = "builds"
	)

	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object()
		resource, name, err = cmdutil.ResolveResource("buildconfigs", args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case "buildconfigs":
			// no special handling required
		case "builds":
			return fmt.Errorf("use --from-build to rerun your builds")
		default:
			return fmt.Errorf("invalid resource provided: %s", resource)
		}
	}
	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}

	if webhooks.Provided() {
		return RunListBuildWebHooks(f, out, cmd.Out(), name, resource, webhooks.String())
	}

	client, _, err := f.Clients()
	if err != nil {
		return err
	}

	env, _, err := cmdutil.ParseEnv(envParams, in)
	if err != nil {
		return err
	}

	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}

	request := &buildapi.BuildRequest{
		ObjectMeta: kapi.ObjectMeta{Name: name},
	}
	if len(env) > 0 {
		request.Env = env
	}
	if len(commit) > 0 {
		request.Revision = &buildapi.SourceRevision{
			Git: &buildapi.GitSourceRevision{
				Commit: commit,
			},
		}
	}

	git := git.NewRepository()

	var newBuild *buildapi.Build
	switch {
	case len(args) > 0 && (len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0):
		request := &buildapi.BinaryBuildRequestOptions{
			ObjectMeta: kapi.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
			Commit: commit,
		}
		if len(env) > 0 {
			fmt.Fprintf(cmd.Out(), "WARNING: Specifying environment variables with binary builds is not supported.\n")
		}
		if newBuild, err = streamPathToBuild(git, in, cmd.Out(), client.BuildConfigs(namespace), fromDir, fromFile, fromRepo, request); err != nil {
			return err
		}
	case resource == "builds":
		if newBuild, err = client.Builds(namespace).Clone(request); err != nil {
			return err
		}
	case resource == "buildconfigs":
		if newBuild, err = client.BuildConfigs(namespace).Instantiate(request); err != nil {
			return err
		}
	default:
		return fmt.Errorf("invalid resource provided: %s", resource)
	}

	fmt.Fprintln(out, newBuild.Name)
	// mapper, typer := f.Object()
	// resourceMapper := &resource.Mapper{ObjectTyper: typer, RESTMapper: mapper, ClientMapper: f.ClientMapperForCommand()}
	// info, err := resourceMapper.InfoForObject(newBuild)
	// if err != nil {
	// 	return err
	// }
	// shortOutput := kcmdutil.GetFlagString(cmd, "output") == "name"
	// kcmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "started")

	var (
		wg      sync.WaitGroup
		exitErr error
	)

	// Wait for the build to complete
	if waitForComplete {
		wg.Add(1)
		go func() {
			defer wg.Done()
			exitErr = WaitForBuildComplete(client.Builds(namespace), newBuild.Name)
		}()
	}

	// Stream the logs from the build
	if follow {
		wg.Add(1)
		go func() {
			// if the --wait option is set, don't wait for logs to finish streaming,
			// but do wait for the build to reach its final state
			if waitForComplete {
				wg.Done()
			} else {
				defer wg.Done()
			}
			opts := buildapi.BuildLogOptions{
				Follow: true,
				NoWait: false,
			}
			for {
				rd, err := client.BuildLogs(namespace).Get(newBuild.Name, opts).Stream()
				if err != nil {
					// if the --wait option is set, retry the connection to build logs
					// when we hit the timeout.
					if waitForComplete && errors.IsTimeoutErr(err) {
						continue
					}
					fmt.Fprintf(cmd.Out(), "error getting logs: %v\n", err)
					return
				}
				defer rd.Close()
				if _, err = io.Copy(out, rd); err != nil {
					fmt.Fprintf(cmd.Out(), "error streaming logs: %v\n", err)
				}
				break
			}
		}()
	}

	wg.Wait()

	return exitErr
}
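The wait/follow coordination above uses a sync.WaitGroup so that --follow alone blocks on log streaming but --wait takes over when both flags are set. A compact, self-contained sketch of that pattern, with the build-specific calls replaced by placeholders:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	waitForComplete, follow := true, true

	var (
		wg      sync.WaitGroup
		exitErr error
	)

	if waitForComplete {
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(50 * time.Millisecond) // placeholder for WaitForBuildComplete
			exitErr = nil
		}()
	}

	if follow {
		wg.Add(1)
		go func() {
			// when also waiting for completion, release the WaitGroup immediately
			// so log streaming does not gate the command's exit
			if waitForComplete {
				wg.Done()
			} else {
				defer wg.Done()
			}
			fmt.Println("streaming logs...") // placeholder for copying build logs to out
		}()
	}

	wg.Wait()
	fmt.Println("exit:", exitErr)
}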
Example #6
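// Complete resolves the start-build options from flags and arguments,
// resolving the target build config (or build, when listing webhooks) and
// parsing the environment variables to pass to the build.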
func (o *StartBuildOptions) Complete(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, args []string) error {
	o.In = in
	o.Out = out
	o.ErrOut = cmd.Out()
	o.Git = git.NewRepository()
	o.ClientConfig = f.OpenShiftClientConfig

	webhook := o.FromWebhook
	buildName := o.FromBuild
	fromFile := o.FromFile
	fromDir := o.FromDir
	fromRepo := o.FromRepo
	buildLogLevel := o.LogLevel

	switch {
	case len(webhook) > 0:
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return kcmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		return nil

	case len(args) != 1 && len(buildName) == 0:
		return kcmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag")
	}

	o.AsBinary = len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	o.Client = client

	var (
		name     = buildName
		resource = buildapi.Resource("builds")
	)

	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object(false)
		resource, name, err = cmdutil.ResolveResource(buildapi.Resource("buildconfigs"), args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case buildapi.Resource("buildconfigs"):
			// no special handling required
		case buildapi.Resource("builds"):
			if len(o.ListWebhooks) == 0 {
				return fmt.Errorf("use --from-build to rerun your builds")
			}
		default:
			return fmt.Errorf("invalid resource provided: %v", resource)
		}
	}
	// when listing webhooks, allow --from-build to look up a build config
	if resource == buildapi.Resource("builds") && len(o.ListWebhooks) > 0 {
		build, err := client.Builds(namespace).Get(name)
		if err != nil {
			return err
		}
		ref := build.Status.Config
		if ref == nil {
			return fmt.Errorf("the provided Build %q was not created from a BuildConfig and cannot have webhooks", name)
		}
		if len(ref.Namespace) > 0 {
			namespace = ref.Namespace
		}
		name = ref.Name
	}

	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}

	o.Namespace = namespace
	o.Name = name

	env, _, err := cmdutil.ParseEnv(o.Env, in)
	if err != nil {
		return err
	}
	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}
	o.EnvVar = env

	return nil
}
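cmdutil.ResolveResource is what turns an argument such as "bc/myapp" or a bare name into the (resource, name) pair used above. A rough, self-contained approximation (resolveResource is a hypothetical name; the real helper also consults the RESTMapper to expand aliases like "bc"):

package main

import (
	"fmt"
	"strings"
)

// resolveResource is a hypothetical approximation of cmdutil.ResolveResource:
// "name" resolves to (defaultResource, name), "type/name" is split into its
// two parts, and anything else is rejected.
func resolveResource(defaultResource, arg string) (resource, name string, err error) {
	parts := strings.Split(arg, "/")
	switch len(parts) {
	case 1:
		return defaultResource, parts[0], nil
	case 2:
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("invalid resource %q, expected NAME or TYPE/NAME", arg)
	}
}

func main() {
	fmt.Println(resolveResource("buildconfigs", "myapp"))
	fmt.Println(resolveResource("buildconfigs", "builds/myapp-1"))
}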
Example #7
// RunEnv contains all the necessary functionality for the OpenShift cli env command
// TODO: refactor to share the common "patch resource" pattern of probe
func RunEnv(f *clientcmd.Factory, in io.Reader, out, errout io.Writer, cmd *cobra.Command, args []string, envParams, filenames []string) error {
	resources, envArgs, ok := cmdutil.SplitEnvironmentFromResources(args)
	if !ok {
		return kcmdutil.UsageError(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
	}
	if len(filenames) == 0 && len(resources) < 1 {
		return kcmdutil.UsageError(cmd, "one or more resources must be specified as <resource> <name> or <resource>/<name>")
	}

	containerMatch := kcmdutil.GetFlagString(cmd, "containers")
	list := kcmdutil.GetFlagBool(cmd, "list")
	resolve := kcmdutil.GetFlagBool(cmd, "resolve")
	selector := kcmdutil.GetFlagString(cmd, "selector")
	all := kcmdutil.GetFlagBool(cmd, "all")
	overwrite := kcmdutil.GetFlagBool(cmd, "overwrite")
	resourceVersion := kcmdutil.GetFlagString(cmd, "resource-version")
	outputFormat := kcmdutil.GetFlagString(cmd, "output")
	from := kcmdutil.GetFlagString(cmd, "from")
	prefix := kcmdutil.GetFlagString(cmd, "prefix")

	if list && len(outputFormat) > 0 {
		return kcmdutil.UsageError(cmd, "--list and --output may not be specified together")
	}

	clientConfig, err := f.ClientConfig()
	if err != nil {
		return err
	}

	cmdNamespace, explicit, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	cmdutil.WarnAboutCommaSeparation(errout, envParams, "--env")

	env, remove, err := cmdutil.ParseEnv(append(envParams, envArgs...), in)
	if err != nil {
		return err
	}

	if len(from) != 0 {
		mapper, typer := f.Object(false)
		b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).
			ContinueOnError().
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(explicit, false, filenames...).
			SelectorParam(selector).
			ResourceTypeOrNameArgs(all, from).
			Flatten()

		one := false
		infos, err := b.Do().IntoSingular(&one).Infos()
		if err != nil {
			return err
		}

		for _, info := range infos {
			switch from := info.Object.(type) {
			case *kapi.Secret:
				for key := range from.Data {
					envVar := kapi.EnvVar{
						Name: keyToEnvName(key),
						ValueFrom: &kapi.EnvVarSource{
							SecretKeyRef: &kapi.SecretKeySelector{
								LocalObjectReference: kapi.LocalObjectReference{
									Name: from.Name,
								},
								Key: key,
							},
						},
					}
					env = append(env, envVar)
				}
			case *kapi.ConfigMap:
				for key := range from.Data {
					envVar := kapi.EnvVar{
						Name: keyToEnvName(key),
						ValueFrom: &kapi.EnvVarSource{
							ConfigMapKeyRef: &kapi.ConfigMapKeySelector{
								LocalObjectReference: kapi.LocalObjectReference{
									Name: from.Name,
								},
								Key: key,
							},
						},
					}
					env = append(env, envVar)
				}
			default:
				return fmt.Errorf("unsupported resource specified in --from")
			}
		}
	}

	if len(prefix) != 0 {
		for i := range env {
			env[i].Name = fmt.Sprintf("%s%s", prefix, env[i].Name)
		}
	}

	mapper, typer := f.Object(false)
	b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(explicit, false, filenames...).
		SelectorParam(selector).
		ResourceTypeOrNameArgs(all, resources...).
		Flatten()

	one := false
	infos, err := b.Do().IntoSingular(&one).Infos()
	if err != nil {
		return err
	}

	// only apply resource version locking on a single resource
	if !one && len(resourceVersion) > 0 {
		return kcmdutil.UsageError(cmd, "--resource-version may only be used with a single resource")
	}
	// Keep a copy of the original objects prior to updating their environment.
	// Used in constructing the patch(es) that will be applied on the server.
	gv := *clientConfig.GroupVersion
	oldObjects, err := resource.AsVersionedObjects(infos, gv, kapi.Codecs.LegacyCodec(gv))
	if err != nil {
		return err
	}
	if len(oldObjects) != len(infos) {
		return fmt.Errorf("could not convert all objects to API version %q", clientConfig.GroupVersion)
	}
	oldData := make([][]byte, len(infos))
	for i := range oldObjects {
		old, err := json.Marshal(oldObjects[i])
		if err != nil {
			return err
		}
		oldData[i] = old
	}

	skipped := 0
	errored := []*resource.Info{}
	for _, info := range infos {
		ok, err := f.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error {
			resolutionErrorsEncountered := false
			containers, _ := selectContainers(spec.Containers, containerMatch)
			if len(containers) == 0 {
				fmt.Fprintf(errout, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, containerMatch)
				return nil
			}
			for _, c := range containers {
				if !overwrite {
					if err := validateNoOverwrites(c.Env, env); err != nil {
						errored = append(errored, info)
						return err
					}
				}

				c.Env = updateEnv(c.Env, env, remove)

				if list {
					resolveErrors := map[string][]string{}
					store := newResourceStore()

					fmt.Fprintf(out, "# %s %s, container %s\n", info.Mapping.Resource, info.Name, c.Name)
					for _, env := range c.Env {
						// Print the simple value
						if env.ValueFrom == nil {
							fmt.Fprintf(out, "%s=%s\n", env.Name, env.Value)
							continue
						}

						// Print the reference version
						if !resolve {
							fmt.Fprintf(out, "# %s from %s\n", env.Name, getEnvVarRefString(env.ValueFrom))
							continue
						}

						value, err := getEnvVarRefValue(f, store, env.ValueFrom, info.Object, c)
						// Print the resolved value
						if err == nil {
							fmt.Fprintf(out, "%s=%s\n", env.Name, value)
							continue
						}

						// Print the reference version and save the resolve error
						fmt.Fprintf(out, "# %s from %s\n", env.Name, getEnvVarRefString(env.ValueFrom))
						errString := err.Error()
						resolveErrors[errString] = append(resolveErrors[errString], env.Name)
						resolutionErrorsEncountered = true
					}

					// Print any resolution errors
					errs := []string{}
					for err, vars := range resolveErrors {
						sort.Strings(vars)
						errs = append(errs, fmt.Sprintf("error retrieving reference for %s: %v", strings.Join(vars, ", "), err))
					}
					sort.Strings(errs)
					for _, err := range errs {
						fmt.Fprintln(errout, err)
					}
				}
			}
			if resolutionErrorsEncountered {
				errored = append(errored, info)
				return errors.New("failed to retrieve valueFrom references")
			}
			return nil
		})
		if !ok {
			// This is a fallback function for objects that don't have a pod spec.
			ok, err = f.UpdateObjectEnvironment(info.Object, func(vars *[]kapi.EnvVar) error {
				if vars == nil {
					return fmt.Errorf("no environment variables provided")
				}
				if !overwrite {
					if err := validateNoOverwrites(*vars, env); err != nil {
						errored = append(errored, info)
						return err
					}
				}
				*vars = updateEnv(*vars, env, remove)
				if list {
					fmt.Fprintf(out, "# %s %s\n", info.Mapping.Resource, info.Name)
					for _, env := range *vars {
						fmt.Fprintf(out, "%s=%s\n", env.Name, env.Value)
					}
				}
				return nil
			})
			if !ok {
				skipped++
				continue
			}
		}
		if err != nil {
			fmt.Fprintf(errout, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, err)
			continue
		}
	}
	if one && skipped == len(infos) {
		return fmt.Errorf("%s/%s is not a pod or does not have a pod template", infos[0].Mapping.Resource, infos[0].Name)
	}
	if len(errored) == len(infos) {
		return cmdutil.ErrExit
	}

	if list {
		return nil
	}

	if len(outputFormat) > 0 {
		return f.PrintResourceInfos(cmd, infos, out)
	}

	objects, err := resource.AsVersionedObjects(infos, gv, kapi.Codecs.LegacyCodec(gv))
	if err != nil {
		return err
	}
	if len(objects) != len(infos) {
		return fmt.Errorf("could not convert all objects to API version %q", clientConfig.GroupVersion)
	}

	failed := false
updates:
	for i, info := range infos {
		for _, erroredInfo := range errored {
			if info == erroredInfo {
				continue updates
			}
		}
		newData, err := json.Marshal(objects[i])
		if err != nil {
			return err
		}
		patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData[i], newData, objects[i])
		if err != nil {
			return err
		}
		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, kapi.StrategicMergePatchType, patchBytes)
		if err != nil {
			handlePodUpdateError(errout, err, "environment variables")
			failed = true
			continue
		}
		info.Refresh(obj, true)

		// make sure arguments to set or replace environment variables are set
		// before returning a successful message
		if len(env) == 0 && len(envArgs) == 0 {
			return fmt.Errorf("at least one environment variable must be provided")
		}

		shortOutput := kcmdutil.GetFlagString(cmd, "output") == "name"
		kcmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, false, "updated")
	}
	if failed {
		return cmdutil.ErrExit
	}
	return nil
}
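keyToEnvName is referenced in the --from handling above but not shown. A plausible, self-contained sketch of such a conversion (toEnvName is a hypothetical name; assumed behavior: characters that are invalid in environment variable names are replaced with underscores and the result is upper-cased):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var invalidEnvChars = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// toEnvName is a hypothetical stand-in for keyToEnvName: it maps a Secret or
// ConfigMap key such as "tls.crt" to an environment-variable-friendly name
// such as "TLS_CRT".
func toEnvName(key string) string {
	return strings.ToUpper(invalidEnvChars.ReplaceAllString(key, "_"))
}

func main() {
	fmt.Println(toEnvName("tls.crt"))       // TLS_CRT
	fmt.Println(toEnvName("my-config.yml")) // MY_CONFIG_YML
}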