// PullPolicyFromString converts a string into an api.PullPolicy. It returns
// an error if the string does not match a pull policy in ValidPullPolicies.
func PullPolicyFromString(ppStr string) (api.PullPolicy, error) {
	candidatePP := api.PullPolicy(ppStr)
	if _, ok := ValidPullPolicies[candidatePP]; !ok {
		return emptyPullPolicy, ErrInvalidPullPolicy{str: ppStr}
	}
	return candidatePP, nil
}
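// Usage sketch (assumption, not part of the original source): resolving a
// user-supplied pull-policy string with PullPolicyFromString and falling back
// to IfNotPresent when the input is empty. The helper name resolvePullPolicy
// and the empty-string default are illustrative choices, not the package's API.
func resolvePullPolicy(raw string) (api.PullPolicy, error) {
	if raw == "" {
		// Assumed default for an unset flag; callers may prefer to error instead.
		return api.PullIfNotPresent, nil
	}
	pp, err := PullPolicyFromString(raw)
	if err != nil {
		// err is an ErrInvalidPullPolicy naming the rejected string.
		return emptyPullPolicy, err
	}
	return pp, nil
}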
func verifyImagePullPolicy(cmd *cobra.Command) error {
	pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy")
	switch api.PullPolicy(pullPolicy) {
	case api.PullAlways, api.PullIfNotPresent, api.PullNever:
		return nil
	case "":
		return nil
	default:
		return cmdutil.UsageError(cmd, fmt.Sprintf("invalid image pull policy: %s", pullPolicy))
	}
}
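// Wiring sketch (assumption, not from the original source): registering the
// image-pull-policy flag on a cobra command and running verifyImagePullPolicy
// before the command body. The command name "run" and the flag help text are
// illustrative; only the flag name matches the function above.
func newRunLikeCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "run",
		RunE: func(cmd *cobra.Command, args []string) error {
			if err := verifyImagePullPolicy(cmd); err != nil {
				return err
			}
			// ... remainder of the command ...
			return nil
		},
	}
	cmd.Flags().String("image-pull-policy", "", "Container image pull policy: Always, Never, or IfNotPresent")
	return cmd
}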
func Containers(userContainers []interface{}) []api.Container {
	if len(userContainers) == 0 {
		return nil
	}
	var containers []api.Container
	for _, c := range userContainers {
		userContainer := c.(map[string]interface{})
		container := api.Container{
			Image: userContainer["image"].(string),
			Name:  userContainer["name"].(string),
		}
		if _, ok := userContainer["args"]; ok {
			container.Args = convertListToStringArray(userContainer["args"].([]interface{}))
		}
		if _, ok := userContainer["command"]; ok {
			container.Command = convertListToStringArray(userContainer["command"].([]interface{}))
		}
		if _, ok := userContainer["working_dir"]; ok {
			container.WorkingDir = userContainer["working_dir"].(string)
		}
		if _, ok := userContainer["ports"]; ok {
			container.Ports = ContainerPorts(userContainer["ports"].([]interface{}))
		}
		if _, ok := userContainer["env"]; ok {
			container.Env = EnvVar(userContainer["env"].([]interface{}))
		}
		if _, ok := userContainer["volume_mounts"]; ok {
			container.VolumeMounts = VolumeMounts(userContainer["volume_mounts"].([]interface{}))
		}
		if _, ok := userContainer["termination_message_path"]; ok {
			container.TerminationMessagePath = userContainer["termination_message_path"].(string)
		}
		if _, ok := userContainer["image_pull_policy"]; ok {
			container.ImagePullPolicy = api.PullPolicy(userContainer["image_pull_policy"].(string))
		}
		// TODO: populate these fields:
		// resources
		// liveness_probe
		// readiness_probe
		// lifecycle
		// security_context
		containers = append(containers, container)
	}
	return containers
}
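// Input-shape sketch (assumption): the kind of []interface{} value Containers
// decodes, one map per container with the keys read above. The image, name,
// and argument values are made up for illustration.
func exampleContainers() []api.Container {
	userContainers := []interface{}{
		map[string]interface{}{
			"name":              "web",
			"image":             "nginx:1.11",
			"image_pull_policy": "IfNotPresent",
			"command":           []interface{}{"nginx"},
			"args":              []interface{}{"-g", "daemon off;"},
		},
	}
	return Containers(userContainers)
}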
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *RollingUpdateOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	err := validateArguments(cmd, options.Filenames, args)
	if err != nil {
		return err
	}
	deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key")
	filename := ""
	image := cmdutil.GetFlagString(cmd, "image")
	pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy")
	oldName := args[0]
	rollback := cmdutil.GetFlagBool(cmd, "rollback")
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")
	outputFormat := cmdutil.GetFlagString(cmd, "output")
	container := cmdutil.GetFlagString(cmd, "container")

	if len(options.Filenames) > 0 {
		filename = options.Filenames[0]
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(client, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(client, newRc, oldName)
	}

	var keepOldName bool
	var replicasDefaulted bool

	mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd))

	if len(filename) != 0 {
		schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir"))
		if err != nil {
			return err
		}

		request := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, false, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if gvk, err := typer.ObjectKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvk)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}

	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for
	// same-image updates.
	if len(image) != 0 {
		codec := api.Codecs.LegacyCodec(client.APIVersion())
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			if inProgressImage := newRc.Spec.Template.Spec.Containers[0].Image; inProgressImage != image {
				return cmdutil.UsageError(cmd, "Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage)
			}
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			config := &kubectl.NewControllerConfig{
				Namespace:     cmdNamespace,
				OldName:       oldName,
				NewName:       newName,
				Image:         image,
				Container:     container,
				DeploymentKey: deploymentKey,
			}
			if oldRc.Spec.Template.Spec.Containers[0].Image == image {
				if len(pullPolicy) == 0 {
					return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image")
				}
				config.PullPolicy = api.PullPolicy(pullPolicy)
			}
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, config)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, codec)
		if err != nil {
			return err
		}
		// If new image is same as old, the hash may not be distinct, so add a suffix.
		oldHash += "-orig"
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}

	if rollback {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}

		if newRc == nil {
			return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName)
		}
	}

	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}

	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)

	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil {
				return err
			}
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		MaxUnavailable: intstr.FromInt(0),
		MaxSurge:       intstr.FromInt(1),
	}
	if rollback {
		err = kubectl.AbortRollingUpdate(config)
		if err != nil {
			return err
		}
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}

	message := "rolling updated"
	if keepOldName {
		newRc.Name = oldName
	} else {
		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
	}
	newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name)
	if err != nil {
		return err
	}
	if outputFormat != "" {
		return f.PrintObject(cmd, mapper, newRc, out)
	}
	kind, err := api.Scheme.ObjectKind(newRc)
	if err != nil {
		return err
	}
	_, res := meta.KindToResource(kind)
	cmdutil.PrintSuccess(mapper, false, out, res.Resource, oldName, message)
	return nil
}
func convert_v1beta3_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*Container))(in)
	}
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]api.ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := convert_v1beta3_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
				return err
			}
		}
	}
	if in.Env != nil {
		out.Env = make([]api.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := convert_v1beta3_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
				return err
			}
		}
	}
	if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := convert_v1beta3_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
				return err
			}
		}
	}
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(api.Lifecycle)
		if err := convert_v1beta3_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
	if in.SecurityContext != nil {
		if in.SecurityContext.Capabilities != nil {
			if !reflect.DeepEqual(in.SecurityContext.Capabilities.Add, in.Capabilities.Add) ||
				!reflect.DeepEqual(in.SecurityContext.Capabilities.Drop, in.Capabilities.Drop) {
				return fmt.Errorf("container capability settings do not match security context settings, cannot convert")
			}
		}
		if in.SecurityContext.Privileged != nil {
			if in.Privileged != *in.SecurityContext.Privileged {
				return fmt.Errorf("container privileged settings do not match security context settings, cannot convert")
			}
		}
	}
	if in.SecurityContext != nil {
		out.SecurityContext = new(api.SecurityContext)
		if err := convert_v1beta3_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	out.Stdin = in.Stdin
	out.TTY = in.TTY
	return nil
}
package k8s

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

var (
	emptyPullPolicy = api.PullPolicy("")
	// ValidPullPolicies is the set of pull policies that this package considers valid
	ValidPullPolicies = map[api.PullPolicy]struct{}{
		api.PullAlways:       struct{}{},
		api.PullIfNotPresent: struct{}{},
		api.PullNever:        struct{}{},
	}
)

// ErrInvalidPullPolicy is the error returned when trying to convert an unknown string to an api.PullPolicy
type ErrInvalidPullPolicy struct {
	str string
}

// Error is the error interface implementation
func (e ErrInvalidPullPolicy) Error() string {
	return fmt.Sprintf("%s is an invalid pull policy", e.str)
}
func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
	args, err := getArgs(genericParams)
	if err != nil {
		return nil, err
	}

	envs, err := getEnvs(genericParams)
	if err != nil {
		return nil, err
	}

	params, err := getParams(genericParams)
	if err != nil {
		return nil, err
	}

	name, err := getName(params)
	if err != nil {
		return nil, err
	}

	labels, err := getLabels(params, false, name)
	if err != nil {
		return nil, err
	}

	stdin, err := GetBool(params, "stdin", false)
	if err != nil {
		return nil, err
	}
	leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false)
	if err != nil {
		return nil, err
	}

	tty, err := GetBool(params, "tty", false)
	if err != nil {
		return nil, err
	}

	resourceRequirements, err := HandleResourceRequirements(params)
	if err != nil {
		return nil, err
	}

	restartPolicy := api.RestartPolicy(params["restart"])
	if len(restartPolicy) == 0 {
		restartPolicy = api.RestartPolicyAlways
	}
	// TODO: Figure out why we set ImagePullPolicy here, whether we can make it
	// consistent with the other places imagePullPolicy is set using flag.
	pod := api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:            name,
					Image:           params["image"],
					ImagePullPolicy: api.PullIfNotPresent,
					Stdin:           stdin,
					StdinOnce:       !leaveStdinOpen && stdin,
					TTY:             tty,
					Resources:       resourceRequirements,
				},
			},
			DNSPolicy:     api.DNSClusterFirst,
			RestartPolicy: restartPolicy,
		},
	}
	imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
	if err = updatePodContainers(params, args, envs, imagePullPolicy, &pod.Spec); err != nil {
		return nil, err
	}

	if err := updatePodPorts(params, &pod.Spec); err != nil {
		return nil, err
	}
	return &pod, nil
}
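// Invocation sketch (assumption, not from the original source): driving the
// BasicPod generator with string parameters, including image-pull-policy.
// The parameter values are illustrative; the keys mirror the ones read above,
// and error handling by the caller is elided for brevity.
func examplePodObject() (runtime.Object, error) {
	return BasicPod{}.Generate(map[string]interface{}{
		"name":              "demo",
		"image":             "busybox",
		"image-pull-policy": "Never",
		"restart":           "OnFailure",
	})
}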
func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
	args, err := getArgs(genericParams)
	if err != nil {
		return nil, err
	}

	envs, err := getEnvs(genericParams)
	if err != nil {
		return nil, err
	}

	params, err := getParams(genericParams)
	if err != nil {
		return nil, err
	}

	name, err := getName(params)
	if err != nil {
		return nil, err
	}

	labels, err := getLabels(params, true, name)
	if err != nil {
		return nil, err
	}

	count, err := strconv.Atoi(params["replicas"])
	if err != nil {
		return nil, err
	}

	podSpec, err := makePodSpec(params, name)
	if err != nil {
		return nil, err
	}

	imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
	if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
		return nil, err
	}

	if err := updatePodPorts(params, podSpec); err != nil {
		return nil, err
	}

	// TODO: use versioned types for generators so that we don't need to
	// set default values manually (see issue #17384)
	deployment := extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: extensions.DeploymentSpec{
			Replicas: int32(count),
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: api.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: *podSpec,
			},
		},
	}
	return &deployment, nil
}
func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
	args, err := getArgs(genericParams)
	if err != nil {
		return nil, err
	}

	envs, err := getEnvs(genericParams)
	if err != nil {
		return nil, err
	}

	params, err := getParams(genericParams)
	if err != nil {
		return nil, err
	}

	name, err := getName(params)
	if err != nil {
		return nil, err
	}

	labels, err := getLabels(params, true, name)
	if err != nil {
		return nil, err
	}

	count, err := strconv.Atoi(params["replicas"])
	if err != nil {
		return nil, err
	}

	podSpec, err := makePodSpec(params, name)
	if err != nil {
		return nil, err
	}

	imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
	if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
		return nil, err
	}

	if err := updatePodPorts(params, podSpec); err != nil {
		return nil, err
	}

	controller := api.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(count),
			Selector: labels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: *podSpec,
			},
		},
	}
	return &controller, nil
}
func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
	args, err := getArgs(genericParams)
	if err != nil {
		return nil, err
	}

	envs, err := getEnvs(genericParams)
	if err != nil {
		return nil, err
	}

	params, err := getParams(genericParams)
	if err != nil {
		return nil, err
	}

	name, err := getName(params)
	if err != nil {
		return nil, err
	}

	labels, err := getLabels(params, true, name)
	if err != nil {
		return nil, err
	}

	podSpec, err := makePodSpec(params, name)
	if err != nil {
		return nil, err
	}

	imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
	if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
		return nil, err
	}

	leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false)
	if err != nil {
		return nil, err
	}
	podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin

	if err := updatePodPorts(params, podSpec); err != nil {
		return nil, err
	}

	restartPolicy := api.RestartPolicy(params["restart"])
	if len(restartPolicy) == 0 {
		restartPolicy = api.RestartPolicyNever
	}
	podSpec.RestartPolicy = restartPolicy

	cronJob := batch.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: batch.CronJobSpec{
			Schedule:          params["schedule"],
			ConcurrencyPolicy: batch.AllowConcurrent,
			JobTemplate: batch.JobTemplateSpec{
				Spec: batch.JobSpec{
					Template: api.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: labels,
						},
						Spec: *podSpec,
					},
				},
			},
		},
	}

	return &cronJob, nil
}
func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
	args, err := getArgs(genericParams)
	if err != nil {
		return nil, err
	}

	envs, err := getEnvs(genericParams)
	if err != nil {
		return nil, err
	}

	params, err := getParams(genericParams)
	if err != nil {
		return nil, err
	}

	name, err := getName(params)
	if err != nil {
		return nil, err
	}

	labels, err := getLabels(params, true, name)
	if err != nil {
		return nil, err
	}

	podSpec, err := makePodSpec(params, name)
	if err != nil {
		return nil, err
	}

	imagePullPolicy := api.PullPolicy(params["image-pull-policy"])
	if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil {
		return nil, err
	}

	leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false)
	if err != nil {
		return nil, err
	}
	podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin

	if err := updatePodPorts(params, podSpec); err != nil {
		return nil, err
	}

	restartPolicy := api.RestartPolicy(params["restart"])
	if len(restartPolicy) == 0 {
		restartPolicy = api.RestartPolicyNever
	}
	podSpec.RestartPolicy = restartPolicy

	job := batch.Job{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: batch.JobSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			ManualSelector: newBool(true),
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: labels,
				},
				Spec: *podSpec,
			},
		},
	}

	return &job, nil
}
func writePodContainer(m map[string]interface{}, item *api.Container) error {
	if x, ok := m["name"].(string); ok {
		item.Name = x
	}

	if x, ok := m["image"].(string); ok {
		item.Image = x
	}

	if x, ok := m["image_pull_policy"].(string); ok {
		item.ImagePullPolicy = api.PullPolicy(x)
	}

	if x, ok := m["termination_message_path"].(string); ok {
		item.TerminationMessagePath = x
	}

	if x, ok := m["working_dir"].(string); ok {
		item.WorkingDir = x
	}

	if x, ok := m["command"].([]interface{}); ok {
		for _, y := range x {
			item.Command = append(item.Command, y.(string))
		}
	}

	if x, ok := m["args"].([]interface{}); ok {
		for _, y := range x {
			item.Args = append(item.Args, y.(string))
		}
	}

	if x, ok := m["port"].([]interface{}); ok {
		for _, y := range x {
			ref := api.ContainerPort{}
			writeContainerPort(y.(map[string]interface{}), &ref)
			item.Ports = append(item.Ports, ref)
		}
	}

	if x, ok := m["env"].([]interface{}); ok {
		for _, y := range x {
			ref := api.EnvVar{}
			writeEnvVar(y.(map[string]interface{}), &ref)
			item.Env = append(item.Env, ref)
		}
	}

	if x, ok := m["volume_mount"].([]interface{}); ok {
		for _, y := range x {
			ref := api.VolumeMount{}
			writeVolumeMount(y.(map[string]interface{}), &ref)
			item.VolumeMounts = append(item.VolumeMounts, ref)
		}
	}

	if n, ok := extractSingleMap(m["liveness_probe"]); ok {
		item.LivenessProbe = &api.Probe{}
		writeProbe(n, item.LivenessProbe)
	}

	if n, ok := extractSingleMap(m["readiness_probe"]); ok {
		item.ReadinessProbe = &api.Probe{}
		writeProbe(n, item.ReadinessProbe)
	}

	if n, ok := extractSingleMap(m["resources"]); ok {
		if o, ok := extractSingleMap(n["limits"]); ok {
			item.Resources.Limits = make(api.ResourceList)
			if x, ok := o["cpu"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Limits[api.ResourceCPU] = *q
			}
			if x, ok := o["memory"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Limits[api.ResourceMemory] = *q
			}
		}
		if o, ok := extractSingleMap(n["requests"]); ok {
			item.Resources.Requests = make(api.ResourceList)
			if x, ok := o["cpu"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Requests[api.ResourceCPU] = *q
			}
			if x, ok := o["memory"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Requests[api.ResourceMemory] = *q
			}
		}
	}

	return nil
}
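// Decoding sketch (assumption): the kind of map writePodContainer consumes,
// restricted to keys whose handling is visible above. The container name,
// image, and command values are invented for the example.
func exampleWritePodContainer() (api.Container, error) {
	c := api.Container{}
	m := map[string]interface{}{
		"name":              "cache",
		"image":             "redis:3.0",
		"image_pull_policy": "Always",
		"command":           []interface{}{"redis-server"},
	}
	err := writePodContainer(m, &c)
	return c, err
}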