Example #1
// NewCompose builds a Compose wrapper around a libcompose project parsed from the given compose file.
func NewCompose(dockerHost, composeFilePath, projectName string) (*Compose, error) {
	ctx := project.Context{
		ComposeFiles: []string{composeFilePath},
		ProjectName:  projectName,
	}

	ctx.ResourceLookup = &lookup.FileResourceLookup{}
	ctx.EnvironmentLookup = &lookup.ComposableEnvLookup{
		Lookups: []config.EnvironmentLookup{
			&lookup.OsEnvLookup{},
		},
	}

	prj := project.NewProject(&ctx, nil, nil)

	if err := prj.Parse(); err != nil {
		return nil, errors.Wrap(err, "Failed to parse docker-compose.yml.")
	}

	return &Compose{
		ComposeFilePath: composeFilePath,
		ProjectName:     projectName,
		dockerHost:      dockerHost,
		project:         prj,
	}, nil
}
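A minimal caller sketch for the constructor above; the Docker host, file path and project name are placeholders.

// Hypothetical usage of NewCompose.
func main() {
	compose, err := NewCompose("unix:///var/run/docker.sock", "docker-compose.yml", "demo")
	if err != nil {
		log.Fatalf("failed to build compose wrapper: %v", err)
	}
	fmt.Println("parsed project:", compose.ProjectName)
}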
Example #2
// NewProject creates a Project with the specified context, filling in default lookups, service factory and builder.
func NewProject(context *Context) (*project.Project, error) {
	if context.ConfigLookup == nil {
		context.ConfigLookup = &lookup.FileConfigLookup{}
	}

	if context.EnvironmentLookup == nil {
		context.EnvironmentLookup = &lookup.OsEnvLookup{}
	}

	if context.ServiceFactory == nil {
		context.ServiceFactory = &ServiceFactory{
			context: context,
		}
	}

	if context.Builder == nil {
		context.Builder = NewDaemonBuilder(context)
	}

	p := project.NewProject(&context.Context)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	return p, err
}
Example #3
// setupTestProject builds a minimal ecsProject for tests, wiring the default lookups and a stub CLI context.
func setupTestProject(t *testing.T) *ecsProject {
	envLookup, err := utils.GetDefaultEnvironmentLookup()
	if err != nil {
		t.Fatal("Unexpected error in setting up a project", err)
	}
	resourceLookup, err := utils.GetDefaultResourceLookup()
	if err != nil {
		t.Fatal("Unexpected error in setting up a project", err)
	}

	composeContext := flag.NewFlagSet("ecs-cli", 0)
	composeContext.String(ProjectNameFlag, testProjectName, "")
	parentContext := cli.NewContext(nil, composeContext, nil)
	cliContext := cli.NewContext(nil, nil, parentContext)

	ecsContext := &Context{
		CLIContext: cliContext,
	}
	ecsContext.EnvironmentLookup = envLookup
	ecsContext.ResourceLookup = resourceLookup
	libcomposeProject := project.NewProject(&ecsContext.Context, nil, nil)

	return &ecsProject{
		context: ecsContext,
		Project: *libcomposeProject,
	}
}
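A hypothetical test exercising the helper above; the assertions are only illustrative of what such a test might check.

func TestSetupTestProject(t *testing.T) {
	p := setupTestProject(t)

	if p.context.CLIContext == nil {
		t.Error("expected a CLI context to be set on the project context")
	}
	if p.context.EnvironmentLookup == nil || p.context.ResourceLookup == nil {
		t.Error("expected default environment and resource lookups to be set")
	}
}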
Example #4
// NewProject creates a Project with the specified context.
func NewProject(context *Context, parseOptions *config.ParseOptions) (*project.Project, error) {
	if context.ResourceLookup == nil {
		context.ResourceLookup = &lookup.FileConfigLookup{}
	}

	if context.EnvironmentLookup == nil {
		cwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		context.EnvironmentLookup = &lookup.ComposableEnvLookup{
			Lookups: []config.EnvironmentLookup{
				&lookup.EnvfileLookup{
					Path: filepath.Join(cwd, ".env"),
				},
				&lookup.OsEnvLookup{},
			},
		}
	}

	if context.AuthLookup == nil {
		context.AuthLookup = NewConfigAuthLookup(context)
	}

	if context.ServiceFactory == nil {
		context.ServiceFactory = &ServiceFactory{
			context: context,
		}
	}

	if context.ClientFactory == nil {
		factory, err := project.NewDefaultClientFactory(client.Options{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = factory
	}

	// FIXME(vdemeester) Remove the context duplication ?
	p := project.NewProject(context.ClientFactory, &context.Context, parseOptions)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	return p, err
}
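A hedged caller sketch for this constructor. It only sets the embedded project.Context fields the function relies on; the file name and project name are placeholders.

// Hypothetical caller for the docker NewProject above.
func newDemoProject() (*project.Project, error) {
	return NewProject(&Context{
		Context: project.Context{
			ComposeFiles: []string{"docker-compose.yml"},
			ProjectName:  "demo",
		},
	}, &config.ParseOptions{})
}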
Example #5
// NewProject creates a Rancher-backed Project with the specified context.
func NewProject(context *Context) (*project.Project, error) {
	context.ServiceFactory = &RancherServiceFactory{
		Context: context,
	}

	context.VolumesFactory = &RancherVolumesFactory{
		Context: context,
	}

	if context.Binding != nil {
		bindingBytes, err := json.Marshal(context.Binding)
		if err != nil {
			return nil, err
		}
		context.BindingsBytes = bindingBytes
	}

	if context.BindingsBytes == nil {
		if context.BindingsFile != "" {
			bindingsContent, err := ioutil.ReadFile(context.BindingsFile)
			if err != nil {
				return nil, err
			}
			context.BindingsBytes = bindingsContent
		}
	}

	preProcessServiceMap := preprocess.PreprocessServiceMap(context.BindingsBytes)
	p := project.NewProject(&context.Context, nil, &config.ParseOptions{
		Interpolate: true,
		Validate:    true,
		Preprocess:  preProcessServiceMap,
	})

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	p.Name = context.ProjectName

	context.SidekickInfo = NewSidekickInfo(p)

	return p, err
}
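Note that an explicit Binding always wins over BindingsFile: the file is only read while BindingsBytes is still nil. A hypothetical caller passing bindings via a file might look like this sketch, which sets only the fields the constructor reads.

// Hypothetical caller; the compose file, project name and bindings file are placeholders.
func newStackProject() (*project.Project, error) {
	c := &Context{}
	c.Context.ComposeFiles = []string{"docker-compose.yml"}
	c.Context.ProjectName = "my-stack"
	c.BindingsFile = "bindings.json"
	return NewProject(c)
}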
Example #6
// NewProject creates a Project with the specified context.
func NewProject(context *ctx.Context, parseOptions *config.ParseOptions) (project.APIProject, error) {
	if context.AuthLookup == nil {
		context.AuthLookup = auth.NewConfigLookup(context.ConfigFile)
	}

	if context.ServiceFactory == nil {
		context.ServiceFactory = service.NewFactory(context)
	}

	if context.ClientFactory == nil {
		factory, err := client.NewDefaultFactory(client.Options{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = factory
	}

	if context.NetworksFactory == nil {
		networksFactory := &network.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
		context.NetworksFactory = networksFactory
	}

	if context.VolumesFactory == nil {
		volumesFactory := &volume.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
		context.VolumesFactory = volumesFactory
	}

	// FIXME(vdemeester) Remove the context duplication ?
	runtime := &Project{
		clientFactory: context.ClientFactory,
	}
	p := project.NewProject(&context.Context, runtime, parseOptions)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.LookupConfig(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	return p, err
}
Example #7
// createEnv parses the compose bytes into a libcompose project and creates its services against the given Rancher environment.
func createEnv(rancherUrl, projectName string, composeBytes []byte, rancherComposeMap map[string]rancher.RancherConfig, env *client.Environment) error {
	context := rancher.Context{
		Url:           rancherUrl,
		RancherConfig: rancherComposeMap,
		Uploader:      nil,
	}
	context.ProjectName = projectName
	context.ComposeBytes = composeBytes
	context.ConfigLookup = nil
	context.EnvironmentLookup = &lookup.OsEnvLookup{}
	context.LoggerFactory = logger.NewColorLoggerFactory()
	context.ServiceFactory = &rancher.RancherServiceFactory{
		Context: &context,
	}

	p := project.NewProject(&context.Context)

	err := p.Parse()
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Errorf("Error parsing docker-compose.yml")
		return err
	}

	apiClient, err := client.NewRancherClient(&client.ClientOpts{
		Url:       rancherUrl,
		AccessKey: os.Getenv("CATTLE_ACCESS_KEY"),
		SecretKey: os.Getenv("CATTLE_SECRET_KEY"),
	})
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Error("Error creating Rancher client.")
		return err
	}

	context.Client = apiClient
	context.Environment = env

	context.SidekickInfo = rancher.NewSidekickInfo(p)

	err = p.Create([]string{}...)
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Error("Error while creating project.")
		return err
	}
	return nil
}
Example #8
// NewProject creates a new instance of the ECS Compose Project
func NewProject(context *Context) Project {
	libcomposeProject := project.NewProject(&context.Context, nil, nil)

	p := &ecsProject{
		context: context,
		Project: *libcomposeProject,
	}

	if context.IsService {
		p.entity = NewService(context)
	} else {
		p.entity = NewTask(context)
	}

	return p
}
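A short sketch of how the IsService switch above plays out; it assumes the entity constructors only capture the context at this point, with no parsing or AWS calls.

// Hypothetical: the same constructor backs either a service or a task entity.
func newProjects() (Project, Project) {
	svcCtx := &Context{}
	svcCtx.IsService = true
	serviceProject := NewProject(svcCtx) // backed by an ECS service entity

	taskCtx := &Context{}
	taskProject := NewProject(taskCtx) // IsService is false, so an ECS task entity

	return serviceProject, taskProject
}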
Example #9
// parseDockerCompose loads the compose file from composeFilePath and makes sure the output directory exists.
func parseDockerCompose() *project.Project {
	composeFile := composeFilePath + "docker-compose.yml"
	p := project.NewProject(&project.Context{
		ProjectName:  "kube",
		ComposeFiles: []string{composeFile},
	}, nil, &config.ParseOptions{})

	if err := p.Parse(); err != nil {
		log.Fatalf("Failed to parse the compose project from %s: %v", composeFile, err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		log.Fatalf("Failed to create the output directory %s: %v", outputDir, err)
	}

	if p.ServiceConfigs == nil {
		log.Fatalf("No service config found, aborting")
	}
	return p
}
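The returned project is typically consumed by walking ServiceConfigs; a small follow-up sketch, assuming the usual libcompose config.ServiceConfigs accessors (Keys and Get) are available in the version in use.

// Hypothetical follow-up to parseDockerCompose.
func listServices() {
	p := parseDockerCompose()
	for _, name := range p.ServiceConfigs.Keys() {
		if svc, ok := p.ServiceConfigs.Get(name); ok {
			log.Printf("service %s uses image %s", name, svc.Image)
		}
	}
}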
Example #10
// NewProject creates a Rancher project with the specified context and names it after the context's project name.
func NewProject(context *Context) (*project.Project, error) {
	context.ServiceFactory = &RancherServiceFactory{
		Context: context,
	}

	p := project.NewProject(&context.Context)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	p.Name = context.ProjectName

	context.SidekickInfo = NewSidekickInfo(p)

	return p, err
}
Example #11
// NewProject creates a Project with the specified context, wiring file and OS lookups, a color logger and the Rancher service factory.
func NewProject(context *Context) (*project.Project, error) {
	context.ConfigLookup = &lookup.FileConfigLookup{}
	context.EnvironmentLookup = &lookup.OsEnvLookup{}
	context.LoggerFactory = logger.NewColorLoggerFactory()
	context.ServiceFactory = &RancherServiceFactory{
		context: context,
	}

	p := project.NewProject(&context.Context)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	context.SidekickInfo = NewSidekickInfo(p)

	return p, err
}
Example #12
// main converts each docker-compose service into a Kubernetes ReplicationController manifest.
func main() {
	flag.Parse()

	p := project.NewProject(&project.Context{
		ProjectName: "kube",
		ComposeFile: composeFile,
	})

	if err := p.Parse(); err != nil {
		log.Fatalf("Failed to parse the compose project from %s: %v", composeFile, err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		log.Fatalf("Failed to create the output directory %s: %v", outputDir, err)
	}

	for name, service := range p.Configs {
		rc := &api.ReplicationController{
			TypeMeta: api.TypeMeta{
				Kind:       "ReplicationController",
				APIVersion: "v1",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"service": name},
			},
			Spec: api.ReplicationControllerSpec{
				Replicas: 1,
				Selector: map[string]string{"service": name},
				Template: &api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels: map[string]string{"service": name},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  name,
								Image: service.Image,
							},
						},
					},
				},
			},
		}

		// Configure the container ports.
		var ports []api.ContainerPort
		for _, port := range service.Ports {
			portNumber, err := strconv.Atoi(port)
			if err != nil {
				log.Fatalf("Invalid container port %s for service %s", port, name)
			}
			ports = append(ports, api.ContainerPort{ContainerPort: portNumber})
		}

		rc.Spec.Template.Spec.Containers[0].Ports = ports

		// Configure the container restart policy.
		switch service.Restart {
		case "", "always":
			rc.Spec.Template.Spec.RestartPolicy = api.RestartPolicyAlways
		case "no":
			rc.Spec.Template.Spec.RestartPolicy = api.RestartPolicyNever
		case "on-failure":
			rc.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure
		default:
			log.Fatalf("Unknown restart policy %s for service %s", service.Restart, name)
		}

		data, err := json.MarshalIndent(rc, "", "  ")
		if err != nil {
			log.Fatalf("Failed to marshal the replication controller: %v", err)
		}

		// Save the replication controller for the Docker compose service to the
		// configs directory.
		outputFileName := fmt.Sprintf("%s-rc.yaml", name)
		outputFilePath := filepath.Join(outputDir, outputFileName)
		if err := ioutil.WriteFile(outputFilePath, data, 0644); err != nil {
			log.Fatalf("Failed to write replication controller %s: %v", outputFileName, err)
		}
		fmt.Println(outputFilePath)
	}
}
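The main function above reads composeFile and outputDir as plain strings, so the package presumably defines them as flag-backed variables along these lines; the flag names and defaults are assumptions inferred from usage.

// Hypothetical flag wiring for the main function above.
var (
	composeFile string
	outputDir   string
)

func init() {
	flag.StringVar(&composeFile, "compose-file", "docker-compose.yml", "compose file to convert")
	flag.StringVar(&outputDir, "output-dir", "output", "directory for the generated manifests")
}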
Example #13
// Compose2kube converts the docker-compose definition read from in into Kubernetes pod or replication controller configs.
func Compose2kube(in io.Reader) ([]K8sConfig, error) {

	inBytes, err := ioutil.ReadAll(in)
	if err != nil {
		return nil, err
	}

	p := project.NewProject(&project.Context{
		ProjectName:  "kube",
		ComposeBytes: inBytes,
	})

	if err := p.Parse(); err != nil {
		return nil, fmt.Errorf("Failed to parse the compose project: %v", err)
	}

	k8sConfigs := []K8sConfig{}

	for name, service := range p.Configs {
		pod := &api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"service": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  name,
						Image: service.Image,
						Args:  service.Command.Slice(),
						Resources: api.ResourceRequirements{
							Limits: api.ResourceList{},
						},
					},
				},
			},
		}

		if service.CPUShares != 0 {
			pod.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewQuantity(service.CPUShares, "decimalSI")
		}

		if service.MemLimit != 0 {
			pod.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(service.MemLimit, "decimalSI")
		}

		// Configure the environment variables
		var environment []api.EnvVar
		for _, envs := range service.Environment.Slice() {
			value := strings.Split(envs, "=")
			environment = append(environment, api.EnvVar{Name: value[0], Value: value[1]})
		}

		pod.Spec.Containers[0].Env = environment

		// Configure the container ports.
		var ports []api.ContainerPort
		for _, port := range service.Ports {
			portNumber, err := strconv.Atoi(port)
			if err != nil {
				log.Fatalf("Invalid container port %s for service %s", port, name)
			}
			ports = append(ports, api.ContainerPort{ContainerPort: portNumber})
		}

		pod.Spec.Containers[0].Ports = ports

		// Configure the container restart policy.
		var (
			rc      *api.ReplicationController
			objType string
			data    []byte
			err     error
		)
		switch service.Restart {
		case "", "always":
			objType = "rc"
			rc = replicationController(name, pod)
			pod.Spec.RestartPolicy = api.RestartPolicyAlways
			data, err = json.MarshalIndent(rc, "", "  ")
		case "no", "false":
			objType = "pod"
			pod.Spec.RestartPolicy = api.RestartPolicyNever
			data, err = json.MarshalIndent(pod, "", "  ")
		case "on-failure":
			objType = "rc"
			rc = replicationController(name, pod)
			pod.Spec.RestartPolicy = api.RestartPolicyOnFailure
			data, err = json.MarshalIndent(rc, "", "  ")
		default:
			log.Fatalf("Unknown restart policy %s for service %s", service.Restart, name)
		}

		if err != nil {
			log.Fatalf("Failed to marshal the replication controller: %v", err)
		}
		k8c := K8sConfig{
			Name:     name,
			ObjType:  objType,
			JsonData: data,
		}
		k8sConfigs = append(k8sConfigs, k8c)
	}
	return k8sConfigs, nil
}
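A hypothetical caller for Compose2kube; the file name is a placeholder and printing the result is only illustrative.

// Hypothetical usage of Compose2kube.
func convertFile() error {
	f, err := os.Open("docker-compose.yml")
	if err != nil {
		return err
	}
	defer f.Close()

	configs, err := Compose2kube(f)
	if err != nil {
		return err
	}
	for _, c := range configs {
		fmt.Printf("%s (%s): %d bytes of JSON\n", c.Name, c.ObjType, len(c.JsonData))
	}
	return nil
}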
Example #14
// main converts each docker-compose service into a Kubernetes replication controller, pod or job manifest, depending on its restart policy.
func main() {
	flag.Parse()

	p := project.NewProject(&project.Context{
		ProjectName:  "kube",
		ComposeFiles: []string{composeFile},
	})

	if err := p.Parse(); err != nil {
		log.Fatalf("Failed to parse the compose project from %s: %v", composeFile, err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		log.Fatalf("Failed to create the output directory %s: %v", outputDir, err)
	}

	for name, service := range p.Configs {
		pod := &api.PodSpec{
			Containers: []api.Container{
				{
					Name:  strings.ToLower(name),
					Image: service.Image,
					Args:  service.Command.Slice(),
					Resources: api.ResourceRequirements{
						Limits: api.ResourceList{},
					},
				},
			},
		}

		if service.CPUShares != 0 {
			pod.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(service.CPUShares, resource.BinarySI)
		}

		if service.MemLimit != 0 {
			pod.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(service.MemLimit, "decimalSI")
		}

		// If Privileged, create a SecurityContext and configure it
		if service.Privileged {
			priv := true
			context := &api.SecurityContext{
				Capabilities: &api.Capabilities{},
				Privileged:   &priv,
			}
			pod.Containers[0].SecurityContext = context
		}

		if pullPolicy != "" {
			switch pullPolicy {
			case "", "IfNotPresent":
				pod.Containers[0].ImagePullPolicy = api.PullIfNotPresent
			case "Always":
				pod.Containers[0].ImagePullPolicy = api.PullAlways
			case "Never":
				pod.Containers[0].ImagePullPolicy = api.PullNever
			default:
				log.Fatalf("Unknown pull policy %s for service %s", pullPolicy, name)
			}
		}

		if nodeSelector != "" {
			ss := strings.Split(nodeSelector, ";")
			m := make(map[string]string)
			for _, pair := range ss {
				z := strings.Split(pair, "=")
				m[z[0]] = z[1]
			}
			pod.NodeSelector = m
		}

		// Configure the environment variables
		var environment []api.EnvVar
		for _, envs := range service.Environment.Slice() {
			value := strings.Split(envs, "=")
			environment = append(environment, api.EnvVar{Name: value[0], Value: value[1]})
		}

		pod.Containers[0].Env = environment

		// Configure the container ports.
		var ports []api.ContainerPort
		for _, port := range service.Ports {
			// Check if we have to deal with a mapped port
			if strings.Contains(port, ":") {
				parts := strings.Split(port, ":")
				port = parts[1]
			}
			portNumber, err := strconv.ParseInt(port, 10, 32)
			if err != nil {
				log.Fatalf("Invalid container port %s for service %s", port, name)
			}
			ports = append(ports, api.ContainerPort{ContainerPort: int32(portNumber)})
		}

		pod.Containers[0].Ports = ports

		// Configure the container restart policy.
		var (
			objType string
			data    []byte
			err     error
		)
		switch service.Restart {
		case "", "always":
			objType = "rc"
			pod.RestartPolicy = api.RestartPolicyAlways
			data, err = yaml.Marshal(replicationController(name, pod))
		case "no", "false":
			objType = "pod"
			pod.RestartPolicy = api.RestartPolicyNever
			data, err = yaml.Marshal(job(name, pod))
		case "on-failure":
			objType = "job"
			pod.RestartPolicy = api.RestartPolicyOnFailure
			data, err = yaml.Marshal(job(name, pod))
		default:
			log.Fatalf("Unknown restart policy %s for service %s", service.Restart, name)
		}

		if err != nil {
			log.Fatalf("Failed to marshal: %v", err)
		}

		// Save the job controller for the Docker compose service to the
		// configs directory.
		outputFileName := fmt.Sprintf("%s-%s.yaml", name, objType)
		outputFilePath := filepath.Join(outputDir, outputFileName)
		if err := ioutil.WriteFile(outputFilePath, data, 0644); err != nil {
			log.Fatalf("Failed to write job controller %s: %v", outputFileName, err)
		}
		fmt.Println(outputFilePath)
	}
}
Example #15
// NewProject creates a Project with the specified context.
func NewProject(context *Context, parseOptions *config.ParseOptions) (project.APIProject, error) {
	if context.ResourceLookup == nil {
		context.ResourceLookup = &lookup.FileResourceLookup{}
	}

	if context.EnvironmentLookup == nil {
		cwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		context.EnvironmentLookup = &lookup.ComposableEnvLookup{
			Lookups: []config.EnvironmentLookup{
				&lookup.EnvfileLookup{
					Path: filepath.Join(cwd, ".env"),
				},
				&lookup.OsEnvLookup{},
			},
		}
	}

	if context.AuthLookup == nil {
		context.AuthLookup = auth.NewConfigLookup(context.ConfigFile)
	}

	if context.ServiceFactory == nil {
		context.ServiceFactory = &ServiceFactory{
			context: context,
		}
	}

	if context.ClientFactory == nil {
		factory, err := client.NewDefaultFactory(client.Options{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = factory
	}

	if context.NetworksFactory == nil {
		networksFactory := &network.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
		context.NetworksFactory = networksFactory
	}

	if context.VolumesFactory == nil {
		volumesFactory := &volume.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
		context.VolumesFactory = volumesFactory
	}

	// FIXME(vdemeester) Remove the context duplication ?
	runtime := &Project{
		clientFactory: context.ClientFactory,
	}
	p := project.NewProject(&context.Context, runtime, parseOptions)

	err := p.Parse()
	if err != nil {
		return nil, err
	}

	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}

	return p, err
}
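The APIProject returned here is what the libcompose CLI drives; a hedged sketch of bringing the parsed project up, assuming the libcompose v0.4-era Up signature and options.Up zero value.

// Hypothetical follow-up, given proj returned by NewProject above.
func bringUp(proj project.APIProject) error {
	return proj.Up(context.Background(), options.Up{})
}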
Example #16
// main converts each docker-compose service into a Kubernetes replication controller or pod manifest, depending on its restart policy.
func main() {
	flag.Parse()

	p := project.NewProject(&project.Context{
		ProjectName: "kube",
		ComposeFile: composeFile,
	})

	if err := p.Parse(); err != nil {
		log.Fatalf("Failed to parse the compose project from %s: %v", composeFile, err)
	}
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		log.Fatalf("Failed to create the output directory %s: %v", outputDir, err)
	}

	for name, service := range p.Configs {
		pod := &api.Pod{
			TypeMeta: api.TypeMeta{
				Kind:       "Pod",
				APIVersion: "v1",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"service": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  name,
						Image: service.Image,
						Args:  service.Command.Slice(),
						Resources: api.ResourceRequirements{
							Limits: api.ResourceList{},
						},
					},
				},
			},
		}

		if service.CPUShares != 0 {
			pod.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewQuantity(service.CPUShares, "decimalSI")
		}

		if service.MemLimit != 0 {
			pod.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(service.MemLimit, "decimalSI")
		}

		// If Privileged, create a SecurityContext and configure it
		if service.Privileged {
			priv := true
			context := &api.SecurityContext{
				Capabilities: &api.Capabilities{},
				Privileged:   &priv,
			}
			pod.Spec.Containers[0].SecurityContext = context
		}

		// Configure the environment variables
		var environment []api.EnvVar
		for _, envs := range service.Environment.Slice() {
			value := strings.Split(envs, "=")
			environment = append(environment, api.EnvVar{Name: value[0], Value: value[1]})
		}

		pod.Spec.Containers[0].Env = environment

		// Configure the container ports.
		var ports []api.ContainerPort
		for _, port := range service.Ports {
			portNumber, err := strconv.Atoi(port)
			if err != nil {
				log.Fatalf("Invalid container port %s for service %s", port, name)
			}
			ports = append(ports, api.ContainerPort{ContainerPort: portNumber})
		}

		pod.Spec.Containers[0].Ports = ports

		// Configure the container restart policy.
		var (
			rc      *api.ReplicationController
			objType string
			data    []byte
			err     error
		)
		switch service.Restart {
		case "", "always":
			objType = "rc"
			rc = replicationController(name, pod)
			pod.Spec.RestartPolicy = api.RestartPolicyAlways
			data, err = json.MarshalIndent(rc, "", "  ")
		case "no", "false":
			objType = "pod"
			pod.Spec.RestartPolicy = api.RestartPolicyNever
			data, err = json.MarshalIndent(pod, "", "  ")
		case "on-failure":
			objType = "rc"
			rc = replicationController(name, pod)
			pod.Spec.RestartPolicy = api.RestartPolicyOnFailure
			data, err = json.MarshalIndent(rc, "", "  ")
		default:
			log.Fatalf("Unknown restart policy %s for service %s", service.Restart, name)
		}

		if err != nil {
			log.Fatalf("Failed to marshal the replication controller: %v", err)
		}

		// Save the replication controller for the Docker compose service to the
		// configs directory.
		outputFileName := fmt.Sprintf("%s-%s.yaml", name, objType)
		outputFilePath := filepath.Join(outputDir, outputFileName)
		if err := ioutil.WriteFile(outputFilePath, data, 0644); err != nil {
			log.Fatalf("Failed to write replication controller %s: %v", outputFileName, err)
		}
		fmt.Println(outputFilePath)
	}
}