Example #1
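This helper, which appears to come from tsuru's docker provisioner test suite, builds a Container fixture: it fills in default app, process, and image names when the options leave them empty, registers the container with the fake router, pulls the image and creates the container on the provisioner's cluster, and stores the resulting record in the provisioner's collection.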
func (s *S) newContainer(opts newContainerOpts, p *fakeDockerProvisioner) (*Container, error) {
	if p == nil {
		p = s.p
	}
	container := Container{
		ID:          "id",
		IP:          "10.10.10.10",
		HostPort:    "3333",
		HostAddr:    "127.0.0.1",
		ProcessName: opts.ProcessName,
		Image:       opts.Image,
		AppName:     opts.AppName,
		ExposedPort: "8888/tcp",
	}
	if container.AppName == "" {
		container.AppName = "container"
	}
	if container.ProcessName == "" {
		container.ProcessName = "web"
	}
	if container.Image == "" {
		container.Image = "tsuru/python:latest"
	}
	// Register the app and the container's address in the fake router used by the tests.
	routertest.FakeRouter.AddBackend(container.AppName)
	routertest.FakeRouter.AddRoute(container.AppName, container.Address())
	ports := map[docker.Port]struct{}{
		docker.Port(dockercommon.WebProcessDefaultPort() + "/tcp"): {},
	}
	config := docker.Config{
		Image:        container.Image,
		Cmd:          []string{"ps"},
		ExposedPorts: ports,
	}
	err := p.Cluster().PullImage(docker.PullImageOptions{Repository: container.Image}, docker.AuthConfiguration{})
	if err != nil {
		return nil, err
	}
	_, c, err := p.Cluster().CreateContainer(docker.CreateContainerOptions{Config: &config}, net.StreamInactivityTimeout)
	if err != nil {
		return nil, err
	}
	container.ID = c.ID
	coll := p.Collection()
	defer coll.Close()
	err = coll.Insert(container)
	if err != nil {
		return nil, err
	}
	return &container, nil
}
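A minimal usage sketch, assuming the gopkg.in/check.v1 suite S that the receiver above implies; the app name is illustrative, and the expected defaults are the ones the helper itself applies:

func (s *S) TestNewContainerDefaults(c *check.C) {
	// Passing nil for the provisioner makes the helper fall back to s.p.
	cont, err := s.newContainer(newContainerOpts{AppName: "myapp"}, nil)
	c.Assert(err, check.IsNil)
	// ProcessName and Image receive defaults when left empty in the options.
	c.Assert(cont.ProcessName, check.Equals, "web")
	c.Assert(cont.Image, check.Equals, "tsuru/python:latest")
}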
Example #2
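Container.Create prepares the docker.Config for a new unit: the web port is exposed only for non-deploy containers, the app's router name and type are resolved so they can be recorded as labels, resource limits are copied from the host config, and the provisioner's cluster is asked to schedule the container, optionally pinned to the first requested destination host.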
func (c *Container) Create(args *CreateArgs) error {
	securityOpts, _ := config.GetList("docker:security-opts")
	var exposedPorts map[docker.Port]struct{}
	if !args.Deploy {
		if c.ExposedPort == "" {
			c.ExposedPort = dockercommon.WebProcessDefaultPort() + "/tcp"
		}
		exposedPorts = map[docker.Port]struct{}{
			docker.Port(c.ExposedPort): {},
		}
	}
	var user string
	if args.Building {
		user = c.user()
	}
	routerName, err := args.App.GetRouter()
	if err != nil {
		return err
	}
	routerType, _, err := router.Type(routerName)
	if err != nil {
		return err
	}
	hostConf, err := c.hostConfig(args.App, args.Deploy)
	if err != nil {
		return err
	}
	conf := docker.Config{
		Image:        args.ImageID,
		Cmd:          args.Commands,
		Entrypoint:   []string{},
		ExposedPorts: exposedPorts,
		AttachStdin:  false,
		AttachStdout: false,
		AttachStderr: false,
		Memory:       hostConf.Memory,
		MemorySwap:   hostConf.MemorySwap,
		CPUShares:    hostConf.CPUShares,
		SecurityOpts: securityOpts,
		User:         user,
		Labels: map[string]string{
			"tsuru.container":    strconv.FormatBool(true),
			"tsuru.app.name":     args.App.GetName(),
			"tsuru.app.platform": args.App.GetPlatform(),
			"tsuru.process.name": c.ProcessName,
			"tsuru.router.name":  routerName,
			"tsuru.router.type":  routerType,
		},
	}
	c.addEnvsToConfig(args, strings.TrimSuffix(c.ExposedPort, "/tcp"), &conf)
	opts := docker.CreateContainerOptions{Name: c.Name, Config: &conf, HostConfig: hostConf}
	var nodeList []string
	// When destination hosts are requested, resolve the first one to a cluster node
	// and restrict scheduling to that node only.
	if len(args.DestinationHosts) > 0 {
		var node cluster.Node
		node, err = args.Provisioner.GetNodeByHost(args.DestinationHosts[0])
		if err != nil {
			return err
		}
		nodeList = []string{node.Address}
	}
	schedulerOpts := &SchedulerOpts{
		AppName:       args.App.GetName(),
		ProcessName:   args.ProcessName,
		ActionLimiter: args.Provisioner.ActionLimiter(),
	}
	addr, cont, err := args.Provisioner.Cluster().CreateContainerSchedulerOpts(opts, schedulerOpts, net.StreamInactivityTimeout, nodeList...)
	hostAddr := net.URLToHost(addr)
	if schedulerOpts.LimiterDone != nil {
		schedulerOpts.LimiterDone()
	}
	if err != nil {
		log.Errorf("error on creating container in docker %s - %s", c.AppName, err)
		return err
	}
	c.ID = cont.ID
	c.HostAddr = hostAddr
	return nil
}
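A hedged sketch of how a caller might drive Create; app and prov stand for the app object and provisioner that the example's args already require, and every literal value is illustrative rather than taken from tsuru:

	cont := Container{AppName: app.GetName(), ProcessName: "web"}
	args := CreateArgs{
		ImageID:     "tsuru/app-myapp:v42",                       // illustrative image tag
		Commands:    []string{"/bin/sh", "-c", "exec ./run-web"}, // illustrative start command
		App:         app,  // the app whose router, platform, and name are read above
		Provisioner: prov, // provisioner giving access to the cluster and action limiter
		Deploy:      false, // false => the web port is exposed
	}
	if err := cont.Create(&args); err != nil {
		return err
	}
	// On success, Create has filled in cont.ID and cont.HostAddr.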
Example #3
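serviceSpecForApp turns a tsuru app/process pair into a Docker Swarm ServiceSpec: it assembles the environment, publishes the web port only for regular runs (not deploys or isolated runs), recovers the previous replica and restart counters from the base spec's labels, applies the requested processState changes, and attaches the tsuru labels, a pool placement constraint, and an always-restart policy.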
func serviceSpecForApp(opts tsuruServiceOpts) (*swarm.ServiceSpec, error) {
	var envs []string
	for _, envData := range opts.app.Envs() {
		envs = append(envs, fmt.Sprintf("%s=%s", envData.Name, envData.Value))
	}
	host, _ := config.GetString("host")
	envs = append(envs, fmt.Sprintf("%s=%s", "TSURU_HOST", host))
	var cmds []string
	var err error
	var endpointSpec *swarm.EndpointSpec
	var networks []swarm.NetworkAttachmentConfig
	var healthConfig *container.HealthConfig
	port := dockercommon.WebProcessDefaultPort()
	portInt, _ := strconv.Atoi(port)
	if !opts.isDeploy && !opts.isIsolatedRun {
		envs = append(envs, []string{
			fmt.Sprintf("%s=%s", "port", port),
			fmt.Sprintf("%s=%s", "PORT", port),
		}...)
		endpointSpec = &swarm.EndpointSpec{
			Mode: swarm.ResolutionModeVIP,
			Ports: []swarm.PortConfig{
				{TargetPort: uint32(portInt), PublishedPort: 0},
			},
		}
		networks = []swarm.NetworkAttachmentConfig{
			{Target: networkNameForApp(opts.app)},
		}
		extra := []string{extraRegisterCmds(opts.app)}
		cmds, _, err = dockercommon.LeanContainerCmdsWithExtra(opts.process, opts.image, opts.app, extra)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		var yamlData provision.TsuruYamlData
		yamlData, err = image.GetImageTsuruYamlData(opts.image)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		healthConfig = toHealthConfig(yamlData.Healthcheck, portInt)
	}
	restartCount := 0
	replicas := 0
	// Recover the previous replica and restart counters from the base spec's labels.
	if opts.baseSpec != nil {
		replicas, err = strconv.Atoi(opts.baseSpec.Labels[labelProcessReplicas.String()])
		if err != nil && opts.baseSpec.Mode.Replicated != nil {
			replicas = int(*opts.baseSpec.Mode.Replicated.Replicas)
		}
		restartCount, _ = strconv.Atoi(opts.baseSpec.Labels[labelServiceRestart.String()])
	}
	if opts.processState.increment != 0 {
		replicas += opts.processState.increment
		if replicas < 0 {
			return nil, errors.New("cannot have less than 0 units")
		}
	} else if replicas == 0 && opts.processState.start {
		replicas = 1
	}
	routerName, err := opts.app.GetRouter()
	if err != nil {
		return nil, errors.WithStack(err)
	}
	routerType, _, err := router.Type(routerName)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	srvName := serviceNameForApp(opts.app, opts.process)
	if opts.isDeploy {
		replicas = 1
		srvName = fmt.Sprintf("%s-build", srvName)
	}
	if opts.isIsolatedRun {
		replicas = 1
		srvName = fmt.Sprintf("%sisolated-run", srvName)
	}
	uReplicas := uint64(replicas)
	if opts.processState.stop {
		uReplicas = 0
	}
	if opts.processState.restart {
		restartCount++
	}
	labels := map[string]string{
		labelService.String():            strconv.FormatBool(true),
		labelServiceDeploy.String():      strconv.FormatBool(opts.isDeploy),
		labelServiceIsolatedRun.String(): strconv.FormatBool(opts.isIsolatedRun),
		labelServiceBuildImage.String():  opts.buildImage,
		labelAppName.String():            opts.app.GetName(),
		labelAppProcess.String():         opts.process,
		labelAppPlatform.String():        opts.app.GetPlatform(),
		labelRouterName.String():         routerName,
		labelRouterType.String():         routerType,
		labelProcessReplicas.String():    strconv.Itoa(replicas),
		labelServiceRestart.String():     strconv.Itoa(restartCount),
		labelPoolName.String():           opts.app.GetPool(),
		labelProvisionerName.String():    "swarm",
	}
	user, err := config.GetString("docker:user")
	if err != nil {
		user, _ = config.GetString("docker:ssh:user")
	}
	opts.constraints = append(opts.constraints, fmt.Sprintf("node.labels.%s == %s", labelNodePoolName, opts.app.GetPool()))
	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:       opts.image,
				Env:         envs,
				Labels:      labels,
				Command:     cmds,
				User:        user,
				Healthcheck: healthConfig,
			},
			Networks: networks,
			RestartPolicy: &swarm.RestartPolicy{
				Condition: swarm.RestartPolicyConditionAny,
			},
			Placement: &swarm.Placement{
				Constraints: opts.constraints,
			},
		},
		Networks:     networks,
		EndpointSpec: endpointSpec,
		Annotations: swarm.Annotations{
			Name:   srvName,
			Labels: labels,
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &uReplicas,
			},
		},
	}
	return &spec, nil
}
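A hedged sketch of consuming the spec; the tsuruServiceOpts field names come from the example itself, a stands for the target app, and the CreateService call assumes the fsouza/go-dockerclient swarm API that the swarm types suggest:

	spec, err := serviceSpecForApp(tsuruServiceOpts{
		app:          a,
		process:      "web",
		image:        "tsuru/app-myapp:v42",      // illustrative image
		processState: processState{start: true},  // assumes the field's type is named processState
	})
	if err != nil {
		return err
	}
	// client is assumed to be a go-dockerclient *docker.Client pointed at a swarm manager.
	_, err = client.CreateService(docker.CreateServiceOptions{ServiceSpec: *spec})
	return err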