Example #1
func startJob(s *State, hostID string, job *host.Job) (*Job, error) {
	cc, err := s.ClusterClient()
	if err != nil {
		return nil, err
	}
	if hostID == "" {
		hostID, err = randomHost(cc)
		if err != nil {
			return nil, err
		}
	}

	// TODO: filter by tags

	job.ID = cluster.RandomJobID("")
	data := &Job{HostID: hostID, JobID: job.ID}

	hc, err := cc.DialHost(hostID)
	if err != nil {
		return nil, err
	}
	defer hc.Close()

	// Buffered so the event-watching goroutine below cannot leak if AddJobs fails.
	jobStatus := make(chan error, 1)
	events := make(chan *host.Event)
	stream := hc.StreamEvents(data.JobID, events)
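	// Watch the job's event stream in the background and report its outcome on jobStatus.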
	go func() {
		defer stream.Close()
		for e := range events {
			switch e.Event {
			case "start", "stop":
				jobStatus <- nil
				return
			case "error":
				job, err := hc.GetJob(data.JobID)
				if err != nil {
					jobStatus <- err
					return
				}
				if job.Error == nil {
					jobStatus <- fmt.Errorf("bootstrap: unknown error from host")
					return
				}
				jobStatus <- fmt.Errorf("bootstrap: host error while launching job: %q", *job.Error)
				return
			default:
				// ignore other event types
			}
		}
		jobStatus <- fmt.Errorf("bootstrap: host job stream disconnected unexpectedly: %q", stream.Err())
	}()

	_, err = cc.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		return nil, err
	}

	return data, <-jobStatus
}
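
A minimal usage sketch for startJob (hypothetical: s is assumed to be an initialized *State, and the image name and command are placeholders; imports are omitted as in the examples above):

job := &host.Job{
	Config: &docker.Config{
		Image: "flynn/example", // placeholder image name
		Cmd:   []string{"server"},
	},
}
// An empty hostID lets startJob pick a random host from the cluster.
data, err := startJob(s, "", job)
if err != nil {
	log.Fatal(err)
}
log.Printf("job %s is running on host %s", data.JobID, data.HostID)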
Example #2
func (m *manifestRunner) runManifest(r io.Reader) (map[string]*ManifestData, error) {
	var services []manifestService
	if err := json.NewDecoder(r).Decode(&services); err != nil {
		return nil, err
	}

	serviceData := make(map[string]*ManifestData, len(services))
	for _, service := range services {
		data := &ManifestData{
			Env:        parseEnviron(),
			Services:   serviceData,
			ExternalIP: m.externalIP,
			ports:      m.ports,
		}

		// Add explicit tcp ports to data.TCPPorts
		for _, port := range service.TCPPorts {
			port, err := strconv.Atoi(port)
			if err != nil {
				return nil, err
			}
			data.TCPPorts = append(data.TCPPorts, port)
		}

		var buf bytes.Buffer

		// interp renders s as a Go text/template against data, reusing buf between calls.
		interp := func(s string) (string, error) {
			t, err := template.New("arg").Parse(s)
			if err != nil {
				return "", err
			}
			if err := t.Execute(&buf, data); err != nil {
				return "", err
			}
			defer buf.Reset()
			return buf.String(), nil
		}

		args := make([]string, 0, len(service.Args))
		for _, arg := range service.Args {
			arg, err := interp(arg)
			if err != nil {
				return nil, err
			}
			if strings.TrimSpace(arg) == "" {
				continue
			}
			args = append(args, arg)
		}
		var err error
		for k, v := range service.Env {
			service.Env[k], err = interp(v)
			if err != nil {
				return nil, err
			}
		}
		// Use the service's interpolated env (rather than the full host env) for the job itself.
		data.Env = service.Env

		// Always include at least one port
		if len(data.TCPPorts) == 0 {
			data.TCPPorts = append(data.TCPPorts, <-m.ports)
		}

		if service.Image == "" {
			service.Image = "flynn/" + service.ID
		}

		// Preload ports channel with the pre-allocated ports for this job
		ports := make(chan int, len(data.TCPPorts))
		for _, p := range data.TCPPorts {
			ports <- p
		}

		job := &host.Job{
			ID:       cluster.RandomJobID("flynn-" + service.ID + "-"),
			TCPPorts: len(data.TCPPorts),
			Config: &docker.Config{
				Image:        service.Image,
				Entrypoint:   service.Entrypoint,
				Cmd:          args,
				AttachStdout: true,
				AttachStderr: true,
				Env:          dockerEnv(data.Env),
				Volumes:      data.Volumes,
			},
		}

		container, err := m.processor.processJob(ports, job)
		if err != nil {
			return nil, err
		}
		container, err = m.docker.InspectContainer(container.ID)
		if err != nil {
			return nil, err
		}

		data.InternalIP = container.NetworkSettings.IPAddress
		data.readonly = true
		serviceData[service.ID] = data
	}

	return serviceData, nil
}
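
A hypothetical usage sketch for runManifest: m is assumed to be an initialized *manifestRunner, and the JSON keys are inferred from the manifestService fields used above (ID, Image, Args, TCPPorts), so they may not match the real struct tags:

manifest := strings.NewReader(`[
	{"id": "example", "image": "flynn/example", "args": ["-addr", "{{.ExternalIP}}:8080"], "tcp_ports": ["8080"]}
]`)
services, err := m.runManifest(manifest)
if err != nil {
	log.Fatal(err)
}
log.Printf("example service has internal IP %s", services["example"].InternalIP)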
Example #3
func (m *manifestRunner) runManifest(r io.Reader) (map[string]*ManifestData, error) {
	var services []manifestService
	if err := json.NewDecoder(r).Decode(&services); err != nil {
		return nil, err
	}

	serviceData := make(map[string]*ManifestData, len(services))
	for _, service := range services {
		data := &ManifestData{
			Env:        parseEnviron(),
			Services:   serviceData,
			ExternalIP: m.externalAddr,
			ports:      m.backend.(*DockerBackend).ports,
		}

		// Add explicit tcp ports to data.TCPPorts
		for _, port := range service.TCPPorts {
			port, err := strconv.Atoi(port)
			if err != nil {
				return nil, err
			}
			data.TCPPorts = append(data.TCPPorts, port)
		}

		var buf bytes.Buffer

		interp := func(s string) (string, error) {
			t, err := template.New("arg").Parse(s)
			if err != nil {
				return "", err
			}
			if err := t.Execute(&buf, data); err != nil {
				return "", err
			}
			defer buf.Reset()
			return buf.String(), nil
		}

		args := make([]string, 0, len(service.Args))
		for _, arg := range service.Args {
			arg, err := interp(arg)
			if err != nil {
				return nil, err
			}
			if strings.TrimSpace(arg) == "" {
				continue
			}
			args = append(args, arg)
		}
		var err error
		for k, v := range service.Env {
			service.Env[k], err = interp(v)
			if err != nil {
				return nil, err
			}
		}
		data.Env = service.Env

		if service.Image == "" {
			service.Image = "flynn/" + service.ID
		}

		job := &host.Job{
			ID:       cluster.RandomJobID("flynn-" + service.ID + "-"),
			TCPPorts: 1,
			Config: &docker.Config{
				Image:        service.Image,
				Entrypoint:   service.Entrypoint,
				Cmd:          args,
				AttachStdout: true,
				AttachStderr: true,
				Env:          dockerEnv(data.Env),
				Volumes:      data.Volumes,
				ExposedPorts: make(map[string]struct{}, len(service.TCPPorts)),
			},
			HostConfig: &docker.HostConfig{
				PortBindings: make(map[string][]docker.PortBinding, len(service.TCPPorts)),
			},
		}
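		// Expose the host's external address to every service.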
		job.Config.Env = append(job.Config.Env, "EXTERNAL_IP="+m.externalAddr)

		// Explicit ports are bound directly below, so no dynamically allocated port is needed.
		if len(service.TCPPorts) > 0 {
			job.TCPPorts = 0
		}
		for i, port := range service.TCPPorts {
			if i == 0 {
				job.Config.Env = append(job.Config.Env, "PORT="+port)
			}
			job.Config.Env = append(job.Config.Env, fmt.Sprintf("PORT_%d=%s", i, port))
			job.Config.ExposedPorts[port+"/tcp"] = struct{}{}
			job.HostConfig.PortBindings[port+"/tcp"] = []docker.PortBinding{{HostPort: port, HostIp: m.bindAddr}}
		}

		if err := m.backend.Run(job); err != nil {
			return nil, err
		}

		container, err := m.backend.(*DockerBackend).docker.InspectContainer(job.ID)
		if err != nil {
			return nil, err
		}

		data.InternalIP = container.NetworkSettings.IPAddress
		data.readonly = true
		serviceData[service.ID] = data
	}

	return serviceData, nil
}
Example #4
func (c *Cmd) Start() error {
	if c.started {
		return errors.New("exec: already started")
	}
	c.started = true
	if c.cluster == nil {
		var err error
		c.cluster, err = cluster.NewClient()
		if err != nil {
			return err
		}
		c.closeCluster = true
	}

	hosts, err := c.cluster.ListHosts()
	if err != nil {
		return err
	}
	if c.HostID == "" {
		// TODO: check if this is actually random
		for c.HostID = range hosts {
			break
		}
	}
	if c.JobID == "" {
		c.JobID = cluster.RandomJobID("")
	}

	job := &host.Job{
		ID: c.JobID,
		Config: &docker.Config{
			Image: c.Image,
			Cmd:   c.Cmd,
			Tty:   c.TTY,
			Env:   formatEnv(c.Env),
		},
		Attributes: c.Attrs,
	}
	if c.Stdout != nil || c.stdoutPipe != nil {
		job.Config.AttachStdout = true
	}
	if c.Stderr != nil || c.stderrPipe != nil {
		job.Config.AttachStderr = true
	}
	if c.Stdin != nil || c.stdinPipe != nil {
		job.Config.AttachStdin = true
		job.Config.OpenStdin = true
		job.Config.StdinOnce = true
	}

	c.host, err = c.cluster.DialHost(c.HostID)
	if err != nil {
		return err
	}

	// subscribe to host events
	ch := make(chan *host.Event)
	stream := c.host.StreamEvents(job.ID, ch)
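	// Close c.done once the job stops or errors; record the stream error if the channel closes first.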
	go func() {
		for event := range ch {
			if event.Event == "stop" || event.Event == "error" {
				close(c.done)
				return
			}
		}
		c.streamErr = stream.Err()
		close(c.done)
		// TODO: handle disconnections
	}()

	var rwc cluster.ReadWriteCloser
	var attachWait func() error

	if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil ||
		c.stdoutPipe != nil || c.stderrPipe != nil || c.stdinPipe != nil {
		req := &host.AttachReq{
			JobID:  job.ID,
			Height: c.TermHeight,
			Width:  c.TermWidth,
			Flags:  host.AttachFlagStream,
		}
		if job.Config.AttachStdout {
			req.Flags |= host.AttachFlagStdout
		}
		if job.Config.AttachStderr {
			req.Flags |= host.AttachFlagStderr
		}
		if job.Config.AttachStdin {
			req.Flags |= host.AttachFlagStdin
		}
		rwc, attachWait, err = c.host.Attach(req, true)
		if err != nil {
			c.close()
			return err
		}
	}

	goroutines := make([]func() error, 0, 4)

	c.attachConn = rwc
	if attachWait != nil {
		goroutines = append(goroutines, attachWait)
	}

	if c.stdinPipe != nil {
		c.stdinPipe.set(writeCloseCloser{rwc})
	} else if c.Stdin != nil {
		goroutines = append(goroutines, func() error {
			_, err := io.Copy(rwc, c.Stdin)
			rwc.CloseWrite()
			return err
		})
	}
	if !c.TTY {
		if c.stdoutPipe != nil || c.stderrPipe != nil {
			stdout, stderr := demultiplex.Streams(rwc)
			if c.stdoutPipe != nil {
				c.stdoutPipe.set(stdout)
			} else if c.Stdout != nil {
				goroutines = append(goroutines, cpFunc(c.Stdout, stdout))
			}
			if c.stderrPipe != nil {
				c.stderrPipe.set(stderr)
			} else if c.Stderr != nil {
				goroutines = append(goroutines, cpFunc(c.Stderr, stderr))
			}
		} else if c.Stdout != nil || c.Stderr != nil {
			goroutines = append(goroutines, func() error {
				return demultiplex.Copy(c.Stdout, c.Stderr, rwc)
			})
		}
	} else if c.stdoutPipe != nil {
		c.stdoutPipe.set(rwc)
	} else if c.Stdout != nil {
		goroutines = append(goroutines, cpFunc(c.Stdout, rwc))
	}

	c.errCh = make(chan error, len(goroutines))
	for _, fn := range goroutines {
		go func(fn func() error) {
			c.errCh <- fn()
		}(fn)
	}

	_, err = c.cluster.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{c.HostID: {job}}})
	return err
}
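
A hypothetical sketch of launching a one-off job with Cmd. The constructor newCmd below is a placeholder (the real package presumably initializes Cmd's internal channels before Start is called); only fields referenced by Start above are set:

// newCmd stands in for whatever constructor the package actually provides.
cmd := newCmd("flynn/example", "ls", "/")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
	log.Fatal(err)
}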