Example #1
func startJob(s *State, hostID string, job *host.Job) (*Job, error) {
	cc, err := s.ClusterClient()
	if err != nil {
		return nil, err
	}
	if hostID == "" {
		hostID, err = randomHost(cc)
		if err != nil {
			return nil, err
		}
	}

	// TODO: filter by tags

	job.ID = cluster.RandomJobID("")
	data := &Job{HostID: hostID, JobID: job.ID}

	hc, err := cc.DialHost(hostID)
	if err != nil {
		return nil, err
	}
	defer hc.Close()

	jobStatus := make(chan error)
	events := make(chan *host.Event)
	stream := hc.StreamEvents(data.JobID, events)
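	// Watch the job's event stream in the background and report the first
	// start/stop/error event (or an unexpected disconnect) on jobStatus.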
	go func() {
		defer stream.Close()
		for e := range events {
			switch e.Event {
			case "start", "stop":
				jobStatus <- nil
				return
			case "error":
				job, err := hc.GetJob(data.JobID)
				if err != nil {
					jobStatus <- err
					return
				}
				if job.Error == nil {
					jobStatus <- fmt.Errorf("bootstrap: unknown error from host")
					return
				}
				jobStatus <- fmt.Errorf("bootstrap: host error while launching job: %q", *job.Error)
				return
			default:
			}
		}
		jobStatus <- fmt.Errorf("bootstrap: host job stream disconnected unexpectedly: %q", stream.Err())
	}()

	_, err = cc.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		return nil, err
	}

	return data, <-jobStatus
}
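A plausible sketch of the randomHost helper used above, which is not part of this excerpt. This is an assumption rather than the project's code: it presumes the cluster client exposes the map-returning ListHosts seen in the later examples, and it picks an arbitrary map key the same way the "pick a random host" loops further down do.

// hostLister is a minimal, hypothetical view of the cluster client for this sketch.
type hostLister interface {
	ListHosts() (map[string]host.Host, error)
}

// randomHost returns an arbitrary host ID; Go map iteration order is
// unspecified, so this is effectively a random pick. It uses the standard
// library "errors" package and the host package from the examples.
func randomHost(cc hostLister) (string, error) {
	hosts, err := cc.ListHosts()
	if err != nil {
		return "", err
	}
	for id := range hosts {
		return id, nil
	}
	return "", errors.New("no hosts found")
}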
Example #2
func (f *Formation) start(typ string, hostID string) (job *Job, err error) {
	config, err := f.jobConfig(typ)
	if err != nil {
		return nil, err
	}
	config.ID = cluster.RandomJobID("")

	hosts, err := f.c.ListHosts()
	if err != nil {
		return nil, err
	}
	if len(hosts) == 0 {
		// TODO: log/handle error
	}
	var h host.Host

	if hostID != "" {
		h = hosts[hostID]
	} else {
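		// No host was specified: count how many jobs of this type each host is
		// already running, sort the hosts by that count, and pick the first.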
		hostCounts := make(map[string]int, len(hosts))
		for _, h := range hosts {
			hostCounts[h.ID] = 0
			for _, job := range h.Jobs {
				if f.jobType(job) != typ {
					continue
				}
				hostCounts[h.ID]++
			}
		}
		sh := make(sortHosts, 0, len(hosts))
		for id, count := range hostCounts {
			sh = append(sh, sortHost{id, count})
		}
		sh.Sort()

		h = hosts[sh[0].ID]
	}

	job = f.jobs.Add(typ, h.ID, config.ID)
	job.Formation = f
	f.c.jobs.Add(job)

	_, err = f.c.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{h.ID: {config}}})
	if err != nil {
		f.jobs.Remove(job)
		f.c.jobs.Remove(config.ID, h.ID)
		return nil, err
	}
	return job, nil
}
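Example #2 relies on sortHost/sortHosts helpers that are not shown. A minimal sketch, assuming ascending order by job count so that sh[0] ends up being the least-loaded host:

// Hypothetical sketch, not the project's actual definitions. Requires the
// standard library "sort" package.
type sortHost struct {
	ID   string
	Jobs int
}

type sortHosts []sortHost

func (s sortHosts) Len() int           { return len(s) }
func (s sortHosts) Less(i, j int) bool { return s[i].Jobs < s[j].Jobs }
func (s sortHosts) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s sortHosts) Sort()              { sort.Sort(s) }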
Example #3
File: exec.go  Project: johan--/flynn
func (c *Cmd) Start() error {
	if c.started {
		return errors.New("exec: already started")
	}
	c.started = true
	if c.cluster == nil {
		var err error
		c.cluster, err = cluster.NewClient()
		if err != nil {
			return err
		}
		c.closeCluster = true
	}

	hosts, err := c.cluster.ListHosts()
	if err != nil {
		return err
	}
	if c.HostID == "" {
		// TODO: check if this is actually random
		for c.HostID = range hosts {
			break
		}
	}
	if c.JobID == "" {
		c.JobID = cluster.RandomJobID("")
	}

	job := &host.Job{
		ID:       c.JobID,
		Artifact: c.Artifact,
		Config: host.ContainerConfig{
			Entrypoint: c.Entrypoint,
			Cmd:        c.Cmd,
			TTY:        c.TTY,
			Env:        c.Env,
			Stdin:      c.Stdin != nil || c.stdinPipe != nil,
		},
		Metadata: c.Meta,
	}

	c.host, err = c.cluster.DialHost(c.HostID)
	if err != nil {
		return err
	}

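	// The attach request is made before the job is scheduled below, so the
	// stream is already in place when the job starts.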
	if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil || c.stdinPipe != nil {
		req := &host.AttachReq{
			JobID:  job.ID,
			Height: c.TermHeight,
			Width:  c.TermWidth,
			Flags:  host.AttachFlagStream,
		}
		if c.Stdout != nil {
			req.Flags |= host.AttachFlagStdout
		}
		if c.Stderr != nil {
			req.Flags |= host.AttachFlagStderr
		}
		if job.Config.Stdin {
			req.Flags |= host.AttachFlagStdin
		}
		c.attachClient, err = c.host.Attach(req, true)
		if err != nil {
			c.close()
			return err
		}
	}

	if c.stdinPipe != nil {
		c.stdinPipe.set(writeCloseCloser{c.attachClient})
	} else if c.Stdin != nil {
		go func() {
			io.Copy(c.attachClient, c.Stdin)
			c.attachClient.CloseWrite()
		}()
	}
	go func() {
		c.exitStatus, c.streamErr = c.attachClient.Receive(c.Stdout, c.Stderr)
		close(c.done)
	}()

	_, err = c.cluster.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{c.HostID: {job}}})
	return err
}
Example #4
File: manifest.go  Project: johan--/flynn
func (m *manifestRunner) runManifest(r io.Reader) (map[string]*ManifestData, error) {
	g := grohl.NewContext(grohl.Data{"fn": "run_manifest"})
	var services []*manifestService
	if err := json.NewDecoder(r).Decode(&services); err != nil {
		return nil, err
	}

	serviceData := make(map[string]*ManifestData, len(services))

	m.state.mtx.Lock()
	for _, job := range m.state.jobs {
		if job.ManifestID == "" || job.Status != host.StatusRunning {
			continue
		}
		var service *manifestService
		for _, s := range services {
			if s.ID == job.ManifestID {
				service = s
				break
			}
		}
		if service == nil {
			continue
		}
		g.Log(grohl.Data{"at": "restore", "service": service.ID, "job.id": job.Job.ID})

		data := &ManifestData{
			ExternalIP: m.externalAddr,
			InternalIP: job.InternalIP,
			Env:        job.Job.Config.Env,
			Services:   serviceData,
			ports:      m.ports["tcp"],
			readonly:   true,
		}
		data.TCPPorts = make([]int, 0, len(job.Job.Config.Ports))
		for _, p := range job.Job.Config.Ports {
			if p.Proto != "tcp" {
				continue
			}
			data.TCPPorts = append(data.TCPPorts, p.Port)
		}
		serviceData[service.ID] = data
	}
	m.state.mtx.Unlock()

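	// Start any services that were not restored from already-running jobs above.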
	for _, service := range services {
		if _, exists := serviceData[service.ID]; exists {
			continue
		}

		data := &ManifestData{
			Env:        parseEnviron(),
			Services:   serviceData,
			ExternalIP: m.externalAddr,
			ports:      m.ports["tcp"],
		}

		// Add explicit tcp ports to data.TCPPorts
		for _, port := range service.TCPPorts {
			port, err := strconv.Atoi(port)
			if err != nil {
				return nil, err
			}
			data.TCPPorts = append(data.TCPPorts, port)
		}

		var buf bytes.Buffer

		interp := func(s string) (string, error) {
			t, err := template.New("arg").Parse(s)
			if err != nil {
				return "", err
			}
			if err := t.Execute(&buf, data); err != nil {
				return "", err
			}
			defer buf.Reset()
			return buf.String(), nil
		}

		args := make([]string, 0, len(service.Args))
		for _, arg := range service.Args {
			arg, err := interp(arg)
			if err != nil {
				return nil, err
			}
			if strings.TrimSpace(arg) == "" {
				continue
			}
			args = append(args, arg)
		}
		var err error
		for k, v := range service.Env {
			service.Env[k], err = interp(v)
			if err != nil {
				return nil, err
			}
		}
		data.Env = service.Env

		if service.Image == "" {
			service.Image = "https://registry.hub.docker.com/flynn/" + service.ID
		}
		if service.ImageID != "" {
			service.Image += "?id=" + service.ImageID
		}

		job := &host.Job{
			ID: cluster.RandomJobID("flynn-" + service.ID + "-"),
			Artifact: host.Artifact{
				Type: "docker",
				URI:  service.Image,
			},
			Config: host.ContainerConfig{
				Entrypoint: service.Entrypoint,
				Cmd:        args,
				Env:        data.Env,
			},
		}
		if job.Config.Env == nil {
			job.Config.Env = make(map[string]string)
		}
		job.Config.Env["EXTERNAL_IP"] = m.externalAddr

		job.Config.Ports = make([]host.Port, len(data.TCPPorts))
		for i, port := range data.TCPPorts {
			job.Config.Ports[i] = host.Port{Proto: "tcp", Port: port}
		}
		if len(job.Config.Ports) == 0 {
			job.Config.Ports = []host.Port{{Proto: "tcp"}}
		}

		if err := m.backend.Run(job); err != nil {
			return nil, err
		}

		m.state.SetManifestID(job.ID, service.ID)
		activeJob := m.state.GetJob(job.ID)
		data.InternalIP = activeJob.InternalIP
		data.readonly = true
		serviceData[service.ID] = data
	}

	return serviceData, nil
}
Example #5
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}

	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}

	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	data, err = c.artifactRepo.Get(release.ArtifactID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	artifact := data.(*ct.Artifact)
	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.ListHosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	hostID := schedutil.PickHost(hosts).ID

	id := cluster.RandomJobID("")
	app := c.getApp(ctx)
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = hostID + "-" + id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
	}
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		client, err := c.clusterClient.DialHost(hostID)
		if err != nil {
			respondWithError(w, fmt.Errorf("host connect failed: %s", err.Error()))
			return
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	_, err = c.clusterClient.AddJobs(map[string][]*host.Job{hostID: {job}})
	if err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachClient.Wait(); err != nil {
			respondWithError(w, fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Connection", "upgrade")
		w.Header().Set("Upgrade", "flynn-attach/0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

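		// Proxy the hijacked client connection and the attach connection in
		// both directions, waiting for both copies to finish.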
		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done

		return
	} else {
		httphelper.JSON(w, 200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
Example #6
func (l *LibvirtLXCBackend) Run(job *host.Job, runConfig *RunConfig) (err error) {
	g := grohl.NewContext(grohl.Data{"backend": "libvirt-lxc", "fn": "run", "job.id": job.ID})
	g.Log(grohl.Data{"at": "start", "job.artifact.uri": job.Artifact.URI, "job.cmd": job.Config.Cmd})

	if !job.Config.HostNetwork {
		<-l.networkConfigured
	}
	if _, ok := job.Config.Env["DISCOVERD"]; !ok {
		<-l.discoverdConfigured
	}

	if runConfig == nil {
		runConfig = &RunConfig{}
	}
	container := &libvirtContainer{
		l:    l,
		job:  job,
		done: make(chan struct{}),
	}
	if !job.Config.HostNetwork {
		container.IP, err = l.ipalloc.RequestIP(l.bridgeNet, runConfig.IP)
		if err != nil {
			g.Log(grohl.Data{"at": "request_ip", "status": "error", "err": err})
			return err
		}
	}
	defer func() {
		if err != nil {
			go container.cleanup()
		}
	}()

	g.Log(grohl.Data{"at": "pull_image"})
	layers, err := l.pinkertonPull(job.Artifact.URI)
	if err != nil {
		g.Log(grohl.Data{"at": "pull_image", "status": "error", "err": err})
		return err
	}
	imageID, err := pinkerton.ImageID(job.Artifact.URI)
	if err == pinkerton.ErrNoImageID && len(layers) > 0 {
		imageID = layers[len(layers)-1].ID
	} else if err != nil {
		g.Log(grohl.Data{"at": "image_id", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "read_config"})
	imageConfig, err := readDockerImageConfig(imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "read_config", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "checkout"})
	rootPath, err := l.pinkerton.Checkout(job.ID, imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "checkout", "status": "error", "err": err})
		return err
	}
	container.RootPath = rootPath

	g.Log(grohl.Data{"at": "mount"})
	if err := bindMount(l.InitPath, filepath.Join(rootPath, ".containerinit"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": ".containerinit", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, "etc"), 0755); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": "etc", "status": "error", "err": err})
		return err
	}

	if err := bindMount(l.resolvConf, filepath.Join(rootPath, "etc/resolv.conf"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": "resolv.conf", "status": "error", "err": err})
		return err
	}

	if err := writeHostname(filepath.Join(rootPath, "etc/hosts"), job.ID); err != nil {
		g.Log(grohl.Data{"at": "write_hosts", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, ".container-shared"), 0700); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": ".container-shared", "status": "error", "err": err})
		return err
	}
	for i, m := range job.Config.Mounts {
		if err := os.MkdirAll(filepath.Join(rootPath, m.Location), 0755); err != nil {
			g.Log(grohl.Data{"at": "mkdir_mount", "dir": m.Location, "status": "error", "err": err})
			return err
		}
		if m.Target == "" {
			m.Target = filepath.Join(l.VolPath, cluster.RandomJobID(""))
			job.Config.Mounts[i].Target = m.Target
			if err := os.MkdirAll(m.Target, 0755); err != nil {
				g.Log(grohl.Data{"at": "mkdir_vol", "dir": m.Target, "status": "error", "err": err})
				return err
			}
		}
		if err := bindMount(m.Target, filepath.Join(rootPath, m.Location), m.Writeable, true); err != nil {
			g.Log(grohl.Data{"at": "mount", "target": m.Target, "location": m.Location, "status": "error", "err": err})
			return err
		}
	}

	// apply volumes
	for _, v := range job.Config.Volumes {
		vol := l.vman.GetVolume(v.VolumeID)
		if vol == nil {
			err := fmt.Errorf("job %s required volume %s, but that volume does not exist", job.ID, v.VolumeID)
			g.Log(grohl.Data{"at": "volume", "volumeID": v.VolumeID, "status": "error", "err": err})
			return err
		}
		if err := os.MkdirAll(filepath.Join(rootPath, v.Target), 0755); err != nil {
			g.Log(grohl.Data{"at": "volume_mkdir", "dir": v.Target, "status": "error", "err": err})
			return err
		}
		if err := bindMount(vol.Location(), filepath.Join(rootPath, v.Target), v.Writeable, true); err != nil {
			g.Log(grohl.Data{"at": "volume_mount2", "target": v.Target, "volumeID": v.VolumeID, "status": "error", "err": err})
			return err
		}
	}

	if job.Config.Env == nil {
		job.Config.Env = make(map[string]string)
	}
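	// Give any unset ports a default (5000+i) and expose them to the job via
	// the PORT and PORT_<n> environment variables.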
	for i, p := range job.Config.Ports {
		if p.Proto != "tcp" && p.Proto != "udp" {
			return fmt.Errorf("unknown port proto %q", p.Proto)
		}

		if p.Port == 0 {
			job.Config.Ports[i].Port = 5000 + i
		}
		if i == 0 {
			job.Config.Env["PORT"] = strconv.Itoa(job.Config.Ports[i].Port)
		}
		job.Config.Env[fmt.Sprintf("PORT_%d", i)] = strconv.Itoa(job.Config.Ports[i].Port)
	}

	if !job.Config.HostNetwork {
		job.Config.Env["EXTERNAL_IP"] = container.IP.String()
	}

	config := &containerinit.Config{
		TTY:       job.Config.TTY,
		OpenStdin: job.Config.Stdin,
		WorkDir:   job.Config.WorkingDir,
		Resources: job.Resources,
	}
	if !job.Config.HostNetwork {
		config.IP = container.IP.String() + "/24"
		config.Gateway = l.bridgeAddr.String()
	}
	if config.WorkDir == "" {
		config.WorkDir = imageConfig.WorkingDir
	}
	if job.Config.Uid > 0 {
		config.User = strconv.Itoa(job.Config.Uid)
	} else if imageConfig.User != "" {
		// TODO: check and lookup user from image config
	}
	if len(job.Config.Entrypoint) > 0 {
		config.Args = job.Config.Entrypoint
		config.Args = append(config.Args, job.Config.Cmd...)
	} else {
		config.Args = imageConfig.Entrypoint
		if len(job.Config.Cmd) > 0 {
			config.Args = append(config.Args, job.Config.Cmd...)
		} else {
			config.Args = append(config.Args, imageConfig.Cmd...)
		}
	}
	for _, port := range job.Config.Ports {
		config.Ports = append(config.Ports, port)
	}

	g.Log(grohl.Data{"at": "write_config"})
	l.envMtx.RLock()
	err = writeContainerConfig(filepath.Join(rootPath, ".containerconfig"), config,
		map[string]string{
			"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"TERM": "xterm",
			"HOME": "/",
		},
		l.defaultEnv,
		job.Config.Env,
		map[string]string{
			"HOSTNAME": job.ID,
		},
	)
	l.envMtx.RUnlock()
	if err != nil {
		g.Log(grohl.Data{"at": "write_config", "status": "error", "err": err})
		return err
	}

	l.state.AddJob(job, container.IP)
	domain := &lt.Domain{
		Type:   "lxc",
		Name:   job.ID,
		Memory: lt.UnitInt{Value: 1, Unit: "GiB"},
		OS: lt.OS{
			Type: lt.OSType{Value: "exe"},
			Init: "/.containerinit",
		},
		Devices: lt.Devices{
			Filesystems: []lt.Filesystem{{
				Type:   "mount",
				Source: lt.FSRef{Dir: rootPath},
				Target: lt.FSRef{Dir: "/"},
			}},
			Consoles: []lt.Console{{Type: "pty"}},
		},
		OnPoweroff: "preserve",
		OnCrash:    "preserve",
	}
	if spec, ok := job.Resources[resource.TypeMemory]; ok && spec.Limit != nil {
		domain.Memory = lt.UnitInt{Value: *spec.Limit, Unit: "bytes"}
	}

	if !job.Config.HostNetwork {
		domain.Devices.Interfaces = []lt.Interface{{
			Type:   "network",
			Source: lt.InterfaceSrc{Network: libvirtNetName},
		}}
	}

	// attempt to run libvirt commands multiple times in case the libvirt daemon is
	// temporarily unavailable (e.g. it has restarted, which sometimes happens in CI)
	g.Log(grohl.Data{"at": "define_domain"})
	var vd libvirt.VirDomain
	if err := l.withConnRetries(func() (err error) {
		vd, err = l.libvirt.DomainDefineXML(string(domain.XML()))
		return
	}); err != nil {
		g.Log(grohl.Data{"at": "define_domain", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "create_domain"})
	if err := l.withConnRetries(vd.Create); err != nil {
		g.Log(grohl.Data{"at": "create_domain", "status": "error", "err": err})
		return err
	}
	uuid, err := vd.GetUUIDString()
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_uuid", "status": "error", "err": err})
		return err
	}
	g.Log(grohl.Data{"at": "get_uuid", "uuid": uuid})
	l.state.SetContainerID(job.ID, uuid)

	domainXML, err := vd.GetXMLDesc(0)
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_xml", "status": "error", "err": err})
		return err
	}
	domain = &lt.Domain{}
	if err := xml.Unmarshal([]byte(domainXML), domain); err != nil {
		g.Log(grohl.Data{"at": "unmarshal_domain_xml", "status": "error", "err": err})
		return err
	}

	go container.watch(nil)

	g.Log(grohl.Data{"at": "finish"})
	return nil
}
Example #7
func (l *LibvirtLXCBackend) Run(job *host.Job) (err error) {
	g := grohl.NewContext(grohl.Data{"backend": "libvirt-lxc", "fn": "run", "job.id": job.ID})
	g.Log(grohl.Data{"at": "start", "job.artifact.uri": job.Artifact.URI, "job.cmd": job.Config.Cmd})

	ip, err := ipallocator.RequestIP(defaultNet, nil)
	if err != nil {
		g.Log(grohl.Data{"at": "request_ip", "status": "error", "err": err})
		return err
	}
	container := &libvirtContainer{
		l:    l,
		job:  job,
		IP:   *ip,
		done: make(chan struct{}),
	}
	defer func() {
		if err != nil {
			go container.cleanup()
		}
	}()

	g.Log(grohl.Data{"at": "pull_image"})
	layers, err := pinkerton.Pull(job.Artifact.URI)
	if err != nil {
		g.Log(grohl.Data{"at": "pull_image", "status": "error", "err": err})
		return err
	}
	imageID, err := pinkerton.ImageID(job.Artifact.URI)
	if err == pinkerton.ErrNoImageID && len(layers) > 0 {
		imageID = layers[len(layers)-1].ID
	} else if err != nil {
		g.Log(grohl.Data{"at": "image_id", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "read_config"})
	imageConfig, err := readDockerImageConfig(imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "read_config", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "checkout"})
	rootPath, err := pinkerton.Checkout(job.ID, imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "checkout", "status": "error", "err": err})
		return err
	}
	container.RootPath = rootPath

	g.Log(grohl.Data{"at": "mount"})
	if err := bindMount(l.InitPath, filepath.Join(rootPath, ".containerinit"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": ".containerinit", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, "etc"), 0755); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": "etc", "status": "error", "err": err})
		return err
	}
	if err := bindMount("/etc/resolv.conf", filepath.Join(rootPath, "etc/resolv.conf"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": "resolv.conf", "status": "error", "err": err})
		return err
	}
	if err := writeHostname(filepath.Join(rootPath, "etc/hosts"), job.ID); err != nil {
		g.Log(grohl.Data{"at": "write_hosts", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, ".container-shared"), 0700); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": ".container-shared", "status": "error", "err": err})
		return err
	}
	for i, m := range job.Config.Mounts {
		if err := os.MkdirAll(filepath.Join(rootPath, m.Location), 0755); err != nil {
			g.Log(grohl.Data{"at": "mkdir_mount", "dir": m.Location, "status": "error", "err": err})
			return err
		}
		if m.Target == "" {
			m.Target = filepath.Join(l.VolPath, cluster.RandomJobID(""))
			job.Config.Mounts[i].Target = m.Target
			if err := os.MkdirAll(m.Target, 0755); err != nil {
				g.Log(grohl.Data{"at": "mkdir_vol", "dir": m.Target, "status": "error", "err": err})
				return err
			}
		}
		if err := bindMount(m.Target, filepath.Join(rootPath, m.Location), m.Writeable, true); err != nil {
			g.Log(grohl.Data{"at": "mount", "target": m.Target, "location": m.Location, "status": "error", "err": err})
			return err
		}
	}

	if job.Config.Env == nil {
		job.Config.Env = make(map[string]string)
	}
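	// Allocate host ports: use the requested port when one is given, otherwise
	// take the next free port from the per-protocol allocator, and expose the
	// result via the PORT and PORT_<n> environment variables.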
	for i, p := range job.Config.Ports {
		if p.Proto != "tcp" && p.Proto != "udp" {
			return fmt.Errorf("unknown port proto %q", p.Proto)
		}

		var port uint16
		if p.Port <= 0 {
			job.Config.Ports[i].RangeEnd = 0
			port, err = l.ports[p.Proto].Get()
		} else {
			port, err = l.ports[p.Proto].GetPort(uint16(p.Port))
		}
		if err != nil {
			g.Log(grohl.Data{"at": "alloc_port", "status": "error", "err": err})
			return err
		}
		job.Config.Ports[i].Port = int(port)
		if job.Config.Ports[i].RangeEnd == 0 {
			job.Config.Ports[i].RangeEnd = int(port)
		}

		if i == 0 {
			job.Config.Env["PORT"] = strconv.Itoa(int(port))
		}
		job.Config.Env[fmt.Sprintf("PORT_%d", i)] = strconv.Itoa(int(port))
	}

	g.Log(grohl.Data{"at": "write_env"})
	err = writeContainerEnv(filepath.Join(rootPath, ".containerenv"),
		map[string]string{
			"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"TERM": "xterm",
			"HOME": "/",
		},
		job.Config.Env,
		map[string]string{
			"HOSTNAME": job.ID,
		},
	)
	if err != nil {
		g.Log(grohl.Data{"at": "write_env", "status": "error", "err": err})
		return err
	}

	args := []string{
		"-i", ip.String() + "/24",
		"-g", defaultGW.String(),
	}
	if job.Config.TTY {
		args = append(args, "-tty")
	}
	if job.Config.Stdin {
		args = append(args, "-stdin")
	}
	if job.Config.WorkingDir != "" {
		args = append(args, "-w", job.Config.WorkingDir)
	} else if imageConfig.WorkingDir != "" {
		args = append(args, "-w", imageConfig.WorkingDir)
	}
	if job.Config.Uid > 0 {
		args = append(args, "-u", strconv.Itoa(job.Config.Uid))
	} else if imageConfig.User != "" {
		// TODO: check and lookup user from image config
	}
	if len(job.Config.Entrypoint) > 0 {
		args = append(args, job.Config.Entrypoint...)
		args = append(args, job.Config.Cmd...)
	} else {
		args = append(args, imageConfig.Entrypoint...)
		if len(job.Config.Cmd) > 0 {
			args = append(args, job.Config.Cmd...)
		} else {
			args = append(args, imageConfig.Cmd...)
		}
	}

	l.state.AddJob(job)
	l.state.SetInternalIP(job.ID, ip.String())
	domain := &lt.Domain{
		Type:   "lxc",
		Name:   job.ID,
		Memory: lt.UnitInt{Value: 1, Unit: "GiB"},
		VCPU:   1,
		OS: lt.OS{
			Type:     lt.OSType{Value: "exe"},
			Init:     "/.containerinit",
			InitArgs: args,
		},
		Devices: lt.Devices{
			Filesystems: []lt.Filesystem{{
				Type:   "mount",
				Source: lt.FSRef{Dir: rootPath},
				Target: lt.FSRef{Dir: "/"},
			}},
			Interfaces: []lt.Interface{{
				Type:   "network",
				Source: lt.InterfaceSrc{Network: "default"},
			}},
			Consoles: []lt.Console{{Type: "pty"}},
		},
		OnPoweroff: "preserve",
		OnCrash:    "preserve",
	}

	g.Log(grohl.Data{"at": "define_domain"})
	vd, err := l.libvirt.DomainDefineXML(string(domain.XML()))
	if err != nil {
		g.Log(grohl.Data{"at": "define_domain", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "create_domain"})
	if err := vd.Create(); err != nil {
		g.Log(grohl.Data{"at": "create_domain", "status": "error", "err": err})
		return err
	}
	uuid, err := vd.GetUUIDString()
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_uuid", "status": "error", "err": err})
		return err
	}
	g.Log(grohl.Data{"at": "get_uuid", "uuid": uuid})
	l.state.SetContainerID(job.ID, uuid)

	domainXML, err := vd.GetXMLDesc(0)
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_xml", "status": "error", "err": err})
		return err
	}
	domain = &lt.Domain{}
	if err := xml.Unmarshal([]byte(domainXML), domain); err != nil {
		g.Log(grohl.Data{"at": "unmarshal_domain_xml", "status": "error", "err": err})
		return err
	}

	if len(domain.Devices.Interfaces) == 0 || domain.Devices.Interfaces[0].Target == nil ||
		domain.Devices.Interfaces[0].Target.Dev == "" {
		err = errors.New("domain config missing interface")
		g.Log(grohl.Data{"at": "enable_hairpin", "status": "error", "err": err})
		return err
	}
	iface := domain.Devices.Interfaces[0].Target.Dev
	if err := enableHairpinMode(iface); err != nil {
		g.Log(grohl.Data{"at": "enable_hairpin", "status": "error", "err": err})
		return err
	}

	for _, p := range job.Config.Ports {
		if err := l.forwarder.Add(&net.TCPAddr{IP: *ip, Port: p.Port}, p.RangeEnd, p.Proto); err != nil {
			g.Log(grohl.Data{"at": "forward_port", "port": p.Port, "status": "error", "err": err})
			return err
		}
	}

	go container.watch(nil)

	g.Log(grohl.Data{"at": "finish"})
	return nil
}
Example #8
File: jobs.go  Project: snormore/flynn
func runJob(app *ct.App, newJob ct.NewJob, releases *ReleaseRepo, artifacts *ArtifactRepo, cl clusterClient, req *http.Request, w http.ResponseWriter, r ResponseHelper) {
	data, err := releases.Get(newJob.ReleaseID)
	if err != nil {
		r.Error(err)
		return
	}
	release := data.(*ct.Release)
	data, err = artifacts.Get(release.ArtifactID)
	if err != nil {
		r.Error(err)
		return
	}
	artifact := data.(*ct.Artifact)
	attach := strings.Contains(req.Header.Get("Accept"), "application/vnd.flynn.attach")

	env := make(map[string]string, len(release.Env)+len(newJob.Env))
	for k, v := range release.Env {
		env[k] = v
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	job := &host.Job{
		ID: cluster.RandomJobID(""),
		Metadata: map[string]string{
			"flynn-controller.app":      app.ID,
			"flynn-controller.app_name": app.Name,
			"flynn-controller.release":  release.ID,
		},
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:   newJob.Cmd,
			Env:   env,
			TTY:   newJob.TTY,
			Stdin: attach,
		},
	}
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}

	hosts, err := cl.ListHosts()
	if err != nil {
		r.Error(err)
		return
	}
	// pick a random host
	var hostID string
	for hostID = range hosts {
		break
	}
	if hostID == "" {
		r.Error(errors.New("no hosts found"))
		return
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		client, err := cl.DialHost(hostID)
		if err != nil {
			r.Error(fmt.Errorf("host connect failed: %s", err.Error()))
			return
		}
		defer client.Close()
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			r.Error(fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	_, err = cl.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		r.Error(fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachClient.Wait(); err != nil {
			r.Error(fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		done := make(chan struct{}, 2)
		cp := func(to io.Writer, from io.Reader) {
			io.Copy(to, from)
			done <- struct{}{}
		}
		go cp(conn, attachClient.Conn())
		go cp(attachClient.Conn(), conn)
		<-done
		<-done

		return
	} else {
		r.JSON(200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}
Example #9
func (c *Cmd) Start() error {
	if c.started {
		return errors.New("exec: already started")
	}
	c.started = true
	if c.cluster == nil {
		var err error
		c.cluster, err = cluster.NewClient()
		if err != nil {
			return err
		}
		c.closeCluster = true
	}

	hosts, err := c.cluster.ListHosts()
	if err != nil {
		return err
	}
	if c.HostID == "" {
		// TODO: check if this is actually random
		for c.HostID = range hosts {
			break
		}
	}
	if c.JobID == "" {
		c.JobID = cluster.RandomJobID("")
	}

	job := &host.Job{
		ID: c.JobID,
		Config: &docker.Config{
			Image: c.Image,
			Cmd:   c.Cmd,
			Tty:   c.TTY,
			Env:   formatEnv(c.Env),
		},
		Attributes: c.Attrs,
	}
	if c.Stdout != nil || c.stdoutPipe != nil {
		job.Config.AttachStdout = true
	}
	if c.Stderr != nil || c.stderrPipe != nil {
		job.Config.AttachStderr = true
	}
	if c.Stdin != nil || c.stdinPipe != nil {
		job.Config.AttachStdin = true
		job.Config.OpenStdin = true
		job.Config.StdinOnce = true
	}

	c.host, err = c.cluster.DialHost(c.HostID)
	if err != nil {
		return err
	}

	// subscribe to host events
	ch := make(chan *host.Event)
	stream := c.host.StreamEvents(job.ID, ch)
	go func() {
		for event := range ch {
			if event.Event == "stop" || event.Event == "error" {
				close(c.done)
				return
			}
		}
		c.streamErr = stream.Err()
		close(c.done)
		// TODO: handle disconnections
	}()

	var rwc cluster.ReadWriteCloser
	var attachWait func() error

	if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil ||
		c.stdoutPipe != nil || c.stderrPipe != nil || c.stdinPipe != nil {
		req := &host.AttachReq{
			JobID:  job.ID,
			Height: c.TermHeight,
			Width:  c.TermWidth,
			Flags:  host.AttachFlagStream,
		}
		if job.Config.AttachStdout {
			req.Flags |= host.AttachFlagStdout
		}
		if job.Config.AttachStderr {
			req.Flags |= host.AttachFlagStderr
		}
		if job.Config.AttachStdin {
			req.Flags |= host.AttachFlagStdin
		}
		rwc, attachWait, err = c.host.Attach(req, true)
		if err != nil {
			c.close()
			return err
		}
	}

	goroutines := make([]func() error, 0, 4)

	c.attachConn = rwc
	if attachWait != nil {
		goroutines = append(goroutines, attachWait)
	}

	if c.stdinPipe != nil {
		c.stdinPipe.set(writeCloseCloser{rwc})
	} else if c.Stdin != nil {
		goroutines = append(goroutines, func() error {
			_, err := io.Copy(rwc, c.Stdin)
			rwc.CloseWrite()
			return err
		})
	}
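	// Without a TTY, stdout and stderr arrive multiplexed on the single attach
	// stream and must be demultiplexed; with a TTY there is only one stream.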
	if !c.TTY {
		if c.stdoutPipe != nil || c.stderrPipe != nil {
			stdout, stderr := demultiplex.Streams(rwc)
			if c.stdoutPipe != nil {
				c.stdoutPipe.set(stdout)
			} else if c.Stdout != nil {
				goroutines = append(goroutines, cpFunc(c.Stdout, stdout))
			}
			if c.stderrPipe != nil {
				c.stderrPipe.set(stderr)
			} else if c.Stderr != nil {
				goroutines = append(goroutines, cpFunc(c.Stderr, stderr))
			}
		} else if c.Stdout != nil || c.Stderr != nil {
			goroutines = append(goroutines, func() error {
				return demultiplex.Copy(c.Stdout, c.Stderr, rwc)
			})
		}
	} else if c.stdoutPipe != nil {
		c.stdoutPipe.set(rwc)
	} else if c.Stdout != nil {
		goroutines = append(goroutines, cpFunc(c.Stdout, rwc))
	}

	c.errCh = make(chan error, len(goroutines))
	for _, fn := range goroutines {
		go func(fn func() error) {
			c.errCh <- fn()
		}(fn)
	}

	_, err = c.cluster.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{c.HostID: {job}}})
	return err
}
Example #10
func (m *manifestRunner) runManifest(r io.Reader) (map[string]*ManifestData, error) {
	g := grohl.NewContext(grohl.Data{"fn": "run_manifest"})
	var services []*manifestService
	if err := json.NewDecoder(r).Decode(&services); err != nil {
		return nil, err
	}

	serviceData := make(map[string]*ManifestData, len(services))

	m.state.mtx.Lock()
	for _, job := range m.state.jobs {
		if job.ManifestID == "" || job.Status != host.StatusRunning && job.Status != host.StatusStarting {
			continue
		}
		var service *manifestService
		for _, s := range services {
			if s.ID == job.ManifestID {
				service = s
				break
			}
		}
		if service == nil {
			continue
		}
		g.Log(grohl.Data{"at": "restore", "service": service.ID, "job.id": job.Job.ID})

		data := &ManifestData{
			ExternalIP: m.externalAddr,
			Env:        job.Job.Config.Env,
			Services:   serviceData,
			readonly:   true,
		}
		data.TCPPorts = make([]int, 0, len(job.Job.Config.Ports))
		for _, p := range job.Job.Config.Ports {
			if p.Proto != "tcp" {
				continue
			}
			data.TCPPorts = append(data.TCPPorts, p.Port)
		}
		serviceData[service.ID] = data
	}
	m.state.mtx.Unlock()

	var netInfo NetworkInfo

	runService := func(service *manifestService) error {
		if _, exists := serviceData[service.ID]; exists {
			return nil
		}

		data := &ManifestData{
			Env:         parseEnviron(),
			Services:    serviceData,
			ExternalIP:  m.externalAddr,
			BridgeIP:    netInfo.BridgeAddr,
			Nameservers: strings.Join(netInfo.Nameservers, ","),
		}

		if service.Data {
			data.Volume("/data")
		}

		// Add explicit tcp ports to data.TCPPorts
		for _, port := range service.TCPPorts {
			port, err := strconv.Atoi(port)
			if err != nil {
				return err
			}
			data.TCPPorts = append(data.TCPPorts, port)
		}

		var buf bytes.Buffer

		interp := func(s string) (string, error) {
			t, err := template.New("arg").Parse(s)
			if err != nil {
				return "", err
			}
			if err := t.Execute(&buf, data); err != nil {
				return "", err
			}
			defer buf.Reset()
			return buf.String(), nil
		}

		args := make([]string, 0, len(service.Args))
		for _, arg := range service.Args {
			arg, err := interp(arg)
			if err != nil {
				return err
			}
			if strings.TrimSpace(arg) == "" {
				continue
			}
			args = append(args, arg)
		}
		var err error
		for k, v := range service.Env {
			service.Env[k], err = interp(v)
			if err != nil {
				return err
			}
		}
		data.Env = service.Env

		if service.Image == "" {
			service.Image = "https://registry.hub.docker.com/flynn/" + service.ID
		}
		if service.ImageID != "" {
			service.Image += "?id=" + service.ImageID
		}

		// prepare named volumes
		volumeBindings := make([]host.VolumeBinding, 0, len(data.Volumes))
		for mntPath := range data.Volumes {
			vol, err := m.vman.NewVolume()
			if err != nil {
				return err
			}
			volumeBindings = append(volumeBindings, host.VolumeBinding{
				Target:    mntPath,
				VolumeID:  vol.Info().ID,
				Writeable: true,
			})
		}

		job := &host.Job{
			ID: cluster.RandomJobID("flynn-" + service.ID + "-"),
			Artifact: host.Artifact{
				Type: "docker",
				URI:  service.Image,
			},
			Config: host.ContainerConfig{
				Entrypoint:  service.Entrypoint,
				Cmd:         args,
				Env:         data.Env,
				HostNetwork: true,
				Volumes:     volumeBindings,
			},
			Resurrect: true,
		}
		if job.Config.Env == nil {
			job.Config.Env = make(map[string]string)
		}
		job.Config.Env["EXTERNAL_IP"] = m.externalAddr

		for _, k := range service.ExposeEnv {
			if v := os.Getenv(k); v != "" {
				job.Config.Env[k] = v
			}
		}

		job.Config.Ports = make([]host.Port, len(data.TCPPorts))
		for i, port := range data.TCPPorts {
			job.Config.Ports[i] = host.Port{Proto: "tcp", Port: port}
		}
		if len(job.Config.Ports) == 0 {
			job.Config.Ports = []host.Port{{Proto: "tcp"}}
		}

		if err := m.backend.Run(job, &RunConfig{ManifestID: service.ID}); err != nil {
			return err
		}

		data.readonly = true
		serviceData[service.ID] = data
		return nil
	}

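	// Run the services in order; once flannel is up, configure container
	// networking through it so later services get the bridge address and nameservers.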
	for _, service := range services {
		if err := runService(service); err != nil {
			return nil, err
		}
		if service.ID == "flannel" {
			var job *host.Job
			for _, j := range m.state.jobs {
				if j.ManifestID != service.ID {
					continue
				}
				job = j.Job
				break
			}
			if job == nil {
				return nil, fmt.Errorf("Could not find the flannel container!")
			}
			ni, err := m.backend.ConfigureNetworking(NetworkStrategyFlannel, job.ID)
			if err != nil {
				return nil, err
			}
			netInfo = *ni
		}
	}

	return serviceData, nil
}
Example #11
func (c *Cmd) Start() error {
	if c.started {
		return errors.New("exec: already started")
	}
	c.done = make(chan struct{})
	c.started = true
	if c.cluster == nil {
		c.cluster = cluster.NewClient()
		c.closeCluster = true
	}

	if c.HostID == "" {
		hosts, err := c.cluster.Hosts()
		if err != nil {
			return err
		}
		if len(hosts) == 0 {
			return errors.New("exec: no hosts found")
		}
		host := schedutil.PickHost(hosts)
		c.HostID = host.ID()
		c.host = host
	}

	// Use the pre-defined host.Job configuration if provided;
	// otherwise generate one from the fields on exec.Cmd that mirror stdlib's os.exec.
	if c.Job == nil {
		c.Job = &host.Job{
			Artifact: c.Artifact,
			Config: host.ContainerConfig{
				Entrypoint: c.Entrypoint,
				Cmd:        c.Cmd,
				TTY:        c.TTY,
				Env:        c.Env,
				Stdin:      c.Stdin != nil || c.stdinPipe != nil,
			},
			Metadata: c.Meta,
		}
		// if attaching to stdout / stderr, avoid round tripping the
		// streams via on-disk log files.
		if c.Stdout != nil || c.Stderr != nil {
			c.Job.Config.DisableLog = true
		}
	} else {
		c.Job.Artifact = c.Artifact
	}
	if c.Job.ID == "" {
		c.Job.ID = cluster.RandomJobID("")
	}

	if c.host == nil {
		var err error
		c.host, err = c.cluster.Host(c.HostID)
		if err != nil {
			return err
		}
	}

	if c.Stdout != nil || c.Stderr != nil || c.Stdin != nil || c.stdinPipe != nil {
		req := &host.AttachReq{
			JobID:  c.Job.ID,
			Height: c.TermHeight,
			Width:  c.TermWidth,
			Flags:  host.AttachFlagStream,
		}
		if c.Stdout != nil {
			req.Flags |= host.AttachFlagStdout
		}
		if c.Stderr != nil {
			req.Flags |= host.AttachFlagStderr
		}
		if c.Job.Config.Stdin {
			req.Flags |= host.AttachFlagStdin
		}
		var err error
		c.attachClient, err = c.host.Attach(req, true)
		if err != nil {
			c.close()
			return err
		}
	}

	if c.stdinPipe != nil {
		c.stdinPipe.set(writeCloseCloser{c.attachClient})
	} else if c.Stdin != nil {
		go func() {
			io.Copy(c.attachClient, c.Stdin)
			c.attachClient.CloseWrite()
		}()
	}

	if c.attachClient == nil {
		c.eventChan = make(chan *host.Event)
		var err error
		c.eventStream, err = c.host.StreamEvents(c.Job.ID, c.eventChan)
		if err != nil {
			return err
		}
	}

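	// Collect the exit status in the background: via the attach client when
	// attached, otherwise by watching the job's event stream for stop/error.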
	go func() {
		defer close(c.done)
		if c.attachClient != nil {
			c.exitStatus, c.streamErr = c.attachClient.Receive(c.Stdout, c.Stderr)
		} else {
		outer:
			for e := range c.eventChan {
				switch e.Event {
				case "stop":
					c.exitStatus = e.Job.ExitStatus
					break outer
				case "error":
					c.streamErr = errors.New(*e.Job.Error)
					break outer
				}
			}
			c.eventStream.Close()
			if c.streamErr == nil {
				c.streamErr = c.eventStream.Err()
			}
		}
	}()

	return c.host.AddJob(c.Job)
}
Example #12
File: manifest.go  Project: upton/flynn
func (m *manifestRunner) runManifest(r io.Reader) (map[string]*ManifestData, error) {
	var services []manifestService
	if err := json.NewDecoder(r).Decode(&services); err != nil {
		return nil, err
	}

	serviceData := make(map[string]*ManifestData, len(services))
	for _, service := range services {
		data := &ManifestData{
			Env:        parseEnviron(),
			Services:   serviceData,
			ExternalIP: m.externalAddr,
			ports:      m.backend.(*DockerBackend).ports,
		}

		// Add explicit tcp ports to data.TCPPorts
		for _, port := range service.TCPPorts {
			port, err := strconv.Atoi(port)
			if err != nil {
				return nil, err
			}
			data.TCPPorts = append(data.TCPPorts, port)
		}

		var buf bytes.Buffer

		interp := func(s string) (string, error) {
			t, err := template.New("arg").Parse(s)
			if err != nil {
				return "", err
			}
			if err := t.Execute(&buf, data); err != nil {
				return "", err
			}
			defer buf.Reset()
			return buf.String(), nil
		}

		args := make([]string, 0, len(service.Args))
		for _, arg := range service.Args {
			arg, err := interp(arg)
			if err != nil {
				return nil, err
			}
			if strings.TrimSpace(arg) == "" {
				continue
			}
			args = append(args, arg)
		}
		var err error
		for k, v := range service.Env {
			service.Env[k], err = interp(v)
			if err != nil {
				return nil, err
			}
		}
		data.Env = service.Env

		if service.Image == "" {
			service.Image = "flynn/" + service.ID
		}

		job := &host.Job{
			ID:       cluster.RandomJobID("flynn-" + service.ID + "-"),
			TCPPorts: 1,
			Config: &docker.Config{
				Image:        service.Image,
				Entrypoint:   service.Entrypoint,
				Cmd:          args,
				AttachStdout: true,
				AttachStderr: true,
				Env:          dockerEnv(data.Env),
				Volumes:      data.Volumes,
				ExposedPorts: make(map[string]struct{}, len(service.TCPPorts)),
			},
			HostConfig: &docker.HostConfig{
				PortBindings: make(map[string][]docker.PortBinding, len(service.TCPPorts)),
			},
		}
		job.Config.Env = append(job.Config.Env, "EXTERNAL_IP="+m.externalAddr)

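		// Bind each explicit TCP port on the host address and expose it to the
		// container via the PORT and PORT_<n> environment variables.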
		for i, port := range service.TCPPorts {
			job.TCPPorts = 0
			if i == 0 {
				job.Config.Env = append(job.Config.Env, "PORT="+port)
			}
			job.Config.Env = append(job.Config.Env, fmt.Sprintf("PORT_%d=%s", i, port))
			job.Config.ExposedPorts[port+"/tcp"] = struct{}{}
			job.HostConfig.PortBindings[port+"/tcp"] = []docker.PortBinding{{HostPort: port, HostIp: m.bindAddr}}
		}

		if err := m.backend.Run(job); err != nil {
			return nil, err
		}

		container, err := m.backend.(*DockerBackend).docker.InspectContainer(job.ID)
		if err != nil {
			return nil, err
		}

		data.InternalIP = container.NetworkSettings.IPAddress
		data.readonly = true
		serviceData[service.ID] = data
	}

	return serviceData, nil
}
Example #13
/*
	Restore prior state from the save location defined at construction time.
	If the state save file is empty, nothing is loaded, and no error is returned.
*/
func (s *State) Restore(backend Backend) (func(), error) {
	s.backend = backend

	var resurrect []*host.ActiveJob
	if err := s.stateDB.View(func(tx *bolt.Tx) error {
		jobsBucket := tx.Bucket([]byte("jobs"))
		backendJobsBucket := tx.Bucket([]byte("backend-jobs"))
		backendGlobalBucket := tx.Bucket([]byte("backend-global"))
		resurrectionBucket := tx.Bucket([]byte("resurrection-jobs"))

		// restore jobs
		if err := jobsBucket.ForEach(func(k, v []byte) error {
			job := &host.ActiveJob{}
			if err := json.Unmarshal(v, job); err != nil {
				return err
			}
			if job.ContainerID != "" {
				s.containers[job.ContainerID] = job
			}
			s.jobs[string(k)] = job

			return nil
		}); err != nil {
			return err
		}

		// hand opaque blobs back to backend so it can do its restore
		backendJobsBlobs := make(map[string][]byte)
		if err := backendJobsBucket.ForEach(func(k, v []byte) error {
			backendJobsBlobs[string(k)] = v
			return nil
		}); err != nil {
			return err
		}
		backendGlobalBlob := backendGlobalBucket.Get([]byte("backend"))
		if err := backend.UnmarshalState(s.jobs, backendJobsBlobs, backendGlobalBlob); err != nil {
			return err
		}

		if resurrectionBucket == nil {
			s.mtx.Lock()
			for _, job := range s.jobs {
				// if there was an unclean shutdown, we resurrect all jobs marked
				// for resurrection that were running at shutdown and are no longer running.
				if job.Job.Resurrect && job.Status != host.StatusRunning {
					resurrect = append(resurrect, job)
				}
			}
			s.mtx.Unlock()
		} else {
			defer tx.DeleteBucket([]byte("resurrection-jobs"))
			if err := resurrectionBucket.ForEach(func(k, v []byte) error {
				job := &host.ActiveJob{}
				if err := json.Unmarshal(v, job); err != nil {
					return err
				}
				resurrect = append(resurrect, job)
				return nil
			}); err != nil {
				return err
			}
		}
		return nil
	}); err != nil && err != io.EOF {
		return nil, fmt.Errorf("could not restore from host persistence db: %s", err)
	}

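	// The returned function restarts every job queued for resurrection under a
	// freshly generated job ID and blocks until all Run calls have returned.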
	return func() {
		var wg sync.WaitGroup
		wg.Add(len(resurrect))
		for _, job := range resurrect {
			go func(job *host.ActiveJob) {
				// generate a new job id, this is a new job
				newID := cluster.RandomJobID("")
				log.Printf("resurrecting %s as %s", job.Job.ID, newID)
				job.Job.ID = newID
				config := &RunConfig{
					// TODO(titanous): Use Job instead of ActiveJob in
					// resurrection bucket once InternalIP is not used.
					// TODO(titanous): Passing the IP is a hack, remove it once the
					// postgres appliance doesn't use it to calculate its ID in the
					// state machine.
					IP: net.ParseIP(job.InternalIP),
				}
				backend.Run(job.Job, config)
				wg.Done()
			}(job)
		}
		wg.Wait()
	}, nil
}
Example #14
File: jobs.go  Project: upton/flynn
func runJob(app *ct.App, newJob ct.NewJob, releases *ReleaseRepo, artifacts *ArtifactRepo, cl clusterClient, req *http.Request, w http.ResponseWriter, r ResponseHelper) {
	data, err := releases.Get(newJob.ReleaseID)
	if err != nil {
		r.Error(err)
		return
	}
	release := data.(*ct.Release)
	data, err = artifacts.Get(release.ArtifactID)
	if err != nil {
		r.Error(err)
		return
	}
	artifact := data.(*ct.Artifact)
	image, err := utils.DockerImage(artifact.URI)
	if err != nil {
		log.Println("error parsing artifact uri", err)
		r.Error(ct.ValidationError{
			Field:   "artifact.uri",
			Message: "is invalid",
		})
		return
	}
	attach := strings.Contains(req.Header.Get("Accept"), "application/vnd.flynn.attach")

	job := &host.Job{
		ID: cluster.RandomJobID(""),
		Attributes: map[string]string{
			"flynn-controller.app":     app.ID,
			"flynn-controller.release": release.ID,
		},
		Config: &docker.Config{
			Cmd:          newJob.Cmd,
			Env:          utils.FormatEnv(release.Env, newJob.Env),
			Image:        image,
			AttachStdout: true,
			AttachStderr: true,
		},
	}
	if newJob.TTY {
		job.Config.Tty = true
	}
	if attach {
		job.Config.AttachStdin = true
		job.Config.StdinOnce = true
		job.Config.OpenStdin = true
	}

	hosts, err := cl.ListHosts()
	if err != nil {
		r.Error(err)
		return
	}
	// pick a random host
	var hostID string
	for hostID = range hosts {
		break
	}
	if hostID == "" {
		r.Error(errors.New("no hosts found"))
		return
	}

	var attachConn cluster.ReadWriteCloser
	var attachWait func() error
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: newJob.Lines,
			Width:  newJob.Columns,
		}
		client, err := cl.DialHost(hostID)
		if err != nil {
			r.Error(fmt.Errorf("lorne connect failed: %s", err.Error()))
			return
		}
		defer client.Close()
		attachConn, attachWait, err = client.Attach(attachReq, true)
		if err != nil {
			r.Error(fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachConn.Close()
	}

	_, err = cl.AddJobs(&host.AddJobsReq{HostJobs: map[string][]*host.Job{hostID: {job}}})
	if err != nil {
		r.Error(fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}

	if attach {
		if err := attachWait(); err != nil {
			r.Error(fmt.Errorf("attach wait failed: %s", err.Error()))
			return
		}
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
		w.Header().Set("Content-Length", "0")
		w.WriteHeader(http.StatusSwitchingProtocols)
		conn, _, err := w.(http.Hijacker).Hijack()
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		done := make(chan struct{}, 2)
		cp := func(to cluster.ReadWriteCloser, from io.Reader) {
			io.Copy(to, from)
			to.CloseWrite()
			done <- struct{}{}
		}
		go cp(conn.(cluster.ReadWriteCloser), attachConn)
		go cp(attachConn, conn)
		<-done
		<-done

		return
	} else {
		r.JSON(200, &ct.Job{
			ID:        hostID + "-" + job.ID,
			ReleaseID: newJob.ReleaseID,
			Cmd:       newJob.Cmd,
		})
	}
}