Example #1
func (h *attachHandler) attach(req *host.AttachReq, conn io.ReadWriteCloser) {
	defer conn.Close()

	g := grohl.NewContext(grohl.Data{"fn": "attach", "job.id": req.JobID})
	g.Log(grohl.Data{"at": "start"})
	attachWait := make(chan struct{})
	job := h.state.AddAttacher(req.JobID, attachWait)
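	// AddAttacher returns nil if the job isn't known yet; tell the client
	// we're waiting and block until the job shows up, then look it up again.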
	if job == nil {
		defer h.state.RemoveAttacher(req.JobID, attachWait)
		if _, err := conn.Write([]byte{host.AttachWaiting}); err != nil {
			return
		}
		// TODO: add timeout
		<-attachWait
		job = h.state.GetJob(req.JobID)
	}

	success := make(chan struct{})
	failed := make(chan struct{})
	opts := &AttachRequest{
		Job:        job,
		Logs:       req.Flags&host.AttachFlagLogs != 0,
		Stream:     req.Flags&host.AttachFlagStream != 0,
		Height:     req.Height,
		Width:      req.Width,
		Attached:   success,
		ReadWriter: conn,
		Streams:    make([]string, 0, 3),
	}
	if req.Flags&host.AttachFlagStdin != 0 {
		opts.Streams = append(opts.Streams, "stdin")
	}
	if req.Flags&host.AttachFlagStdout != 0 {
		opts.Streams = append(opts.Streams, "stdout")
	}
	if req.Flags&host.AttachFlagStderr != 0 {
		opts.Streams = append(opts.Streams, "stderr")
	}

	go func() {
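		// Relay the attach outcome to the client, then unblock anyone waiting on attachWait.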
		select {
		case <-success:
			conn.Write([]byte{host.AttachSuccess})
			close(success)
		case <-failed:
		}
		close(attachWait)
	}()
	if err := h.backend.Attach(opts); err != nil {
		select {
		case <-success:
		default:
			close(failed)
			conn.Write(append([]byte{host.AttachError}, err.Error()...))
		}
		g.Log(grohl.Data{"status": "error", "err": err})
		return
	}
	g.Log(grohl.Data{"at": "finish"})
}
Example #2
// populateReaderPool periodically globs for new files or files that previously
// hit EOF and creates file readers for them.
func (s *Supervisor) populateReaderPool() {
	logger := grohl.NewContext(grohl.Data{"ns": "Supervisor", "fn": "populateReaderPool"})

	timer := time.NewTimer(0)
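	// The zero-duration timer fires immediately, so the first glob pass runs at
	// startup; each pass re-arms the timer with GlobRefresh.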
	for {
		select {
		case <-s.stopRequest:
			return
		case <-timer.C:
			logTimer := logger.Timer(grohl.Data{})
			for _, config := range s.files {
				for _, path := range config.Paths {
					matches, err := filepath.Glob(path)
					if err != nil {
						logger.Report(err, grohl.Data{"path": path, "msg": "failed to glob", "resolution": "skipping path"})
						continue
					}

					for _, filePath := range matches {
						if err = s.startFileReader(filePath, config.Fields); err != nil {
							logger.Report(err, grohl.Data{"path": path, "filePath": filePath, "msg": "failed to start reader", "resolution": "skipping file"})
						}
					}
				}
			}
			logTimer.Finish()
			timer.Reset(s.GlobRefresh)
		}
	}
}
Example #3
func (p *jobProcessor) processJob(ports <-chan int, job *host.Job) (*docker.Container, error) {
	g := grohl.NewContext(grohl.Data{"fn": "process_job", "job.id": job.ID})
	g.Log(grohl.Data{"at": "start", "job.image": job.Config.Image, "job.cmd": job.Config.Cmd, "job.entrypoint": job.Config.Entrypoint})

	var hostConfig *docker.HostConfig
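	// Allocate one host port per requested TCP port: the first is exported as
	// PORT, each as PORT_<i>, and every port is exposed and bound 1:1 on the host.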
	for i := 0; i < job.TCPPorts; i++ {
		port := strconv.Itoa(<-ports)
		if i == 0 {
			job.Config.Env = append(job.Config.Env, "PORT="+port)
		}
		job.Config.Env = append(job.Config.Env, fmt.Sprintf("PORT_%d=%s", i, port))
		if job.Config.ExposedPorts == nil {
			job.Config.ExposedPorts = make(map[string]struct{}, job.TCPPorts)
		}
		job.Config.ExposedPorts[port+"/tcp"] = struct{}{}
		if hostConfig == nil {
			hostConfig = &docker.HostConfig{
				PortBindings:    make(map[string][]docker.PortBinding, job.TCPPorts),
				PublishAllPorts: true,
			}
		}
		hostConfig.PortBindings[port+"/tcp"] = []docker.PortBinding{{HostPort: port}}
	}
	if p.externalAddr != "" {
		job.Config.Env = appendUnique(job.Config.Env, "EXTERNAL_IP="+p.externalAddr, "SD_HOST="+p.externalAddr, "DISCOVERD="+p.discoverd)
	}
	p.state.AddJob(job)
	g.Log(grohl.Data{"at": "create_container"})
	container, err := p.docker.CreateContainer(job.Config)
	if err == docker.ErrNoSuchImage {
		g.Log(grohl.Data{"at": "pull_image"})
		err = p.docker.PullImage(docker.PullImageOptions{Repository: job.Config.Image}, os.Stdout)
		if err != nil {
			g.Log(grohl.Data{"at": "pull_image", "status": "error", "err": err})
			p.state.SetStatusFailed(job.ID, err)
			return nil, err
		}
		container, err = p.docker.CreateContainer(job.Config)
	}
	if err != nil {
		g.Log(grohl.Data{"at": "create_container", "status": "error", "err": err})
		p.state.SetStatusFailed(job.ID, err)
		return nil, err
	}
	p.state.SetContainerID(job.ID, container.ID)
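	// Block until any clients waiting to attach to this job have connected
	// before starting the container.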
	p.state.WaitAttach(job.ID)
	g.Log(grohl.Data{"at": "start_container"})
	if err := p.docker.StartContainer(container.ID, hostConfig); err != nil {
		g.Log(grohl.Data{"at": "start_container", "status": "error", "err": err})
		p.state.SetStatusFailed(job.ID, err)
		return nil, err
	}
	p.state.SetStatusRunning(job.ID)
	g.Log(grohl.Data{"at": "finish"})
	return container, nil
}
Example #4
func killExistingContainers(dc *docker.Client) error {
	g := grohl.NewContext(grohl.Data{"fn": "kill_existing"})
	g.Log(grohl.Data{"at": "start"})
	containers, err := dc.ListContainers(docker.ListContainersOptions{})
	if err != nil {
		g.Log(grohl.Data{"at": "list", "status": "error", "err": err})
		return err
	}
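	// Kill every container whose name carries the flynn- prefix; the label lets
	// us move on to the next container as soon as one name matches.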
outer:
	for _, c := range containers {
		for _, name := range c.Names {
			if strings.HasPrefix(name, "/flynn-") {
				g.Log(grohl.Data{"at": "kill", "container.id": c.ID, "container.name": name})
				if err := dc.KillContainer(c.ID); err != nil {
					g.Log(grohl.Data{"at": "kill", "container.id": c.ID, "container.name": name, "status": "error", "err": err})
				}
				continue outer
			}
		}
	}
	g.Log(grohl.Data{"at": "finish"})
	return nil
}
Example #5
func (d *DockerBackend) Cleanup() error {
	g := grohl.NewContext(grohl.Data{"backend": "docker", "fn": "cleanup"})
	g.Log(grohl.Data{"at": "start"})
	containers, err := d.docker.ListContainers(docker.ListContainersOptions{})
	if err != nil {
		g.Log(grohl.Data{"at": "list", "status": "error", "err": err})
		return err
	}
outer:
	for _, c := range containers {
		for _, name := range c.Names {
			if strings.HasPrefix(name, "/flynn-") {
				g.Log(grohl.Data{"at": "kill", "container.id": c.ID, "container.name": name})
				if err := d.docker.KillContainer(c.ID); err != nil {
					g.Log(grohl.Data{"at": "kill", "container.id": c.ID, "container.name": name, "status": "error", "err": err})
				}
				continue outer
			}
		}
	}
	g.Log(grohl.Data{"at": "finish"})
	return nil
}
Example #6
func (h *FileReader) read() {
	logger := grohl.NewContext(grohl.Data{"ns": "FileReader", "file_path": h.filePath})

	currentChunk := make([]*FileData, 0, h.ChunkSize)
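	// Read line by line, accumulating entries into chunks of ChunkSize; on EOF
	// or a read error, flush the partial chunk and close C.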
	for {
		line, err := h.buf.ReadBytes('\n')
		if err != nil {
			if err != io.EOF {
				logger.Report(err, grohl.Data{"msg": "error reading file", "resolution": "closing file"})
			}

			h.sendChunk(currentChunk)
			close(h.C)

			return
		}
		h.position += int64(len(line))
		// if maxLength is configured, skip lines that are too long
		if h.MaxLength > 0 && len(line) > h.MaxLength {
			continue
		}

		fileData := &FileData{
			Data: h.buildDataWithLine(bytes.TrimRight(line, "\r\n")),
			HighWaterMark: &HighWaterMark{
				FilePath: h.filePath,
				Position: h.position,
			},
		}
		currentChunk = append(currentChunk, fileData)

		if len(currentChunk) >= h.ChunkSize {
			h.sendChunk(currentChunk)
			currentChunk = make([]*FileData, 0, h.ChunkSize)
		}
	}
}
Example #7
func (c *Client) ensureConnected() error {
	if c.conn == nil {
		logger := grohl.NewContext(grohl.Data{"ns": "lumberjack.Client", "fn": "ensureConnected", "addr": c.options.Address})
		timer := logger.Timer(grohl.Data{})

		var conn net.Conn

		conn, err := net.DialTimeout(c.options.Network, c.options.Address, c.options.ConnectionTimeout)
		if err != nil {
			logger.Report(err, grohl.Data{})
			return err
		}

		if c.options.TLSConfig != nil {
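			// Default ServerName to the host portion of the address so certificate
			// verification can succeed, and bound the handshake with SendTimeout.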
			if c.options.TLSConfig.ServerName == "" {
				parts := strings.Split(c.options.Address, ":")
				c.options.TLSConfig.ServerName = parts[0]
			}

			tlsConn := tls.Client(conn, c.options.TLSConfig)
			tlsConn.SetDeadline(time.Now().Add(c.options.SendTimeout))
			if err := tlsConn.Handshake(); err != nil {
				conn.Close()

				logger.Report(err, grohl.Data{})
				return err
			}
			conn = tlsConn
		}

		timer.Finish()
		c.conn = conn
	}

	return nil
}
Example #8
// Reads chunks from available file readers, putting together ready 'chunks'
// that can be sent to clients.
func (s *Supervisor) populateReadyChunks() {
	logger := grohl.NewContext(grohl.Data{"ns": "Supervisor", "fn": "populateReadyChunks"})

	backoff := &ExponentialBackoff{Minimum: 50 * time.Millisecond, Maximum: 5000 * time.Millisecond}
	for {
		available, locked := s.readerPool.Counts()
		GlobalStatistics.UpdateFileReaderPoolStatistics(available, locked)

		currentChunk := &readyChunk{
			Chunk:         make([]*FileData, 0),
			LockedReaders: make([]*FileReader, 0),
		}

		for len(currentChunk.Chunk) < s.SpoolSize {
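			// Lock the next available reader and drain one queued chunk from it;
			// stop early if no readers remain.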
			if reader := s.readerPool.LockNext(); reader != nil {
				select {
				case <-s.stopRequest:
					return
				case chunk := <-reader.C:
					if chunk != nil {
						currentChunk.Chunk = append(currentChunk.Chunk, chunk...)
						currentChunk.LockedReaders = append(currentChunk.LockedReaders, reader)

						if len(chunk) > 0 {
							if hwm := chunk[len(chunk)-1].HighWaterMark; hwm != nil {
								GlobalStatistics.SetFilePosition(hwm.FilePath, hwm.Position)
							}
						}
					} else {
						// The reader hit EOF or another error. Remove it and it'll get
						// picked up by populateReaderPool again if it still needs to be
						// read.
						logger.Log(grohl.Data{"status": "EOF", "file": reader.FilePath()})

						s.readerPool.Remove(reader)
						GlobalStatistics.DeleteFileStatistics(reader.FilePath())
					}
				default:
					// The reader didn't have anything queued up for us. Unlock the
					// reader and move on.
					s.readerPool.Unlock(reader)
				}
			} else {
				// If there are no more readers, send the chunk ASAP so we can get
				// the next chunk in line
				logger.Log(grohl.Data{"msg": "no readers available", "resolution": "sending current chunk"})
				break
			}
		}

		if len(currentChunk.Chunk) > 0 {
			select {
			case <-s.stopRequest:
				return
			case s.readyChunks <- currentChunk:
				backoff.Reset()
			}
		} else {
			select {
			case <-s.stopRequest:
				return
			case <-time.After(backoff.Next()):
				grohl.Log(grohl.Data{"msg": "no lines available to send", "resolution": "backing off"})
			}
		}
	}
}
Example #9
func (h *attachHandler) attach(req *host.AttachReq, conn io.ReadWriteCloser) {
	defer conn.Close()

	g := grohl.NewContext(grohl.Data{"fn": "attach", "job.id": req.JobID})
	g.Log(grohl.Data{"at": "start"})
	attachWait := make(chan struct{})
	job := h.state.AddAttacher(req.JobID, attachWait)
	if job == nil {
		defer h.state.RemoveAttacher(req.JobID, attachWait)
		if _, err := conn.Write([]byte{host.AttachWaiting}); err != nil {
			return
		}
		// TODO: add timeout
		<-attachWait
		job = h.state.GetJob(req.JobID)
	}
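	// For TTY jobs attached with stdin, resize the container's TTY to the
	// requested dimensions once the container is running.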
	if job.Job.Config.Tty && req.Flags&host.AttachFlagStdin != 0 {
		resize := func() { h.docker.ResizeContainerTTY(job.ContainerID, req.Height, req.Width) }
		if job.Status == host.StatusRunning {
			resize()
		} else {
			var once sync.Once
			go func() {
				ch := make(chan host.Event)
				h.state.AddListener(req.JobID, ch)
				go func() {
					// There is a race that can result in the listener being
					// added after the container has started, so check the
					// status *after* subscribing.
					// This can deadlock if we try to get a state lock while an
					// event is being sent on the listen channel, so we do it
					// in the goroutine and wrap in a sync.Once.
					j := h.state.GetJob(req.JobID)
					if j.Status == host.StatusRunning {
						once.Do(resize)
					}
				}()
				defer h.state.RemoveListener(req.JobID, ch)
				for event := range ch {
					if event.Event == "start" {
						once.Do(resize)
						return
					}
					if event.Event == "stop" {
						return
					}
				}
			}()
		}
	}

	success := make(chan struct{})
	failed := make(chan struct{})
	opts := docker.AttachToContainerOptions{
		Container:    job.ContainerID,
		InputStream:  conn,
		OutputStream: conn,
		Stdin:        req.Flags&host.AttachFlagStdin != 0,
		Stdout:       req.Flags&host.AttachFlagStdout != 0,
		Stderr:       req.Flags&host.AttachFlagStderr != 0,
		Logs:         req.Flags&host.AttachFlagLogs != 0,
		Stream:       req.Flags&host.AttachFlagStream != 0,
		Success:      success,
	}
	go func() {
		select {
		case <-success:
			conn.Write([]byte{host.AttachSuccess})
			close(success)
		case <-failed:
		}
		close(attachWait)
	}()
	if err := h.docker.AttachToContainer(opts); err != nil {
		select {
		case <-success:
		default:
			close(failed)
			conn.Write(append([]byte{host.AttachError}, err.Error()...))
		}
		g.Log(grohl.Data{"at": "docker", "status": "error", "err": err})
		return
	}
	g.Log(grohl.Data{"at": "finish"})
}
Example #10
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	bindAddr := flag.String("bind", "", "bind containers to this IP")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "/etc/flynn-host.json", "manifest file")
	hostID := flag.String("id", hostname, "host id")
	force := flag.Bool("force", false, "kill all containers booted by flynn-host before starting")
	attributes := make(AttributeFlag)
	flag.Var(&attributes, "attribute", "key=value pair to add as an attribute")
	flag.Parse()
	grohl.AddContext("app", "lorne")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	dockerc, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	if *force {
		if err := killExistingContainers(dockerc); err != nil {
			os.Exit(1)
		}
	}

	state := NewState()
	ports := make(chan int)

	go allocatePorts(ports, 55000, 65535)
	go serveHTTP(&Host{state: state, docker: dockerc}, &attachHandler{state: state, docker: dockerc})
	go streamEvents(dockerc, state)

	processor := &jobProcessor{
		externalAddr: *externalAddr,
		bindAddr:     *bindAddr,
		docker:       dockerc,
		state:        state,
		discoverd:    os.Getenv("DISCOVERD"),
	}

	runner := &manifestRunner{
		env:        parseEnviron(),
		externalIP: *externalAddr,
		ports:      ports,
		processor:  processor,
		docker:     dockerc,
	}

	var disc *discoverd.Client
	if *manifestFile != "" {
		var r io.Reader
		var f *os.File
		if *manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(*manifestFile)
			if err != nil {
				log.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			log.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			processor.discoverd = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(processor.discoverd)
				return
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	if processor.discoverd == "" && *externalAddr != "" {
		processor.discoverd = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", processor.discoverd)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(processor.discoverd)
		if err != nil {
			log.Fatal(err)
		}
	}
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		log.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
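		// Not promoted within 5ms; keep waiting in the background and register
		// the RPC service if we become the sampi leader later.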
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(*hostID, NewLocalClient(*hostID, sampiCluster))
	if err != nil {
		log.Fatal(err)
	}

	g.Log(grohl.Data{"at": "sampi_connected"})

	events := make(chan host.Event)
	state.AddListener("all", events)
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			log.Fatal(err)
		}
	}
	if h.Attributes == nil {
		h.Attributes = make(map[string]string)
	}
	for k, v := range attributes {
		h.Attributes[k] = v
	}
	h.ID = *hostID

	for {
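		// Register with the current sampi leader and process jobs until the
		// connection drops, then wait for a new leader and re-register.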
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		hostErr := cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		processor.Process(ports, jobs)
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": *hostErr})

		<-newLeader
	}
}
Example #11
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "", "manifest file")
	hostID := flag.String("id", hostname, "host id")
	attributes := make(AttributeFlag)
	flag.Var(&attributes, "attribute", "key=value pair to add as an attribute")
	flag.Parse()
	grohl.AddContext("app", "lorne")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	dockerc, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	state := NewState()
	ports := make(chan int)

	go allocatePorts(ports, 55000, 65535)
	go serveHTTP(&Host{state: state, docker: dockerc}, &attachHandler{state: state, docker: dockerc})
	go streamEvents(dockerc, state)

	processor := &jobProcessor{
		externalAddr: *externalAddr,
		docker:       dockerc,
		state:        state,
		discoverd:    os.Getenv("DISCOVERD"),
	}

	runner := &manifestRunner{
		env:        parseEnviron(),
		externalIP: *externalAddr,
		ports:      ports,
		processor:  processor,
		docker:     dockerc,
	}

	var disc *discoverd.Client
	if *manifestFile != "" {
		f, err := os.Open(*manifestFile)
		if err != nil {
			log.Fatal(err)
		}
		services, err := runner.runManifest(f)
		if err != nil {
			log.Fatal(err)
		}
		f.Close()

		if d, ok := services["discoverd"]; ok {
			processor.discoverd = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientUsingAddress(processor.discoverd)
				return
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	if processor.discoverd == "" && *externalAddr != "" {
		processor.discoverd = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", processor.discoverd)
	if disc == nil {
		disc, err = discoverd.NewClientUsingAddress(processor.discoverd)
		if err != nil {
			log.Fatal(err)
		}
	}
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		log.Fatal(err)
	}
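	// If this host is promoted from standby to sampi leader, start serving the
	// cluster RPC.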
	go func() {
		<-sampiStandby
		rpc.Register(sampi.NewCluster(sampi.NewState()))
	}()

	cluster, err := client.New()
	if err != nil {
		log.Fatal(err)
	}
	g.Log(grohl.Data{"at": "sampi_connected"})

	events := make(chan host.Event)
	state.AddListener("all", events)
	go syncScheduler(cluster, events)

	var h *host.Host
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		h = &host.Host{Resources: make(map[string]host.ResourceValue)}
	}
	if _, ok := h.Resources["memory"]; !ok {
		h.Resources["memory"] = host.ResourceValue{Value: 1024}
	}
	h.ID = *hostID
	h.Jobs = state.ClusterJobs()

	if h.Attributes == nil {
		h.Attributes = make(map[string]string)
	}

	for k, v := range attributes {
		h.Attributes[k] = v
	}

	jobs := make(chan *host.Job)
	hostErr := cluster.ConnectHost(h, jobs)
	g.Log(grohl.Data{"at": "host_registered"})
	processor.Process(ports, jobs)
	log.Fatal(*hostErr)
}
Example #12
func (d *DockerBackend) Run(job *host.Job) error {
	g := grohl.NewContext(grohl.Data{"backend": "docker", "fn": "run_job", "job.id": job.ID})
	g.Log(grohl.Data{"at": "start", "job.image": job.Config.Image, "job.cmd": job.Config.Cmd, "job.entrypoint": job.Config.Entrypoint})

	if job.HostConfig == nil {
		job.HostConfig = &docker.HostConfig{
			PortBindings: make(map[string][]docker.PortBinding, job.TCPPorts),
		}
	}
	if job.Config.ExposedPorts == nil {
		job.Config.ExposedPorts = make(map[string]struct{}, job.TCPPorts)
	}
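	// Allocate each requested TCP port from the backend's port allocator; the
	// first is exported as PORT, each as PORT_<i>, and every port is published
	// on the bind address.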
	for i := 0; i < job.TCPPorts; i++ {
		p, err := d.ports.Get()
		if err != nil {
			return err
		}
		port := strconv.Itoa(int(p))

		if i == 0 {
			job.Config.Env = append(job.Config.Env, "PORT="+port)
		}
		job.Config.Env = append(job.Config.Env, fmt.Sprintf("PORT_%d=%s", i, port))
		job.Config.ExposedPorts[port+"/tcp"] = struct{}{}
		job.HostConfig.PortBindings[port+"/tcp"] = []docker.PortBinding{{HostPort: port, HostIp: d.bindAddr}}
	}

	job.Config.AttachStdout = true
	job.Config.AttachStderr = true
	if strings.HasPrefix(job.ID, "flynn-") {
		job.Config.Name = job.ID
	} else {
		job.Config.Name = "flynn-" + job.ID
	}

	d.state.AddJob(job)
	g.Log(grohl.Data{"at": "create_container"})
	container, err := d.docker.CreateContainer(job.Config)
	if err == docker.ErrNoSuchImage {
		g.Log(grohl.Data{"at": "pull_image"})
		err = d.docker.PullImage(docker.PullImageOptions{Repository: job.Config.Image}, os.Stdout)
		if err != nil {
			g.Log(grohl.Data{"at": "pull_image", "status": "error", "err": err})
			d.state.SetStatusFailed(job.ID, err)
			return err
		}
		container, err = d.docker.CreateContainer(job.Config)
	}
	if err != nil {
		g.Log(grohl.Data{"at": "create_container", "status": "error", "err": err})
		d.state.SetStatusFailed(job.ID, err)
		return err
	}
	d.state.SetContainerID(job.ID, container.ID)
	d.state.WaitAttach(job.ID)
	g.Log(grohl.Data{"at": "start_container"})
	if err := d.docker.StartContainer(container.ID, job.HostConfig); err != nil {
		g.Log(grohl.Data{"at": "start_container", "status": "error", "err": err})
		d.state.SetStatusFailed(job.ID, err)
		return err
	}
	container, err = d.docker.InspectContainer(container.ID)
	if err != nil {
		g.Log(grohl.Data{"at": "inspect_container", "status": "error", "err": err})
		d.state.SetStatusFailed(job.ID, err)
		return err
	}
	d.state.SetStatusRunning(job.ID)
	g.Log(grohl.Data{"at": "finish"})
	return nil
}