func (e *engine) runJobNotify(r *Task, client dockerclient.Client) error {
	name := fmt.Sprintf("drone_build_%d_notify", r.Build.ID)

	defer func() {
		client.KillContainer(name, "9")
		client.RemoveContainer(name, true, true)
	}()

	// encode the build payload to write to stdin
	// when launching the build container
	in, err := encodeToLegacyFormat(r)
	if err != nil {
		log.Errorf("failure to marshal work. %s", err)
		return err
	}

	args := DefaultNotifyArgs
	args = append(args, "--")
	args = append(args, string(in))

	conf := &dockerclient.ContainerConfig{
		Image:      DefaultAgent,
		Entrypoint: DefaultEntrypoint,
		Cmd:        args,
		Env:        e.envs,
		HostConfig: dockerclient.HostConfig{
			Binds:            []string{"/var/run/docker.sock:/var/run/docker.sock"},
			MemorySwappiness: -1,
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	log.Infof("preparing container %s", name)
	info, err := docker.Run(client, conf, name)
	if err != nil {
		log.Errorf("Error starting notification container %s. %s", name, err)
	}

	// for debugging purposes we print a failed notification execution's
	// output to the logs. Otherwise we have no way to troubleshoot failed
	// notifications. This is temporary code until I've come up with
	// a better solution.
	if info != nil && info.State.ExitCode != 0 && log.GetLevel() >= log.InfoLevel {
		var buf bytes.Buffer
		rc, err := client.ContainerLogs(name, docker.LogOpts)
		if err == nil {
			defer rc.Close()
			stdcopy.StdCopy(&buf, &buf, io.LimitReader(rc, 50000))
		}

		log.Infof("Notification container %s exited with %d", name, info.State.ExitCode)
		log.Infoln(buf.String())
	}

	return err
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {

		// blocks and waits for the container to finish
		// by streaming the logs (to /dev/null). Ideally
		// we could use the `wait` function instead
		rc, err := client.ContainerLogs(info.Id, logOptsTail)
		if err != nil {
			log.Errorf("Error tailing %s. %s\n", conf.Image, err)
			errc <- err
			return
		}
		defer rc.Close()
		StdCopy(outw, errw, rc)

		// fetches the container information
		info, err := client.InspectContainer(info.Id)
		if err != nil {
			log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
			errc <- err
			return
		}
		infoc <- info
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
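For orientation, here is a minimal sketch of how this Run helper could be invoked from the same package. It assumes a samalba/dockerclient client created with NewDockerClient over the default unix socket; the caller name, image, and command below are placeholders, not part of the original code. Passing nil writers would fall back to os.Stdout as shown above.

package docker

import (
	"fmt"
	"os"

	"github.com/samalba/dockerclient"
)

// exampleRun is a hypothetical caller of Run, shown only for illustration.
func exampleRun() error {
	// Assumes the Docker daemon is reachable over the default unix socket.
	client, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		return err
	}

	conf := &dockerclient.ContainerConfig{
		Image: "busybox:latest",          // placeholder image
		Cmd:   []string{"echo", "hello"}, // placeholder command
	}

	// Pull the image, stream output to the terminal, and block until exit.
	info, err := Run(client, conf, nil, true, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}

	fmt.Printf("container exited with code %d\n", info.State.ExitCode)
	return nil
}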
// Wait blocks until the named container exits, returning the exit information.
func Wait(client dockerclient.Client, name string) (*dockerclient.ContainerInfo, error) {

	defer func() {
		client.StopContainer(name, 5)
		client.KillContainer(name, "9")
	}()

	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {

		// blocks and waits for the container to finish
		// by streaming the logs (to /dev/null). Ideally
		// we could use the `wait` function instead
		rc, err := client.ContainerLogs(name, LogOptsTail)
		if err != nil {
			errc <- err
			return
		}
		io.Copy(ioutil.Discard, rc)
		rc.Close()

		info, err := client.InspectContainer(name)
		if err != nil {
			errc <- err
			return
		}
		infoc <- info
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return nil, err
	}
}
func (e *engine) runJob(c context.Context, r *Task, updater *updater, client dockerclient.Client) error {

	name := fmt.Sprintf("drone_build_%d_job_%d", r.Build.ID, r.Job.ID)

	defer func() {
		if r.Job.Status == model.StatusRunning {
			r.Job.Status = model.StatusError
			r.Job.Finished = time.Now().UTC().Unix()
			r.Job.ExitCode = 255
		}
		if r.Job.Status == model.StatusPending {
			r.Job.Status = model.StatusError
			r.Job.Started = time.Now().UTC().Unix()
			r.Job.Finished = time.Now().UTC().Unix()
			r.Job.ExitCode = 255
		}
		updater.SetJob(c, r)

		client.KillContainer(name, "9")
		client.RemoveContainer(name, true, true)
	}()

	// marks the task as running
	r.Job.Status = model.StatusRunning
	r.Job.Started = time.Now().UTC().Unix()

	// encode the build payload to write to stdin
	// when launching the build container
	in, err := encodeToLegacyFormat(r)
	if err != nil {
		log.Errorf("failure to marshal work. %s", err)
		return err
	}

	// CREATE AND START BUILD
	args := DefaultBuildArgs
	if r.Build.Event == model.EventPull {
		args = DefaultPullRequestArgs
	}
	args = append(args, "--")
	args = append(args, string(in))

	conf := &dockerclient.ContainerConfig{
		Image:      DefaultAgent,
		Entrypoint: DefaultEntrypoint,
		Cmd:        args,
		Env:        e.envs,
		HostConfig: dockerclient.HostConfig{
			Binds: []string{"/var/run/docker.sock:/var/run/docker.sock"},
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	log.Infof("preparing container %s", name)
	client.PullImage(conf.Image, nil)

	_, err = docker.RunDaemon(client, conf, name)
	if err != nil {
		log.Errorf("error starting build container. %s", err)
		return err
	}

	// UPDATE STATUS
	err = updater.SetJob(c, r)
	if err != nil {
		log.Errorf("error updating job status as running. %s", err)
		return err
	}

	// WAIT FOR OUTPUT
	info, builderr := docker.Wait(client, name)

	switch {
	case info.State.ExitCode == 128:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusKilled
	case info.State.ExitCode == 130:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusKilled
	case builderr != nil:
		r.Job.Status = model.StatusError
	case info.State.ExitCode != 0:
		r.Job.ExitCode = info.State.ExitCode
		r.Job.Status = model.StatusFailure
	default:
		r.Job.Status = model.StatusSuccess
	}

	// send the logs to the datastore
	var buf bytes.Buffer
	rc, err := client.ContainerLogs(name, docker.LogOpts)
	if err != nil && builderr != nil {
		buf.WriteString("Error launching build")
		buf.WriteString(builderr.Error())
	} else if err != nil {
		buf.WriteString("Error launching build")
		buf.WriteString(err.Error())
		log.Errorf("error opening connection to logs. %s", err)
		return err
	} else {
		defer rc.Close()
		stdcopy.StdCopy(&buf, &buf, io.LimitReader(rc, 5000000))
	}

	// update the task in the datastore
	r.Job.Finished = time.Now().UTC().Unix()
	err = updater.SetJob(c, r)
	if err != nil {
		log.Errorf("error updating job after completion. %s", err)
		return err
	}

	err = updater.SetLogs(c, r, ioutil.NopCloser(&buf))
	if err != nil {
		log.Errorf("error updating logs. %s", err)
		return err
	}

	log.Debugf("completed job %d with status %s.", r.Job.ID, r.Job.Status)
	return nil
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {

		// options to fetch the stdout and stderr logs
		// by tailing the output.
		logOptsTail := &dockerclient.LogOptions{
			Follow: true,
			Stdout: true,
			Stderr: true,
		}

		// It's possible that the docker logs endpoint returns before the container
		// is done. We'll naively resume up to 5 times if, when the logs unblock,
		// the container is still reported to be running.
		for attempts := 0; attempts < 5; attempts++ {
			if attempts > 0 {
				// When resuming the stream, only grab the last line when starting
				// the tailing.
				logOptsTail.Tail = 1
			}

			// blocks and waits for the container to finish
			// by streaming the logs (to /dev/null). Ideally
			// we could use the `wait` function instead
			rc, err := client.ContainerLogs(info.Id, logOptsTail)
			if err != nil {
				log.Errorf("Error tailing %s. %s\n", conf.Image, err)
				errc <- err
				return
			}
			defer rc.Close()

			_, err = StdCopy(outw, errw, rc)
			if err != nil {
				log.Errorf("Error streaming docker logs for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			// fetches the container information
			info, err := client.InspectContainer(info.Id)
			if err != nil {
				log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			if !info.State.Running {
				// The container is no longer running, there should be no more logs to tail.
				infoc <- info
				return
			}

			log.Debugf("Attempting to resume log tailing after %d attempts.\n", attempts)
		}

		errc <- errors.New("Maximum number of attempts made while tailing logs.")
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
func Run(client dockerclient.Client, conf *dockerclient.ContainerConfig, auth *dockerclient.AuthConfig, pull bool, outw, errw io.Writer) (*dockerclient.ContainerInfo, error) {
	if outw == nil {
		outw = os.Stdout
	}
	if errw == nil {
		errw = os.Stdout
	}

	// fetches the container information.
	info, err := Start(client, conf, auth, pull)
	if err != nil {
		return nil, err
	}

	// ensures the container is always stopped
	// and ready to be removed.
	defer func() {
		client.StopContainer(info.Id, 5)
		client.KillContainer(info.Id, "9")
	}()

	// channel listening for errors while the
	// container is running async.
	errc := make(chan error, 1)
	infoc := make(chan *dockerclient.ContainerInfo, 1)
	go func() {

		// It's possible that the docker logs endpoint returns before the container
		// is done. We'll naively resume up to 5 times if, when the logs unblock,
		// the container is still reported to be running.
		var total int64
		for attempts := 0; attempts < 5; attempts++ {

			// blocks and waits for the container to finish
			// by streaming the logs (to /dev/null). Ideally
			// we could use the `wait` function instead
			rc, err := client.ContainerLogs(info.Id, logOptsTail)
			if err != nil {
				log.Errorf("Error tailing %s. %s\n", conf.Image, err)
				errc <- err
				return
			}
			defer rc.Close()

			if total != 0 {
				// Discard the total bytes we've received so far.
				// io.LimitReader returns EOF once it has read the specified number
				// of bytes as per https://golang.org/pkg/io/#LimitReader.
				r := io.LimitReader(rc, total)
				_, err := io.Copy(ioutil.Discard, r)
				if err != nil && err != io.EOF {
					log.Errorf("Error resuming streaming docker logs for %s. %s\n", conf.Image, err)
					errc <- err
					return
				}
			}

			rcv, err := StdCopy(outw, errw, rc)
			if err != nil {
				log.Errorf("Error streaming docker logs for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			// fetches the container information
			info, err := client.InspectContainer(info.Id)
			if err != nil {
				log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
				errc <- err
				return
			}

			if !info.State.Running {
				// The container is no longer running, there should be no more logs to tail.
				infoc <- info
				return
			}

			total += rcv
			log.Debugf("Attempting to resume log tailing after receiving %d bytes. Attempts %d.\n", total, attempts)
		}

		errc <- errors.New("Maximum number of attempts made while tailing logs.")
	}()

	select {
	case info := <-infoc:
		return info, nil
	case err := <-errc:
		return info, err
	}
}
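The skip-ahead step above can be exercised in isolation. The following is a minimal, standalone sketch of the same io.LimitReader plus io.Copy-to-ioutil.Discard pattern, with a strings.Reader standing in for a restarted log stream that replays from the beginning; the byte count and sample data are made up for illustration.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// Pretend the first attempt already delivered 6 bytes ("hello\n")
	// and the resumed stream replays the log from the beginning.
	stream := strings.NewReader("hello\nworld\n")
	var total int64 = 6

	// Skip the bytes already processed, as the resume logic does.
	// io.LimitReader stops after `total` bytes, so io.Copy drains
	// exactly that prefix into ioutil.Discard.
	if _, err := io.Copy(ioutil.Discard, io.LimitReader(stream, total)); err != nil && err != io.EOF {
		panic(err)
	}

	// Only the unseen tail remains on the reader.
	rest, _ := ioutil.ReadAll(stream)
	fmt.Printf("%q\n", rest) // "world\n"
}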