// Execute runs an sti build for the requested image inside a transient
// systemd unit, optionally streaming the build logs back to the client.
func (j *BuildImageRequest) Execute(resp jobs.Response) {
	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	fmt.Fprintf(w, "Processing build-image request:\n")
	// TODO: download source, add bind-mount

	unitName := containers.JobIdentifier(j.Name).UnitNameForBuild()
	unitDescription := fmt.Sprintf("Builder for %s", j.Tag)

	stdout, err := systemd.ProcessLogsForUnit(unitName)
	if err != nil {
		stdout = utils.EmptyReader
		log.Printf("job_build_image: Unable to fetch build logs: %s, %+v", err.Error(), err)
	}
	defer stdout.Close()

	conn, errc := systemd.NewConnection()
	if errc != nil {
		log.Print("job_build_image:", errc)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", errc)
		return
	}
	if err := conn.Subscribe(); err != nil {
		log.Print("job_build_image:", err)
		fmt.Fprintf(w, "Unable to watch start status: %v\n", err)
		return
	}
	defer conn.Unsubscribe()

	// make subscription global for efficiency
	var (
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)
	if resp.StreamResult() {
		// Watch only the build unit; the second callback filters out
		// events for every other unit on the system.
		changes, errch = conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
	}

	fmt.Fprintf(w, "Running sti build unit: %s\n", unitName)
	log.Printf("build_image: Running build %s", unitName)

	// Prefer the local gear binary; fall back to running sti inside the
	// builder image with the Docker socket bind-mounted.
	var startCmd []string
	if _, err := os.Stat(gearBinaryPath); err != nil {
		log.Println("gear executable is not installed on system; using sti builder image")
		startCmd = []string{
			"/usr/bin/docker", "run",
			"--rm",
			"-v", "/run/docker.sock:/run/docker.sock",
			"-t", buildImage,
			"sti", "build", j.Source, j.BaseImage, j.Tag,
			"-U", "unix:///run/docker.sock",
		}
	} else {
		startCmd = []string{
			gearBinaryPath, "build", j.Source, j.BaseImage, j.Tag,
		}
	}

	if j.RuntimeImage != "" {
		startCmd = append(startCmd, "--runtime-image", j.RuntimeImage)
	}
	if j.Clean {
		startCmd = append(startCmd, "--clean")
	}
	if j.Verbose {
		startCmd = append(startCmd, "--debug")
	}
	if j.CallbackUrl != "" {
		startCmd = append(startCmd, "--callbackUrl="+j.CallbackUrl)
	}

	log.Printf("build_image: Will execute %v", startCmd)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(startCmd, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container-small.slice"),
	)
	if err != nil {
		errType := reflect.TypeOf(err)
		fmt.Fprintf(w, "Unable to start build container for this image due to (%s): %s\n", errType, err.Error())
		return
	} else if status != "done" {
		fmt.Fprintf(w, "Build did not complete successfully: %s\n", status)
	} else {
		fmt.Fprintf(w, "Sti build is running\n")
	}

	if resp.StreamResult() {
		// Relay the unit's logs to the client while waiting for the
		// build unit to leave the "running" substate.
		go io.Copy(w, stdout)
	wait:
		for {
			select {
			case c := <-changes:
				if changed, ok := c[unitName]; ok {
					if changed.SubState != "running" {
						fmt.Fprintf(w, "Build completed\n")
						break wait
					}
				}
			case err := <-errch:
				fmt.Fprintf(w, "Error %+v\n", err)
			case <-time.After(25 * time.Second):
				log.Print("job_build_image:", "timeout")
				break wait
			}
		}
	}
}
// Execute starts the container command inside a transient systemd unit and,
// when the client requested streaming, relays the unit's logs until it stops.
func (j *runContainer) Execute(resp jobs.Response) {
	command := j.UnitCommand()
	unitName := containers.JobIdentifier(j.Name).UnitNameFor()
	unitDescription := fmt.Sprintf("Execute image '%s': %s %s", j.Image, j.Command, strings.Join(command, " "))

	var (
		stdout  io.ReadCloser
		changes <-chan map[string]*dbus.UnitStatus
		errch   <-chan error
	)
	if resp.StreamResult() {
		r, err := systemd.ProcessLogsForUnit(unitName)
		if err != nil {
			r = utils.EmptyReader
			log.Printf("run_container: Unable to fetch container run logs: %s, %+v", err.Error(), err)
		}
		defer r.Close()

		conn, errc := systemd.NewConnection()
		if errc != nil {
			log.Print("run_container:", errc)
			return
		}
		if err := conn.Subscribe(); err != nil {
			log.Print("run_container:", err)
			return
		}
		defer conn.Unsubscribe()

		// make subscription global for efficiency
		// Watch only this unit; the second callback filters out events
		// for every other unit on the system.
		c, ech := conn.SubscribeUnitsCustom(1*time.Second, 2,
			func(s1 *dbus.UnitStatus, s2 *dbus.UnitStatus) bool {
				return true
			},
			func(unit string) bool {
				return unit != unitName
			})
		stdout = r
		changes = c
		errch = ech
	}

	log.Printf("run_container: Running container %s", unitName)
	status, err := systemd.Connection().StartTransientUnit(
		unitName,
		"fail",
		dbus.PropExecStart(command, true),
		dbus.PropDescription(unitDescription),
		dbus.PropRemainAfterExit(true),
		dbus.PropSlice("container.slice"),
	)
	switch {
	case err != nil:
		errType := reflect.TypeOf(err)
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Unable to start container execution due to (%s): %s", errType, err.Error())})
		return
	case status != "done":
		resp.Failure(jobs.SimpleError{jobs.ResponseError, fmt.Sprintf("Start did not complete successfully: %s", status)})
		return
	case stdout == nil:
		// Not streaming; report success and finish.
		resp.Success(jobs.ResponseOk)
		return
	}

	w := resp.SuccessWithWrite(jobs.ResponseAccepted, true, false)
	go io.Copy(w, stdout)
wait:
	for {
		select {
		case c := <-changes:
			if changed, ok := c[unitName]; ok {
				// The unit left the "running" substate: the command finished.
				if changed.SubState != "running" {
					break wait
				}
			}
		case err := <-errch:
			fmt.Fprintf(w, "Error %+v\n", err)
		case <-time.After(1 * time.Minute):
			log.Print("run_container:", "timeout")
			break wait
		}
	}
	// stdout is closed by the deferred r.Close() above.
}