func (h *execHandle) run() { ps, err := h.executor.Wait() close(h.doneCh) // If the exitcode is 0 and we had an error that means the plugin didn't // connect and doesn't know the state of the user process so we are killing // the user process so that when we create a new executor on restarting the // new user process doesn't have collisions with resources that the older // user pid might be holding onto. if ps.ExitCode == 0 && err != nil { if h.isolationConfig != nil { isoConf := h.isolationConfig ePid := h.pluginClient.ReattachConfig().Pid if e := executor.DestroyCgroup(isoConf.Cgroup, isoConf.CgroupPaths, ePid); e != nil { h.logger.Printf("[ERR] driver.exec: destroying cgroup failed while killing cgroup: %v", e) } } if e := h.allocDir.UnmountAll(); e != nil { h.logger.Printf("[ERR] driver.exec: unmounting dev,proc and alloc dirs failed: %v", e) } } h.waitCh <- cstructs.NewWaitResult(ps.ExitCode, ps.Signal, err) close(h.waitCh) // Remove services if err := h.executor.DeregisterServices(); err != nil { h.logger.Printf("[ERR] driver.exec: failed to deregister services: %v", err) } if err := h.executor.Exit(); err != nil { h.logger.Printf("[ERR] driver.exec: error destroying executor: %v", err) } h.pluginClient.Kill() }
func (h *rktHandle) run() { ps, werr := h.executor.Wait() close(h.doneCh) if ps.ExitCode == 0 && werr != nil { if e := killProcess(h.executorPid); e != nil { h.logger.Printf("[ERROR] driver.rkt: error killing user process: %v", e) } if e := h.allocDir.UnmountAll(); e != nil { h.logger.Printf("[ERROR] driver.rkt: unmounting dev,proc and alloc dirs failed: %v", e) } } // Remove services if err := h.executor.DeregisterServices(); err != nil { h.logger.Printf("[ERR] driver.rkt: failed to deregister services: %v", err) } // Exit the executor if err := h.executor.Exit(); err != nil { h.logger.Printf("[ERR] driver.rkt: error killing executor: %v", err) } h.pluginClient.Kill() // Send the results h.waitCh <- dstructs.NewWaitResult(ps.ExitCode, 0, werr) close(h.waitCh) }
func (h *rktHandle) run() { ps, err := h.proc.Wait() close(h.doneCh) code := 0 if !ps.Success() { // TODO: Better exit code parsing. code = 1 } h.waitCh <- cstructs.NewWaitResult(code, 0, err) close(h.waitCh) }
// waitAsParent waits on the process if the current process was the spawner. func (s *Spawner) waitAsParent() *structs.WaitResult { if s.SpawnPpid != os.Getpid() { return structs.NewWaitResult(-1, 0, fmt.Errorf("not the parent. Spawner parent is %v; current pid is %v", s.SpawnPpid, os.Getpid())) } // Try to reattach to the spawn. if s.spawn == nil { // If it can't be reattached, it means the spawn process has exited so // we should just read its exit file. var err error if s.spawn, err = os.FindProcess(s.SpawnPid); err != nil { return s.pollWait() } } if _, err := s.spawn.Wait(); err != nil { return structs.NewWaitResult(-1, 0, err) } return s.pollWait() }
// run waits for the configured amount of time and then indicates the task has // terminated func (h *mockDriverHandle) run() { timer := time.NewTimer(h.runFor) defer timer.Stop() for { select { case <-timer.C: close(h.doneCh) case <-h.doneCh: h.logger.Printf("[DEBUG] driver.mock: finished running task %q", h.taskName) h.waitCh <- dstructs.NewWaitResult(h.exitCode, h.exitSignal, h.exitErr) return } } }
func (h *DockerHandle) run() { // Wait for it... exitCode, werr := h.waitClient.WaitContainer(h.containerID) if werr != nil { h.logger.Printf("[ERR] driver.docker: failed to wait for %s; container already terminated", h.containerID) } if exitCode != 0 { werr = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode) } close(h.doneCh) // Remove services if err := h.executor.DeregisterServices(); err != nil { h.logger.Printf("[ERR] driver.docker: error deregistering services: %v", err) } // Shutdown the syslog collector if err := h.executor.Exit(); err != nil { h.logger.Printf("[ERR] driver.docker: failed to kill the syslog collector: %v", err) } h.pluginClient.Kill() // Stop the container just incase the docker daemon's wait returned // incorrectly if err := h.client.StopContainer(h.containerID, 0); err != nil { _, noSuchContainer := err.(*docker.NoSuchContainer) _, containerNotRunning := err.(*docker.ContainerNotRunning) if !containerNotRunning && !noSuchContainer { h.logger.Printf("[ERR] driver.docker: error stopping container: %v", err) } } // Remove the container if err := h.client.RemoveContainer(docker.RemoveContainerOptions{ID: h.containerID, RemoveVolumes: true, Force: true}); err != nil { h.logger.Printf("[ERR] driver.docker: error removing container: %v", err) } // Cleanup the image if h.cleanupImage { if err := h.client.RemoveImage(h.imageID); err != nil { h.logger.Printf("[DEBUG] driver.docker: error removing image: %v", err) } } // Send the results h.waitCh <- dstructs.NewWaitResult(exitCode, 0, werr) close(h.waitCh) }
func (h *DockerHandle) run() { // Wait for it... exitCode, err := h.client.WaitContainer(h.containerID) if err != nil { h.logger.Printf("[ERR] driver.docker: failed to wait for %s; container already terminated", h.containerID) } if exitCode != 0 { err = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode) } close(h.doneCh) h.waitCh <- cstructs.NewWaitResult(exitCode, 0, err) close(h.waitCh) }
// readExitCode parses the state file and returns the exit code of the task. It // returns an error if the file can't be read. func (s *Spawner) readExitCode() *structs.WaitResult { f, err := os.Open(s.StateFile) defer f.Close() if err != nil { return structs.NewWaitResult(-1, 0, fmt.Errorf("Failed to open %v to read exit code: %v", s.StateFile, err)) } stat, err := f.Stat() if err != nil { return structs.NewWaitResult(-1, 0, fmt.Errorf("Failed to stat file %v: %v", s.StateFile, err)) } if stat.Size() == 0 { return structs.NewWaitResult(-1, 0, fmt.Errorf("Empty state file: %v", s.StateFile)) } var exitStatus command.SpawnExitStatus dec := json.NewDecoder(f) if err := dec.Decode(&exitStatus); err != nil { return structs.NewWaitResult(-1, 0, fmt.Errorf("Failed to parse exit status from %v: %v", s.StateFile, err)) } return structs.NewWaitResult(exitStatus.ExitCode, 0, nil) }
func (h *DockerHandle) run() { // Wait for it... exitCode, err := h.client.WaitContainer(h.containerID) if err != nil { h.logger.Printf("[ERR] driver.docker: failed to wait for %s; container already terminated", h.containerID) } if exitCode != 0 { err = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode) } close(h.doneCh) h.waitCh <- cstructs.NewWaitResult(exitCode, 0, err) close(h.waitCh) // Shutdown the syslog collector if err := h.logCollector.Exit(); err != nil { h.logger.Printf("[ERR] driver.docker: failed to kill the syslog collector: %v", err) } h.pluginClient.Kill() }
// pollWait polls on the spawn daemon to determine when it exits. After it // exits, it reads the state file and returns the exit code and possibly an // error. func (s *Spawner) pollWait() *structs.WaitResult { // Stat to check if it is there to avoid a race condition. stat, err := os.Stat(s.StateFile) if err != nil { return structs.NewWaitResult(-1, 0, fmt.Errorf("Failed to Stat exit status file %v: %v", s.StateFile, err)) } // If there is data it means that the file has already been written. if stat.Size() > 0 { return s.readExitCode() } // Read after the process exits. for _ = range time.Tick(5 * time.Second) { if !s.Alive() { break } } return s.readExitCode() }
// testWaitResult builds a WaitResult carrying the given exit code, with no
// signal and no error — a convenience for tests.
func testWaitResult(code int) *cstructs.WaitResult {
	return cstructs.NewWaitResult(code, 0, nil)
}