// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *Container) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)
		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
		container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})

		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			ID: container.ID,
		}
		daemon.execDriver.Terminate(cmd)

		if err := container.unmountIpcMounts(); err != nil {
			logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
		}
		if err := container.Unmount(); err != nil {
			logrus.Debugf("unmount error %s", err)
		}
		if err := container.toDiskLocking(); err != nil {
			logrus.Errorf("Error saving stopped state to disk: %v", err)
		}
	}

	if err := daemon.verifyVolumesInfo(container); err != nil {
		return err
	}

	if err := container.prepareMountPoints(); err != nil {
		return err
	}

	return nil
}
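// The stdin wiring in Register is a recurring pattern in this file: when
// OpenStdin is set, an io.Pipe connects attach callers to the process;
// otherwise writes land in a no-op WriteCloser so nothing blocks or errors.
// Below is a minimal standalone sketch of that pattern using only the
// standard library; nopWriteCloser here is a stand-in for what
// ioutils.NopWriteCloser is used for above.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// nopWriteCloser adds a no-op Close to any io.Writer, mirroring the role of
// ioutils.NopWriteCloser in the registration code above.
type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

func wireStdin(openStdin bool) (io.ReadCloser, io.WriteCloser) {
	if openStdin {
		// Reads from the returned reader see whatever is written to the pipe.
		return io.Pipe()
	}
	// Silently drop stdin: writes succeed but go nowhere.
	return nil, nopWriteCloser{ioutil.Discard}
}

func main() {
	stdin, pipe := wireStdin(true)
	go func() {
		pipe.Write([]byte("hello\n"))
		pipe.Close()
	}()
	data, _ := ioutil.ReadAll(stdin)
	fmt.Printf("open stdin received: %q\n", data)

	_, pipe = wireStdin(false)
	n, err := pipe.Write([]byte("ignored"))
	fmt.Printf("closed stdin: wrote %d bytes, err=%v\n", n, err)
}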
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	if err := daemon.verifyOldVolumesInfo(container); err != nil {
		return err
	}

	if err := container.prepareMountPoints(); err != nil {
		return err
	}

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)
		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})

		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			ID: container.ID,
		}
		daemon.execDriver.Terminate(cmd)

		if err := container.Unmount(); err != nil {
			logrus.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			logrus.Debugf("saving stopped state to disk %s", err)
		}
	}
	return nil
}
func InitDockerMonitor(cfg *MonitorConfig, w *StateWatcher) *DockerMonitor {
	containerId := cfg.ID
	dockerRoot = cfg.Root
	containerRoot := getContainerRoot(dockerRoot, containerId)
	container := &Container{
		root:  containerRoot,
		State: NewState(),
	}

	if err := container.FromDisk(); err != nil {
		log.Errorf("InitDockerMonitor: container from disk failed: %v", err)
		os.Exit(1)
	}

	if container.ID != containerId {
		log.Errorf("InitDockerMonitor: Container %s is stored at %s", container.ID, containerId)
		os.Exit(1)
	}

	if err := container.ReadCommandConfig(); err != nil {
		log.Errorf("InitDockerMonitor: command from disk failed: %v", err)
		os.Exit(1)
	}

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
	}

	monitor, err := newExternalMonitor(container, w)
	if err != nil {
		log.Errorf("external monitor init error: %v", err)
		return nil
	}
	//container.exMonitor = monitor

	return &DockerMonitor{
		monitor,
	}
}
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.IsRunning() {
		log.Debugf("killing old running container %s", container.ID)
		existingPid := container.Pid
		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})

		// We only have to handle this for lxc because the other drivers will ensure that
		// no processes are left when docker dies
		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
			lxc.KillLxc(container.ID, 9)
		} else {
			// use the current driver and ensure that the container is dead x.x
			cmd := &execdriver.Command{
				ID: container.ID,
			}
			var err error
			cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
			if err != nil {
				log.Debugf("cannot find existing process for %d", existingPid)
			}
			daemon.execDriver.Terminate(cmd)
		}

		if err := container.Unmount(); err != nil {
			log.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			log.Debugf("saving stopped state to disk %s", err)
		}

		info := daemon.execDriver.Info(container.ID)
		if !info.IsRunning() {
			log.Debugf("Container %s was supposed to be running but is not.", container.ID)
			log.Debugf("Marking as stopped")
			container.SetStopped(&execdriver.ExitStatus{ExitCode: -127})
			if err := container.ToDisk(); err != nil {
				return err
			}
		}
	}
	return nil
}
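// The exit codes hard-coded in these register variants follow the shell
// convention that a process killed by signal N exits with status 128+N:
// SIGKILL is 9, which is where the 137 in the Register variant above comes
// from, and the 9 passed to lxc.KillLxc is that same signal. A tiny sketch
// of the arithmetic, assuming a POSIX platform:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 128 + SIGKILL(9) = 137: the code a container is marked with when the
	// daemon forcibly terminates it during restore.
	fmt.Println(128 + int(syscall.SIGKILL)) // 137
	fmt.Println(128 + int(syscall.SIGTERM)) // 143
}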
func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return err
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return err
	}

	logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container
	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if execConfig.OpenStdout {
		cStdout = stdout
	}
	if execConfig.OpenStderr {
		cStderr = stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the execConfig data will be removed when the container
	// itself is deleted. This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.
	go func() {
		if err := container.Exec(execConfig); err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return fmt.Errorf("attach failed with error: %s", err)
		}
		return nil
	case err := <-execErr:
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec
		if !container.IsRunning() {
			return fmt.Errorf("container stopped while running exec")
		}
		return err
	}
}
func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s [options] exec", job.Name)
	}

	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
		execName         = job.Args[0]
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return job.Error(err)
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return job.Error(err)
	}

	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer log.Debugf("Closing buffered stdin pipe")
			io.Copy(w, job.Stdin)
		}()
		cStdin = r
	}
	if execConfig.OpenStdout {
		cStdout = job.Stdout
	}
	if execConfig.OpenStderr {
		cStderr = job.Stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the execConfig data will be removed when the container
	// itself is deleted. This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.
	go func() {
		err := container.Exec(execConfig)
		if err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return job.Errorf("attach failed with error: %s", err)
		}
		break
	case err := <-execErr:
		return job.Error(err)
	}

	return engine.StatusOK
}
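// The exec variants above serialize the "already running" check inside an
// anonymous locked func (the derr variant later in this file does the same
// with explicit Lock/Unlock). A minimal sketch of that check-and-set pattern
// with a hypothetical execState type; unlike the originals, which set
// Running unconditionally even on the error path (harmless, since it is
// already true there), the sketch returns before touching the flag.

package main

import (
	"fmt"
	"sync"
)

// execState is a hypothetical stand-in for the Running bookkeeping carried
// by execConfig in the code above.
type execState struct {
	sync.Mutex
	Running bool
}

// tryStart atomically transitions the state to Running, reporting an error
// if another caller got there first.
func (e *execState) tryStart(name string) error {
	e.Lock()
	defer e.Unlock()
	if e.Running {
		return fmt.Errorf("Error: Exec command %s is already running", name)
	}
	e.Running = true
	return nil
}

func main() {
	e := &execState{}
	fmt.Println(e.tryStart("abc")) // <nil>
	fmt.Println(e.tryStart("abc")) // already running
}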
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.IsRunning() {
		if container.MonitorDriver == MonitorBuiltin {
			log.Debugf("killing old running container %s", container.ID)
			existingPid := container.Pid
			container.SetStopped(0)

			// We only have to handle this for lxc because the other drivers will ensure that
			// no processes are left when docker dies
			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				// use the current driver and ensure that the container is dead x.x
				cmd := &execdriver.Command{
					ID: container.ID,
				}
				var err error
				cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
				if err != nil {
					log.Debugf("cannot find existing process for %d", existingPid)
				}
				daemon.execDriver.Terminate(cmd)
			}

			if err := container.Unmount(); err != nil {
				log.Debugf("unmount error %s", err)
			}
			if err := container.ToDisk(); err != nil {
				log.Debugf("saving stopped state to disk %s", err)
			}

			info := daemon.execDriver.Info(container.ID)
			if !info.IsRunning() {
				log.Debugf("Container %s was supposed to be running but is not.", container.ID)
				log.Debugf("Marking as stopped")
				container.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			// restore external container
			log.Debugf("restore external container: %s", container.ID)
			_, err := container.daemon.callMonitorAPI(container, "GET", "_ping")
			if err != nil {
				log.Errorf("Call monitor _ping API failed: %v, mark container %s stopped", err, container.ID)
				// Assume the monitor is down and mark the container stopped
				container.SetStopped(-125)
				if err := container.ToDisk(); err != nil {
					return err
				}
			} else {
				obj, err := container.daemon.callMonitorAPI(container, "GET", "state")
				if err != nil {
					log.Errorf("Call monitor state API failed: %v", err)
					return err
				}
				m := make(map[string]*WatchState)
				if err = json.Unmarshal(obj, &m); err != nil {
					log.Errorf("Decode WatchState error: %v", err)
					return err
				}
				ws := m["WatchState"]
				log.Debugf("WatchState %v", ws)

				if !ws.Running {
					log.Errorf("Container %s is supposed to be running, but not running in monitor, mark it stopped", container.ID)
					container.SetStopped(-125)
					if err = container.ToDisk(); err != nil {
						return err
					}
					// kill monitor server
					if err = syscall.Kill(container.monitorState.Pid, syscall.SIGTERM); err != nil {
						log.Errorf("kill monitor server with pid %v error: %v", container.monitorState.Pid, err)
						return err
					}
					// write monitor state
					container.monitorState.SetStopped(0)
					if err = container.WriteMonitorState(); err != nil {
						log.Errorf("write monitor state error: %v", err)
						return err
					}
				} else {
					// external container is running
					// register to graph driver
					if err := container.daemon.driver.Register(container.ID); err != nil {
						log.Errorf("register container to graph driver error: %v", err)
					}

					monitor := NewMonitorProxy(container, false)
					container.exMonitor = monitor
					if err := monitor.RunStatePoller(); err != nil {
						log.Errorf("Container %s run StatePoll failed: %v", container.ID, err)
						return err
					}
				}
			}
		}
	}
	return nil
}
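// The external-monitor restore path above decodes the monitor's "state"
// response as a map keyed by "WatchState". Below is a minimal sketch of just
// that decode step; this trimmed-down WatchState carries only the field the
// restore logic inspects (the real struct presumably has more):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// WatchState here is reduced to the single field register consults.
type WatchState struct {
	Running bool
}

func main() {
	// Example payload in the shape register expects from the monitor API.
	obj := []byte(`{"WatchState": {"Running": true}}`)

	m := make(map[string]*WatchState)
	if err := json.Unmarshal(obj, &m); err != nil {
		log.Fatalf("Decode WatchState error: %v", err)
	}
	ws := m["WatchState"]
	fmt.Printf("running=%v\n", ws.Running)
}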
func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s [options] exec", job.Name)
	}

	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
		cStdinCloser     io.Closer
		execName         = job.Args[0]
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return job.Error(err)
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return job.Error(err)
	}

	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			io.Copy(w, job.Stdin)
		}()
		cStdin = r
		cStdinCloser = job.Stdin
	}
	if execConfig.OpenStdout {
		cStdout = job.Stdout
	}
	if execConfig.OpenStderr {
		cStderr = job.Stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr)

	execErr := make(chan error)

	// Remove exec from daemon and container.
	defer d.unregisterExecCommand(execConfig)

	go func() {
		err := container.Exec(execConfig)
		if err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return job.Errorf("attach failed with error: %s", err)
		}
		break
	case err := <-execErr:
		return job.Error(err)
	}

	return engine.StatusOK
}
// ContainerExecStart starts a previously set up exec instance. The
// std streams are set up.
func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := d.getExecConfig(name)
	if err != nil {
		return derr.ErrorCodeNoExecID.WithArgs(name)
	}

	ec.Lock()
	if ec.Running {
		ec.Unlock()
		return derr.ErrorCodeExecRunning.WithArgs(ec.ID)
	}
	ec.Running = true
	ec.Unlock()

	logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
	container := ec.Container
	container.logEvent("exec_start: " + ec.ProcessConfig.Entrypoint + " " + strings.Join(ec.ProcessConfig.Arguments, " "))

	if ec.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = stdout
	}
	if ec.OpenStderr {
		cStderr = stderr
	}

	ec.streamConfig.stderr = broadcastwriter.New()
	ec.streamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if ec.OpenStdin {
		ec.streamConfig.stdin, ec.streamConfig.stdinPipe = io.Pipe()
	} else {
		ec.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := attach(&ec.streamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the ExecConfig data will be removed when the container
	// itself is deleted. This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.
	go func() {
		if err := container.exec(ec); err != nil {
			execErr <- derr.ErrorCodeExecCantRun.WithArgs(ec.ID, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return derr.ErrorCodeExecAttach.WithArgs(err)
		}
		return nil
	case err := <-execErr:
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec
		if !container.IsRunning() {
			return derr.ErrorCodeExecContainerStopped
		}
		return err
	}
}
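// Every ContainerExecStart variant above ends with the same fan-in: the
// attach stream and the exec itself each report on their own channel, and
// whichever finishes first decides the outcome. A self-contained sketch of
// that select, with stand-in goroutines replacing attach and container.exec.
// Note the originals use an unbuffered execErr, so the exec goroutine blocks
// on its send if attach wins the race; buffered channels, as used below, are
// one way to avoid that leak.

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	attachErr := make(chan error, 1)
	execErr := make(chan error, 1)

	// Stand-in for the attach goroutine: the streams finish cleanly.
	go func() {
		time.Sleep(10 * time.Millisecond)
		attachErr <- nil
	}()
	// Stand-in for container.exec: the command fails.
	go func() {
		time.Sleep(20 * time.Millisecond)
		execErr <- errors.New("exec failed")
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			fmt.Println("attach failed:", err)
			return
		}
		fmt.Println("attach finished first; exec result handled elsewhere")
	case err := <-execErr:
		fmt.Println("exec finished first:", err)
	}
}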