// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		if container.State.IsGhost() {
			utils.Debugf("killing ghost %s", container.ID)

			existingPid := container.State.Pid
			container.State.SetGhost(false)
			container.State.SetStopped(0)

			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				command := &execdriver.Command{
					ID: container.ID,
				}
				command.Process = &os.Process{Pid: existingPid}
				runtime.execDriver.Kill(command, 9)
			}
			// ensure that the filesystem is also unmounted
			unmountVolumesForContainer(container)
			if err := container.Unmount(); err != nil {
				utils.Debugf("ghost unmount error %s", err)
			}
		}

		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				unmountVolumesForContainer(container)
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on a nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
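The else branch above leans on a Go channel guarantee: a receive from a nil channel blocks forever, while a receive from a closed channel returns immediately. Closing waitLock for a stopped container therefore lets any waiter proceed at once. A minimal standalone sketch of the idiom follows; the fakeContainer type and its names are illustrative, not from the codebase:

package main

import (
	"fmt"
	"time"
)

// fakeContainer mimics the waitLock pattern: Wait blocks until the
// channel is closed, so closing it up front makes Wait a no-op.
type fakeContainer struct {
	waitLock chan struct{}
}

func (c *fakeContainer) Wait() {
	<-c.waitLock // returns immediately once waitLock is closed
}

func main() {
	stopped := &fakeContainer{waitLock: make(chan struct{})}
	close(stopped.waitLock) // container is not running: don't block waiters

	done := make(chan struct{})
	go func() {
		stopped.Wait() // returns at once because waitLock is closed
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("Wait returned immediately")
	case <-time.After(time.Second):
		fmt.Println("Wait blocked (unexpected)")
	}
}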
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}

	// init the wait lock
	container.waitLock = make(chan struct{})

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// When we actually restart, Start() does the monitoring.
	// However, when we simply 'reattach', we have to restart a monitor
	nomonitor := false

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.Running {
		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.Ghost = false
				container.State.setStopped(0)
				hostConfig, _ := container.ReadHostConfig()
				if err := container.Start(hostConfig); err != nil {
					return err
				}
				nomonitor = true
			} else {
				utils.Debugf("Marking as stopped")
				container.State.setStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	}

	// If the container is not running, or has just been flagged not running,
	// then close the wait lock chan (will be reset upon start)
	if !container.State.Running {
		close(container.waitLock)
	} else if !nomonitor {
		hostConfig, _ := container.ReadHostConfig()
		container.allocateNetwork(hostConfig)
		go container.monitor(hostConfig)
	}
	return nil
}
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.PushBack(container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	if updateSuffixarray {
		daemon.idIndex.Add(container.ID)
	} else {
		daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID)
	}

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		utils.Debugf("killing old running container %s", container.ID)

		existingPid := container.State.Pid
		container.State.SetStopped(0)

		// We only have to handle this for lxc because the other drivers will ensure that
		// no processes are left when docker dies
		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
			lxc.KillLxc(container.ID, 9)
		} else {
			// use the current driver and ensure that the container is dead x.x
			cmd := &execdriver.Command{
				ID: container.ID,
			}
			var err error
			cmd.Process, err = os.FindProcess(existingPid)
			if err != nil {
				utils.Debugf("cannot find existing process for %d", existingPid)
			}
			daemon.execDriver.Terminate(cmd)
		}

		if err := container.Unmount(); err != nil {
			utils.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			utils.Debugf("saving stopped state to disk %s", err)
		}

		info := daemon.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if daemon.config.AutoRestart {
				utils.Debugf("Restarting")
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on a nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
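One caveat on the os.FindProcess call above: on Unix it always succeeds and simply wraps the pid without verifying that a process exists, so the error branch is effectively dead code on Linux. If an actual liveness probe were wanted, the conventional approach is a zero signal; a hedged sketch follows (pidAlive is a hypothetical helper, not part of this codebase):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// pidAlive reports whether a process with the given pid exists, using the
// conventional "signal 0" probe. On Unix, os.FindProcess never fails, so
// the real check is the Signal call.
func pidAlive(pid int) bool {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return false // only reachable on non-Unix platforms
	}
	// Signal(0) sends no signal but still performs the existence/permission check.
	return proc.Signal(syscall.Signal(0)) == nil
}

func main() {
	fmt.Println(pidAlive(os.Getpid())) // true: this process exists
	fmt.Println(pidAlive(999999))      // very likely false
}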
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)
			if err := container.allocateNetwork(); err != nil {
				return err
			}
			container.waitLock = make(chan struct{})
			go container.monitor()
		}
	}
	return nil
}
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)
			if err := container.allocateNetwork(); err != nil {
				return err
			}
			container.waitLock = make(chan struct{})
			go container.monitor(nil)
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on a nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		if container.State.IsGhost() {
			utils.Debugf("killing ghost %s", container.ID)

			existingPid := container.State.Pid
			container.State.SetGhost(false)
			container.State.SetStopped(0)

			// We only have to handle this for lxc because the other drivers will ensure that
			// no ghost processes are left when docker dies
			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				// use the current driver and ensure that the container is dead x.x
				cmd := &execdriver.Command{
					ID: container.ID,
				}
				var err error
				cmd.Process, err = os.FindProcess(existingPid)
				if err != nil {
					utils.Debugf("cannot find existing process for %d", existingPid)
				}
				runtime.execDriver.Terminate(cmd)
			}
			if err := container.Unmount(); err != nil {
				utils.Debugf("ghost unmount error %s", err)
			}
			if err := container.ToDisk(); err != nil {
				utils.Debugf("saving ghost state to disk %s", err)
			}
		}

		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	// Get the root filesystem from the driver
	rootfs, err := runtime.driver.Get(container.ID)
	if err != nil {
		return fmt.Errorf("Error getting container filesystem %s from driver %s: %s", container.ID, runtime.driver, err)
	}
	container.rootfs = rootfs

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	// if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)
			if err := container.allocateNetwork(); err != nil {
				return err
			}
			container.waitLock = make(chan struct{})
			go container.monitor(nil)
		}
	}
	return nil
}