func setupBaseImage() {
	eng, err := engine.New(unitTestStoreBase)
	if err != nil {
		log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
	}
	job := eng.Job("initserver")
	job.Setenv("Root", unitTestStoreBase)
	job.SetenvBool("Autorestart", false)
	job.Setenv("BridgeIface", unitTestNetworkBridge)
	if err := job.Run(); err != nil {
		log.Fatalf("Unable to create a runtime for tests: %s", err)
	}

	job = eng.Job("inspect", unitTestImageName, "image")
	img, _ := job.Stdout.AddEnv()
	// If the unit test image is not found, try to download it.
	if err := job.Run(); err != nil || img.Get("id") != unitTestImageID {
		// Retrieve the Image
		job = eng.Job("pull", unitTestImageName)
		job.Stdout.Add(utils.NopWriteCloser(os.Stdout))
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}
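Every example on this page routes output through utils.NopWriteCloser, but the helper itself is never shown. As a point of reference, here is a minimal sketch of the conventional implementation, the writer-side analog of the standard library's io.NopCloser; treat it as an assumption about the utils package, not its verbatim source:

package utils

import "io"

// nopWriteCloser adapts an io.Writer into an io.WriteCloser whose Close
// is a no-op.
type nopWriteCloser struct {
	io.Writer // Write calls are forwarded to the wrapped writer
}

// Close does nothing, so wrapping os.Stdout (as the examples above do)
// never closes the real stream.
func (nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns w as an io.WriteCloser with a no-op Close.
func NopWriteCloser(w io.Writer) io.WriteCloser {
	return nopWriteCloser{w}
}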
Example No. 2
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
	job := &Job{
		Eng:    eng,
		Name:   name,
		Args:   args,
		Stdin:  NewInput(),
		Stdout: NewOutput(),
		Stderr: NewOutput(),
	}
	job.Stdout.Add(utils.NopWriteCloser(eng.Stdout))
	job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
	handler, exists := eng.handlers[name]
	if exists {
		job.handler = handler
	}
	return job
}
Example No. 3
func setupBaseImage() {
	eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase)
	job := eng.Job("inspect", unitTestImageName, "image")
	img, _ := job.Stdout.AddEnv()
	// If the unit test image is not found, try to download it.
	if err := job.Run(); err != nil || img.Get("id") != unitTestImageID {
		// Retrieve the Image
		job = eng.Job("pull", unitTestImageName)
		job.Stdout.Add(utils.NopWriteCloser(os.Stdout))
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}
Example No. 4
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	switch compression {
	case Uncompressed:
		return utils.NopWriteCloser(dest), nil
	case Gzip:
		return gzip.NewWriter(dest), nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all.
		// However, this is not a problem, as docker currently only generates gzipped tars.
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
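Because CompressStream returns an io.WriteCloser in every successful case, callers can treat all formats uniformly and always defer Close. A hypothetical caller sketch (writeCompressed is illustrative, not part of the source; it assumes the io package is imported):

func writeCompressed(dest io.WriteCloser, src io.Reader, c Compression) error {
	w, err := CompressStream(dest, c)
	if err != nil {
		return err
	}
	// Closing the returned writer flushes the gzip footer when c is Gzip;
	// for Uncompressed it is a NopWriteCloser, so Close is always safe.
	defer w.Close()
	_, err = io.Copy(w, src)
	return err
}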
Example No. 5
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
	job := &Job{
		Eng:    eng,
		Name:   name,
		Args:   args,
		Stdin:  NewInput(),
		Stdout: NewOutput(),
		Stderr: NewOutput(),
		env:    &Env{},
	}
	if eng.Logging {
		job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
	}

	// A handler registered for a specific name shadows the catchall.
	if handler, exists := eng.handlers[name]; exists {
		job.handler = handler
	} else if eng.catchall != nil && name != "" {
		// empty job names are illegal, catchall or not.
		job.handler = eng.catchall
	}
	return job
}
Example No. 6
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
	job := &Job{
		Eng:    eng,
		Name:   name,
		Args:   args,
		Stdin:  NewInput(),
		Stdout: NewOutput(),
		Stderr: NewOutput(),
		env:    &Env{},
	}
	if eng.Logging {
		job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
	}
	if eng.catchall != nil {
		job.handler = eng.catchall
	} else {
		handler, exists := eng.handlers[name]
		if exists {
			job.handler = handler
		}
	}
	return job
}
Example No. 7
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}

	// init the wait lock
	container.waitLock = make(chan struct{})

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// When we actually restart, Start() does the monitoring.
	// However, when we simply 'reattach', we have to restart the monitor.
	nomonitor := false

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.Running {
		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			utils.Debugf("Container %s was supposed to be running be is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.Ghost = false
				container.State.setStopped(0)
				hostConfig, _ := container.ReadHostConfig()
				if err := container.Start(hostConfig); err != nil {
					return err
				}
				nomonitor = true
			} else {
				utils.Debugf("Marking as stopped")
				container.State.setStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	}

	// If the container is not running or just has been flagged not running
	// then close the wait lock chan (will be reset upon start)
	if !container.State.Running {
		close(container.waitLock)
	} else if !nomonitor {
		hostConfig, _ := container.ReadHostConfig()
		container.allocateNetwork(hostConfig)
		go container.monitor(hostConfig)
	}
	return nil
}
Example No. 8
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		if container.State.IsGhost() {
			utils.Debugf("killing ghost %s", container.ID)

			existingPid := container.State.Pid
			container.State.SetGhost(false)
			container.State.SetStopped(0)

			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				command := &execdriver.Command{
					ID: container.ID,
				}
				command.Process = &os.Process{Pid: existingPid}
				runtime.execDriver.Kill(command, 9)
			}
			// ensure that the filesystem is also unmounted
			unmountVolumesForContainer(container)
			if err := container.Unmount(); err != nil {
				utils.Debugf("ghost unmount error %s", err)
			}
		}

		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				unmountVolumesForContainer(container)
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}

				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
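The waitLock comment in this and the following examples leans on a Go channel property worth spelling out: receiving from a nil channel blocks forever, while receiving from a closed channel returns immediately with the zero value. A self-contained demonstration (not from the source):

package main

import "fmt"

func main() {
	// What Register does for stopped containers: make the channel and
	// close it, so any later `<-waitLock` returns at once.
	waitLock := make(chan struct{})
	close(waitLock)
	<-waitLock
	fmt.Println("closed channel: receive returned immediately")

	// Had waitLock been left nil, a bare receive would block forever;
	// select with a default shows the would-block case without hanging.
	var nilLock chan struct{}
	select {
	case <-nilLock:
		fmt.Println("unreachable: a nil channel never delivers")
	default:
		fmt.Println("nil channel: receive would block forever")
	}
}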
Example No. 9
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.PushBack(container)

	// Don't update the suffixarray while we're starting up:
	// updating it for every container would waste time.
	if updateSuffixarray {
		daemon.idIndex.Add(container.ID)
	} else {
		daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID)
	}

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		utils.Debugf("killing old running container %s", container.ID)

		existingPid := container.State.Pid
		container.State.SetStopped(0)

		// We only have to handle this for lxc because the other drivers will ensure that
		// no processes are left when docker dies
		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
			lxc.KillLxc(container.ID, 9)
		} else {
			// use the current driver and ensure that the container is dead
			cmd := &execdriver.Command{
				ID: container.ID,
			}
			var err error
			cmd.Process, err = os.FindProcess(existingPid)
			if err != nil {
				utils.Debugf("cannot find existing process for %d", existingPid)
			}
			daemon.execDriver.Terminate(cmd)
		}
		if err := container.Unmount(); err != nil {
			utils.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			utils.Debugf("saving stopped state to disk %s", err)
		}

		info := daemon.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if daemon.config.AutoRestart {
				utils.Debugf("Restarting")
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}

				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
Example No. 10
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			utils.Debugf("Container %s was supposed to be running be is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)

			if err := container.allocateNetwork(); err != nil {
				return err
			}

			container.waitLock = make(chan struct{})

			go container.monitor()
		}
	}
	return nil
}
Example No. 11
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		info := runtime.execDriver.Info(container.ID)

		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)

			if err := container.allocateNetwork(); err != nil {
				return err
			}

			container.waitLock = make(chan struct{})
			go container.monitor(nil)
		}
	} else {
		// When the container is not running, we still initialize the waitLock
		// chan and close it. Receiving on nil chan blocks whereas receiving on a
		// closed chan does not. In this case we do not want to block.
		container.waitLock = make(chan struct{})
		close(container.waitLock)
	}
	return nil
}
Example No. 12
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		if container.State.IsGhost() {
			utils.Debugf("killing ghost %s", container.ID)

			existingPid := container.State.Pid
			container.State.SetGhost(false)
			container.State.SetStopped(0)

			// We only have to handle this for lxc because the other drivers will ensure that
			// no ghost processes are left when docker dies
			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				// use the current driver and ensure that the container is dead
				cmd := &execdriver.Command{
					ID: container.ID,
				}
				var err error
				cmd.Process, err = os.FindProcess(existingPid)
				if err != nil {
					utils.Debugf("cannot find existing process for %d", existingPid)
				}
				runtime.execDriver.Terminate(cmd)
			}
			if err := container.Unmount(); err != nil {
				utils.Debugf("ghost unmount error %s", err)
			}
			if err := container.ToDisk(); err != nil {
				utils.Debugf("saving ghost state to disk %s", err)
			}
		}

		info := runtime.execDriver.Info(container.ID)
		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				if err := container.Unmount(); err != nil {
					utils.Debugf("restart unmount error %s", err)
				}

				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
Example No. 13
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
	if container.runtime != nil || runtime.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := runtime.ensureName(container); err != nil {
		return err
	}

	// Get the root filesystem from the driver
	rootfs, err := runtime.driver.Get(container.ID)
	if err != nil {
		return fmt.Errorf("Error getting container filesystem %s from driver %s: %s", container.ID, runtime.driver, err)
	}
	container.rootfs = rootfs

	container.runtime = runtime

	// Attach to stdout and stderr
	container.stderr = utils.NewWriteBroadcaster()
	container.stdout = utils.NewWriteBroadcaster()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.State.IsRunning() {
		info := runtime.execDriver.Info(container.ID)

		if !info.IsRunning() {
			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
			if runtime.config.AutoRestart {
				utils.Debugf("Restarting")
				container.State.SetGhost(false)
				container.State.SetStopped(0)
				if err := container.Start(); err != nil {
					return err
				}
			} else {
				utils.Debugf("Marking as stopped")
				container.State.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			utils.Debugf("Reconnecting to container %v", container.ID)

			if err := container.allocateNetwork(); err != nil {
				return err
			}

			container.waitLock = make(chan struct{})
			go container.monitor(nil)
		}
	}
	return nil
}