Code Example #1
File: qemu.go Project: zanella/nomad
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the Driver's Allocation Dir
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if len(driverConfig.PortMap) > 1 {
		return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
	}

	// Get the image source
	vmPath := driverConfig.ImagePath
	if vmPath == "" {
		return nil, fmt.Errorf("image_path must be set")
	}
	vmID := filepath.Base(vmPath)

	// Get the task's local directory.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Parse configuration arguments
	// Create the base arguments
	accelerator := "tcg"
	if driverConfig.Accelerator != "" {
		accelerator = driverConfig.Accelerator
	}
	// TODO: Check a lower bound, e.g. Qemu's default of 128
	mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)

	absPath, err := GetAbsolutePath("qemu-system-x86_64")
	if err != nil {
		return nil, err
	}

	args := []string{
		absPath,
		"-machine", "type=pc,accel=" + accelerator,
		"-name", vmID,
		"-m", mem,
		"-drive", "file=" + vmPath,
		"-nographic",
	}

	// Add pass-through arguments to the qemu executable. A user can specify
	// these arguments in the driver's task configuration; they are passed
	// directly to qemu as command line options.
	// For example, args = [ "-nodefconfig", "-nodefaults" ]
	// will allow a VM with embedded configuration to boot successfully.
	args = append(args, driverConfig.Args...)

	// Check the task's required Networks resources to add port mappings. If no
	// networks are required, we assume the VM is a purely compute job and does not
	// need to be reachable from the outside world. VMs run without port mappings
	// can still reach out to the world, but without port mappings they are
	// effectively firewalled.
	protocols := []string{"udp", "tcp"}
	if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
		// Loop through the port map and construct the hostfwd string, to map
	// reserved ports to the ports listening in the VM
		// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
		var forwarding []string
		taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
		for label, guest := range driverConfig.PortMap[0] {
			host, ok := taskPorts[label]
			if !ok {
				return nil, fmt.Errorf("Unknown port label %q", label)
			}

			for _, p := range protocols {
				forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
			}
		}

		if len(forwarding) != 0 {
			args = append(args,
				"-netdev",
				fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
				"-device", "virtio-net,netdev=user.0",
			)
		}
	}

	// If using KVM, add optimization args
	if accelerator == "kvm" {
		args = append(args,
			"-enable-kvm",
			"-cpu", "host",
			// Do we have cores information available to the Driver?
			// "-smp", fmt.Sprintf("%d", cores),
		)
	}

	d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "qemu",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	execCmd := &executor.ExecCommand{
		Cmd:  args[0],
		Args: args[1:],
		User: task.User,
	}
	ps, err := exec.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)

	// Create and Return Handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &qemuHandle{
		pluginClient:   pluginClient,
		executor:       exec,
		userPid:        ps.Pid,
		allocDir:       ctx.AllocDir,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		version:        d.config.Version,
		logger:         d.logger,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}

	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		h.logger.Printf("[ERR] driver.qemu: error registering services for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
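
For reference, here is a minimal, self-contained sketch of the decode step at the top of this Start() method. The QemuDriverConfig definition and its mapstructure tags are assumptions inferred from the field names and error messages above (image_path, accelerator, port_map, args), not the actual Nomad source, and the task configuration values are hypothetical.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// QemuDriverConfig is an assumed stand-in for Nomad's driver config struct;
// the mapstructure tags are inferred from the example above.
type QemuDriverConfig struct {
	ImagePath   string           `mapstructure:"image_path"`
	Accelerator string           `mapstructure:"accelerator"`
	PortMap     []map[string]int `mapstructure:"port_map"`
	Args        []string         `mapstructure:"args"`
}

func main() {
	// Hypothetical task.Config as it might arrive from a parsed job file.
	taskConfig := map[string]interface{}{
		"image_path":  "local/linux.img",
		"accelerator": "kvm",
		"port_map":    []map[string]interface{}{{"ssh": 22, "web": 8080}},
		"args":        []string{"-nodefconfig", "-nodefaults"},
	}

	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(taskConfig, &driverConfig); err != nil {
		panic(err)
	}
	// With one port_map entry, the driver would build hostfwd rules for the
	// "ssh" and "web" labels as shown in the example above.
	fmt.Printf("%+v\n", driverConfig)
}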
Code Example #2
File: docker.go Project: nak3/nomad
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	driverConfig, err := NewDockerDriverConfig(task)
	if err != nil {
		return nil, err
	}

	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Initialize docker API clients
	client, waitClient, err := d.dockerClients()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
	}

	if err := d.createImage(driverConfig, client, taskDir); err != nil {
		return nil, fmt.Errorf("failed to create image: %v", err)
	}

	image := driverConfig.ImageName
	// Now that we have the image we can get the image id
	dockerImage, err := client.InspectImage(image)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
		return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
	}
	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:        d.taskEnv,
		Task:           task,
		Driver:         "docker",
		AllocDir:       ctx.AllocDir,
		AllocID:        ctx.AllocID,
		PortLowerBound: d.config.ClientMinPort,
		PortUpperBound: d.config.ClientMaxPort,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	// Only launch syslog server if we're going to use it!
	syslogAddr := ""
	if len(driverConfig.Logging) == 0 || driverConfig.Logging[0].Type == "syslog" {
		ss, err := exec.LaunchSyslogServer()
		if err != nil {
			pluginClient.Kill()
			return nil, fmt.Errorf("failed to start syslog collector: %v", err)
		}
		syslogAddr = ss.Addr
	}

	config, err := d.createContainer(ctx, task, driverConfig, syslogAddr)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
	}
	// Create a container
	container, err := client.CreateContainer(config)
	if err != nil {
		// If the container already exists because of a previous failure we'll
		// try to purge it and re-create it.
		if strings.Contains(err.Error(), "container already exists") {
			// Get the ID of the existing container so we can delete it
			containers, err := client.ListContainers(docker.ListContainersOptions{
				// The image might be in use by a stopped container, so check everything
				All: true,
				Filters: map[string][]string{
					"name": []string{config.Name},
				},
			})
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name:%s", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to query list of containers: %s", err)
			}

			// Couldn't find any matching containers
			if len(containers) == 0 {
				d.logger.Printf("[ERR] driver.docker: failed to get id for container %s: %#v", config.Name, containers)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to get id for container %s", config.Name)
			}

			// Delete matching containers
			d.logger.Printf("[INFO] driver.docker: a container with the name %s already exists; will attempt to purge and re-create", config.Name)
			for _, container := range containers {
				err = client.RemoveContainer(docker.RemoveContainerOptions{
					ID: container.ID,
				})
				if err != nil {
					d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID)
					pluginClient.Kill()
					return nil, fmt.Errorf("Failed to purge container %s: %s", container.ID, err)
				}
				d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID)
			}

			container, err = client.CreateContainer(config)
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to re-create container %s; aborting", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to re-create container %s; aborting", config.Name)
			}
		} else {
			// We failed to create the container for some other reason.
			d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s", image, err)
			pluginClient.Kill()
			return nil, fmt.Errorf("Failed to create container from image %s: %s", image, err)
		}
	}
	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

	// Start the container
	err = client.StartContainer(container.ID, container.HostConfig)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
	}
	d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &DockerHandle{
		client:         client,
		waitClient:     waitClient,
		executor:       exec,
		pluginClient:   pluginClient,
		cleanupImage:   cleanupImage,
		logger:         d.logger,
		imageID:        dockerImage.ID,
		containerID:    container.ID,
		version:        d.config.Version,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan bool),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, container.ID)); err != nil {
		d.logger.Printf("[ERR] driver.docker: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.collectStats()
	go h.run()
	return h, nil
}
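
The most involved part of this example is the recovery path when CreateContainer fails because a same-named container survived a previous attempt. Below is a rough, standalone sketch of that purge-and-retry logic using the same go-dockerclient calls; recreateContainer is a hypothetical helper, not part of the Nomad driver, and logging is omitted for brevity.

package main

import (
	"fmt"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

// recreateContainer tries to create a container and, if one with the same
// name already exists (left over from a previous failed attempt), purges the
// stale container(s) and retries the create once.
func recreateContainer(client *docker.Client, config docker.CreateContainerOptions) (*docker.Container, error) {
	container, err := client.CreateContainer(config)
	if err == nil {
		return container, nil
	}
	if !strings.Contains(err.Error(), "container already exists") {
		return nil, err
	}

	// Find the stale container by name; All covers stopped containers too.
	containers, err := client.ListContainers(docker.ListContainersOptions{
		All:     true,
		Filters: map[string][]string{"name": {config.Name}},
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to query list of containers: %s", err)
	}
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to get id for container %s", config.Name)
	}

	// Purge every match, then retry the create.
	for _, c := range containers {
		if err := client.RemoveContainer(docker.RemoveContainerOptions{ID: c.ID}); err != nil {
			return nil, fmt.Errorf("Failed to purge container %s: %s", c.ID, err)
		}
	}
	return client.CreateContainer(config)
}

func main() {
	// Assumes a reachable Docker daemon configured via the DOCKER_* environment.
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	config := docker.CreateContainerOptions{
		Name:   "example-task",
		Config: &docker.Config{Image: "redis:3.2"},
	}
	container, err := recreateContainer(client, config)
	if err != nil {
		panic(err)
	}
	fmt.Println("created container", container.ID)
}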
Code Example #3
File: exec.go Project: nak3/nomad
func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Get the command to be run
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	// Set the host environment variables.
	filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
	d.taskEnv.AppendHostEnvvars(filter)

	// Get the task directory for storing the executor logs.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:   d.taskEnv,
		Driver:    "exec",
		AllocDir:  ctx.AllocDir,
		AllocID:   ctx.AllocID,
		ChrootEnv: d.config.ChrootEnv,
		Task:      task,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	execCmd := &executor.ExecCommand{
		Cmd:            command,
		Args:           driverConfig.Args,
		FSIsolation:    true,
		ResourceLimits: true,
		User:           getExecutorUser(task),
	}

	ps, err := exec.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}

	d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &execHandle{
		pluginClient:    pluginClient,
		userPid:         ps.Pid,
		executor:        exec,
		allocDir:        ctx.AllocDir,
		isolationConfig: ps.IsolationConfig,
		killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout:  maxKill,
		logger:          d.logger,
		version:         d.config.Version,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *dstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
		d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
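
Examples #1 through #3 share the same executor-plugin boilerplate: locate the nomad binary, point the executor's log file into the task directory, and re-invoke nomad in executor mode. A minimal sketch of just that piece follows; executorPluginConfig is a hypothetical helper, and the handshake and plugin map that createExecutor configures in the real code are omitted.

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"

	plugin "github.com/hashicorp/go-plugin"
)

// executorPluginConfig mirrors the boilerplate shared by the drivers above:
// the executor's output is written next to the task, and the nomad binary is
// re-invoked in "executor" mode. In the real drivers, bin comes from
// discover.NomadExecutable() and taskDir from ctx.AllocDir.TaskDirs.
func executorPluginConfig(bin, taskDir, taskName string) *plugin.ClientConfig {
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", taskName))
	return &plugin.ClientConfig{
		// createExecutor fills in the handshake and plugin map before the
		// plugin client is started.
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}
}

func main() {
	cfg := executorPluginConfig("/usr/local/bin/nomad", "/var/nomad/alloc/task", "redis")
	fmt.Println(cfg.Cmd.Args)
}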
Code Example #4
File: docker.go Project: zanella/nomad
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	// Set environment variables.
	d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath).
		SetTaskLocalDir(allocdir.TaskLocalContainerPath).SetSecretsDir(allocdir.TaskSecretsContainerPath).Build()

	driverConfig, err := NewDockerDriverConfig(task, d.taskEnv)
	if err != nil {
		return nil, err
	}

	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Initialize docker API clients
	client, waitClient, err := d.dockerClients()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
	}

	if err := d.createImage(driverConfig, client, taskDir); err != nil {
		return nil, err
	}

	image := driverConfig.ImageName
	// Now that we have the image we can get the image id
	dockerImage, err := client.InspectImage(image)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
		return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
	}
	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:        d.taskEnv,
		Task:           task,
		Driver:         "docker",
		AllocDir:       ctx.AllocDir,
		AllocID:        ctx.AllocID,
		PortLowerBound: d.config.ClientMinPort,
		PortUpperBound: d.config.ClientMaxPort,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	// Only launch syslog server if we're going to use it!
	syslogAddr := ""
	if runtime.GOOS == "darwin" && len(driverConfig.Logging) == 0 {
		d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
	} else if len(driverConfig.Logging) == 0 || driverConfig.Logging[0].Type == "syslog" {
		ss, err := exec.LaunchSyslogServer()
		if err != nil {
			pluginClient.Kill()
			return nil, fmt.Errorf("failed to start syslog collector: %v", err)
		}
		syslogAddr = ss.Addr
	}

	config, err := d.createContainerConfig(ctx, task, driverConfig, syslogAddr)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
	}

	container, rerr := d.createContainer(config)
	if rerr != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container: %s", rerr)
		pluginClient.Kill()
		rerr.Err = fmt.Sprintf("Failed to create container: %s", rerr.Err)
		return nil, rerr
	}

	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

	// We don't need to start the container if it is already running, since we
	// don't re-create containers that are already present and running on the
	// host.
	if !container.State.Running {
		// Start the container
		err := d.startContainer(container)
		if err != nil {
			d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
			pluginClient.Kill()
			err.Err = fmt.Sprintf("Failed to start container %s: %s", container.ID, err)
			return nil, err
		}
		d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
	} else {
		d.logger.Printf("[DEBUG] driver.docker: re-attaching to container %s with status %q",
			container.ID, container.State.String())
	}

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &DockerHandle{
		client:         client,
		waitClient:     waitClient,
		executor:       exec,
		pluginClient:   pluginClient,
		cleanupImage:   cleanupImage,
		logger:         d.logger,
		imageID:        dockerImage.ID,
		containerID:    container.ID,
		version:        d.config.Version,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan bool),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, container.ID)); err != nil {
		d.logger.Printf("[ERR] driver.docker: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.collectStats()
	go h.run()
	return h, nil
}
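
Compared with example #2, this version tolerates finding the task's container already running and simply re-attaches instead of calling start again. The sketch below isolates that check with plain go-dockerclient calls; ensureRunning and the extra InspectContainer round-trip are illustrative only, since the real driver reads the state from the container returned by its own createContainer wrapper.

package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// ensureRunning starts the container only if it is not already running,
// mirroring the re-attach behaviour at the end of the example above.
func ensureRunning(client *docker.Client, id string, hostConfig *docker.HostConfig) error {
	container, err := client.InspectContainer(id)
	if err != nil {
		return fmt.Errorf("failed to inspect container %s: %s", id, err)
	}
	if container.State.Running {
		// Left over from a previous attempt; nothing to start, just re-attach.
		return nil
	}
	return client.StartContainer(id, hostConfig)
}

func main() {
	// Assumes a reachable Docker daemon configured via the DOCKER_* environment.
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	// Hypothetical container ID; a driver would use the ID it just created.
	if err := ensureRunning(client, "example-container-id", nil); err != nil {
		fmt.Println("start failed:", err)
	}
}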