Example #1
func TestDockerDriver_Handle(t *testing.T) {
	t.Parallel()

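	// Locate the running Nomad binary; the test re-invokes it below as the syslog plugin.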
	bin, err := discover.NomadExecutable()
	if err != nil {
		t.Fatalf("got an err: %v", err)
	}

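	// Create a temporary file to hand to the syslog plugin; it is cleaned up when the test ends.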
	f, _ := ioutil.TempFile(os.TempDir(), "")
	defer f.Close()
	defer os.Remove(f.Name())
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "syslog", f.Name()),
	}
	exec, pluginClient, err := createExecutor(pluginConfig, os.Stdout, &config.Config{})
	if err != nil {
		t.Fatalf("got an err: %v", err)
	}
	defer pluginClient.Kill()

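	// Build a handle and check that ID() serializes the version, image, container, timeouts, and plugin reattach config.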
	h := &DockerHandle{
		version:        "version",
		imageID:        "imageid",
		executor:       exec,
		pluginClient:   pluginClient,
		containerID:    "containerid",
		killTimeout:    5 * time.Nanosecond,
		maxKillTimeout: 15 * time.Nanosecond,
		doneCh:         make(chan bool),
		waitCh:         make(chan *cstructs.WaitResult, 1),
	}

	actual := h.ID()
	expected := fmt.Sprintf("DOCKER:{\"Version\":\"version\",\"ImageID\":\"imageid\",\"ContainerID\":\"containerid\",\"KillTimeout\":5,\"MaxKillTimeout\":15,\"PluginConfig\":{\"Pid\":%d,\"AddrNet\":\"unix\",\"AddrName\":\"%s\"}}",
		pluginClient.ReattachConfig().Pid, pluginClient.ReattachConfig().Addr.String())
	if actual != expected {
		t.Errorf("Expected `%s`, found `%s`", expected, actual)
	}
}
Example #2
File: java.go Project: xytis/nomad
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig JavaDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Set the host environment variables.
	filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
	d.taskEnv.AppendHostEnvvars(filter)

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	if driverConfig.JarPath == "" {
		return nil, fmt.Errorf("jar_path must be specified")
	}

	args := []string{}
	// Look for jvm options
	if len(driverConfig.JvmOpts) != 0 {
		d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
		args = append(args, driverConfig.JvmOpts...)
	}

	// Build the argument list.
	args = append(args, "-jar", driverConfig.JarPath)
	if len(driverConfig.Args) != 0 {
		args = append(args, driverConfig.Args...)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

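	// Re-invoke the Nomad binary as the executor plugin, logging its output to a per-task file in the task directory.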
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}

	// Set the context
	executorCtx := &executor.ExecutorContext{
		TaskEnv:   d.taskEnv,
		Driver:    "java",
		AllocDir:  ctx.AllocDir,
		AllocID:   ctx.AllocID,
		ChrootEnv: d.config.ChrootEnv,
		Task:      task,
	}
	if err := execIntf.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	absPath, err := GetAbsolutePath("java")
	if err != nil {
		return nil, err
	}

	execCmd := &executor.ExecCommand{
		Cmd:            absPath,
		Args:           args,
		FSIsolation:    true,
		ResourceLimits: true,
		User:           getExecutorUser(task),
	}
	ps, err := execIntf.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &javaHandle{
		pluginClient:    pluginClient,
		executor:        execIntf,
		userPid:         ps.Pid,
		isolationConfig: ps.IsolationConfig,
		taskDir:         taskDir,
		allocDir:        ctx.AllocDir,
		killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout:  maxKill,
		version:         d.config.Version,
		logger:          d.logger,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *dstructs.WaitResult, 1),
	}
	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		d.logger.Printf("[ERR] driver.java: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
Example #3
File: rkt.go Project: carriercomm/nomad
// Run an existing Rkt image.
func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig RktDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}
	// Validate the configuration.
	img := driverConfig.ImageName
	if img == "" {
		return nil, fmt.Errorf("Missing ACI image for rkt")
	}

	// Get the tasks local directory.
	taskName := d.DriverContext.taskName
	taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Build the command.
	var cmdArgs []string

	// Add the given trust prefix
	trustPrefix, trustCmd := task.Config["trust_prefix"]
	insecure := false
	if trustCmd {
		var outBuf, errBuf bytes.Buffer
		cmd := exec.Command("rkt", "trust", "--skip-fingerprint-review=true", fmt.Sprintf("--prefix=%s", trustPrefix))
		cmd.Stdout = &outBuf
		cmd.Stderr = &errBuf
		if err := cmd.Run(); err != nil {
			return nil, fmt.Errorf("Error running rkt trust: %s\n\nOutput: %s\n\nError: %s",
				err, outBuf.String(), errBuf.String())
		}
		d.logger.Printf("[DEBUG] driver.rkt: added trust prefix: %q", trustPrefix)
	} else {
		// Disable signature verification if the trust command was not run.
		insecure = true
	}

	cmdArgs = append(cmdArgs, "run")
	cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", task.Name, ctx.AllocDir.SharedDir))
	cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", task.Name, ctx.AllocDir.SharedDir))
	cmdArgs = append(cmdArgs, img)
	if insecure {
		cmdArgs = append(cmdArgs, "--insecure-options=all")
	}

	// Inject environment variables
	for k, v := range d.taskEnv.EnvMap() {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%v", k, v))
	}

	// Check if the user has overridden the exec command.
	if execCmd, ok := task.Config["command"]; ok {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--exec=%v", execCmd))
	}

	if task.Resources.MemoryMB == 0 {
		return nil, fmt.Errorf("Memory limit cannot be zero")
	}
	if task.Resources.CPU == 0 {
		return nil, fmt.Errorf("CPU limit cannot be zero")
	}

	// Add memory isolator
	cmdArgs = append(cmdArgs, fmt.Sprintf("--memory=%vM", int64(task.Resources.MemoryMB)*bytesToMB))

	// Add CPU isolator
	cmdArgs = append(cmdArgs, fmt.Sprintf("--cpu=%vm", int64(task.Resources.CPU)))

	// Add DNS servers
	for _, ip := range driverConfig.DNSServers {
		if parsedIP := net.ParseIP(ip); parsedIP == nil {
			msg := fmt.Errorf("invalid ip address for container dns server %q", ip)
			d.logger.Printf("[DEBUG] driver.rkt: %v", msg)
			return nil, msg
		} else {
			cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", ip))
		}
	}

	// set DNS search domains
	for _, domain := range driverConfig.DNSSearchDomains {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--dns-search=%s", domain))
	}

	// Add user passed arguments.
	if len(driverConfig.Args) != 0 {
		parsed := d.taskEnv.ParseAndReplace(driverConfig.Args)

		// Need to start arguments with "--"
		if len(parsed) > 0 {
			cmdArgs = append(cmdArgs, "--")
		}

		for _, arg := range parsed {
			cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg))
		}
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "rkt",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}

	absPath, err := GetAbsolutePath("rkt")
	if err != nil {
		return nil, err
	}

	ps, err := execIntf.LaunchCmd(&executor.ExecCommand{
		Cmd:  absPath,
		Args: cmdArgs,
		User: task.User,
	}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}

	d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs)
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &rktHandle{
		pluginClient:   pluginClient,
		executor:       execIntf,
		executorPid:    ps.Pid,
		allocDir:       ctx.AllocDir,
		logger:         d.logger,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *cstructs.WaitResult, 1),
	}
	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
Example #4
File: docker.go Project: hooklift/nomad
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig DockerDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if err := driverConfig.Init(); err != nil {
		return nil, err
	}

	if err := driverConfig.Validate(); err != nil {
		return nil, err
	}

	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Initialize docker API clients
	client, waitClient, err := d.dockerClients()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
	}

	if err := d.createImage(&driverConfig, client, taskDir); err != nil {
		return nil, fmt.Errorf("failed to create image: %v", err)
	}

	image := driverConfig.ImageName
	// Now that we have the image we can get the image id
	dockerImage, err := client.InspectImage(image)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
		return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
	}
	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:        d.taskEnv,
		Task:           task,
		Driver:         "docker",
		AllocDir:       ctx.AllocDir,
		AllocID:        ctx.AllocID,
		PortLowerBound: d.config.ClientMinPort,
		PortUpperBound: d.config.ClientMaxPort,
	}
	ss, err := exec.LaunchSyslogServer(executorCtx)
	if err != nil {
		return nil, fmt.Errorf("failed to start syslog collector: %v", err)
	}

	config, err := d.createContainer(ctx, task, &driverConfig, ss.Addr)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
	}
	// Create a container
	container, err := client.CreateContainer(config)
	if err != nil {
		// If the container already exists because of a previous failure we'll
		// try to purge it and re-create it.
		if strings.Contains(err.Error(), "container already exists") {
			// Get the ID of the existing container so we can delete it
			containers, err := client.ListContainers(docker.ListContainersOptions{
				// The image might be in use by a stopped container, so check everything
				All: true,
				Filters: map[string][]string{
					"name": []string{config.Name},
				},
			})
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name:%s", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to query list of containers: %s", err)
			}

			// Couldn't find any matching containers
			if len(containers) == 0 {
				d.logger.Printf("[ERR] driver.docker: failed to get id for container %s: %#v", config.Name, containers)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to get id for container %s", config.Name)
			}

			// Delete matching containers
			d.logger.Printf("[INFO] driver.docker: a container with the name %s already exists; will attempt to purge and re-create", config.Name)
			for _, container := range containers {
				err = client.RemoveContainer(docker.RemoveContainerOptions{
					ID: container.ID,
				})
				if err != nil {
					d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID)
					pluginClient.Kill()
					return nil, fmt.Errorf("Failed to purge container %s: %s", container.ID, err)
				}
				d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID)
			}

			container, err = client.CreateContainer(config)
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to re-create container %s; aborting", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to re-create container %s; aborting", config.Name)
			}
		} else {
			// We failed to create the container for some other reason.
			d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s", image, err)
			pluginClient.Kill()
			return nil, fmt.Errorf("Failed to create container from image %s: %s", image, err)
		}
	}
	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

	// Start the container
	err = client.StartContainer(container.ID, container.HostConfig)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
	}
	d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &DockerHandle{
		client:         client,
		waitClient:     waitClient,
		executor:       exec,
		pluginClient:   pluginClient,
		cleanupImage:   cleanupImage,
		logger:         d.logger,
		imageID:        dockerImage.ID,
		containerID:    container.ID,
		version:        d.config.Version,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan bool),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, container.ID)); err != nil {
		d.logger.Printf("[ERR] driver.docker: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.collectStats()
	go h.run()
	return h, nil
}
Example #5
// spawnDaemon executes a double fork to start the user command with proper
// isolation. Stores the child process for use in Wait.
func (e *LinuxExecutor) spawnDaemon() error {
	bin, err := discover.NomadExecutable()
	if err != nil {
		return fmt.Errorf("Failed to determine the nomad executable: %v", err)
	}

	// Serialize the cmd and the cgroup configuration so it can be passed to the
	// sub-process.
	var buffer bytes.Buffer
	enc := json.NewEncoder(&buffer)

	c := command.DaemonConfig{
		Cmd:        e.cmd.Cmd,
		Chroot:     e.taskDir,
		StdoutFile: filepath.Join(e.taskDir, allocdir.TaskLocal, fmt.Sprintf("%v.stdout", e.taskName)),
		StderrFile: filepath.Join(e.taskDir, allocdir.TaskLocal, fmt.Sprintf("%v.stderr", e.taskName)),
		StdinFile:  "/dev/null",
	}
	if err := enc.Encode(c); err != nil {
		return fmt.Errorf("Failed to serialize daemon configuration: %v", err)
	}

	// Create a pipe to capture Stdout.
	pr, pw, err := os.Pipe()
	if err != nil {
		return err
	}
	e.spawnOutputWriter = pw
	e.spawnOutputReader = pr

	// Call ourselves using a hidden flag. The new instance of nomad will join
	// the passed cgroup, forkExec the cmd, and output status codes through
	// Stdout.
	escaped := strconv.Quote(buffer.String())
	spawn := exec.Command(bin, "spawn-daemon", escaped)
	spawn.Stdout = e.spawnOutputWriter

	// Capture its Stdin.
	spawnStdIn, err := spawn.StdinPipe()
	if err != nil {
		return err
	}

	if err := spawn.Start(); err != nil {
		fmt.Errorf("Failed to call spawn-daemon on nomad executable: %v", err)
	}

	// Join the spawn-daemon to the cgroup.
	if e.groups != nil {
		manager := cgroupFs.Manager{}
		manager.Cgroups = e.groups

		// Apply will place the current pid into the tasks file for each of the
		// created cgroups:
		//  /sys/fs/cgroup/memory/user/1000.user/4.session/<uuid>/tasks
		//
		// Apply requires superuser permissions, and may fail if Nomad is not run with
		// the required permissions
		if err := manager.Apply(spawn.Process.Pid); err != nil {
			errs := new(multierror.Error)
			errs = multierror.Append(errs, fmt.Errorf("Failed to join spawn-daemon to the cgroup (config => %+v): %v", manager.Cgroups, err))

			if err := sendAbortCommand(spawnStdIn); err != nil {
				errs = multierror.Append(errs, err)
			}

			return errs
		}
	}

	// Tell it to start.
	if err := sendStartCommand(spawnStdIn); err != nil {
		return err
	}

	// Parse the response.
	dec := json.NewDecoder(e.spawnOutputReader)
	var resp command.SpawnStartStatus
	if err := dec.Decode(&resp); err != nil {
		return fmt.Errorf("Failed to parse spawn-daemon start response: %v", err)
	}

	if resp.ErrorMsg != "" {
		return fmt.Errorf("Failed to execute user command: %s", resp.ErrorMsg)
	}

	e.spawnChild = *spawn
	return nil
}
Example #6
File: rkt.go Project: zanella/nomad
// Run an existing Rkt image.
func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig RktDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	driverConfig.PortMap = mapMergeStrStr(driverConfig.PortMapRaw...)

	// ACI image
	img := driverConfig.ImageName

	// Get the tasks local directory.
	taskName := d.DriverContext.taskName
	taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Build the command.
	var cmdArgs []string

	// Add debug option to rkt command.
	debug := driverConfig.Debug

	// Add the given trust prefix
	trustPrefix := driverConfig.TrustPrefix
	insecure := false
	if trustPrefix != "" {
		var outBuf, errBuf bytes.Buffer
		cmd := exec.Command(rktCmd, "trust", "--skip-fingerprint-review=true", fmt.Sprintf("--prefix=%s", trustPrefix), fmt.Sprintf("--debug=%t", debug))
		cmd.Stdout = &outBuf
		cmd.Stderr = &errBuf
		if err := cmd.Run(); err != nil {
			return nil, fmt.Errorf("Error running rkt trust: %s\n\nOutput: %s\n\nError: %s",
				err, outBuf.String(), errBuf.String())
		}
		d.logger.Printf("[DEBUG] driver.rkt: added trust prefix: %q", trustPrefix)
	} else {
		// Disable signature verification if the trust command was not run.
		insecure = true
	}
	cmdArgs = append(cmdArgs, "run")

	// Mount /alloc
	allocVolName := fmt.Sprintf("%s-%s-alloc", ctx.AllocID, task.Name)
	cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", allocVolName, ctx.AllocDir.SharedDir))
	cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", allocVolName, allocdir.SharedAllocContainerPath))

	// Mount /local
	localVolName := fmt.Sprintf("%s-%s-local", ctx.AllocID, task.Name)
	cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", localVolName, filepath.Join(taskDir, allocdir.TaskLocal)))
	cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", localVolName, allocdir.TaskLocalContainerPath))

	// Mount /secrets
	secretsVolName := fmt.Sprintf("%s-%s-secrets", ctx.AllocID, task.Name)
	cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", secretsVolName, filepath.Join(taskDir, allocdir.TaskSecrets)))
	cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", secretsVolName, allocdir.TaskSecretsContainerPath))

	// Mount arbitrary volumes if enabled
	if len(driverConfig.Volumes) > 0 {
		if enabled := d.config.ReadBoolDefault(rktVolumesConfigOption, rktVolumesConfigDefault); !enabled {
			return nil, fmt.Errorf("%s is false; cannot use rkt volumes: %+q", rktVolumesConfigOption, driverConfig.Volumes)
		}

		for i, rawvol := range driverConfig.Volumes {
			parts := strings.Split(rawvol, ":")
			if len(parts) != 2 {
				return nil, fmt.Errorf("invalid rkt volume: %q", rawvol)
			}
			volName := fmt.Sprintf("%s-%s-%d", ctx.AllocID, task.Name, i)
			cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", volName, parts[0]))
			cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", volName, parts[1]))
		}
	}

	cmdArgs = append(cmdArgs, img)
	if insecure {
		cmdArgs = append(cmdArgs, "--insecure-options=all")
	}
	cmdArgs = append(cmdArgs, fmt.Sprintf("--debug=%t", debug))

	// Inject environment variables
	d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath)
	d.taskEnv.SetTaskLocalDir(allocdir.TaskLocalContainerPath)
	d.taskEnv.SetSecretsDir(allocdir.TaskSecretsContainerPath)
	d.taskEnv.Build()
	for k, v := range d.taskEnv.EnvMap() {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%v", k, v))
	}

	// Check if the user has overridden the exec command.
	if driverConfig.Command != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--exec=%v", driverConfig.Command))
	}

	// Add memory isolator
	cmdArgs = append(cmdArgs, fmt.Sprintf("--memory=%vM", int64(task.Resources.MemoryMB)))

	// Add CPU isolator
	cmdArgs = append(cmdArgs, fmt.Sprintf("--cpu=%vm", int64(task.Resources.CPU)))

	// Add DNS servers
	if len(driverConfig.DNSServers) == 1 && (driverConfig.DNSServers[0] == "host" || driverConfig.DNSServers[0] == "none") {
		// Special case single item lists with the special values "host" or "none"
		cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", driverConfig.DNSServers[0]))
	} else {
		for _, ip := range driverConfig.DNSServers {
			if parsedIP := net.ParseIP(ip); parsedIP == nil {
				msg := fmt.Errorf("invalid ip address for container dns server %q", ip)
				d.logger.Printf("[DEBUG] driver.rkt: %v", msg)
				return nil, msg
			} else {
				cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", ip))
			}
		}
	}

	// set DNS search domains
	for _, domain := range driverConfig.DNSSearchDomains {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--dns-search=%s", domain))
	}

	// set network
	network := strings.Join(driverConfig.Net, ",")
	if network != "" {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--net=%s", network))
	}

	// Setup port mapping and exposed ports
	if len(task.Resources.Networks) == 0 {
		d.logger.Println("[DEBUG] driver.rkt: No network interfaces are available")
		if len(driverConfig.PortMap) > 0 {
			return nil, fmt.Errorf("Trying to map ports but no network interface is available")
		}
	} else {
		// TODO add support for more than one network
		network := task.Resources.Networks[0]
		for _, port := range network.ReservedPorts {
			var containerPort string

			mapped, ok := driverConfig.PortMap[port.Label]
			if !ok {
				// If the user hasn't mapped this port via port_map, refuse to start the container.
				return nil, fmt.Errorf("port_map is not set. When you define a port in the resources, you need to configure port_map.")
			}
			containerPort = mapped

			hostPortStr := strconv.Itoa(port.Value)

			d.logger.Printf("[DEBUG] driver.rkt: exposed port %s", containerPort)
			// Add port option to rkt run arguments. rkt allows multiple port args
			cmdArgs = append(cmdArgs, fmt.Sprintf("--port=%s:%s", containerPort, hostPortStr))
		}

		for _, port := range network.DynamicPorts {
			// Map the dynamic port to the container port defined in port_map.
			var containerPort string

			if mapped, ok := driverConfig.PortMap[port.Label]; ok {
				containerPort = mapped
			} else {
				// If the user hasn't mapped this port via port_map, refuse to start the container.
				return nil, fmt.Errorf("port_map is not set. When you define a port in the resources, you need to configure port_map.")
			}

			hostPortStr := strconv.Itoa(port.Value)

			d.logger.Printf("[DEBUG] driver.rkt: exposed port %s", containerPort)
			// Add port option to rkt run arguments. rkt allows multiple port args
			cmdArgs = append(cmdArgs, fmt.Sprintf("--port=%s:%s", containerPort, hostPortStr))
		}

	}

	// Add user passed arguments.
	if len(driverConfig.Args) != 0 {
		parsed := d.taskEnv.ParseAndReplace(driverConfig.Args)

		// Need to start arguments with "--"
		if len(parsed) > 0 {
			cmdArgs = append(cmdArgs, "--")
		}

		for _, arg := range parsed {
			cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg))
		}
	}

	// Set the host environment variables.
	filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
	d.taskEnv.AppendHostEnvvars(filter)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "rkt",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}
	if err := execIntf.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	absPath, err := GetAbsolutePath(rktCmd)
	if err != nil {
		return nil, err
	}

	execCmd := &executor.ExecCommand{
		Cmd:  absPath,
		Args: cmdArgs,
		User: task.User,
	}
	ps, err := execIntf.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}

	d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs)
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &rktHandle{
		pluginClient:   pluginClient,
		executor:       execIntf,
		executorPid:    ps.Pid,
		allocDir:       ctx.AllocDir,
		logger:         d.logger,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}
	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
Example #7
File: raw_exec.go Project: dgshep/nomad
func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}
	// Get the tasks local directory.
	taskName := d.DriverContext.taskName
	taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Get the command to be run
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	// Check if an artifact is specified and attempt to download it
	source, ok := task.Config["artifact_source"]
	if ok && source != "" {
		// Proceed to download an artifact to be executed.
		_, err := getter.GetArtifact(
			taskDir,
			driverConfig.ArtifactSource,
			driverConfig.Checksum,
			d.logger,
		)
		if err != nil {
			return nil, err
		}
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
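	// The executor runs as a separate plugin process; its log file lives in the task directory.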
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:       d.taskEnv,
		AllocDir:      ctx.AllocDir,
		TaskName:      task.Name,
		TaskResources: task.Resources,
		LogConfig:     task.LogConfig,
	}
	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: command, Args: driverConfig.Args}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid)

	// Return a driver handle
	h := &rawExecHandle{
		pluginClient: pluginClient,
		executor:     exec,
		userPid:      ps.Pid,
		killTimeout:  d.DriverContext.KillTimeout(task),
		allocDir:     ctx.AllocDir,
		version:      d.config.Version,
		logger:       d.logger,
		doneCh:       make(chan struct{}),
		waitCh:       make(chan *cstructs.WaitResult, 1),
	}
	go h.run()
	return h, nil
}
Example #8
File: qemu.go Project: zanella/nomad
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the Drivers Allocation Dir
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if len(driverConfig.PortMap) > 1 {
		return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
	}

	// Get the image source
	vmPath := driverConfig.ImagePath
	if vmPath == "" {
		return nil, fmt.Errorf("image_path must be set")
	}
	vmID := filepath.Base(vmPath)

	// Get the tasks local directory.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Parse configuration arguments
	// Create the base arguments
	accelerator := "tcg"
	if driverConfig.Accelerator != "" {
		accelerator = driverConfig.Accelerator
	}
	// TODO: Check a lower bounds, e.g. the default 128 of Qemu
	mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)

	absPath, err := GetAbsolutePath("qemu-system-x86_64")
	if err != nil {
		return nil, err
	}

	args := []string{
		absPath,
		"-machine", "type=pc,accel=" + accelerator,
		"-name", vmID,
		"-m", mem,
		"-drive", "file=" + vmPath,
		"-nographic",
	}

	// Add pass through arguments to qemu executable. A user can specify
	// these arguments in driver task configuration. These arguments are
	// passed directly to the qemu driver as command line options.
	// For example, args = [ "-nodefconfig", "-nodefaults" ]
	// This will allow a VM with embedded configuration to boot successfully.
	args = append(args, driverConfig.Args...)

	// Check the Resources for required Networks to add port mappings. If no
	// resources are required, we assume the VM is a purely compute job and does
	// not require the outside world to be able to reach it. VMs run without port
	// mappings can still reach out to the world, but without port mappings they
	// are effectively firewalled.
	protocols := []string{"udp", "tcp"}
	if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
		// Loop through the port map and construct the hostfwd string, to map
		// reserved ports to the ports listening in the VM
		// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
		var forwarding []string
		taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
		for label, guest := range driverConfig.PortMap[0] {
			host, ok := taskPorts[label]
			if !ok {
				return nil, fmt.Errorf("Unknown port label %q", label)
			}

			for _, p := range protocols {
				forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
			}
		}

		if len(forwarding) != 0 {
			args = append(args,
				"-netdev",
				fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
				"-device", "virtio-net,netdev=user.0",
			)
		}
	}

	// If using KVM, add optimization args
	if accelerator == "kvm" {
		args = append(args,
			"-enable-kvm",
			"-cpu", "host",
			// Do we have cores information available to the Driver?
			// "-smp", fmt.Sprintf("%d", cores),
		)
	}

	d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "qemu",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	execCmd := &executor.ExecCommand{
		Cmd:  args[0],
		Args: args[1:],
		User: task.User,
	}
	ps, err := exec.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)

	// Create and Return Handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &qemuHandle{
		pluginClient:   pluginClient,
		executor:       exec,
		userPid:        ps.Pid,
		allocDir:       ctx.AllocDir,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		version:        d.config.Version,
		logger:         d.logger,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}

	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		h.logger.Printf("[ERR] driver.qemu: error registering services for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
Example #9
File: docker.go Project: stigkj/nomad
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig DockerDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}
	image := driverConfig.ImageName

	if err := driverConfig.Validate(); err != nil {
		return nil, err
	}
	if task.Resources == nil {
		return nil, fmt.Errorf("Resources are not specified")
	}
	if task.Resources.MemoryMB == 0 {
		return nil, fmt.Errorf("Memory limit cannot be zero")
	}
	if task.Resources.CPU == 0 {
		return nil, fmt.Errorf("CPU limit cannot be zero")
	}

	cleanupContainer := d.config.ReadBoolDefault("docker.cleanup.container", true)
	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

	// Initialize docker API client
	client, err := d.dockerClient()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
	}

	repo, tag := docker.ParseRepositoryTag(image)
	// Make sure tag is always explicitly set. We'll default to "latest" if it
	// isn't, which is the expected behavior.
	if tag == "" {
		tag = "latest"
	}

	var dockerImage *docker.Image
	// We're going to check whether the image is already downloaded. If the tag
	// is "latest" we have to check for a new version every time so we don't
	// bother to check and cache the id here. We'll download first, then cache.
	if tag != "latest" {
		dockerImage, err = client.InspectImage(image)
	}

	// Download the image
	if dockerImage == nil {
		pullOptions := docker.PullImageOptions{
			Repository: repo,
			Tag:        tag,
		}

		authOptions := docker.AuthConfiguration{}
		if len(driverConfig.Auth) != 0 {
			authOptions = docker.AuthConfiguration{
				Username:      driverConfig.Auth[0].Username,
				Password:      driverConfig.Auth[0].Password,
				Email:         driverConfig.Auth[0].Email,
				ServerAddress: driverConfig.Auth[0].ServerAddress,
			}
		}

		if authConfig := d.config.Read("docker.auth.config"); authConfig != "" {
			if f, err := os.Open(authConfig); err == nil {
				defer f.Close()
				if authConfigurations, err := docker.NewAuthConfigurations(f); err == nil {
					if authConfiguration, ok := authConfigurations.Configs[repo]; ok {
						authOptions = authConfiguration
					}
				}
			}
		}

		err = client.PullImage(pullOptions, authOptions)
		if err != nil {
			d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s", repo, tag, err)
			return nil, fmt.Errorf("Failed to pull `%s`: %s", image, err)
		}
		d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded", repo, tag)

		// Now that we have the image we can get the image id
		dockerImage, err = client.InspectImage(image)
		if err != nil {
			d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
			return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
		}
	}

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
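	// Start the syslog collector as a separate plugin process, logging to the task directory.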
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-syslog-collector.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "syslog", pluginLogFile),
	}

	logCollector, pluginClient, err := createLogCollector(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	logCollectorCtx := &logcollector.LogCollectorContext{
		TaskName:       task.Name,
		AllocDir:       ctx.AllocDir,
		LogConfig:      task.LogConfig,
		PortLowerBound: d.config.ClientMinPort,
		PortUpperBound: d.config.ClientMaxPort,
	}
	ss, err := logCollector.LaunchCollector(logCollectorCtx)
	if err != nil {
		return nil, fmt.Errorf("failed to start syslog collector: %v", err)
	}

	config, err := d.createContainer(ctx, task, &driverConfig, ss.Addr)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
	}
	// Create a container
	container, err := client.CreateContainer(config)
	if err != nil {
		// If the container already exists because of a previous failure we'll
		// try to purge it and re-create it.
		if strings.Contains(err.Error(), "container already exists") {
			// Get the ID of the existing container so we can delete it
			containers, err := client.ListContainers(docker.ListContainersOptions{
				// The image might be in use by a stopped container, so check everything
				All: true,
				Filters: map[string][]string{
					"name": []string{config.Name},
				},
			})
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name:%s", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to query list of containers: %s", err)
			}

			// Couldn't find any matching containers
			if len(containers) == 0 {
				d.logger.Printf("[ERR] driver.docker: failed to get id for container %s: %#v", config.Name, containers)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to get id for container %s", config.Name)
			}

			// Delete matching containers
			d.logger.Printf("[INFO] driver.docker: a container with the name %s already exists; will attempt to purge and re-create", config.Name)
			for _, container := range containers {
				err = client.RemoveContainer(docker.RemoveContainerOptions{
					ID: container.ID,
				})
				if err != nil {
					d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID)
					pluginClient.Kill()
					return nil, fmt.Errorf("Failed to purge container %s: %s", container.ID, err)
				}
				d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID)
			}

			container, err = client.CreateContainer(config)
			if err != nil {
				d.logger.Printf("[ERR] driver.docker: failed to re-create container %s; aborting", config.Name)
				pluginClient.Kill()
				return nil, fmt.Errorf("Failed to re-create container %s; aborting", config.Name)
			}
		} else {
			// We failed to create the container for some other reason.
			d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s", image, err)
			pluginClient.Kill()
			return nil, fmt.Errorf("Failed to create container from image %s: %s", image, err)
		}
	}
	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

	// Start the container
	err = client.StartContainer(container.ID, container.HostConfig)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err)
	}
	d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)

	// Return a driver handle
	h := &DockerHandle{
		client:           client,
		logCollector:     logCollector,
		pluginClient:     pluginClient,
		cleanupContainer: cleanupContainer,
		cleanupImage:     cleanupImage,
		logger:           d.logger,
		imageID:          dockerImage.ID,
		containerID:      container.ID,
		killTimeout:      d.DriverContext.KillTimeout(task),
		doneCh:           make(chan struct{}),
		waitCh:           make(chan *cstructs.WaitResult, 1),
	}
	go h.run()
	return h, nil
}
Example #10
// Spawn does a double-fork to start and isolate the user command. It takes a
// call-back that is invoked with the pid of the intermediary process. If the
// call back returns an error, the user command is not started and the spawn is
// cancelled. This can be used to put the process into a cgroup or jail and
// cancel starting the user process if that was not successful. An error is
// returned if the call-back returns an error or the user-command couldn't be
// started.
func (s *Spawner) Spawn(cb func(pid int) error) error {
	bin, err := discover.NomadExecutable()
	if err != nil {
		return fmt.Errorf("Failed to determine the nomad executable: %v", err)
	}

	exitFile, err := os.OpenFile(s.StateFile, os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return fmt.Errorf("Error opening file to store exit status: %v", err)
	}
	defer exitFile.Close()

	config, err := s.spawnConfig()
	if err != nil {
		return err
	}

	spawn := exec.Command(bin, "spawn-daemon", config)

	// Capture stdout
	spawnStdout, err := spawn.StdoutPipe()
	if err != nil {
		return fmt.Errorf("Failed to capture spawn-daemon stdout: %v", err)
	}
	defer spawnStdout.Close()

	// Capture stdin.
	spawnStdin, err := spawn.StdinPipe()
	if err != nil {
		return fmt.Errorf("Failed to capture spawn-daemon stdin: %v", err)
	}
	defer spawnStdin.Close()

	if err := spawn.Start(); err != nil {
		return fmt.Errorf("Failed to call spawn-daemon on nomad executable: %v", err)
	}

	if cb != nil {
		cbErr := cb(spawn.Process.Pid)
		if cbErr != nil {
			errs := new(multierror.Error)
			errs = multierror.Append(errs, cbErr)
			if err := s.sendAbortCommand(spawnStdin); err != nil {
				errs = multierror.Append(errs, err)
			}

			return errs
		}
	}

	if err := s.sendStartCommand(spawnStdin); err != nil {
		return err
	}

	respCh := make(chan command.SpawnStartStatus, 1)
	errCh := make(chan error, 1)

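	// Decode the spawn-daemon's start status in a goroutine so the select below can enforce a timeout.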
	go func() {
		var resp command.SpawnStartStatus
		dec := json.NewDecoder(spawnStdout)
		if err := dec.Decode(&resp); err != nil {
			errCh <- fmt.Errorf("Failed to parse spawn-daemon start response: %v", err)
		}
		respCh <- resp
	}()

	select {
	case err := <-errCh:
		return err
	case resp := <-respCh:
		if resp.ErrorMsg != "" {
			return fmt.Errorf("Failed to execute user command: %s", resp.ErrorMsg)
		}
		s.UserPid = resp.UserPID
	case <-time.After(5 * time.Second):
		return fmt.Errorf("timed out waiting for response")
	}

	// Store the spawn process.
	s.spawn = spawn.Process
	s.SpawnPid = s.spawn.Pid
	s.SpawnPpid = os.Getpid()
	return nil
}
Example #11
File: exec.go Project: carriercomm/nomad
func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Get the command to be run
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	// Set the host environment variables.
	filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
	d.taskEnv.AppendHostEnvvars(filter)

	// Get the task directory for storing the executor logs.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "exec",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}

	ps, err := exec.LaunchCmd(&executor.ExecCommand{
		Cmd:            command,
		Args:           driverConfig.Args,
		FSIsolation:    true,
		ResourceLimits: true,
		User:           getExecutorUser(task),
	}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &execHandle{
		pluginClient:    pluginClient,
		userPid:         ps.Pid,
		executor:        exec,
		allocDir:        ctx.AllocDir,
		isolationConfig: ps.IsolationConfig,
		killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout:  maxKill,
		logger:          d.logger,
		version:         d.config.Version,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *cstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
		d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
Example #12
File: java.go Project: shadabahmed/nomad
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig JavaDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Proceed to download an artifact to be executed.
	path, err := getter.GetArtifact(
		taskDir,
		driverConfig.ArtifactSource,
		driverConfig.Checksum,
		d.logger,
	)
	if err != nil {
		return nil, err
	}

	jarName := filepath.Base(path)

	args := []string{}
	// Look for jvm options
	if len(driverConfig.JvmOpts) != 0 {
		d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
		args = append(args, driverConfig.JvmOpts...)
	}

	// Build the argument list.
	args = append(args, "-jar", jarName)
	if len(driverConfig.Args) != 0 {
		args = append(args, driverConfig.Args...)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:          d.taskEnv,
		AllocDir:         ctx.AllocDir,
		TaskName:         task.Name,
		TaskResources:    task.Resources,
		FSIsolation:      true,
		ResourceLimits:   true,
		UnprivilegedUser: true,
	}
	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: "java", Args: args}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)

	// Return a driver handle
	h := &javaHandle{
		pluginClient:    pluginClient,
		executor:        exec,
		userPid:         ps.Pid,
		isolationConfig: ps.IsolationConfig,
		taskDir:         taskDir,
		allocDir:        ctx.AllocDir,
		killTimeout:     d.DriverContext.KillTimeout(task),
		logger:          d.logger,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *cstructs.WaitResult, 1),
	}

	go h.run()
	return h, nil
}
Example #13
File: qemu.go Project: dgshep/nomad
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the Drivers Allocation Dir
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if len(driverConfig.PortMap) > 1 {
		return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
	}

	// Get the image source
	source, ok := task.Config["artifact_source"]
	if !ok || source == "" {
		return nil, fmt.Errorf("Missing source image Qemu driver")
	}

	// Qemu defaults to 128M of RAM for a given VM. Instead, we force users to
	// supply a memory size in the tasks resources
	if task.Resources == nil || task.Resources.MemoryMB == 0 {
		return nil, fmt.Errorf("Missing required Task Resource: Memory")
	}

	// Get the tasks local directory.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Proceed to download an artifact to be executed.
	vmPath, err := getter.GetArtifact(
		taskDir,
		driverConfig.ArtifactSource,
		driverConfig.Checksum,
		d.logger,
	)
	if err != nil {
		return nil, err
	}

	vmID := filepath.Base(vmPath)

	// Parse configuration arguments
	// Create the base arguments
	accelerator := "tcg"
	if driverConfig.Accelerator != "" {
		accelerator = driverConfig.Accelerator
	}
	// TODO: Check a lower bounds, e.g. the default 128 of Qemu
	mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)

	args := []string{
		"qemu-system-x86_64",
		"-machine", "type=pc,accel=" + accelerator,
		"-name", vmID,
		"-m", mem,
		"-drive", "file=" + vmPath,
		"-nodefconfig",
		"-nodefaults",
		"-nographic",
	}

	// Check the Resources for required Networks to add port mappings. If no
	// resources are required, we assume the VM is a purely compute job and does
	// not require the outside world to be able to reach it. VMs run without port
	// mappings can still reach out to the world, but without port mappings they
	// are effectively firewalled.
	protocols := []string{"udp", "tcp"}
	if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
		// Loop through the port map and construct the hostfwd string, to map
		// reserved ports to the ports listening in the VM
		// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
		var forwarding []string
		taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
		for label, guest := range driverConfig.PortMap[0] {
			host, ok := taskPorts[label]
			if !ok {
				return nil, fmt.Errorf("Unknown port label %q", label)
			}

			for _, p := range protocols {
				forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
			}
		}

		if len(forwarding) != 0 {
			args = append(args,
				"-netdev",
				fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
				"-device", "virtio-net,netdev=user.0",
			)
		}
	}

	// If using KVM, add optimization args
	if accelerator == "kvm" {
		args = append(args,
			"-enable-kvm",
			"-cpu", "host",
			// Do we have cores information available to the Driver?
			// "-smp", fmt.Sprintf("%d", cores),
		)
	}

	d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:       d.taskEnv,
		AllocDir:      ctx.AllocDir,
		TaskName:      task.Name,
		TaskResources: task.Resources,
		LogConfig:     task.LogConfig,
	}
	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: args[0], Args: args[1:]}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)

	// Create and Return Handle
	h := &qemuHandle{
		pluginClient: pluginClient,
		executor:     exec,
		userPid:      ps.Pid,
		allocDir:     ctx.AllocDir,
		killTimeout:  d.DriverContext.KillTimeout(task),
		version:      d.config.Version,
		logger:       d.logger,
		doneCh:       make(chan struct{}),
		waitCh:       make(chan *cstructs.WaitResult, 1),
	}

	go h.run()
	return h, nil
}
Example #14
File: docker.go Project: zanella/nomad
func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	// Set environment variables.
	d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath).
		SetTaskLocalDir(allocdir.TaskLocalContainerPath).SetSecretsDir(allocdir.TaskSecretsContainerPath).Build()

	driverConfig, err := NewDockerDriverConfig(task, d.taskEnv)
	if err != nil {
		return nil, err
	}

	cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true)

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Initialize docker API clients
	client, waitClient, err := d.dockerClients()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err)
	}

	if err := d.createImage(driverConfig, client, taskDir); err != nil {
		return nil, err
	}

	image := driverConfig.ImageName
	// Now that we have the image we can get the image id
	dockerImage, err := client.InspectImage(image)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err)
		return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err)
	}
	d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID)

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:        d.taskEnv,
		Task:           task,
		Driver:         "docker",
		AllocDir:       ctx.AllocDir,
		AllocID:        ctx.AllocID,
		PortLowerBound: d.config.ClientMinPort,
		PortUpperBound: d.config.ClientMaxPort,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	// Only launch syslog server if we're going to use it!
	syslogAddr := ""
	if runtime.GOOS == "darwin" && len(driverConfig.Logging) == 0 {
		d.logger.Printf("[DEBUG] driver.docker: disabling syslog driver as Docker for Mac workaround")
	} else if len(driverConfig.Logging) == 0 || driverConfig.Logging[0].Type == "syslog" {
		ss, err := exec.LaunchSyslogServer()
		if err != nil {
			pluginClient.Kill()
			return nil, fmt.Errorf("failed to start syslog collector: %v", err)
		}
		syslogAddr = ss.Addr
	}

	config, err := d.createContainerConfig(ctx, task, driverConfig, syslogAddr)
	if err != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err)
		pluginClient.Kill()
		return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err)
	}

	container, rerr := d.createContainer(config)
	if rerr != nil {
		d.logger.Printf("[ERR] driver.docker: failed to create container: %s", rerr)
		pluginClient.Kill()
		rerr.Err = fmt.Sprintf("Failed to create container: %s", rerr.Err)
		return nil, rerr
	}

	d.logger.Printf("[INFO] driver.docker: created container %s", container.ID)

	// We don't need to start the container if the container is already running
	// since we don't create containers which are already present on the host
	// and are running
	if !container.State.Running {
		// Start the container
		err := d.startContainer(container)
		if err != nil {
			d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err)
			pluginClient.Kill()
			err.Err = fmt.Sprintf("Failed to start container %s: %s", container.ID, err)
			return nil, err
		}
		d.logger.Printf("[INFO] driver.docker: started container %s", container.ID)
	} else {
		d.logger.Printf("[DEBUG] driver.docker: re-attaching to container %s with status %q",
			container.ID, container.State.String())
	}

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &DockerHandle{
		client:         client,
		waitClient:     waitClient,
		executor:       exec,
		pluginClient:   pluginClient,
		cleanupImage:   cleanupImage,
		logger:         d.logger,
		imageID:        dockerImage.ID,
		containerID:    container.ID,
		version:        d.config.Version,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		doneCh:         make(chan bool),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, container.ID)); err != nil {
		d.logger.Printf("[ERR] driver.docker: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.collectStats()
	go h.run()
	return h, nil
}