func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Get the task's local directory.
	taskName := d.DriverContext.taskName
	taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Get the command to be run.
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	// Check if an artifact is specified and attempt to download it.
	source, ok := task.Config["artifact_source"]
	if ok && source != "" {
		// Proceed to download an artifact to be executed.
		_, err := getter.GetArtifact(
			taskDir,
			driverConfig.ArtifactSource,
			driverConfig.Checksum,
			d.logger,
		)
		if err != nil {
			return nil, err
		}
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:       d.taskEnv,
		AllocDir:      ctx.AllocDir,
		TaskName:      task.Name,
		TaskResources: task.Resources,
		LogConfig:     task.LogConfig,
	}
	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: command, Args: driverConfig.Args}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid)

	// Return a driver handle.
	h := &rawExecHandle{
		pluginClient: pluginClient,
		executor:     exec,
		userPid:      ps.Pid,
		killTimeout:  d.DriverContext.KillTimeout(task),
		allocDir:     ctx.AllocDir,
		version:      d.config.Version,
		logger:       d.logger,
		doneCh:       make(chan struct{}),
		waitCh:       make(chan *cstructs.WaitResult, 1),
	}
	go h.run()
	return h, nil
}
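// A minimal, self-contained sketch of the config decoding step above:
// mapstructure.WeakDecode maps the task's loosely-typed Config map onto a
// typed driver config. The struct and field names here are illustrative,
// not Nomad's actual definitions.
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type exampleDriverConfig struct {
	Command string   `mapstructure:"command"`
	Args    []string `mapstructure:"args"`
}

func main() {
	taskConfig := map[string]interface{}{
		"command": "/bin/sleep",
		"args":    []interface{}{"30"},
	}
	var cfg exampleDriverConfig
	// WeakDecode tolerates loose typing (e.g. numbers arriving as strings),
	// which suits HCL-derived task configuration.
	if err := mapstructure.WeakDecode(taskConfig, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Command:/bin/sleep Args:[30]}
}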
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the driver's allocation dir.
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if len(driverConfig.PortMap) > 1 {
		return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
	}

	// Get the image source.
	vmPath := driverConfig.ImagePath
	if vmPath == "" {
		return nil, fmt.Errorf("image_path must be set")
	}
	vmID := filepath.Base(vmPath)

	// Get the task's local directory.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Parse configuration arguments and create the base arguments.
	accelerator := "tcg"
	if driverConfig.Accelerator != "" {
		accelerator = driverConfig.Accelerator
	}
	// TODO: Check a lower bound, e.g. the Qemu default of 128.
	mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)

	absPath, err := GetAbsolutePath("qemu-system-x86_64")
	if err != nil {
		return nil, err
	}

	args := []string{
		absPath,
		"-machine", "type=pc,accel=" + accelerator,
		"-name", vmID,
		"-m", mem,
		"-drive", "file=" + vmPath,
		"-nographic",
	}

	// Add pass-through arguments to the qemu executable. A user can specify
	// these arguments in the driver task configuration. These arguments are
	// passed directly to the qemu driver as command line options.
	// For example, args = [ "-nodefconfig", "-nodefaults" ]
	// This will allow a VM with embedded configuration to boot successfully.
	args = append(args, driverConfig.Args...)

	// Check the Resources required Networks to add port mappings. If no
	// network resources are required, we assume the VM is a purely compute
	// job and does not require the outside world to be able to reach it. VMs
	// run without port mappings can still reach out to the world, but without
	// port mappings it is effectively firewalled.
	protocols := []string{"udp", "tcp"}
	if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
		// Loop through the port map and construct the hostfwd string, to map
		// reserved ports to the ports listening in the VM.
		// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
		var forwarding []string
		taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
		for label, guest := range driverConfig.PortMap[0] {
			host, ok := taskPorts[label]
			if !ok {
				return nil, fmt.Errorf("Unknown port label %q", label)
			}

			for _, p := range protocols {
				forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
			}
		}

		if len(forwarding) != 0 {
			args = append(args,
				"-netdev",
				fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
				"-device", "virtio-net,netdev=user.0",
			)
		}
	}

	// If using KVM, add optimization args.
	if accelerator == "kvm" {
		args = append(args,
			"-enable-kvm",
			"-cpu", "host",
			// Do we have cores information available to the Driver?
			// "-smp", fmt.Sprintf("%d", cores),
		)
	}

	d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "qemu",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}
	if err := exec.SetContext(executorCtx); err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("failed to set executor context: %v", err)
	}

	execCmd := &executor.ExecCommand{
		Cmd:  args[0],
		Args: args[1:],
		User: task.User,
	}
	ps, err := exec.LaunchCmd(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)

	// Create and return the handle.
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &qemuHandle{
		pluginClient:   pluginClient,
		executor:       exec,
		userPid:        ps.Pid,
		allocDir:       ctx.AllocDir,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		version:        d.config.Version,
		logger:         d.logger,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
	}

	if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
		h.logger.Printf("[ERR] driver.qemu: error registering services for task: %q: %v", task.Name, err)
	}

	go h.run()
	return h, nil
}
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig JavaDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Proceed to download an artifact to be executed.
	path, err := getter.GetArtifact(
		taskDir,
		driverConfig.ArtifactSource,
		driverConfig.Checksum,
		d.logger,
	)
	if err != nil {
		return nil, err
	}

	jarName := filepath.Base(path)

	args := []string{}
	// Look for JVM options.
	if len(driverConfig.JvmOpts) != 0 {
		d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
		args = append(args, driverConfig.JvmOpts...)
	}

	// Build the argument list.
	args = append(args, "-jar", jarName)
	if len(driverConfig.Args) != 0 {
		args = append(args, driverConfig.Args...)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:          d.taskEnv,
		AllocDir:         ctx.AllocDir,
		TaskName:         task.Name,
		TaskResources:    task.Resources,
		FSIsolation:      true,
		ResourceLimits:   true,
		UnprivilegedUser: true,
	}

	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: "java", Args: args}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)

	// Return a driver handle.
	h := &javaHandle{
		pluginClient:    pluginClient,
		executor:        exec,
		userPid:         ps.Pid,
		isolationConfig: ps.IsolationConfig,
		taskDir:         taskDir,
		allocDir:        ctx.AllocDir,
		killTimeout:     d.DriverContext.KillTimeout(task),
		logger:          d.logger,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *cstructs.WaitResult, 1),
	}
	go h.run()
	return h, nil
}
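// A standalone sketch of the argument assembly above: JVM options come
// first, then "-jar" with the artifact's base name, then the task's own
// arguments. All values here are hypothetical.
package main

import "fmt"

func main() {
	jvmOpts := []string{"-Xmx512m", "-Xms256m"} // hypothetical JVM options
	jarName := "app.jar"                        // filepath.Base of the fetched artifact
	taskArgs := []string{"--port", "8080"}      // hypothetical task args

	args := []string{}
	args = append(args, jvmOpts...)
	args = append(args, "-jar", jarName)
	args = append(args, taskArgs...)

	fmt.Println("java", args)
	// java [-Xmx512m -Xms256m -jar app.jar --port 8080]
}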
func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Get the command to be run.
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	// Set the host environment variables.
	filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
	d.taskEnv.AppendHostEnvvars(filter)

	// Get the task directory for storing the executor logs.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}
	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:  d.taskEnv,
		Driver:   "exec",
		AllocDir: ctx.AllocDir,
		AllocID:  ctx.AllocID,
		Task:     task,
	}

	ps, err := exec.LaunchCmd(&executor.ExecCommand{
		Cmd:            command,
		Args:           driverConfig.Args,
		FSIsolation:    true,
		ResourceLimits: true,
		User:           getExecutorUser(task),
	}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid)

	// Return a driver handle.
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &execHandle{
		pluginClient:    pluginClient,
		userPid:         ps.Pid,
		executor:        exec,
		allocDir:        ctx.AllocDir,
		isolationConfig: ps.IsolationConfig,
		killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout:  maxKill,
		logger:          d.logger,
		version:         d.config.Version,
		doneCh:          make(chan struct{}),
		waitCh:          make(chan *cstructs.WaitResult, 1),
	}
	if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
		d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err)
	}
	go h.run()
	return h, nil
}
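// A sketch of the env.blacklist filtering step above. The driver reads a
// comma-separated blacklist and hands it to AppendHostEnvvars; this
// standalone version (hypothetical keys, simplified semantics) shows the
// split-and-filter idea.
package main

import (
	"fmt"
	"strings"
)

func main() {
	blacklist := "CONSUL_TOKEN,VAULT_TOKEN" // hypothetical env.blacklist value
	filter := strings.Split(blacklist, ",")

	hostEnv := map[string]string{
		"PATH":         "/usr/bin",
		"CONSUL_TOKEN": "secret",
	}
	for _, key := range filter {
		delete(hostEnv, key)
	}
	fmt.Println(hostEnv) // map[PATH:/usr/bin]
}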
// Run an existing Qemu image. Start() will pull down an existing, valid Qemu
// image and save it to the driver's allocation dir.
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig QemuDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	if len(driverConfig.PortMap) > 1 {
		return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
	}

	// Get the image source.
	source, ok := task.Config["artifact_source"]
	if !ok || source == "" {
		return nil, fmt.Errorf("Missing source image for the Qemu driver")
	}

	// Qemu defaults to 128M of RAM for a given VM. Instead, we force users to
	// supply a memory size in the task's resources.
	if task.Resources == nil || task.Resources.MemoryMB == 0 {
		return nil, fmt.Errorf("Missing required Task Resource: Memory")
	}

	// Get the task's local directory.
	taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
	if !ok {
		return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
	}

	// Proceed to download an artifact to be executed.
	vmPath, err := getter.GetArtifact(
		taskDir,
		driverConfig.ArtifactSource,
		driverConfig.Checksum,
		d.logger,
	)
	if err != nil {
		return nil, err
	}
	vmID := filepath.Base(vmPath)

	// Parse configuration arguments and create the base arguments.
	accelerator := "tcg"
	if driverConfig.Accelerator != "" {
		accelerator = driverConfig.Accelerator
	}
	// TODO: Check a lower bound, e.g. the Qemu default of 128.
	mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)

	args := []string{
		"qemu-system-x86_64",
		"-machine", "type=pc,accel=" + accelerator,
		"-name", vmID,
		"-m", mem,
		"-drive", "file=" + vmPath,
		"-nodefconfig",
		"-nodefaults",
		"-nographic",
	}

	// Check the Resources required Networks to add port mappings. If no
	// network resources are required, we assume the VM is a purely compute
	// job and does not require the outside world to be able to reach it. VMs
	// run without port mappings can still reach out to the world, but without
	// port mappings it is effectively firewalled.
	protocols := []string{"udp", "tcp"}
	if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
		// Loop through the port map and construct the hostfwd string, to map
		// reserved ports to the ports listening in the VM.
		// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
		var forwarding []string
		taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
		for label, guest := range driverConfig.PortMap[0] {
			host, ok := taskPorts[label]
			if !ok {
				return nil, fmt.Errorf("Unknown port label %q", label)
			}

			for _, p := range protocols {
				forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
			}
		}

		if len(forwarding) != 0 {
			args = append(args,
				"-netdev",
				fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
				"-device", "virtio-net,netdev=user.0",
			)
		}
	}

	// If using KVM, add optimization args.
	if accelerator == "kvm" {
		args = append(args,
			"-enable-kvm",
			"-cpu", "host",
			// Do we have cores information available to the Driver?
			// "-smp", fmt.Sprintf("%d", cores),
		)
	}

	d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
	bin, err := discover.NomadExecutable()
	if err != nil {
		return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
	}

	pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
	pluginConfig := &plugin.ClientConfig{
		Cmd: exec.Command(bin, "executor", pluginLogFile),
	}

	exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
	if err != nil {
		return nil, err
	}
	executorCtx := &executor.ExecutorContext{
		TaskEnv:       d.taskEnv,
		AllocDir:      ctx.AllocDir,
		TaskName:      task.Name,
		TaskResources: task.Resources,
		LogConfig:     task.LogConfig,
	}
	ps, err := exec.LaunchCmd(&executor.ExecCommand{Cmd: args[0], Args: args[1:]}, executorCtx)
	if err != nil {
		pluginClient.Kill()
		return nil, fmt.Errorf("error starting process via the plugin: %v", err)
	}
	d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)

	// Create and return the handle.
	h := &qemuHandle{
		pluginClient: pluginClient,
		executor:     exec,
		userPid:      ps.Pid,
		allocDir:     ctx.AllocDir,
		killTimeout:  d.DriverContext.KillTimeout(task),
		version:      d.config.Version,
		logger:       d.logger,
		doneCh:       make(chan struct{}),
		waitCh:       make(chan *cstructs.WaitResult, 1),
	}
	go h.run()
	return h, nil
}