Example #1
// NewExecutor returns an Executor
func NewExecutor(logger *log.Logger) Executor {
	exec := &UniversalExecutor{
		logger:         logger,
		processExited:  make(chan interface{}),
		totalCpuStats:  stats.NewCpuStats(),
		userCpuStats:   stats.NewCpuStats(),
		systemCpuStats: stats.NewCpuStats(),
		pids:           make(map[int]*nomadPid),
	}

	return exec
}
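For orientation, a minimal calling sketch; the logger construction here is an assumption for illustration, not taken from the Nomad source:

// Hypothetical caller: any *log.Logger works; the concrete type behind the
// returned Executor interface is the *UniversalExecutor built above.
logger := log.New(os.Stderr, "executor: ", log.LstdFlags)
exec := NewExecutor(logger)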
Example #2
File: executor.go Project: nak3/nomad
// NewExecutor returns an Executor
func NewExecutor(logger *log.Logger) Executor {
	if err := shelpers.Init(); err != nil {
		logger.Printf("[FATAL] executor: unable to initialize stats: %v", err)
		return nil
	}

	exec := &UniversalExecutor{
		logger:         logger,
		processExited:  make(chan interface{}),
		totalCpuStats:  stats.NewCpuStats(),
		userCpuStats:   stats.NewCpuStats(),
		systemCpuStats: stats.NewCpuStats(),
		pids:           make(map[int]*nomadPid),
	}

	return exec
}
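The only difference from Example #1 is the guard around shelpers.Init, which sets up the stats helpers before the executor is built; note that on failure this variant logs at FATAL and returns a nil Executor, so callers must handle the nil case.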
Example #3
// NewExecutor returns an Executor
func NewExecutor(logger *log.Logger) Executor {
	exec := &UniversalExecutor{
		logger:        logger,
		processExited: make(chan interface{}),
		cpuStats:      stats.NewCpuStats(),
	}

	return exec
}
Example #4
File: executor.go Project: nak3/nomad
// scanPids scans all the pids on the machine running the current executor and
// returns the child processes of the executor.
func (e *UniversalExecutor) scanPids(parentPid int, allPids []ps.Process) (map[int]*nomadPid, error) {
	processFamily := make(map[int]struct{})
	processFamily[parentPid] = struct{}{}

	// A mapping of pids to their parent pids. It is used to build the process
	// tree of the executing task
	pidsRemaining := make(map[int]int, len(allPids))
	for _, pid := range allPids {
		pidsRemaining[pid.Pid()] = pid.PPid()
	}

	for {
		// flag to indicate if we have found a match
		foundNewPid := false

		for pid, ppid := range pidsRemaining {
			_, isChild := processFamily[ppid]

			// checking if the pid is a child of any of the parents
			if isChild {
				processFamily[pid] = struct{}{}
				delete(pidsRemaining, pid)
				foundNewPid = true
			}
		}

		// not scanning anymore if we couldn't find a single match
		if !foundNewPid {
			break
		}
	}

	res := make(map[int]*nomadPid)
	for pid := range processFamily {
		np := nomadPid{
			pid:           pid,
			cpuStatsTotal: stats.NewCpuStats(),
			cpuStatsUser:  stats.NewCpuStats(),
			cpuStatsSys:   stats.NewCpuStats(),
		}
		res[pid] = &np
	}
	return res, nil
}
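The nomadPid type itself never appears in these snippets. From the composite literals above, and assuming stats.NewCpuStats returns a *stats.CpuStats, a plausible shape would be the following sketch (inferred from usage, not copied from the Nomad source):

type nomadPid struct {
	pid           int
	cpuStatsTotal *stats.CpuStats
	cpuStatsUser  *stats.CpuStats
	cpuStatsSys   *stats.CpuStats
}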
Example #5
// scanPids scans all the pids on the machine running the current executor and
// returns the child processes of the executor.
func (e *UniversalExecutor) scanPids(parentPid int, allPids []ps.Process) (map[int]*nomadPid, error) {
	processFamily := make(map[int]struct{})
	processFamily[parentPid] = struct{}{}

	for {
		// flag to indicate if we have found a match
		foundNewPid := false

		// A buffer for holding pids which haven't matched with any parent
		// pid on this pass; rebuilt every iteration so matched pids drop
		// out of the scan and unmatched pids aren't appended twice
		var pidsRemaining []ps.Process
		for _, pid := range allPids {
			_, isChild := processFamily[pid.PPid()]

			// checking if the pid is a child of any of the parents
			if isChild {
				processFamily[pid.Pid()] = struct{}{}
				foundNewPid = true
			} else {
				// if it is not, then we add the pid to the buffer
				pidsRemaining = append(pidsRemaining, pid)
			}
		}
		// scan only the pids which are left in the buffer
		allPids = pidsRemaining

		// not scanning anymore if we couldn't find a single match
		if !foundNewPid {
			break
		}
	}
	res := make(map[int]*nomadPid)
	for pid := range processFamily {
		np := nomadPid{
			pid:           pid,
			cpuStatsTotal: stats.NewCpuStats(),
			cpuStatsUser:  stats.NewCpuStats(),
			cpuStatsSys:   stats.NewCpuStats(),
		}
		res[pid] = &np
	}
	return res, nil
}
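The map-based variant in Example #4 reaches the same fixed point with less bookkeeping: Go permits deleting map entries during a range loop, so matched pids can simply be removed from pidsRemaining in place instead of being filtered into a fresh buffer on every pass.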
Example #6
// getAllPids returns the pids of all the processes spun up by the executor. We
// use the libcontainer apis to get the pids when the user is using cgroup
// isolation and we scan the entire process table if the user is not using any
// isolation
func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
	if e.command.ResourceLimits {
		manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
		pids, err := manager.GetAllPids()
		if err != nil {
			return nil, err
		}
		np := make(map[int]*nomadPid, len(pids))
		for _, pid := range pids {
			np[pid] = &nomadPid{
				pid:           pid,
				cpuStatsTotal: stats.NewCpuStats(),
				cpuStatsSys:   stats.NewCpuStats(),
				cpuStatsUser:  stats.NewCpuStats(),
			}
		}
		return np, nil
	}
	allProcesses, err := ps.Processes()
	if err != nil {
		return nil, err
	}
	return e.scanPids(os.Getpid(), allProcesses)
}
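Because scanPids consumes only the go-ps Process interface (Pid, PPid, Executable), it can be driven from a synthetic process table. A minimal sketch; fakeProc and exampleScan are hypothetical names, not part of the Nomad tree:

// fakeProc is a stub satisfying the github.com/mitchellh/go-ps Process
// interface, so scanPids can be exercised without reading the real
// process table.
type fakeProc struct{ pid, ppid int }

func (f fakeProc) Pid() int           { return f.pid }
func (f fakeProc) PPid() int          { return f.ppid }
func (f fakeProc) Executable() string { return "fake" }

func exampleScan(e *UniversalExecutor) {
	// pids 2 and 3 form a chain under parent pid 1; pid 9 hangs off an
	// unrelated parent and must not appear in the result
	procs := []ps.Process{fakeProc{2, 1}, fakeProc{3, 2}, fakeProc{9, 7}}
	res, _ := e.scanPids(1, procs)
	fmt.Println(len(res)) // 3: the parent plus its two descendants
}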
Example #7
File: lxc.go Project: zanella/nomad
// Open creates the driver to monitor an existing LXC container
func (d *LxcDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
	pid := &lxcPID{}
	if err := json.Unmarshal([]byte(handleID), pid); err != nil {
		return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
	}

	var container *lxc.Container
	containers := lxc.Containers(pid.LxcPath)
	for _, c := range containers {
		if c.Name() == pid.ContainerName {
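			// taking the address of the loop variable is safe here only
			// because of the immediate break (pre-Go 1.22 semantics reuse
			// c across iterations)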
			container = &c
			break
		}
	}

	if container == nil {
		return nil, fmt.Errorf("container %v not found", pid.ContainerName)
	}

	handle := lxcDriverHandle{
		container:      container,
		initPid:        container.InitPid(),
		lxcPath:        pid.LxcPath,
		logger:         d.logger,
		killTimeout:    pid.KillTimeout,
		maxKillTimeout: d.DriverContext.config.MaxKillTimeout,
		totalCpuStats:  stats.NewCpuStats(),
		userCpuStats:   stats.NewCpuStats(),
		systemCpuStats: stats.NewCpuStats(),
		waitCh:         make(chan *dstructs.WaitResult, 1),
		doneCh:         make(chan bool, 1),
	}
	go handle.run()

	return &handle, nil
}
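The handle ID round-trips through JSON into an lxcPID value; a plausible partial sketch of that struct, inferred from the three fields read above (the KillTimeout type is an assumption), not the actual Nomad definition:

// Partial sketch of the state encoded in the driver handle; only the
// fields this snippet reads are listed.
type lxcPID struct {
	ContainerName string
	LxcPath       string
	KillTimeout   time.Duration
}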
Example #8
// getAllPids returns the pids of all the processes spun up by the executor. We
// use the libcontainer apis to get the pids when the user is using cgroup
// isolation and we scan the entire process table if the user is not using any
// isolation
func (e *UniversalExecutor) getAllPids() ([]*nomadPid, error) {
	if e.command.ResourceLimits {
		manager := getCgroupManager(e.groups, e.cgPaths)
		pids, err := manager.GetAllPids()
		if err != nil {
			return nil, err
		}
		np := make([]*nomadPid, len(pids))
		for idx, pid := range pids {
			np[idx] = &nomadPid{pid, stats.NewCpuStats()}
		}
		return np, nil
	}
	allProcesses, err := ps.Processes()
	if err != nil {
		return nil, err
	}
	return e.scanPids(os.Getpid(), allProcesses)
}
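This reads like an earlier revision of getAllPids than Example #6: nomadPid still has only two fields (hence the positional literal) and the result is a slice rather than a pid-keyed map, so later lookups by pid are linear instead of constant time.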
Example #9
File: lxc.go Project: zanella/nomad
// Start starts the LXC Driver
func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
	var driverConfig LxcDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}
	lxcPath := lxc.DefaultConfigPath()
	if path := d.config.Read("driver.lxc.path"); path != "" {
		lxcPath = path
	}

	containerName := fmt.Sprintf("%s-%s", task.Name, ctx.AllocID)
	c, err := lxc.NewContainer(containerName, lxcPath)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize container: %v", err)
	}

	var verbosity lxc.Verbosity
	switch driverConfig.Verbosity {
	case "verbose":
		verbosity = lxc.Verbose
	case "", "quiet":
		verbosity = lxc.Quiet
	default:
		return nil, fmt.Errorf("lxc driver config 'verbosity' can only be either quiet or verbose")
	}
	c.SetVerbosity(verbosity)

	var logLevel lxc.LogLevel
	switch driverConfig.LogLevel {
	case "trace":
		logLevel = lxc.TRACE
	case "debug":
		logLevel = lxc.DEBUG
	case "info":
		logLevel = lxc.INFO
	case "warn":
		logLevel = lxc.WARN
	case "", "error":
		logLevel = lxc.ERROR
	default:
		return nil, fmt.Errorf("lxc driver config 'log_level' can only be trace, debug, info, warn or error")
	}
	c.SetLogLevel(logLevel)

	logFile := filepath.Join(ctx.AllocDir.LogDir(), fmt.Sprintf("%v-lxc.log", task.Name))
	c.SetLogFile(logFile)

	options := lxc.TemplateOptions{
		Template:             driverConfig.Template,
		Distro:               driverConfig.Distro,
		Release:              driverConfig.Release,
		Arch:                 driverConfig.Arch,
		FlushCache:           driverConfig.FlushCache,
		DisableGPGValidation: driverConfig.DisableGPGValidation,
		ExtraArgs:            driverConfig.TemplateArgs,
	}

	if err := c.Create(options); err != nil {
		return nil, fmt.Errorf("unable to create container: %v", err)
	}

	// Set the network type to none
	if err := c.SetConfigItem("lxc.network.type", "none"); err != nil {
		return nil, fmt.Errorf("error setting network type configuration: %v", err)
	}

	// Bind mount the shared alloc dir and task local dir in the container
	taskDir, ok := ctx.AllocDir.TaskDirs[task.Name]
	if !ok {
		return nil, fmt.Errorf("failed to find task local directory: %v", task.Name)
	}
	secretdir, err := ctx.AllocDir.GetSecretDir(task.Name)
	if err != nil {
		return nil, fmt.Errorf("faild getting secret path for task: %v", err)
	}
	taskLocalDir := filepath.Join(taskDir, allocdir.TaskLocal)
	mounts := []string{
		fmt.Sprintf("%s local none rw,bind,create=dir", taskLocalDir),
		fmt.Sprintf("%s alloc none rw,bind,create=dir", ctx.AllocDir.SharedDir),
		fmt.Sprintf("%s secret none rw,bind,create=dir", secretdir),
	}
	for _, mnt := range mounts {
		if err := c.SetConfigItem("lxc.mount.entry", mnt); err != nil {
			return nil, fmt.Errorf("error setting bind mount %q error: %v", mnt, err)
		}
	}

	// Start the container
	if err := c.Start(); err != nil {
		return nil, fmt.Errorf("unable to start container: %v", err)
	}

	// Set the resource limits
	if err := c.SetMemoryLimit(lxc.ByteSize(task.Resources.MemoryMB) * lxc.MB); err != nil {
		return nil, fmt.Errorf("unable to set memory limits: %v", err)
	}
	if err := c.SetCgroupItem("cpu.shares", strconv.Itoa(task.Resources.CPU)); err != nil {
		return nil, fmt.Errorf("unable to set cpu shares: %v", err)
	}

	handle := lxcDriverHandle{
		container:      c,
		initPid:        c.InitPid(),
		lxcPath:        lxcPath,
		logger:         d.logger,
		killTimeout:    GetKillTimeout(task.KillTimeout, d.DriverContext.config.MaxKillTimeout),
		maxKillTimeout: d.DriverContext.config.MaxKillTimeout,
		totalCpuStats:  stats.NewCpuStats(),
		userCpuStats:   stats.NewCpuStats(),
		systemCpuStats: stats.NewCpuStats(),
		waitCh:         make(chan *dstructs.WaitResult, 1),
		doneCh:         make(chan bool, 1),
	}

	go handle.run()

	return &handle, nil
}
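The WeakDecode call at the top implies an LxcDriverConfig struct roughly along these lines. The sketch lists only the fields this snippet reads; the mapstructure tags are assumptions, with only 'verbosity' and 'log_level' corroborated by the error messages:

// Partial sketch of the decoded task configuration; the tags marked below
// are guesses except where the error strings above name the key.
type LxcDriverConfig struct {
	Template             string
	Distro               string
	Release              string
	Arch                 string
	FlushCache           bool     `mapstructure:"flush_cache"`   // assumed tag
	DisableGPGValidation bool     `mapstructure:"disable_gpg"`   // assumed tag
	TemplateArgs         []string `mapstructure:"template_args"` // assumed tag
	Verbosity            string
	LogLevel             string `mapstructure:"log_level"`
}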