Example #1
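// TestAllocRunner_Update starts a long-running task, updates the allocation's
// desired status to stop, and verifies the client reports it dead in time.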
func TestAllocRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()
	start := time.Now()

	// Update the alloc definition
	newAlloc := new(structs.Allocation)
	*newAlloc = *ar.alloc
	newAlloc.DesiredStatus = structs.AllocDesiredStatusStop
	ar.Update(newAlloc)

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}
		last := upd.Allocs[upd.Count-1]
		return last.ClientStatus == structs.AllocClientStatusDead, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	if time.Since(start) > 8*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
Example #2
func TestAllocRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	_, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()

	// Update the alloc definition
	newAlloc := new(structs.Allocation)
	*newAlloc = *ar.alloc
	newAlloc.Name = "FOO"
	newAlloc.AllocModifyIndex++
	ar.Update(newAlloc)

	// Check the alloc runner stores the updated allocation.
	testutil.WaitForResult(func() (bool, error) {
		return ar.Alloc().Name == "FOO", nil
	}, func(err error) {
		t.Fatalf("err: %v %#v", err, ar.Alloc())
	})
}
Example #3
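// TestExecDriver_Fingerprint verifies the exec driver fingerprints a node
// with a cgroup mountpoint and sets the driver.exec attribute.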
func TestExecDriver_Fingerprint(t *testing.T) {
	ctestutils.ExecCompatible(t)
	task := &structs.Task{
		Name:      "foo",
		Resources: structs.DefaultResources(),
	}
	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)
	node := &structs.Node{
		Attributes: map[string]string{
			"unique.cgroup.mountpoint": "/sys/fs/cgroup",
		},
	}
	apply, err := d.Fingerprint(&config.Config{}, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !apply {
		t.Fatalf("should apply")
	}
	if node.Attributes["driver.exec"] == "" {
		t.Fatalf("missing driver")
	}
}
Example #4
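// TestExecDriver_StartOpen_Wait starts a task and verifies the returned
// handle can be re-opened by its ID.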
func TestExecDriver_StartOpen_Wait(t *testing.T) {
	t.Parallel()
	ctestutils.ExecCompatible(t)
	task := &structs.Task{
		Name: "sleep",
		Config: map[string]interface{}{
			"command": "/bin/sleep",
			"args":    []string{"5"},
		},
		Resources: basicResources,
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}

	// Attempt to open
	handle2, err := d.Open(execCtx, handle.ID())
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle2 == nil {
		t.Fatalf("missing handle")
	}
}
Example #5
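// TestAllocRunner_Destroy destroys a running allocation and verifies the
// client status transitions to dead promptly.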
func TestAllocRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	start := time.Now()

	// Begin the tear down
	go func() {
		time.Sleep(100 * time.Millisecond)
		ar.Destroy()
	}()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}
		last := upd.Allocs[upd.Count-1]
		return last.ClientStatus == structs.AllocClientStatusDead, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	if time.Since(start) > 8*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
Example #6
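// TestTaskRunner_Destroy destroys a running task and verifies the expected
// status updates are emitted.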
func TestTaskRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner()
	defer tr.ctx.AllocDir.Destroy()

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = "10"
	go tr.Run()

	// Begin the tear down
	go func() {
		time.Sleep(100 * time.Millisecond)
		tr.Destroy()
	}()

	select {
	case <-tr.WaitCh():
	case <-time.After(2 * time.Second):
		t.Fatalf("timeout")
	}

	if upd.Count != 2 {
		t.Fatalf("should have 2 updates: %#v", upd)
	}
	if upd.Status[0] != structs.AllocClientStatusRunning {
		t.Fatalf("bad: %#v", upd.Status)
	}
	if upd.Status[1] != structs.AllocClientStatusDead {
		t.Fatalf("bad: %#v", upd.Status)
	}
	if !strings.Contains(upd.Description[1], "task failed") {
		t.Fatalf("bad: %#v", upd.Description)
	}
}
Example #7
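// TestExecDriver_KillUserPid_OnPluginReconnectFailure kills the executor
// plugin process and verifies that reattaching fails and the user process
// is cleaned up rather than left running.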
func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
	t.Parallel()
	ctestutils.ExecCompatible(t)
	task := &structs.Task{
		Name: "sleep",
		Config: map[string]interface{}{
			"command": "/bin/sleep",
			"args":    []string{"1000000"},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()

	id := &execId{}
	if err := json.Unmarshal([]byte(handle.ID()), id); err != nil {
		t.Fatalf("Failed to parse handle '%s': %v", handle.ID(), err)
	}
	pluginPid := id.PluginConfig.Pid
	proc, err := os.FindProcess(pluginPid)
	if err != nil {
		t.Fatalf("can't find plugin pid: %v", pluginPid)
	}
	if err := proc.Kill(); err != nil {
		t.Fatalf("can't kill plugin pid: %v", err)
	}

	// Attempt to open
	handle2, err := d.Open(execCtx, handle.ID())
	if err == nil {
		t.Fatalf("expected error")
	}
	if handle2 != nil {
		handle2.Kill()
		t.Fatalf("expected handle2 to be nil")
	}
	// Check that the user process is no longer running. Signal 0 probes for
	// process existence without delivering a signal.
	userProc, err := os.FindProcess(id.UserPid)
	if err != nil {
		t.Fatalf("can't find user pid %v: %v", id.UserPid, err)
	}
	if err := userProc.Signal(syscall.Signal(0)); err == nil {
		t.Fatalf("expected user process to die")
	}
}
Example #8
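// TestTaskRunner_SimpleRun runs a task to completion and verifies the
// received, started, and terminated events arrive in order.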
func TestTaskRunner_SimpleRun(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner(false)
	tr.MarkReceived()
	go tr.Run()
	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
	defer tr.ctx.AllocDir.Destroy()

	select {
	case <-tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(upd.events) != 3 {
		t.Fatalf("should have 3 events: %#v", upd.events)
	}

	if upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", upd.state, structs.TaskStateDead)
	}

	if upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", upd.events[0].Type, structs.TaskReceived)
	}

	if upd.events[1].Type != structs.TaskStarted {
		t.Fatalf("Second Event was %v; want %v", upd.events[1].Type, structs.TaskStarted)
	}

	if upd.events[2].Type != structs.TaskTerminated {
		t.Fatalf("Third Event was %v; want %v", upd.events[2].Type, structs.TaskTerminated)
	}
}
Example #9
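// TestTaskRunner_SaveRestoreState snapshots a running task runner and
// verifies a new runner can restore the state and reattach to the handle.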
func TestTaskRunner_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner(false)

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = []string{"10"}
	go tr.Run()
	defer tr.Destroy()

	// Snapshot state
	time.Sleep(1 * time.Second)
	if err := tr.SaveState(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new task runner
	consulClient, _ := NewConsulService(&consulServiceConfig{tr.logger, "127.0.0.1:8500", "", "", false, false, &structs.Node{}})
	tr2 := NewTaskRunner(tr.logger, tr.config, upd.Update,
		tr.ctx, tr.alloc, &structs.Task{Name: tr.task.Name}, tr.state, tr.restartTracker,
		consulClient)
	if err := tr2.RestoreState(); err != nil {
		t.Fatalf("err: %v", err)
	}
	go tr2.Run()
	defer tr2.Destroy()

	// Wait for the restored runner to reopen the task handle
	time.Sleep(1 * time.Second)
	if tr2.handle == nil {
		t.Fatalf("RestoreState() didn't open handle")
	}
}
Example #10
func TestTaskRunner_SimpleRun(t *testing.T) {
	ctestutil.ExecCompatible(t)
	_, tr := testTaskRunner(false)
	go tr.Run()
	defer tr.Destroy()
	defer tr.ctx.AllocDir.Destroy()

	select {
	case <-tr.WaitCh():
	case <-time.After(2 * time.Second):
		t.Fatalf("timeout")
	}

	if len(tr.state.Events) != 2 {
		t.Fatalf("should have 2 events: %#v", tr.state.Events)
	}

	if tr.state.State != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", tr.state.State, structs.TaskStateDead)
	}

	if tr.state.Events[0].Type != structs.TaskStarted {
		t.Fatalf("First Event was %v; want %v", tr.state.Events[0].Type, structs.TaskStarted)
	}

	if tr.state.Events[1].Type != structs.TaskTerminated {
		t.Fatalf("Second Event was %v; want %v", tr.state.Events[1].Type, structs.TaskTerminated)
	}
}
Example #11
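// TestExecDriverUser verifies that starting a task as a nonexistent user
// fails with an error naming that user.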
func TestExecDriverUser(t *testing.T) {
	ctestutils.ExecCompatible(t)
	task := &structs.Task{
		Name: "sleep",
		User: "alice", // must match the "user alice" assertion below
		Config: map[string]interface{}{
			"command": "/bin/sleep",
			"args":    []string{"100"},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources:   basicResources,
		KillTimeout: 10 * time.Second,
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err == nil {
		handle.Kill()
		t.Fatalf("Should've failed")
	}
	msg := "user alice"
	if !strings.Contains(err.Error(), msg) {
		t.Fatalf("Expecting '%v' in '%v'", msg, err)
	}
}
Example #12
func TestTaskRunner_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner()

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = "10"
	go tr.Run()
	defer tr.Destroy()

	// Snapshot state
	time.Sleep(1 * time.Second)
	if err := tr.SaveState(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new task runner
	tr2 := NewTaskRunner(tr.logger, tr.config, upd.Update,
		tr.ctx, tr.allocID, &structs.Task{Name: tr.task.Name}, tr.restartTracker)
	if err := tr2.RestoreState(); err != nil {
		t.Fatalf("err: %v", err)
	}
	go tr2.Run()
	defer tr2.Destroy()

	// Wait for the restored runner to reopen the task handle
	time.Sleep(1 * time.Second)
	if tr2.handle == nil {
		t.Fatalf("RestoreState() didn't open handle")
	}
}
Example #13
func TestTaskRunner_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner(false)

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = []string{"10"}
	go tr.Run()
	defer tr.Destroy()

	// Snapshot state
	time.Sleep(2 * time.Second)
	if err := tr.SaveState(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new task runner
	tr2 := NewTaskRunner(tr.logger, tr.config, upd.Update,
		tr.ctx, tr.alloc, &structs.Task{Name: tr.task.Name})
	if err := tr2.RestoreState(); err != nil {
		t.Fatalf("err: %v", err)
	}
	go tr2.Run()
	defer tr2.Destroy()

	// Wait for the restored runner to reopen the task handle
	testutil.WaitForResult(func() (bool, error) {
		return tr2.handle != nil, fmt.Errorf("RestoreState() didn't open handle")
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Example #14
func TestTaskRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, tr := testTaskRunner(true)
	tr.MarkReceived()
	defer tr.ctx.AllocDir.Destroy()

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = []string{"1000"}
	go tr.Run()

	testutil.WaitForResult(func() (bool, error) {
		if l := len(upd.events); l != 2 {
			return false, fmt.Errorf("Expect two events; got %v", l)
		}

		if upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", upd.events[0].Type, structs.TaskReceived)
		}

		if upd.events[1].Type != structs.TaskStarted {
			return false, fmt.Errorf("Second Event was %v; want %v", upd.events[1].Type, structs.TaskStarted)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Make sure we are collecting a few stats
	time.Sleep(2 * time.Second)
	stats := tr.StatsReporter().ResourceUsage()
	if len(stats) == 0 {
		t.Fatalf("expected task runner to have some stats")
	}

	// Begin the tear down
	tr.Destroy()

	select {
	case <-tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(upd.events) != 3 {
		t.Fatalf("should have 3 updates: %#v", upd.events)
	}

	if upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", upd.state, structs.TaskStateDead)
	}

	if upd.events[2].Type != structs.TaskKilled {
		t.Fatalf("Third Event was %v; want %v", upd.events[2].Type, structs.TaskKilled)
	}
}
Example #15
// TestAllocRunner_RetryArtifact ensures that if one task in a task group is
// retrying an artifact fetch, the other tasks in the group are still able to
// proceed.
func TestAllocRunner_RetryArtifact(t *testing.T) {
	ctestutil.ExecCompatible(t)

	alloc := mock.Alloc()
	alloc.Job.Type = structs.JobTypeBatch
	alloc.Job.TaskGroups[0].RestartPolicy.Mode = structs.RestartPolicyModeFail
	alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 1
	alloc.Job.TaskGroups[0].RestartPolicy.Delay = time.Duration(4*testutil.TestMultiplier()) * time.Second

	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}

	// Create a new task with a bad artifact
	badtask := alloc.Job.TaskGroups[0].Tasks[0].Copy()
	badtask.Name = "bad"
	badtask.Artifacts = []*structs.TaskArtifact{
		{GetterSource: "http://127.1.1.111:12315/foo/bar/baz"},
	}

	alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)
	upd, ar := testAllocRunnerFromAlloc(alloc, true)
	go ar.Run()
	defer ar.Destroy()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count < 6 {
			return false, fmt.Errorf("Not enough updates")
		}
		last := upd.Allocs[upd.Count-1]

		// web task should have completed successfully while bad task
		// retries artifact fetching
		webstate := last.TaskStates["web"]
		if webstate.State != structs.TaskStateDead {
			return false, fmt.Errorf("expected web to be dead but found %q", last.TaskStates["web"].State)
		}
		if !webstate.Successful() {
			return false, fmt.Errorf("expected web to have exited successfully")
		}

		// bad task should have failed
		badstate := last.TaskStates["bad"]
		if badstate.State != structs.TaskStateDead {
			return false, fmt.Errorf("expected bad to be dead but found %q", badstate.State)
		}
		if !badstate.Failed {
			return false, fmt.Errorf("expected bad to have failed: %#v", badstate.Events)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Example #16
func TestExecutor_IsolationAndConstraints(t *testing.T) {
	testutil.ExecCompatible(t)

	execCmd := ExecCommand{Cmd: "/bin/ls", Args: []string{"-F", "/", "/etc/"}}
	ctx := testExecutorContextWithChroot(t)
	defer ctx.AllocDir.Destroy()

	execCmd.FSIsolation = true
	execCmd.ResourceLimits = true
	execCmd.User = cstructs.DefaultUnpriviledgedUser

	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
	ps, err := executor.LaunchCmd(&execCmd, ctx)
	if err != nil {
		t.Fatalf("error in launching command: %v", err)
	}
	if ps.Pid == 0 {
		t.Fatalf("expected process to start and have non zero pid")
	}
	_, err = executor.Wait()
	if err != nil {
		t.Fatalf("error in waiting for command: %v", err)
	}

	// Check if the resource constraints were applied
	memLimits := filepath.Join(ps.IsolationConfig.CgroupPaths["memory"], "memory.limit_in_bytes")
	data, err := ioutil.ReadFile(memLimits)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	expectedMemLim := strconv.Itoa(ctx.Task.Resources.MemoryMB * 1024 * 1024)
	actualMemLim := strings.TrimSpace(string(data))
	if actualMemLim != expectedMemLim {
		t.Fatalf("actual mem limit: %v, expected: %v", string(data), expectedMemLim)
	}

	if err := executor.Exit(); err != nil {
		t.Fatalf("error: %v", err)
	}

	// Check if Nomad has actually removed the cgroups
	if _, err := os.Stat(memLimits); err == nil {
		t.Fatalf("file %v hasn't been removed", memLimits)
	}

	expected := "/:\nalloc/\nbin/\ndev/\netc/\nlib/\nlib64/\nlocal/\nproc/\ntmp/\nusr/\n\n/etc/:\nld.so.cache\nld.so.conf\nld.so.conf.d/"
	file := filepath.Join(ctx.AllocDir.LogDir(), "web.stdout.0")
	output, err := ioutil.ReadFile(file)
	if err != nil {
		t.Fatalf("Couldn't read file %v", file)
	}

	act := strings.TrimSpace(string(output))
	if act != expected {
		t.Fatalf("Command output incorrect: want %v; got %v", expected, act)
	}
}
Example #17
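// TestClient_SaveRestoreState runs an allocation, shuts the client down to
// persist its state, and verifies a new client restores the running alloc.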
func TestClient_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Create mock allocations
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}

	state := s1.State()
	err := state.UpsertAllocs(100,
		[]*structs.Allocation{alloc1})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		return ar != nil && ar.Alloc().ClientStatus == structs.AllocClientStatusRunning, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shutdown the client, saves state
	err = c1.Shutdown()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	c2, err := NewClient(c1.config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	c2.allocLock.RLock()
	ar := c2.allocs[alloc1.ID]
	c2.allocLock.RUnlock()
	if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
		t.Fatalf("bad: %#v", ar.Alloc())
	}
}
Example #18
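// TestTaskRunner_Update updates a running task's definition and restart
// policy and verifies the runner adopts a fresh copy of the task.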
func TestTaskRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	_, tr := testTaskRunner(false)

	// Change command to ensure we run for a bit
	tr.task.Config["command"] = "/bin/sleep"
	tr.task.Config["args"] = []string{"100"}
	go tr.Run()
	defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
	defer tr.ctx.AllocDir.Destroy()

	// Update the task definition
	updateAlloc := tr.alloc.Copy()

	// Update the restart policy
	newTG := updateAlloc.Job.TaskGroups[0]
	newMode := "foo"
	newTG.RestartPolicy.Mode = newMode

	newTask := updateAlloc.Job.TaskGroups[0].Tasks[0]
	newTask.Driver = "foobar"

	// Update the kill timeout
	testutil.WaitForResult(func() (bool, error) {
		if tr.handle == nil {
			return false, fmt.Errorf("task not started")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	oldHandle := tr.handle.ID()
	newTask.KillTimeout = time.Hour

	tr.Update(updateAlloc)

	// Wait for update to take place
	testutil.WaitForResult(func() (bool, error) {
		if tr.task == newTask {
			return false, fmt.Errorf("We copied the pointer! This would be very bad")
		}
		if tr.task.Driver != newTask.Driver {
			return false, fmt.Errorf("Task not copied")
		}
		if tr.restartTracker.policy.Mode != newMode {
			return false, fmt.Errorf("restart policy not updated")
		}
		if tr.handle.ID() == oldHandle {
			return false, fmt.Errorf("handle not updated")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Example #19
func TestClient_Drivers(t *testing.T) {
	ctestutil.ExecCompatible(t)
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		t.Fatalf("missing exec driver")
	}
}
Example #20
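// TestExecDriver_Start_Wait_AllocDir verifies a task can write into the
// shared alloc directory and that the written contents match.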
func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
	t.Parallel()
	ctestutils.ExecCompatible(t)

	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"
	task := &structs.Task{
		Name: "sleep",
		Config: map[string]interface{}{
			"command": "/bin/bash",
			"args": []string{
				"-c",
				fmt.Sprintf(`sleep 1; echo -n %s > ${%s}/%s`, string(exp), env.AllocDir, file),
			},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}

	// Task should terminate quickly
	select {
	case res := <-handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// Check that data was written to the shared alloc directory.
	outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file)
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}
Example #21
func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
	ctestutil.ExecCompatible(t)
	c := testClient(t, func(c *config.Config) {
		c.Options["driver.whitelist"] = "foo,bar,baz"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] != "" {
		t.Fatalf("found exec driver")
	}
}
Example #22
func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
	ctestutil.ExecCompatible(t)
	c := testClient(t, func(c *config.Config) {
		c.Options["fingerprint.whitelist"] = "arch,consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] != "" {
		t.Fatalf("found cpu fingerprint module")
	}
}
Example #23
func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
	if !javaLocated() {
		t.Skip("Java not found; skipping")
	}

	ctestutils.ExecCompatible(t)
	task := &structs.Task{
		Name: "demo-app",
		Config: map[string]string{
			"jar_source": "https://dl.dropboxusercontent.com/u/47675/jar_thing/demoapp.jar",
			// "jar_source": "https://s3-us-west-2.amazonaws.com/java-jar-thing/demoapp.jar",
			// "args": "-d64",
		},
		Resources: basicResources,
	}

	driverCtx := testDriverContext(task.Name)
	ctx := testDriverExecContext(task, driverCtx)
	defer ctx.AllocDir.Destroy()
	d := NewJavaDriver(driverCtx)

	handle, err := d.Start(ctx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		err := handle.Kill()
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}()

	// Task should terminate quickly
	select {
	case err := <-handle.WaitCh():
		if err == nil {
			t.Fatal("should err")
		}
	case <-time.After(2 * time.Second):
		t.Fatalf("timeout")
	}

	// need to kill long lived process
	err = handle.Kill()
	if err != nil {
		t.Fatalf("Error: %s", err)
	}
}
Example #24
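// TestExecutorLinux_Open starts a command under one executor, re-opens it by
// ID from a second executor, and verifies the command's output.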
func TestExecutorLinux_Open(t *testing.T) {
	ctestutil.ExecCompatible(t)
	task, alloc := mockAllocDir(t)
	defer alloc.Destroy()

	taskDir, ok := alloc.TaskDirs[task]
	if !ok {
		t.Fatalf("No task directory found for task %v", task)
	}

	expected := "hello world"
	file := filepath.Join(allocdir.TaskLocal, "output.txt")
	absFilePath := filepath.Join(taskDir, file)
	cmd := fmt.Sprintf("%v \"%v\" > %v", "/bin/sleep 1 ; echo -n", expected, file)
	e := Command("/bin/bash", "-c", cmd)

	if err := e.Limit(constraint); err != nil {
		t.Fatalf("Limit() failed: %v", err)
	}

	if err := e.ConfigureTaskDir(task, alloc); err != nil {
		t.Fatalf("ConfigureTaskDir(%v, %v) failed: %v", task, alloc, err)
	}

	if err := e.Start(); err != nil {
		t.Fatalf("Start() failed: %v", err)
	}

	id, err := e.ID()
	if err != nil {
		t.Fatalf("ID() failed: %v", err)
	}

	e2 := NewExecutor()
	if err := e2.Open(id); err != nil {
		t.Fatalf("Open(%v) failed: %v", id, err)
	}

	if err := e2.Wait(); err != nil {
		t.Fatalf("Wait() failed: %v", err)
	}

	output, err := ioutil.ReadFile(absFilePath)
	if err != nil {
		t.Fatalf("Couldn't read file %v", absFilePath)
	}

	act := string(output)
	if act != expected {
		t.Fatalf("Command output incorrect: want %v; got %v", expected, act)
	}
}
Example #25
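// TestAllocRunner_TaskFailed_KillTG verifies that when one task in a group
// fails, its sibling task is killed and the allocation is marked failed.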
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Create two tasks in the task group
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"1000"}

	task2 := ar.alloc.Job.TaskGroups[0].Tasks[0].Copy()
	task2.Name = "task 2"
	task2.Config = map[string]interface{}{"command": "invalidBinaryToFail"}
	ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
	ar.alloc.TaskResources[task2.Name] = task2.Resources
	//t.Logf("%#v", ar.alloc.Job.TaskGroups[0])
	go ar.Run()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusFailed {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusFailed)
		}

		// Task One should be killed
		state1 := last.TaskStates[task.Name]
		if state1.State != structs.TaskStateDead {
			return false, fmt.Errorf("got state %v; want %v", state1.State, structs.TaskStateDead)
		}
		if len(state1.Events) < 3 {
			return false, fmt.Errorf("Unexpected number of events")
		}
		if lastE := state1.Events[len(state1.Events)-3]; lastE.Type != structs.TaskSiblingFailed {
			return false, fmt.Errorf("got last event %v; want %v", lastE.Type, structs.TaskSiblingFailed)
		}

		// Task Two should be failed
		state2 := last.TaskStates[task2.Name]
		if state2.State != structs.TaskStateDead {
			return false, fmt.Errorf("got state %v; want %v", state2.State, structs.TaskStateDead)
		}
		if !state2.Failed {
			return false, fmt.Errorf("task2 should have failed")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Example #26
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
	ctestutil.ExecCompatible(t)
	c := testClient(t, func(c *config.Config) {
		// Weird spacing to test trimming. Whitelist all modules except cpu.
		c.Options["fingerprint.whitelist"] = "  arch, consul,env_aws,env_gce,host,memory,network,storage,foo,bar	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
}
Example #27
func TestClient_Drivers_InWhitelist(t *testing.T) {
	ctestutil.ExecCompatible(t)
	c := testClient(t, func(c *config.Config) {
		// Weird spacing to test trimming
		c.Options["driver.whitelist"] = "   exec ,  foo	"
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		t.Fatalf("missing exec driver")
	}
}
Example #28
func TestAllocRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	start := time.Now()

	// Begin the tear down
	go func() {
		time.Sleep(100 * time.Millisecond)
		ar.Destroy()
	}()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusDead {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusDead)
		}

		// Check the state was cleaned
		if _, err := os.Stat(ar.stateFilePath()); err == nil {
			return false, fmt.Errorf("state file still exists: %v", ar.stateFilePath())
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		// Check the alloc directory was cleaned
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
			return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	if time.Since(start) > 15*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
Example #29
func TestAllocRunner_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()

	// Snapshot state
	testutil.WaitForResult(func() (bool, error) {
		return len(ar.tasks) == 1, nil
	}, func(err error) {
		t.Fatalf("task never started: %v", err)
	})

	err := ar.SaveState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new alloc runner
	consulClient, err := NewConsulService(&consulServiceConfig{ar.logger, "127.0.0.1:8500", "", "", false, false, &structs.Node{}})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	ar2 := NewAllocRunner(ar.logger, ar.config, upd.Update,
		&structs.Allocation{ID: ar.alloc.ID}, consulClient)
	err = ar2.RestoreState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	go ar2.Run()

	// Destroy and wait
	ar2.Destroy()
	start := time.Now()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}
		last := upd.Allocs[upd.Count-1]
		return last.ClientStatus != structs.AllocClientStatusPending, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	if time.Since(start) > time.Duration(testutil.TestMultiplier()*15)*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
Example #30
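// TestExecDriver_Start_Artifact_expanded downloads an artifact into the task
// directory, runs it via /bin/bash, and verifies the task exits cleanly.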
func TestExecDriver_Start_Artifact_expanded(t *testing.T) {
	ctestutils.ExecCompatible(t)
	var file string
	switch runtime.GOOS {
	case "darwin":
		file = "hi_darwin_amd64"
	default:
		file = "hi_linux_amd64"
	}

	task := &structs.Task{
		Name: "sleep",
		Config: map[string]string{
			"artifact_source": fmt.Sprintf("https://dl.dropboxusercontent.com/u/47675/jar_thing/%s", file),
			"command":         "/bin/bash",
			"args":            fmt.Sprintf("-c '/bin/sleep 1 && %s'", filepath.Join("$NOMAD_TASK_DIR", file)),
		},
		Resources: basicResources,
	}

	driverCtx := testDriverContext(task.Name)
	ctx := testDriverExecContext(task, driverCtx)
	defer ctx.AllocDir.Destroy()
	d := NewExecDriver(driverCtx)

	handle, err := d.Start(ctx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}

	// Update should be a no-op
	err = handle.Update(task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Task should terminate quickly
	select {
	case err := <-handle.WaitCh():
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}
}