Example #1
func TestAllocRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()
	start := time.Now()

	// Update the alloc definition
	newAlloc := new(structs.Allocation)
	*newAlloc = *ar.alloc
	newAlloc.DesiredStatus = structs.AllocDesiredStatusStop
	ar.Update(newAlloc)

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}
		last := upd.Allocs[upd.Count-1]
		return last.ClientStatus == structs.AllocClientStatusDead, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	if time.Since(start) > 8*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
Example #2
// setAlloc is used to update the allocation of the runner
// we preserve the existing client status and description
func (r *AllocRunner) setAlloc(alloc *structs.Allocation) {
	if r.alloc != nil {
		alloc.ClientStatus = r.alloc.ClientStatus
		alloc.ClientDescription = r.alloc.ClientDescription
	}
	r.alloc = alloc
}
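All of these examples rely on the same idiom: allocate a fresh structs.Allocation, shallow-copy the existing one into it, and mutate only the copy. Below is a minimal, self-contained sketch of that copy-then-mutate pattern; the Alloc type is a simplified stand-in, not Nomad's structs.Allocation.

package main

import "fmt"

// Alloc is a stand-in for structs.Allocation, used only to demonstrate the
// copy-then-mutate idiom from the examples above and below.
type Alloc struct {
	ID            string
	DesiredStatus string
	TaskStates    map[string]string // reference field, still shared after a shallow copy
}

func main() {
	orig := &Alloc{
		ID:            "alloc-1",
		DesiredStatus: "run",
		TaskStates:    map[string]string{"web": "running"},
	}

	// Mirrors `newAlloc := new(structs.Allocation); *newAlloc = *orig`.
	update := new(Alloc)
	*update = *orig
	update.DesiredStatus = "stop" // only the copy's scalar field changes

	fmt.Println(orig.DesiredStatus, update.DesiredStatus) // run stop

	// Caveat: maps, slices, and pointers are shared by a shallow copy.
	update.TaskStates["web"] = "dead"
	fmt.Println(orig.TaskStates["web"]) // dead
}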
Example #3
func TestClient_UpdateAllocStatus(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID

	state := s1.State()
	state.UpsertAllocs(100, []*structs.Allocation{alloc})

	newAlloc := new(structs.Allocation)
	*newAlloc = *alloc
	newAlloc.ClientStatus = structs.AllocClientStatusRunning

	err := c1.updateAllocStatus(newAlloc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
		t.Fatalf("bad: %#v", out)
	}
}
Example #4
func TestClientEndpoint_UpdateAlloc(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Inject a fake allocation
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Attempt update
	clientAlloc := new(structs.Allocation)
	*clientAlloc = *alloc
	clientAlloc.ClientStatus = structs.AllocClientStatusFailed

	// Update the alloc
	update := &structs.AllocUpdateRequest{
		Alloc:        []*structs.Allocation{clientAlloc},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeAllocsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("Bad index: %d", resp2.Index)
	}
	if diff := time.Since(start); diff < batchUpdateInterval {
		t.Fatalf("too fast: %v", diff)
	}

	// Lookup the alloc
	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.ClientStatus != structs.AllocClientStatusFailed {
		t.Fatalf("Bad: %#v", out)
	}
}
Example #5
func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
	alloc := mock.Alloc()
	state := testStateStore(t)
	node := mock.Node()
	alloc.NodeID = node.ID
	node.Resources = alloc.Resources
	node.Reserved = nil
	node.Status = structs.NodeStatusDown
	state.UpsertNode(1000, node)
	state.UpsertAllocs(1001, []*structs.Allocation{alloc})
	snap, _ := state.Snapshot()

	allocEvict := new(structs.Allocation)
	*allocEvict = *alloc
	allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
	plan := &structs.Plan{
		NodeUpdate: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{allocEvict},
		},
	}

	fit, err := evaluateNodePlan(snap, plan, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("bad")
	}
}
Example #6
func TestAllocRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	_, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()

	// Update the alloc definition
	newAlloc := new(structs.Allocation)
	*newAlloc = *ar.alloc
	newAlloc.Name = "FOO"
	newAlloc.AllocModifyIndex++
	ar.Update(newAlloc)

	// Check that the alloc runner stores the updated allocation.
	testutil.WaitForResult(func() (bool, error) {
		return ar.Alloc().Name == "FOO", nil
	}, func(err error) {
		t.Fatalf("err: %v %#v", err, ar.Alloc())
	})
}
Example #7
func TestFSM_UpdateAllocFromClient(t *testing.T) {
	fsm := testFSM(t)
	state := fsm.State()

	alloc := mock.Alloc()
	state.UpsertAllocs(1, []*structs.Allocation{alloc})

	clientAlloc := new(structs.Allocation)
	*clientAlloc = *alloc
	clientAlloc.ClientStatus = structs.AllocClientStatusFailed

	req := structs.AllocUpdateRequest{
		Alloc: []*structs.Allocation{clientAlloc},
	}
	buf, err := structs.Encode(structs.AllocClientUpdateRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are registered
	out, err := fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	clientAlloc.CreateIndex = out.CreateIndex
	clientAlloc.ModifyIndex = out.ModifyIndex
	if !reflect.DeepEqual(clientAlloc, out) {
		t.Fatalf("bad: %#v %#v", clientAlloc, out)
	}
}
Example #8
func TestStateStore_EvictAlloc_Alloc(t *testing.T) {
	state := testStateStore(t)
	alloc := mock.Alloc()

	err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	alloc2 := new(structs.Allocation)
	*alloc2 = *alloc
	alloc2.DesiredStatus = structs.AllocDesiredStatusEvict
	err = state.UpsertAllocs(1001, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}
}
Example #9
func TestFSM_UpsertAllocs(t *testing.T) {
	fsm := testFSM(t)

	alloc := mock.Alloc()
	fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
	req := structs.AllocUpdateRequest{
		Alloc: []*structs.Allocation{alloc},
	}
	buf, err := structs.Encode(structs.AllocUpdateRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are registered
	out, err := fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	alloc.CreateIndex = out.CreateIndex
	alloc.ModifyIndex = out.ModifyIndex
	alloc.AllocModifyIndex = out.AllocModifyIndex
	if !reflect.DeepEqual(alloc, out) {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	evictAlloc := new(structs.Allocation)
	*evictAlloc = *alloc
	evictAlloc.DesiredStatus = structs.AllocDesiredStatusEvict
	req2 := structs.AllocUpdateRequest{
		Alloc: []*structs.Allocation{evictAlloc},
	}
	buf, err = structs.Encode(structs.AllocUpdateRequestType, req2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp = fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are evicted
	out, err = fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("alloc found!")
	}
}
Example #10
func TestClientEndpoint_BatchUpdate(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Inject a fake allocation
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Attempt update
	clientAlloc := new(structs.Allocation)
	*clientAlloc = *alloc
	clientAlloc.ClientStatus = structs.AllocClientStatusFailed

	// Call to do the batch update
	bf := NewBatchFuture()
	endpoint := s1.endpoints.Node
	endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc})
	if err := bf.Wait(); err != nil {
		t.Fatalf("err: %v", err)
	}
	if bf.Index() == 0 {
		t.Fatalf("Bad index: %d", bf.Index())
	}

	// Lookup the alloc
	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.ClientStatus != structs.AllocClientStatusFailed {
		t.Fatalf("Bad: %#v", out)
	}
}
Example #11
// UpdateAllocFromClient is used to update an allocation based on input
// from a client. While the schedulers are the authority on the allocation for
// most things, some updates are authoritative from the client. Specifically,
// the desired state comes from the schedulers, while the actual state comes
// from clients.
func (s *StateStore) UpdateAllocFromClient(index uint64, alloc *structs.Allocation) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	watcher := watch.NewItems()
	watcher.Add(watch.Item{Table: "allocs"})
	watcher.Add(watch.Item{Alloc: alloc.ID})
	watcher.Add(watch.Item{AllocEval: alloc.EvalID})
	watcher.Add(watch.Item{AllocJob: alloc.JobID})
	watcher.Add(watch.Item{AllocNode: alloc.NodeID})

	// Look for existing alloc
	existing, err := txn.First("allocs", "id", alloc.ID)
	if err != nil {
		return fmt.Errorf("alloc lookup failed: %v", err)
	}

	// Nothing to do if this does not exist
	if existing == nil {
		return nil
	}
	exist := existing.(*structs.Allocation)

	// Copy everything from the existing allocation
	copyAlloc := new(structs.Allocation)
	*copyAlloc = *exist

	// Pull in anything the client is the authority on
	copyAlloc.ClientStatus = alloc.ClientStatus
	copyAlloc.ClientDescription = alloc.ClientDescription
	copyAlloc.TaskStates = alloc.TaskStates

	// Update the modify index
	copyAlloc.ModifyIndex = index

	// Update the allocation
	if err := txn.Insert("allocs", copyAlloc); err != nil {
		return fmt.Errorf("alloc insert failed: %v", err)
	}

	// Update the indexes
	if err := txn.Insert("index", &IndexEntry{"allocs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	txn.Defer(func() { s.watch.notify(watcher) })
	txn.Commit()
	return nil
}
Example #12
// AllocRestore is used to restore an allocation
func (r *StateRestore) AllocRestore(alloc *structs.Allocation) error {
	r.items.Add(watch.Item{Table: "allocs"})
	r.items.Add(watch.Item{Alloc: alloc.ID})
	r.items.Add(watch.Item{AllocEval: alloc.EvalID})
	r.items.Add(watch.Item{AllocJob: alloc.JobID})
	r.items.Add(watch.Item{AllocNode: alloc.NodeID})

	// Set the shared resources if it's not present
	// COMPAT 0.4.1 -> 0.5
	if alloc.SharedResources == nil {
		alloc.SharedResources = &structs.Resources{
			DiskMB: alloc.Resources.DiskMB,
		}
	}

	// Create the LocalDisk if it's nil by adding up DiskMB from task resources.
	if alloc.Job != nil {
		r.addLocalDiskToTaskGroups(alloc.Job)
	}

	if err := r.txn.Insert("allocs", alloc); err != nil {
		return fmt.Errorf("alloc insert failed: %v", err)
	}
	return nil
}
Example #13
func TestStateStore_UpdateAllocFromClient(t *testing.T) {
	state := testStateStore(t)
	alloc := mock.Alloc()

	notify := setupNotifyTest(
		state,
		watch.Item{Table: "allocs"},
		watch.Item{Alloc: alloc.ID},
		watch.Item{AllocEval: alloc.EvalID},
		watch.Item{AllocJob: alloc.JobID},
		watch.Item{AllocNode: alloc.NodeID})

	err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	update := new(structs.Allocation)
	*update = *alloc
	update.ClientStatus = structs.AllocClientStatusFailed

	err = state.UpdateAllocFromClient(1001, update)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	update.ModifyIndex = 1001
	if !reflect.DeepEqual(update, out) {
		t.Fatalf("bad: %#v %#v", update, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}

	notify.verify(t)
}
Example #14
func TestDiffAllocs(t *testing.T) {
	t.Parallel()
	alloc1 := mock.Alloc() // Ignore
	alloc2 := mock.Alloc() // Update
	alloc2u := new(structs.Allocation)
	*alloc2u = *alloc2
	alloc2u.AllocModifyIndex += 1
	alloc3 := mock.Alloc() // Remove
	alloc4 := mock.Alloc() // Add

	exist := []*structs.Allocation{
		alloc1,
		alloc2,
		alloc3,
	}
	update := &allocUpdates{
		pulled: map[string]*structs.Allocation{
			alloc2u.ID: alloc2u,
			alloc4.ID:  alloc4,
		},
		filtered: map[string]struct{}{
			alloc1.ID: struct{}{},
		},
	}

	result := diffAllocs(exist, update)

	if len(result.ignore) != 1 || result.ignore[0] != alloc1 {
		t.Fatalf("Bad: %#v", result.ignore)
	}
	if len(result.added) != 1 || result.added[0] != alloc4 {
		t.Fatalf("Bad: %#v", result.added)
	}
	if len(result.removed) != 1 || result.removed[0] != alloc3 {
		t.Fatalf("Bad: %#v", result.removed)
	}
	if len(result.updated) != 1 {
		t.Fatalf("Bad: %#v", result.updated)
	}
	if result.updated[0].exist != alloc2 || result.updated[0].updated != alloc2u {
		t.Fatalf("Bad: %#v", result.updated)
	}
}
Example #15
// UpdateAllocFromClient is used to update an allocation based on input
// from a client. While the schedulers are the authority on the allocation for
// most things, some updates are authoritative from the client. Specifically,
// the desired state comes from the schedulers, while the actual state comes
// from clients.
func (s *StateStore) UpdateAllocFromClient(index uint64, alloc *structs.Allocation) error {
	txn := s.db.Txn(true)
	defer txn.Abort()

	// Look for existing alloc
	existing, err := txn.First("allocs", "id", alloc.ID)
	if err != nil {
		return fmt.Errorf("alloc lookup failed: %v", err)
	}

	// Nothing to do if this does not exist
	if existing == nil {
		return nil
	}
	exist := existing.(*structs.Allocation)

	// Copy everything from the existing allocation
	copyAlloc := new(structs.Allocation)
	*copyAlloc = *exist

	// Pull in anything the client is the authority on
	copyAlloc.ClientStatus = alloc.ClientStatus
	copyAlloc.ClientDescription = alloc.ClientDescription

	// Update the modify index
	copyAlloc.ModifyIndex = index

	// Update the allocation
	if err := txn.Insert("allocs", copyAlloc); err != nil {
		return fmt.Errorf("alloc insert failed: %v", err)
	}

	// Update the indexes
	if err := txn.Insert("index", &IndexEntry{"allocs", index}); err != nil {
		return fmt.Errorf("index update failed: %v", err)
	}

	nodes := map[string]struct{}{alloc.NodeID: struct{}{}}
	txn.Defer(func() { s.watch.notifyAllocs(nodes) })
	txn.Commit()
	return nil
}
Example #16
func TestDiffAllocs(t *testing.T) {
	alloc1 := mock.Alloc() // Ignore
	alloc2 := mock.Alloc() // Update
	alloc2u := new(structs.Allocation)
	*alloc2u = *alloc2
	alloc2u.ModifyIndex += 1
	alloc3 := mock.Alloc() // Remove
	alloc4 := mock.Alloc() // Add

	exist := []*structs.Allocation{
		alloc1,
		alloc2,
		alloc3,
	}
	updated := []*structs.Allocation{
		alloc1,
		alloc2u,
		alloc4,
	}

	result := diffAllocs(exist, updated)

	if len(result.ignore) != 1 || result.ignore[0] != alloc1 {
		t.Fatalf("Bad: %#v", result.ignore)
	}
	if len(result.added) != 1 || result.added[0] != alloc4 {
		t.Fatalf("Bad: %#v", result.added)
	}
	if len(result.removed) != 1 || result.removed[0] != alloc3 {
		t.Fatalf("Bad: %#v", result.removed)
	}
	if len(result.updated) != 1 {
		t.Fatalf("Bad: %#v", result.updated)
	}
	if result.updated[0].exist != alloc2 || result.updated[0].updated != alloc2u {
		t.Fatalf("Bad: %#v", result.updated)
	}
}
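The two TestDiffAllocs variants exercise a diff that classifies existing allocations against an updated set as ignored, updated, removed, or added, keyed by ID and a modify index. A self-contained sketch of that classification, using stand-in types rather than Nomad's structs and the index-comparison flavor shown directly above:

package main

import "fmt"

type alloc struct {
	ID          string
	ModifyIndex uint64
}

type diffResult struct {
	ignore, added, removed []*alloc
	updated                [][2]*alloc // pairs of (existing, updated)
}

// diffAllocs partitions exist against updated: same ID with a newer index
// means update, same index means ignore, a missing ID means remove, and a
// new ID means add.
func diffAllocs(exist, updated []*alloc) diffResult {
	byID := make(map[string]*alloc, len(updated))
	for _, u := range updated {
		byID[u.ID] = u
	}
	var res diffResult
	seen := make(map[string]bool, len(exist))
	for _, e := range exist {
		seen[e.ID] = true
		switch u, ok := byID[e.ID]; {
		case !ok:
			res.removed = append(res.removed, e)
		case u.ModifyIndex > e.ModifyIndex:
			res.updated = append(res.updated, [2]*alloc{e, u})
		default:
			res.ignore = append(res.ignore, e)
		}
	}
	for _, u := range updated {
		if !seen[u.ID] {
			res.added = append(res.added, u)
		}
	}
	return res
}

func main() {
	a1 := &alloc{ID: "1", ModifyIndex: 10} // ignore
	a2 := &alloc{ID: "2", ModifyIndex: 10} // update
	a2u := &alloc{ID: "2", ModifyIndex: 11}
	a3 := &alloc{ID: "3", ModifyIndex: 10} // remove
	a4 := &alloc{ID: "4", ModifyIndex: 10} // add

	res := diffAllocs([]*alloc{a1, a2, a3}, []*alloc{a1, a2u, a4})
	fmt.Println(len(res.ignore), len(res.updated), len(res.removed), len(res.added)) // 1 1 1 1
}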
Example #17
func TestStateStore_UpdateAllocFromClient(t *testing.T) {
	state := testStateStore(t)

	alloc := mock.Alloc()
	err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	update := new(structs.Allocation)
	*update = *alloc
	update.ClientStatus = structs.AllocClientStatusFailed

	err = state.UpdateAllocFromClient(1001, update)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	update.ModifyIndex = 1001
	if !reflect.DeepEqual(update, out) {
		t.Fatalf("bad: %#v %#v", update, out)
	}

	index, err := state.Index("allocs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}
}
Example #18
// nestedUpdateAllocFromClient is used to nest an update of an allocation with client status
func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, watcher watch.Items, index uint64, alloc *structs.Allocation) error {
	// Look for existing alloc
	existing, err := txn.First("allocs", "id", alloc.ID)
	if err != nil {
		return fmt.Errorf("alloc lookup failed: %v", err)
	}

	// Nothing to do if this does not exist
	if existing == nil {
		return nil
	}
	exist := existing.(*structs.Allocation)
	// Trigger the watcher
	watcher.Add(watch.Item{Alloc: alloc.ID})
	watcher.Add(watch.Item{AllocEval: exist.EvalID})
	watcher.Add(watch.Item{AllocJob: exist.JobID})
	watcher.Add(watch.Item{AllocNode: exist.NodeID})

	// Copy everything from the existing allocation
	copyAlloc := new(structs.Allocation)
	*copyAlloc = *exist

	// Pull in anything the client is the authority on
	copyAlloc.ClientStatus = alloc.ClientStatus
	copyAlloc.ClientDescription = alloc.ClientDescription
	copyAlloc.TaskStates = alloc.TaskStates

	// Update the modify index
	copyAlloc.ModifyIndex = index

	if err := s.updateSummaryWithAlloc(index, copyAlloc, exist, watcher, txn); err != nil {
		return fmt.Errorf("error updating job summary: %v", err)
	}

	// Update the allocation
	if err := txn.Insert("allocs", copyAlloc); err != nil {
		return fmt.Errorf("alloc insert failed: %v", err)
	}

	// Set the job's status
	forceStatus := ""
	if !copyAlloc.TerminalStatus() {
		forceStatus = structs.JobStatusRunning
	}
	jobs := map[string]string{exist.JobID: forceStatus}
	if err := s.setJobStatuses(index, watcher, txn, jobs, false); err != nil {
		return fmt.Errorf("setting job status failed: %v", err)
	}
	return nil
}
Example #19
// updateAllocStatus is used to update the status of an allocation
func (c *Client) updateAllocStatus(alloc *structs.Allocation) {
	// Only send the fields that are updatable by the client.
	stripped := new(structs.Allocation)
	stripped.ID = alloc.ID
	stripped.NodeID = c.Node().ID
	stripped.TaskStates = alloc.TaskStates
	stripped.ClientStatus = alloc.ClientStatus
	stripped.ClientDescription = alloc.ClientDescription
	select {
	case c.allocUpdates <- stripped:
	case <-c.shutdownCh:
	}
}
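updateAllocStatus forwards only the fields the client is authoritative for, and the select against shutdownCh keeps the send from blocking forever once the client is stopping. A self-contained sketch of that send-or-abort pattern (the names here are illustrative, not Nomad's):

package main

import "fmt"

// send tries to hand msg to the updates channel, but gives up if shutdown is
// closed, mirroring the select in updateAllocStatus.
func send(updates chan<- string, shutdown <-chan struct{}, msg string) {
	select {
	case updates <- msg:
		fmt.Println("sent", msg)
	case <-shutdown:
		fmt.Println("dropped", msg, "(shutting down)")
	}
}

func main() {
	updates := make(chan string, 1)
	shutdown := make(chan struct{})

	send(updates, shutdown, "alloc-1") // buffer has room: sent

	close(shutdown)
	send(updates, shutdown, "alloc-2") // buffer full, shutdown closed: dropped
}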
Example #20
func TestPlanApply_applyPlan(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Register node
	node := mock.Node()
	testRegisterNode(t, s1, node)

	// Register alloc
	alloc := mock.Alloc()
	allocFail := mock.Alloc()
	plan := &structs.PlanResult{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc},
		},
		FailedAllocs: []*structs.Allocation{allocFail},
	}

	// Apply the plan
	index, err := s1.applyPlan(plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}

	// Lookup the allocation
	out, err := s1.fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(allocFail.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}

	// Evict alloc, Register alloc2
	allocEvict := new(structs.Allocation)
	*allocEvict = *alloc
	allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
	alloc2 := mock.Alloc()
	plan = &structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{allocEvict},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc2},
		},
	}

	// Apply the plan
	index, err = s1.applyPlan(plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("should be evicted alloc: %#v", out)
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}
}
Example #21
func TestPlanApply_applyPlan(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Register node
	node := mock.Node()
	testRegisterNode(t, s1, node)

	// Register alloc
	alloc := mock.Alloc()
	allocFail := mock.Alloc()
	plan := &structs.PlanResult{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc},
		},
		FailedAllocs: []*structs.Allocation{allocFail},
	}

	// Snapshot the state
	snap, err := s1.State().Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Apply the plan
	future, err := s1.applyPlan(alloc.Job, plan, snap)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify our optimistic snapshot is updated
	if out, err := snap.AllocByID(alloc.ID); err != nil || out == nil {
		t.Fatalf("bad: %v %v", out, err)
	}

	// Check plan does apply cleanly
	index, err := planWaitFuture(future)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}

	// Lookup the allocation
	out, err := s1.fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(allocFail.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}

	// Evict alloc, Register alloc2
	allocEvict := new(structs.Allocation)
	*allocEvict = *alloc
	allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
	job := allocEvict.Job
	allocEvict.Job = nil
	alloc2 := mock.Alloc()
	alloc2.Job = nil
	plan = &structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{allocEvict},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc2},
		},
	}

	// Snapshot the state
	snap, err = s1.State().Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Apply the plan
	future, err = s1.applyPlan(job, plan, snap)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check that our optimistic view is updated
	if out, _ := snap.AllocByID(allocEvict.ID); out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("bad: %#v", out)
	}

	// Verify plan applies cleanly
	index, err = planWaitFuture(future)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("should be evicted alloc: %#v", out)
	}
	if out.Job == nil {
		t.Fatalf("missing job")
	}

	// Lookup the allocation
	out, err = s1.fsm.State().AllocByID(alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("missing alloc")
	}
	if out.Job == nil {
		t.Fatalf("missing job")
	}
}
Example #22
func TestClient_WatchAllocs(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	err = state.DeleteEval(103, nil, []string{alloc1.ID})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := new(structs.Allocation)
	*alloc2_2 = *alloc2
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// One allocation should get de-registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// One allocation should get updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Example #23
// inplaceUpdate attempts to update allocations in-place where possible.
func inplaceUpdate(ctx Context, eval *structs.Evaluation, job *structs.Job,
	stack Stack, updates []allocTuple) []allocTuple {

	n := len(updates)
	inplace := 0
	for i := 0; i < n; i++ {
		// Get the update
		update := updates[i]

		// Check if the task drivers or config has changed, requires
		// a rolling upgrade since that cannot be done in-place.
		existing := update.Alloc.Job.LookupTaskGroup(update.TaskGroup.Name)
		if tasksUpdated(update.TaskGroup, existing) {
			continue
		}

		// Get the existing node
		node, err := ctx.State().NodeByID(update.Alloc.NodeID)
		if err != nil {
			ctx.Logger().Printf("[ERR] sched: %#v failed to get node '%s': %v",
				eval, update.Alloc.NodeID, err)
			continue
		}
		if node == nil {
			continue
		}

		// Set the existing node as the base set
		stack.SetNodes([]*structs.Node{node})

		// Stage an eviction of the current allocation. This is done so that
		// the current allocation is discounted when checking for feasibility.
		// Otherwise we would be trying to fit the task's current resources and
		// updated resources. After select is called we can remove the evict.
		ctx.Plan().AppendUpdate(update.Alloc, structs.AllocDesiredStatusStop,
			allocInPlace)

		// Attempt to match the task group
		option, size := stack.Select(update.TaskGroup)

		// Pop the allocation
		ctx.Plan().PopUpdate(update.Alloc)

		// Skip if we could not do an in-place update
		if option == nil {
			continue
		}

		// Restore the network offers from the existing allocation.
		// We do not allow network resources (reserved/dynamic ports)
		// to be updated. This is guarded in tasksUpdated, so we can
		// safely restore those here.
		for task, resources := range option.TaskResources {
			existing := update.Alloc.TaskResources[task]
			resources.Networks = existing.Networks
		}

		// Create a shallow copy
		newAlloc := new(structs.Allocation)
		*newAlloc = *update.Alloc

		// Update the allocation
		newAlloc.EvalID = eval.ID
		newAlloc.Job = job
		newAlloc.Resources = size
		newAlloc.TaskResources = option.TaskResources
		newAlloc.Metrics = ctx.Metrics()
		newAlloc.DesiredStatus = structs.AllocDesiredStatusRun
		newAlloc.ClientStatus = structs.AllocClientStatusPending
		ctx.Plan().AppendAlloc(newAlloc)

		// Remove this allocation from the slice
		updates[i] = updates[n-1]
		i--
		n--
		inplace++
	}
	if len(updates) > 0 {
		ctx.Logger().Printf("[DEBUG] sched: %#v: %d in-place updates of %d", eval, inplace, len(updates))
	}
	return updates[:n]
}
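The bookkeeping at the end of inplaceUpdate (updates[i] = updates[n-1]; i--; n--, then updates[:n]) is an in-place, order-destroying slice compaction: each entry handled in place is overwritten by the current last element, and the slice is truncated once at the end. A self-contained sketch of the same idiom on a plain int slice:

package main

import "fmt"

// compact keeps the elements for which keep returns true, reusing the idiom
// from inplaceUpdate: swap the dropped element with the current last one and
// shrink the logical length. The relative order of kept elements may change.
func compact(xs []int, keep func(int) bool) []int {
	n := len(xs)
	for i := 0; i < n; i++ {
		if keep(xs[i]) {
			continue
		}
		xs[i] = xs[n-1]
		i--
		n--
	}
	return xs[:n]
}

func main() {
	out := compact([]int{1, 2, 3, 4, 5}, func(x int) bool { return x%2 == 1 })
	fmt.Println(out) // odd values remain, e.g. [1 5 3]
}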
Example #24
func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
	fsm := testFSM(t)

	alloc := mock.Alloc()
	fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
	job := alloc.Job
	alloc.Job = nil
	req := structs.AllocUpdateRequest{
		Job:   job,
		Alloc: []*structs.Allocation{alloc},
	}
	buf, err := structs.Encode(structs.AllocUpdateRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are registered
	out, err := fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	alloc.CreateIndex = out.CreateIndex
	alloc.ModifyIndex = out.ModifyIndex
	alloc.AllocModifyIndex = out.AllocModifyIndex

	// Job should be re-attached
	alloc.Job = job
	if !reflect.DeepEqual(alloc, out) {
		t.Fatalf("bad: %#v %#v", alloc, out)
	}

	// Ensure that the original job is used
	evictAlloc := new(structs.Allocation)
	*evictAlloc = *alloc
	job = mock.Job()
	job.Priority = 123

	evictAlloc.Job = nil
	evictAlloc.DesiredStatus = structs.AllocDesiredStatusEvict
	req2 := structs.AllocUpdateRequest{
		Job:   job,
		Alloc: []*structs.Allocation{evictAlloc},
	}
	buf, err = structs.Encode(structs.AllocUpdateRequestType, req2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp = fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are evicted
	out, err = fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out.DesiredStatus != structs.AllocDesiredStatusEvict {
		t.Fatalf("alloc found!")
	}
	if out.Job == nil || out.Job.Priority == 123 {
		t.Fatalf("bad job")
	}
}
Example #25
func TestFSM_UpdateAllocFromClient(t *testing.T) {
	fsm := testFSM(t)
	fsm.blockedEvals.SetEnabled(true)
	state := fsm.State()

	node := mock.Node()
	state.UpsertNode(1, node)

	// Mark an eval as blocked.
	eval := mock.Eval()
	eval.ClassEligibility = map[string]bool{node.ComputedClass: true}
	fsm.blockedEvals.Block(eval)

	bStats := fsm.blockedEvals.Stats()
	if bStats.TotalBlocked != 1 {
		t.Fatalf("bad: %#v", bStats)
	}

	// Create an alloc to be updated by the client
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	state.UpsertAllocs(1, []*structs.Allocation{alloc})

	clientAlloc := new(structs.Allocation)
	*clientAlloc = *alloc
	clientAlloc.ClientStatus = structs.AllocClientStatusDead

	req := structs.AllocUpdateRequest{
		Alloc: []*structs.Allocation{clientAlloc},
	}
	buf, err := structs.Encode(structs.AllocClientUpdateRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are registered
	out, err := fsm.State().AllocByID(alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	clientAlloc.CreateIndex = out.CreateIndex
	clientAlloc.ModifyIndex = out.ModifyIndex
	if !reflect.DeepEqual(clientAlloc, out) {
		t.Fatalf("bad: %#v %#v", clientAlloc, out)
	}

	// Verify the eval was unblocked.
	testutil.WaitForResult(func() (bool, error) {
		bStats = fsm.blockedEvals.Stats()
		if bStats.TotalBlocked != 0 {
			return false, fmt.Errorf("bad: %#v %#v", bStats, out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}
Example #26
File: env.go Project: nak3/nomad
// Helper method for setting all fields from an allocation.
func (t *TaskEnvironment) SetAlloc(alloc *structs.Allocation) *TaskEnvironment {
	t.AllocId = alloc.ID
	t.AllocName = alloc.Name
	t.AllocIndex = alloc.Index()
	return t
}
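SetAlloc returns its receiver, the usual fluent-setter style, presumably so it can be chained with other setters on the environment builder. A stand-alone sketch of that pattern with a simplified stand-in type (not Nomad's TaskEnvironment):

package main

import "fmt"

// TaskEnv is a stand-in for TaskEnvironment, used only to illustrate setters
// that return the receiver so calls can be chained.
type TaskEnv struct {
	AllocID   string
	AllocName string
}

func (t *TaskEnv) SetAllocID(id string) *TaskEnv  { t.AllocID = id; return t }
func (t *TaskEnv) SetAllocName(n string) *TaskEnv { t.AllocName = n; return t }

func main() {
	env := new(TaskEnv).SetAllocID("alloc-1").SetAllocName("example.cache[0]")
	fmt.Println(env.AllocID, env.AllocName)
}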