// This test ensures that the scheduler doesn't increment the queued allocation
// count for a task group when allocations can't be created on currently
// available nodes because of constraint mismatches.
func TestSystemSched_Queued_With_Constraints(t *testing.T) {
	h := NewHarness(t)

	// Register a node
	node := mock.Node()
	node.Attributes["kernel.name"] = "darwin"
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a system job which can't be placed on the node
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the queued allocation count is zero
	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
	}
}
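// The tests in this file lean on a small noErr helper to fail fast on
// unexpected errors. A minimal sketch, assuming the real helper is defined
// elsewhere in this test package (the exact definition may differ):
//
//	func noErr(t *testing.T, err error) {
//		if err != nil {
//			t.Fatalf("err: %v", err)
//		}
//	}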
func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
	h := NewHarness(t)

	// Create NO nodes
	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan as this should be a no-op.
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_JobRegister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Check the available nodes
	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_NodeDrain_Down(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	node.Status = structs.NodeStatusDown
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation with the system scheduler; this is a system job
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted non terminal allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure that the allocation is marked as lost
	var lostAllocs []string
	for _, alloc := range plan.NodeUpdate[node.ID] {
		lostAllocs = append(lostAllocs, alloc.ID)
	}
	expected := []string{alloc.ID}

	if !reflect.DeepEqual(lostAllocs, expected) {
		t.Fatalf("expected: %v, actual: %v", expected, lostAllocs)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
	h := NewHarness(t)

	// Register two nodes with two different classes
	node := mock.Node()
	node.NodeClass = "green"
	node.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	node2 := mock.Node()
	node2.NodeClass = "blue"
	node2.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job with two task groups, each constrained to a node class
	job := mock.SystemJob()
	tg1 := job.TaskGroups[0]
	tg1.Constraints = append(tg1.Constraints,
		&structs.Constraint{
			LTarget: "${node.class}",
			RTarget: "green",
			Operand: "==",
		})

	tg2 := tg1.Copy()
	tg2.Name = "web2"
	tg2.Constraints[0].RTarget = "blue"
	job.TaskGroups = append(job.TaskGroups, tg2)
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	qa := h.Evals[0].QueuedAllocations
	if qa["web"] != 0 || qa["web2"] != 0 {
		t.Fatalf("bad queued allocations %#v", qa)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_ExhaustResources(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a service job which consumes most of the system resources
	svcJob := mock.Job()
	svcJob.TaskGroups[0].Count = 1
	svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600
	noErr(t, h.State.UpsertJob(h.NextIndex(), svcJob))

	// Create a mock evaluation to register the service job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    svcJob.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       svcJob.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a system job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the system job
	eval1 := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that we have one allocation queued from the system job eval
	queued := h.Evals[1].QueuedAllocations["web"]
	if queued != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queued)
	}
}
func TestSystemSched_RetryLimit(t *testing.T) {
	h := NewHarness(t)
	h.Planner = &RejectPlan{h}

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure multiple plans
	if len(h.Plans) == 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure no allocations placed
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	// Should hit the retry limit
	h.AssertEvalStatus(t, structs.EvalStatusFailed)
}
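// RejectPlan, used above, is assumed to be a harness Planner stub that
// refuses every submitted plan so the scheduler keeps retrying until it hits
// its limit. A sketch of the assumed shape (the field and method are inferred
// from the &RejectPlan{h} usage and may differ from the real harness code):
//
//	type RejectPlan struct {
//		Harness *Harness
//	}
//
//	func (r *RejectPlan) SubmitPlan(*structs.Plan) (*structs.PlanResult, State, error) {
//		result := new(structs.PlanResult)
//		result.RefreshIndex = r.Harness.NextIndex()
//		return result, r.Harness.State, nil
//	}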
func TestSystemSched_NodeUpdate(t *testing.T) {
	h := NewHarness(t)

	// Register a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the queued allocation count is zero
	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Register a system job.
	job := mock.SystemJob()
	state := s1.fsm.State()
	if err := state.UpsertJob(1, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the register request
	node := mock.Node()
	node.Status = structs.NodeStatusInit
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	update := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusReady,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for an eval caused by the system job.
	if len(resp2.EvalIDs) != 1 {
		t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
	}
	evalID := resp2.EvalIDs[0]
	eval, err := state.EvalByID(evalID)
	if err != nil {
		t.Fatalf("could not get eval %v", evalID)
	}
	if eval.Type != "system" {
		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for the node in the FSM
	out, err := state.NodeByID(node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}
}
func TestSystemSched_JobDeregister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the job from all nodes.
	for _, node := range nodes {
		if len(plan.NodeUpdate[node.ID]) != 1 {
			t.Fatalf("bad: %#v", plan)
		}
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure no remaining allocations
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_JobModify_Rolling(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID
	job2.Update = structs.UpdateStrategy{
		Stagger:     30 * time.Second,
		MaxParallel: 5,
	}

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted only MaxParallel
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure a follow up eval was created
	eval = h.Evals[0]
	if eval.NextEval == "" {
		t.Fatalf("missing next eval")
	}

	// Check for create
	if len(h.CreateEvals) == 0 {
		t.Fatalf("missing created eval")
	}
	create := h.CreateEvals[0]
	if eval.NextEval != create.ID {
		t.Fatalf("ID mismatch")
	}
	if create.PreviousEval != eval.ID {
		t.Fatalf("missing previous eval")
	}
	if create.TriggeredBy != structs.EvalTriggerRollingUpdate {
		t.Fatalf("bad: %#v", create)
	}
}
func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	job.TaskGroups[0].LocalDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the plan allocated
	plan := h.Plans[0]
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Get an allocation and mark it as failed
	alloc := planned[4].Copy()
	alloc.ClientStatus = structs.AllocClientStatusFailed
	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to handle the update
	eval = &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
	}
	h1 := NewHarnessWithState(t, h.State)
	if err := h1.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have created only one new allocation
	plan = h1.Plans[0]
	var newPlanned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		newPlanned = append(newPlanned, allocList...)
	}
	if len(newPlanned) != 1 {
		t.Fatalf("bad plan: %#v", plan)
	}

	// Ensure that the new allocation was placed on the same node as the older
	// one
	if newPlanned[0].NodeID != alloc.NodeID || newPlanned[0].PreviousAllocation != alloc.ID {
		t.Fatalf("expected: %#v, actual: %#v", alloc, newPlanned[0])
	}
}
func TestSystemSched_JobRegister_LocalDiskConstraint(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job
	job := mock.SystemJob()
	job.TaskGroups[0].LocalDisk.DiskMB = 60 * 1024
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a second job with the same disk ask; once the first job is
	// placed, it no longer fits on the node
	job1 := mock.SystemJob()
	job1.TaskGroups[0].LocalDisk.DiskMB = 60 * 1024
	noErr(t, h.State.UpsertJob(h.NextIndex(), job1))

	// Create a mock evaluation to register the first job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Create a new harness to test the scheduling result for the second job
	h1 := NewHarnessWithState(t, h.State)

	// Create a mock evaluation to register the second job
	eval1 := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
	}

	// Process the evaluation
	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err = h1.State.AllocsByJob(job1.ID)
	noErr(t, err)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}
}
func TestSystemSched_ChainedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	var allocIDs []string
	for _, allocList := range h.Plans[0].NodeAllocation {
		for _, alloc := range allocList {
			allocIDs = append(allocIDs, alloc.ID)
		}
	}
	sort.Strings(allocIDs)

	// Create a new harness to invoke the scheduler again
	h1 := NewHarnessWithState(t, h.State)
	job1 := mock.SystemJob()
	job1.ID = job.ID
	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))

	// Insert two more nodes
	for i := 0; i < 2; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a mock evaluation to update the job
	eval1 := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
	}

	// Process the evaluation
	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	plan := h1.Plans[0]

	// Collect all the chained allocation ids and the new allocations which
	// don't have any chained allocations
	var prevAllocs []string
	var newAllocs []string
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			if alloc.PreviousAllocation == "" {
				newAllocs = append(newAllocs, alloc.ID)
				continue
			}
			prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
		}
	}
	sort.Strings(prevAllocs)

	// Ensure that the new allocations have their corresponding original
	// allocation IDs
	if !reflect.DeepEqual(prevAllocs, allocIDs) {
		t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs))
	}

	// Ensure that the two new allocations don't have any chained allocations
	if len(newAllocs) != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
	}
}
func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Inject a fake allocation
	alloc := mock.Alloc()
	state := s1.fsm.State()
	if err := state.UpsertAllocs(1, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Inject a fake system job.
	job := mock.SystemJob()
	if err := state.UpsertJob(1, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create some evaluations
	ids, index, err := s1.endpoints.Node.createNodeEvals(alloc.NodeID, 1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index == 0 {
		t.Fatalf("bad: %d", index)
	}
	if len(ids) != 2 {
		t.Fatalf("bad: %s", ids)
	}

	// Lookup the evaluations
	evalByType := make(map[string]*structs.Evaluation, 2)
	for _, id := range ids {
		eval, err := state.EvalByID(id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if eval == nil {
			t.Fatalf("expected eval")
		}
		if old, ok := evalByType[eval.Type]; ok {
			t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
		}
		evalByType[eval.Type] = eval
	}

	if len(evalByType) != 2 {
		t.Fatalf("Expected a service and system job; got %#v", evalByType)
	}

	// Ensure the evals are correct.
	for schedType, eval := range evalByType {
		expPriority := alloc.Job.Priority
		expJobID := alloc.JobID
		if schedType == "system" {
			expPriority = job.Priority
			expJobID = job.ID
		}

		if eval.CreateIndex != index {
			t.Fatalf("CreateIndex mis-match on type %v: %#v", schedType, eval)
		}
		if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
			t.Fatalf("TriggeredBy incorrect on type %v: %#v", schedType, eval)
		}
		if eval.NodeID != alloc.NodeID {
			t.Fatalf("NodeID incorrect on type %v: %#v", schedType, eval)
		}
		if eval.NodeModifyIndex != 1 {
			t.Fatalf("NodeModifyIndex incorrect on type %v: %#v", schedType, eval)
		}
		if eval.Status != structs.EvalStatusPending {
			t.Fatalf("Status incorrect on type %v: %#v", schedType, eval)
		}
		if eval.Priority != expPriority {
			t.Fatalf("Priority incorrect on type %v: %#v", schedType, eval)
		}
		if eval.JobID != expJobID {
			t.Fatalf("JobID incorrect on type %v: %#v", schedType, eval)
		}
	}
}
func TestSystemSched_JobRegister_AddNode(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a new node.
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan had no node updates
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Log(len(update))
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated on the new node
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure it allocated on the right node
	if _, ok := plan.NodeAllocation[node.ID]; !ok {
		t.Fatalf("allocated on wrong node: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 11 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_JobModify(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a few terminal status allocations; these should be ignored
	var terminal []*structs.Allocation
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = "my-job.web[0]"
		alloc.DesiredStatus = structs.AllocDesiredStatusFailed
		terminal = append(terminal, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestStateStore_JobsByScheduler(t *testing.T) {
	state := testStateStore(t)
	var serviceJobs []*structs.Job
	var sysJobs []*structs.Job

	for i := 0; i < 10; i++ {
		job := mock.Job()
		serviceJobs = append(serviceJobs, job)

		err := state.UpsertJob(1000+uint64(i), job)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	for i := 0; i < 10; i++ {
		job := mock.SystemJob()
		sysJobs = append(sysJobs, job)

		err := state.UpsertJob(2000+uint64(i), job)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	iter, err := state.JobsByScheduler("service")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var outService []*structs.Job
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		outService = append(outService, raw.(*structs.Job))
	}

	iter, err = state.JobsByScheduler("system")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var outSystem []*structs.Job
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		outSystem = append(outSystem, raw.(*structs.Job))
	}

	sort.Sort(JobIDSort(serviceJobs))
	sort.Sort(JobIDSort(sysJobs))
	sort.Sort(JobIDSort(outService))
	sort.Sort(JobIDSort(outSystem))

	if !reflect.DeepEqual(serviceJobs, outService) {
		t.Fatalf("bad: %#v %#v", serviceJobs, outService)
	}

	if !reflect.DeepEqual(sysJobs, outSystem) {
		t.Fatalf("bad: %#v %#v", sysJobs, outSystem)
	}
}
func TestSystemSched_JobModify_InPlace(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan did not evict any allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the existing allocs
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}
	for _, p := range planned {
		if p.Job != job2 {
			t.Fatalf("should update job")
		}
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Verify the network did not change
	rp := structs.Port{Label: "main", Value: 5000}
	for _, alloc := range out {
		for _, resources := range alloc.TaskResources {
			if resources.Networks[0].ReservedPorts[0] != rp {
				t.Fatalf("bad: %#v", alloc)
			}
		}
	}
}
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four alive nodes and include the tainted nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on draining node.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
		// Mark as lost on a dead node
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have a terminal alloc on the pipe node
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the fourth alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place two allocs: one on the empty node and one replacing the
	// terminal alloc
	if l := len(place); l != 2 {
		t.Fatalf("bad: %#v", l)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}
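// The diff asserted above is assumed to be a simple bucket-per-outcome
// result, with one allocTuple slice per scheduling decision. A sketch
// inferred from the fields this test reads (the real definitions live next
// to diffSystemAllocs and may differ):
//
//	type allocTuple struct {
//		Name      string
//		TaskGroup *structs.TaskGroup
//		Alloc     *structs.Allocation
//	}
//
//	type diffResult struct {
//		place, update, migrate, stop, ignore, lost []allocTuple
//	}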
func TestSystemSched_NodeDrain(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the alloc
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the allocation.
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeUpdate {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Log(len(planned))
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure the allocation is stopped
	if planned[0].DesiredStatus != structs.AllocDesiredStatusStop {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestSystemSched_JobRegister_Annotate(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:           structs.GenerateUUID(),
		Priority:     job.Priority,
		TriggeredBy:  structs.EvalTriggerJobRegister,
		JobID:        job.ID,
		AnnotatePlan: true,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Check the available nodes
	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure the plan had annotations.
	if plan.Annotations == nil {
		t.Fatalf("expected annotations")
	}

	desiredTGs := plan.Annotations.DesiredTGUpdates
	if l := len(desiredTGs); l != 1 {
		t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1)
	}

	desiredChanges, ok := desiredTGs["web"]
	if !ok {
		t.Fatalf("expected task group web to have desired changes")
	}

	expected := &structs.DesiredUpdates{Place: 10}
	if !reflect.DeepEqual(desiredChanges, expected) {
		t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
	}
}
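// structs.DesiredUpdates, compared against above, is assumed to be a plain
// per-task-group counter struct along these lines (a sketch; only Place is
// exercised by this test, the remaining fields are assumptions):
//
//	type DesiredUpdates struct {
//		Ignore            uint64
//		Place             uint64
//		Migrate           uint64
//		Stop              uint64
//		InPlaceUpdate     uint64
//		DestructiveUpdate uint64
//	}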
func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
	h := NewHarness(t)

	// Register two nodes with two different classes
	node := mock.Node()
	node.NodeClass = "green"
	node.Drain = true
	node.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	node2 := mock.Node()
	node2.NodeClass = "blue"
	node2.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job with two task groups, each constrained to a node class
	job := mock.SystemJob()
	tg1 := job.TaskGroups[0]
	tg1.Constraints = append(tg1.Constraints,
		&structs.Constraint{
			LTarget: "${node.class}",
			RTarget: "green",
			Operand: "==",
		})

	tg2 := tg1.Copy()
	tg2.Name = "web2"
	tg2.Constraints[0].RTarget = "blue"
	job.TaskGroups = append(job.TaskGroups, tg2)
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create an allocation on each node
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.TaskGroup = "web"

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.JobID = job.ID
	alloc2.NodeID = node2.ID
	alloc2.Name = "my-job.web2[0]"
	alloc2.TaskGroup = "web2"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the alloc on the drained node
	planned := plan.NodeUpdate[node.ID]
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan didn't place
	if len(plan.NodeAllocation) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the allocation is stopped
	if planned[0].DesiredStatus != structs.AllocDesiredStatusStop {
		t.Fatalf("bad: %#v", planned[0])
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}