func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()
	state.UpsertNode(1000, node)
	node2 := mock.Node()
	state.UpsertNode(1001, node2)
	snap, _ := state.Snapshot()

	alloc := mock.Alloc()
	alloc2 := mock.Alloc() // Ensure alloc2 does not fit
	alloc2.Resources = node2.Resources
	plan := &structs.Plan{
		AllAtOnce: true, // Require all to make progress
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID:  []*structs.Allocation{alloc},
			node2.ID: []*structs.Allocation{alloc2},
		},
	}

	result, err := evaluatePlan(snap, plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if result == nil {
		t.Fatalf("missing result")
	}

	if len(result.NodeAllocation) != 0 {
		t.Fatalf("should not alloc: %v", result.NodeAllocation)
	}
}
func TestServiceStack_Select_MetricsReset(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx, nodes)
	job := mock.Job()
	stack.SetJob(job)

	n1, _ := stack.Select(job.TaskGroups[0])
	m1 := ctx.Metrics()
	if n1 == nil {
		t.Fatalf("missing node %#v", m1)
	}

	if m1.NodesEvaluated != 2 {
		t.Fatalf("should only be 2")
	}

	n2, _ := stack.Select(job.TaskGroups[0])
	m2 := ctx.Metrics()
	if n2 == nil {
		t.Fatalf("missing node %#v", m2)
	}

	// If we don't reset, this would be 4
	if m2.NodesEvaluated != 2 {
		t.Fatalf("should only be 2")
	}
}
func TestPlanApply_EvalPlan_Partial(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()
	state.UpsertNode(1000, node)
	node2 := mock.Node()
	state.UpsertNode(1001, node2)
	snap, _ := state.Snapshot()

	alloc := mock.Alloc()
	alloc2 := mock.Alloc() // Ensure alloc2 does not fit
	alloc2.Resources = node2.Resources
	plan := &structs.Plan{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID:  []*structs.Allocation{alloc},
			node2.ID: []*structs.Allocation{alloc2},
		},
	}

	result, err := evaluatePlan(snap, plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if result == nil {
		t.Fatalf("missing result")
	}

	if _, ok := result.NodeAllocation[node.ID]; !ok {
		t.Fatalf("should allow alloc")
	}
	if _, ok := result.NodeAllocation[node2.ID]; ok {
		t.Fatalf("should not allow alloc2")
	}
}
func TestServiceStack_Select_ConstraintFilter(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	zero.Attributes["kernel.name"] = "freebsd"

	stack := NewGenericStack(false, ctx, nodes)
	job := mock.Job()
	job.Constraints[0].RTarget = "freebsd"
	stack.SetJob(job)

	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}

	met := ctx.Metrics()
	if met.NodesFiltered != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ClassFiltered["linux-medium-pci"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ConstraintFiltered["$attr.kernel.name = freebsd"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
}
func TestServiceStack_Select_BinPack_Overflow(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	one := nodes[1]
	one.Reserved = one.Resources

	stack := NewGenericStack(false, ctx, nodes)
	job := mock.Job()
	stack.SetJob(job)

	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}

	met := ctx.Metrics()
	if met.NodesExhausted != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ClassExhausted["linux-medium-pci"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if len(met.Scores) != 1 {
		t.Fatalf("bad: %#v", met)
	}
}
func TestServiceStack_Select_DriverFilter(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	zero.Attributes["driver.foo"] = "1"
	if err := zero.ComputeClass(); err != nil {
		t.Fatalf("ComputeClass() failed: %v", err)
	}

	stack := NewGenericStack(false, ctx)
	stack.SetNodes(nodes)
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Driver = "foo"
	stack.SetJob(job)

	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}
}
func TestProposedAllocConstraint_JobDistinctHosts_InfeasibleCount(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}
	job := &structs.Job{
		ID:          "foo",
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
	}

	proposed := NewProposedAllocConstraintIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(job)

	// It should not be able to place 3 tasks with only two nodes.
	out := collectFeasible(proposed)
	if len(out) != 2 {
		t.Fatalf("Bad: %#v", out)
	}
}
func TestServiceStack_Select_PreferringNodes(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx)
	stack.SetNodes(nodes)
	job := mock.Job()
	stack.SetJob(job)

	// Create a preferred node
	preferredNode := mock.Node()
	option, _ := stack.SelectPreferringNodes(job.TaskGroups[0], []*structs.Node{preferredNode})
	if option == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}
	if option.Node.ID != preferredNode.ID {
		t.Fatalf("expected: %v, actual: %v", preferredNode.ID, option.Node.ID)
	}

	// Change the preferred node's kernel to windows and ensure the allocations
	// are placed elsewhere
	preferredNode1 := preferredNode.Copy()
	preferredNode1.Attributes["kernel.name"] = "windows"
	preferredNode1.ComputeClass()
	option, _ = stack.SelectPreferringNodes(job.TaskGroups[0], []*structs.Node{preferredNode1})
	if option == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}
	if option.Node.ID != nodes[0].ID {
		t.Fatalf("expected: %#v, actual: %#v", nodes[0], option.Node)
	}
}
func TestDriverIterator(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	nodes[0].Attributes["driver.foo"] = "1"
	nodes[1].Attributes["driver.foo"] = "0"
	nodes[2].Attributes["driver.foo"] = "true"
	nodes[3].Attributes["driver.foo"] = "False"

	drivers := map[string]struct{}{
		"exec": struct{}{},
		"foo":  struct{}{},
	}
	driver := NewDriverIterator(ctx, static, drivers)

	out := collectFeasible(driver)
	if len(out) != 2 {
		t.Fatalf("missing nodes")
	}

	if out[0] != nodes[0] || out[1] != nodes[2] {
		t.Fatalf("bad: %#v", out)
	}
}
func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
}
func TestConstraintIterator(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	nodes[0].Attributes["kernel.name"] = "freebsd"
	nodes[1].Datacenter = "dc2"

	constraints := []*structs.Constraint{
		&structs.Constraint{
			Operand: "=",
			LTarget: "$node.datacenter",
			RTarget: "dc1",
		},
		&structs.Constraint{
			Operand: "is",
			LTarget: "$attr.kernel.name",
			RTarget: "linux",
		},
	}
	constr := NewConstraintIterator(ctx, static, constraints)

	out := collectFeasible(constr)
	if len(out) != 1 {
		t.Fatalf("missing nodes")
	}
	if out[0] != nodes[2] {
		t.Fatalf("bad: %#v", out)
	}
}
func TestProposedAllocConstraint_JobDistinctHosts_Infeasible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	job := &structs.Job{
		ID:          "foo",
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		&structs.Allocation{
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			ID:        structs.GenerateUUID(),
		},

		// Should be ignored as it is a different job.
		&structs.Allocation{
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			ID:        structs.GenerateUUID(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		&structs.Allocation{
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			ID:        structs.GenerateUUID(),
		},

		// Should be ignored as it is a different job.
		&structs.Allocation{
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			ID:        structs.GenerateUUID(),
		},
	}

	proposed := NewProposedAllocConstraintIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(job)

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}
func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
	h := NewHarness(t)

	// Register two nodes with two different classes
	node := mock.Node()
	node.NodeClass = "green"
	node.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	node2 := mock.Node()
	node2.NodeClass = "blue"
	node2.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a Job with two task groups, each constrained on node class
	job := mock.SystemJob()
	tg1 := job.TaskGroups[0]
	tg1.Constraints = append(tg1.Constraints,
		&structs.Constraint{
			LTarget: "${node.class}",
			RTarget: "green",
			Operand: "==",
		})

	tg2 := tg1.Copy()
	tg2.Name = "web2"
	tg2.Constraints[0].RTarget = "blue"
	job.TaskGroups = append(job.TaskGroups, tg2)
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to trigger the scheduler for the node update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	qa := h.Evals[0].QueuedAllocations
	if qa["web"] != 0 || qa["web2"] != 0 {
		t.Fatalf("bad queued allocations %#v", qa)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
func TestConstraintChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	nodes[0].Attributes["kernel.name"] = "freebsd"
	nodes[1].Datacenter = "dc2"
	nodes[2].NodeClass = "large"

	constraints := []*structs.Constraint{
		&structs.Constraint{
			Operand: "=",
			LTarget: "${node.datacenter}",
			RTarget: "dc1",
		},
		&structs.Constraint{
			Operand: "is",
			LTarget: "${attr.kernel.name}",
			RTarget: "linux",
		},
		&structs.Constraint{
			Operand: "is",
			LTarget: "${node.class}",
			RTarget: "large",
		},
	}
	checker := NewConstraintChecker(ctx, constraints)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: false,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}
func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 3 {
		t.Fatalf("bad: %v", tainted)
	}

	if _, ok := tainted[node1.ID]; ok {
		t.Fatalf("Bad: %v", tainted)
	}
	if _, ok := tainted[node2.ID]; ok {
		t.Fatalf("Bad: %v", tainted)
	}

	if node, ok := tainted[node3.ID]; !ok || node == nil {
		t.Fatalf("Bad: %v", tainted)
	}
	if node, ok := tainted[node4.ID]; !ok || node == nil {
		t.Fatalf("Bad: %v", tainted)
	}

	if node, ok := tainted["12345678-abcd-efab-cdef-123456789abc"]; !ok || node != nil {
		t.Fatalf("Bad: %v", tainted)
	}
}
func TestServiceStack_SetNodes(t *testing.T) {
	_, ctx := testContext(t)
	stack := NewGenericStack(false, ctx, nil)

	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	stack.SetNodes(nodes)

	// Check that our scan limit is updated
	if stack.limit.limit != 3 {
		t.Fatalf("bad limit %d", stack.limit.limit)
	}

	out := collectFeasible(stack.source)
	if !reflect.DeepEqual(out, nodes) {
		t.Fatalf("bad: %#v", out)
	}
}
func TestProposedAllocConstraint_TaskGroupDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a task group with a distinct_hosts constraint.
	taskGroup := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{Operand: structs.ConstraintDistinctHosts},
		},
	}

	// Add a planned alloc to node1.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		&structs.Allocation{
			TaskGroup: taskGroup.Name,
			JobID:     "foo",
		},
	}

	// Add a planned alloc to node2 with the same task group name but a
	// different job.
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		&structs.Allocation{
			TaskGroup: taskGroup.Name,
			JobID:     "bar",
		},
	}

	proposed := NewProposedAllocConstraintIterator(ctx, static)
	proposed.SetTaskGroup(taskGroup)
	proposed.SetJob(&structs.Job{ID: "foo"})

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	// Expect it to skip the first node as there is a previous alloc on it for
	// the same task group.
	if out[0] != nodes[1] {
		t.Fatalf("Bad: %v", out)
	}
}
func TestShuffleNodes(t *testing.T) {
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}
func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
	alloc := mock.Alloc()
	state := testStateStore(t)
	node := mock.Node()
	alloc.NodeID = node.ID
	node.Resources = alloc.Resources
	node.Reserved = nil
	node.Status = structs.NodeStatusDown
	state.UpsertNode(1000, node)
	state.UpsertAllocs(1001, []*structs.Allocation{alloc})
	snap, _ := state.Snapshot()

	allocEvict := new(structs.Allocation)
	*allocEvict = *alloc
	allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
	plan := &structs.Plan{
		NodeUpdate: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{allocEvict},
		},
	}

	fit, err := evaluateNodePlan(snap, plan, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("bad")
	}
}
func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
	alloc := mock.Alloc()
	state := testStateStore(t)
	node := mock.Node()
	alloc.NodeID = node.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusEvict
	node.Resources = alloc.Resources
	node.Reserved = nil
	state.UpsertNode(1000, node)
	state.UpsertAllocs(1001, []*structs.Allocation{alloc})
	snap, _ := state.Snapshot()

	alloc2 := mock.Alloc()
	plan := &structs.Plan{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc2},
		},
	}

	fit, err := evaluateNodePlan(snap, plan, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("bad")
	}
}
func TestPlanApply_EvalPlan_Simple(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()
	state.UpsertNode(1000, node)
	snap, _ := state.Snapshot()

	alloc := mock.Alloc()
	allocFail := mock.Alloc()
	plan := &structs.Plan{
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc},
		},
		FailedAllocs: []*structs.Allocation{allocFail},
	}

	pool := NewEvaluatePool(workerPoolSize, workerPoolBufferSize)
	defer pool.Shutdown()

	result, err := evaluatePlan(pool, snap, plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if result == nil {
		t.Fatalf("missing result")
	}
	if !reflect.DeepEqual(result.FailedAllocs, plan.FailedAllocs) {
		t.Fatalf("missing failed allocs")
	}
}
func TestClientEndpoint_Register(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check for the node in the FSM
	state := s1.fsm.State()
	out, err := state.NodeByID(node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.CreateIndex != resp.Index {
		t.Fatalf("index mis-match")
	}
}
func TestWorker_SubmitPlan(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.EnabledSchedulers = []string{structs.JobTypeService}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Register node
	node := mock.Node()
	testRegisterNode(t, s1, node)

	eval1 := mock.Eval()
	s1.fsm.State().UpsertJobSummary(1000, mock.JobSummary(eval1.JobID))

	// Enqueue the evaluation and dequeue it to obtain the eval token
	s1.evalBroker.Enqueue(eval1)
	evalOut, token, err := s1.evalBroker.Dequeue([]string{eval1.Type}, time.Second)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if evalOut != eval1 {
		t.Fatalf("Bad eval")
	}

	// Create an allocation plan
	alloc := mock.Alloc()
	s1.fsm.State().UpsertJobSummary(1200, mock.JobSummary(alloc.JobID))
	plan := &structs.Plan{
		EvalID: eval1.ID,
		NodeAllocation: map[string][]*structs.Allocation{
			node.ID: []*structs.Allocation{alloc},
		},
	}

	// Attempt to submit a plan
	w := &Worker{srv: s1, logger: s1.logger, evalToken: token}
	result, state, err := w.SubmitPlan(plan)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should have no update
	if state != nil {
		t.Fatalf("unexpected state update")
	}

	// Result should have allocated
	if result == nil {
		t.Fatalf("missing result")
	}

	if result.AllocIndex == 0 {
		t.Fatalf("Bad: %#v", result)
	}
	if len(result.NodeAllocation) != 1 {
		t.Fatalf("Bad: %#v", result)
	}
}
func TestWorker_waitForIndex(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0
		c.EnabledSchedulers = []string{structs.JobTypeService}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Get the current index
	index := s1.raft.AppliedIndex()

	// Cause an increment
	go func() {
		time.Sleep(10 * time.Millisecond)
		n := mock.Node()
		if err := s1.fsm.state.UpsertNode(index+1, n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}()

	// Wait for a future index
	w := &Worker{srv: s1, logger: s1.logger}
	err := w.waitForIndex(index+1, time.Second)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Cause a timeout
	err = w.waitForIndex(index+100, 10*time.Millisecond)
	if err == nil || !strings.Contains(err.Error(), "timeout") {
		t.Fatalf("err: %v", err)
	}
}
func TestClientEndpoint_GetClientAllocs(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject a fake allocation for the node
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	state := s1.fsm.State()
	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the allocs
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeClientAllocsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 100 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
	}
	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
		t.Fatalf("bad: %#v", resp2.Allocs)
	}

	// Lookup non-existing node
	get.NodeID = "foobarbaz"
	var resp3 structs.NodeClientAllocsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp3.Index != 100 {
		t.Fatalf("Bad index: %d %d", resp3.Index, 100)
	}
	if len(resp3.Allocs) != 0 {
		t.Fatalf("unexpected node %#v", resp3.Allocs)
	}
}
func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasiblityChecker(true)
	tgMock := newMockFeasiblityChecker(true)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Mark the job as eligible and the task group as escaped for the node's
	// computed class
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().taskGroups["foo"] = map[string]ComputedClassFeasibility{cc: EvalComputedClassEscaped}
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out == nil || tgMock.calls() != 1 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}

	if e, ok := ctx.Eligibility().taskGroups["foo"][cc]; !ok || e != EvalComputedClassEscaped {
		t.Fatalf("bad: %v %v", e, ok)
	}
}
func TestStateStore_RestoreNode(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()

	notify := setupNotifyTest(
		state,
		watch.Item{Table: "nodes"},
		watch.Item{Node: node.ID})

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.NodeRestore(node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	out, err := state.NodeByID(node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, node) {
		t.Fatalf("Bad: %#v %#v", out, node)
	}

	notify.verify(t)
}
func TestStaticIterator_SetNodes(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	newNodes := []*structs.Node{mock.Node()}
	static.SetNodes(newNodes)

	out := collectFeasible(static)
	if !reflect.DeepEqual(out, newNodes) {
		t.Fatalf("bad: %#v", out)
	}
}
func TestStateStore_Indexes(t *testing.T) {
	state := testStateStore(t)
	node := mock.Node()

	err := state.UpsertNode(1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	iter, err := state.Indexes()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*IndexEntry
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*IndexEntry))
	}

	expect := []*IndexEntry{
		&IndexEntry{"nodes", 1000},
	}

	if !reflect.DeepEqual(expect, out) {
		t.Fatalf("bad: %#v %#v", expect, out)
	}
}
func TestStaticIterator_Reset(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	for i := 0; i < 6; i++ {
		// Consume i nodes, then reset and ensure the full, de-duplicated set
		// is returned again.
		static.Reset()
		for j := 0; j < i; j++ {
			static.Next()
		}
		static.Reset()

		out := collectFeasible(static)
		if len(out) != len(nodes) {
			t.Fatalf("missing nodes on iteration %d: out: %#v, iterator: %#v", i, out, static)
		}

		ids := make(map[string]struct{})
		for _, o := range out {
			if _, ok := ids[o.ID]; ok {
				t.Fatalf("duplicate")
			}
			ids[o.ID] = struct{}{}
		}
	}
}