func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
    alloc := mock.Alloc()
    state := testStateStore(t)
    node := mock.Node()
    alloc.NodeID = node.ID
    node.Resources = alloc.Resources
    node.Reserved = nil
    state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
    state.UpsertNode(1000, node)
    state.UpsertAllocs(1001, []*structs.Allocation{alloc})

    alloc2 := mock.Alloc()
    alloc2.NodeID = node.ID
    state.UpsertJobSummary(1200, mock.JobSummary(alloc2.JobID))

    snap, _ := state.Snapshot()
    plan := &structs.Plan{
        NodeAllocation: map[string][]*structs.Allocation{
            node.ID: []*structs.Allocation{alloc2},
        },
    }

    fit, err := evaluateNodePlan(snap, plan, node.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if fit {
        t.Fatalf("bad")
    }
}

func TestWorker_SubmitPlan(t *testing.T) {
    s1 := testServer(t, func(c *Config) {
        c.NumSchedulers = 0
        c.EnabledSchedulers = []string{structs.JobTypeService}
    })
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    // Register node
    node := mock.Node()
    testRegisterNode(t, s1, node)

    eval1 := mock.Eval()
    s1.fsm.State().UpsertJobSummary(1000, mock.JobSummary(eval1.JobID))

    // Enqueue the evaluation and dequeue it to get the eval token
    s1.evalBroker.Enqueue(eval1)
    evalOut, token, err := s1.evalBroker.Dequeue([]string{eval1.Type}, time.Second)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if evalOut != eval1 {
        t.Fatalf("Bad eval")
    }

    // Create an allocation plan
    alloc := mock.Alloc()
    s1.fsm.State().UpsertJobSummary(1200, mock.JobSummary(alloc.JobID))
    plan := &structs.Plan{
        EvalID: eval1.ID,
        NodeAllocation: map[string][]*structs.Allocation{
            node.ID: []*structs.Allocation{alloc},
        },
    }

    // Attempt to submit a plan
    w := &Worker{srv: s1, logger: s1.logger, evalToken: token}
    result, state, err := w.SubmitPlan(plan)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Should have no state update
    if state != nil {
        t.Fatalf("unexpected state update")
    }

    // Result should have allocated
    if result == nil {
        t.Fatalf("missing result")
    }
    if result.AllocIndex == 0 {
        t.Fatalf("Bad: %#v", result)
    }
    if len(result.NodeAllocation) != 1 {
        t.Fatalf("Bad: %#v", result)
    }
}

func TestFSM_SnapshotRestore_Allocs_NoSharedResources(t *testing.T) {
    // Add some state
    fsm := testFSM(t)
    state := fsm.State()
    alloc1 := mock.Alloc()
    alloc2 := mock.Alloc()
    alloc1.SharedResources = nil
    alloc2.SharedResources = nil
    state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
    state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
    state.UpsertAllocs(1000, []*structs.Allocation{alloc1})
    state.UpsertAllocs(1001, []*structs.Allocation{alloc2})

    // Verify the contents
    fsm2 := testSnapshotRestore(t, fsm)
    state2 := fsm2.State()
    out1, _ := state2.AllocByID(alloc1.ID)
    out2, _ := state2.AllocByID(alloc2.ID)

    // The restore should re-populate SharedResources on allocs persisted
    // without them, so compare against the rebuilt value
    alloc1.SharedResources = &structs.Resources{DiskMB: 150}
    alloc2.SharedResources = &structs.Resources{DiskMB: 150}
    if !reflect.DeepEqual(alloc1, out1) {
        t.Fatalf("bad: \n%#v\n%#v", out1, alloc1)
    }
    if !reflect.DeepEqual(alloc2, out2) {
        t.Fatalf("bad: \n%#v\n%#v", out2, alloc2)
    }
}

func TestCoreScheduler_EvalGC_Force(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
    s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

    // Insert "dead" eval
    state := s1.fsm.State()
    eval := mock.Eval()
    eval.Status = structs.EvalStatusFailed
    state.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
    err := state.UpsertEvals(1000, []*structs.Evaluation{eval})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Insert "dead" alloc
    alloc := mock.Alloc()
    alloc.EvalID = eval.ID
    alloc.DesiredStatus = structs.AllocDesiredStatusStop
    state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
    err = state.UpsertAllocs(1002, []*structs.Allocation{alloc})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Create a core scheduler
    snap, err := state.Snapshot()
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    core := NewCoreScheduler(s1, snap)

    // Attempt the GC
    gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
    err = core.Process(gc)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Should be gone
    out, err := state.EvalByID(eval.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if out != nil {
        t.Fatalf("bad: %v", out)
    }

    outA, err := state.AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if outA != nil {
        t.Fatalf("bad: %v", outA)
    }
}

func TestHTTP_AllocsPrefixList(t *testing.T) {
    httpTest(t, nil, func(s *TestServer) {
        // Directly manipulate the state
        state := s.Agent.server.State()
        alloc1 := mock.Alloc()
        alloc1.ID = "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706"
        alloc2 := mock.Alloc()
        alloc2.ID = "aaabbbbb-e8f7-fd38-c855-ab94ceb89706"
        summary1 := mock.JobSummary(alloc1.JobID)
        summary2 := mock.JobSummary(alloc2.JobID)
        if err := state.UpsertJobSummary(998, summary1); err != nil {
            t.Fatal(err)
        }
        if err := state.UpsertJobSummary(999, summary2); err != nil {
            t.Fatal(err)
        }
        if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}); err != nil {
            t.Fatalf("err: %v", err)
        }

        // Make the HTTP request
        req, err := http.NewRequest("GET", "/v1/allocations?prefix=aaab", nil)
        if err != nil {
            t.Fatalf("err: %v", err)
        }
        respW := httptest.NewRecorder()

        // Make the request
        obj, err := s.Server.AllocsRequest(respW, req)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Check for the index
        if respW.HeaderMap.Get("X-Nomad-Index") == "" {
            t.Fatalf("missing index")
        }
        if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
            t.Fatalf("missing known leader")
        }
        if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
            t.Fatalf("missing last contact")
        }

        // Check the alloc
        n := obj.([]*structs.AllocListStub)
        if len(n) != 1 {
            t.Fatalf("bad: %#v", n)
        }

        // Check the identifier
        if n[0].ID != alloc2.ID {
            t.Fatalf("expected alloc ID: %v, Actual: %v", alloc2.ID, n[0].ID)
        }
    })
}

func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the allocations
    alloc1 := mock.Alloc()
    alloc2 := mock.Alloc()
    alloc2.JobID = "job1"
    state := s1.fsm.State()

    // First upsert an unrelated alloc
    time.AfterFunc(100*time.Millisecond, func() {
        state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
        err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
        if err != nil {
            t.Fatalf("err: %v", err)
        }
    })

    // Upsert an alloc for the job we are interested in later
    time.AfterFunc(200*time.Millisecond, func() {
        state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID))
        err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
        if err != nil {
            t.Fatalf("err: %v", err)
        }
    })

    // Lookup the job's allocations
    get := &structs.JobSpecificRequest{
        JobID: "job1",
        QueryOptions: structs.QueryOptions{
            Region:        "global",
            MinQueryIndex: 50,
        },
    }
    var resp structs.JobAllocationsResponse
    start := time.Now()
    if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }

    if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
        t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
    }
    if resp.Index != 200 {
        t.Fatalf("Bad index: %d %d", resp.Index, 200)
    }
    if len(resp.Allocations) != 1 || resp.Allocations[0].JobID != "job1" {
        t.Fatalf("bad: %#v", resp.Allocations)
    }
}

func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    state := s1.fsm.State()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the allocs
    alloc1 := mock.Alloc()
    alloc2 := mock.Alloc()

    // First create an unrelated alloc
    time.AfterFunc(100*time.Millisecond, func() {
        state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
        err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
        if err != nil {
            t.Fatalf("err: %v", err)
        }
    })

    // Create the alloc we are watching later
    time.AfterFunc(200*time.Millisecond, func() {
        state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
        err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
        if err != nil {
            t.Fatalf("err: %v", err)
        }
    })

    // Lookup the allocs
    get := &structs.AllocSpecificRequest{
        AllocID: alloc2.ID,
        QueryOptions: structs.QueryOptions{
            Region:        "global",
            MinQueryIndex: 50,
        },
    }
    var resp structs.SingleAllocResponse
    start := time.Now()
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.GetAlloc", get, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }

    if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
        t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
    }
    if resp.Index != 200 {
        t.Fatalf("Bad index: %d %d", resp.Index, 200)
    }
    if resp.Alloc == nil || resp.Alloc.ID != alloc2.ID {
        t.Fatalf("bad: %#v", resp.Alloc)
    }
}

func TestAllocEndpoint_GetAlloc(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the alloc
    alloc := mock.Alloc()
    state := s1.fsm.State()
    state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
    err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Lookup the alloc
    get := &structs.AllocSpecificRequest{
        AllocID:      alloc.ID,
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    var resp structs.SingleAllocResponse
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.GetAlloc", get, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp.Index != 1000 {
        t.Fatalf("Bad index: %d %d", resp.Index, 1000)
    }

    if !reflect.DeepEqual(alloc, resp.Alloc) {
        t.Fatalf("bad: %#v", resp.Alloc)
    }
}

func TestClientEndpoint_GetAllocs(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the register request
    node := mock.Node()
    reg := &structs.NodeRegisterRequest{
        Node:         node,
        WriteRequest: structs.WriteRequest{Region: "global"},
    }

    // Fetch the response
    var resp structs.GenericResponse
    if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }
    node.CreateIndex = resp.Index
    node.ModifyIndex = resp.Index

    // Inject a fake allocation on the node
    alloc := mock.Alloc()
    alloc.NodeID = node.ID
    state := s1.fsm.State()
    state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
    err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Lookup the allocs
    get := &structs.NodeSpecificRequest{
        NodeID:       node.ID,
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    var resp2 structs.NodeAllocsResponse
    if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp2.Index != 100 {
        t.Fatalf("Bad index: %d %d", resp2.Index, 100)
    }
    if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
        t.Fatalf("bad: %#v", resp2.Allocs)
    }

    // Lookup a non-existing node
    get.NodeID = "foobarbaz"
    if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp2.Index != 100 {
        t.Fatalf("Bad index: %d %d", resp2.Index, 100)
    }
    if len(resp2.Allocs) != 0 {
        t.Fatalf("unexpected allocs: %#v", resp2.Allocs)
    }
}

func TestClientEndpoint_UpdateAlloc(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the register request
    node := mock.Node()
    reg := &structs.NodeRegisterRequest{
        Node:         node,
        WriteRequest: structs.WriteRequest{Region: "global"},
    }

    // Fetch the response
    var resp structs.GenericResponse
    if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Inject a fake allocation on the node
    alloc := mock.Alloc()
    alloc.NodeID = node.ID
    state := s1.fsm.State()
    state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
    err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Attempt update
    clientAlloc := new(structs.Allocation)
    *clientAlloc = *alloc
    clientAlloc.ClientStatus = structs.AllocClientStatusFailed

    // Update the alloc
    update := &structs.AllocUpdateRequest{
        Alloc:        []*structs.Allocation{clientAlloc},
        WriteRequest: structs.WriteRequest{Region: "global"},
    }
    var resp2 structs.NodeAllocsResponse
    start := time.Now()
    if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp2.Index == 0 {
        t.Fatalf("Bad index: %d", resp2.Index)
    }
    if diff := time.Since(start); diff < batchUpdateInterval {
        t.Fatalf("too fast: %v", diff)
    }

    // Lookup the alloc
    out, err := state.AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if out.ClientStatus != structs.AllocClientStatusFailed {
        t.Fatalf("Bad: %#v", out)
    }
}

func TestHTTP_EvalAllocations(t *testing.T) {
    httpTest(t, nil, func(s *TestServer) {
        // Directly manipulate the state
        state := s.Agent.server.State()
        alloc1 := mock.Alloc()
        alloc2 := mock.Alloc()
        alloc2.EvalID = alloc1.EvalID
        state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
        state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
        err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2})
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Make the HTTP request
        req, err := http.NewRequest("GET", "/v1/evaluation/"+alloc1.EvalID+"/allocations", nil)
        if err != nil {
            t.Fatalf("err: %v", err)
        }
        respW := httptest.NewRecorder()

        // Make the request
        obj, err := s.Server.EvalSpecificRequest(respW, req)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Check for the index
        if respW.HeaderMap.Get("X-Nomad-Index") == "" {
            t.Fatalf("missing index")
        }
        if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
            t.Fatalf("missing known leader")
        }
        if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
            t.Fatalf("missing last contact")
        }

        // Check the output
        allocs := obj.([]*structs.AllocListStub)
        if len(allocs) != 2 {
            t.Fatalf("bad: %#v", allocs)
        }
    })
}

func TestAllocEndpoint_List(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the alloc
    alloc := mock.Alloc()
    summary := mock.JobSummary(alloc.JobID)
    state := s1.fsm.State()
    if err := state.UpsertJobSummary(999, summary); err != nil {
        t.Fatalf("err: %v", err)
    }
    if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Lookup the allocations
    get := &structs.AllocListRequest{
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    var resp structs.AllocListResponse
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp.Index != 1000 {
        t.Fatalf("Bad index: %d %d", resp.Index, 1000)
    }
    if len(resp.Allocations) != 1 {
        t.Fatalf("bad: %#v", resp.Allocations)
    }
    if resp.Allocations[0].ID != alloc.ID {
        t.Fatalf("bad: %#v", resp.Allocations[0])
    }

    // Lookup the allocations by prefix
    get = &structs.AllocListRequest{
        QueryOptions: structs.QueryOptions{Region: "global", Prefix: alloc.ID[:4]},
    }
    var resp2 structs.AllocListResponse
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp2); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp2.Index != 1000 {
        t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
    }
    if len(resp2.Allocations) != 1 {
        t.Fatalf("bad: %#v", resp2.Allocations)
    }
    if resp2.Allocations[0].ID != alloc.ID {
        t.Fatalf("bad: %#v", resp2.Allocations[0])
    }
}

func TestHTTP_NodeAllocations(t *testing.T) {
    httpTest(t, nil, func(s *TestServer) {
        // Register the node
        node := mock.Node()
        args := structs.NodeRegisterRequest{
            Node:         node,
            WriteRequest: structs.WriteRequest{Region: "global"},
        }
        var resp structs.NodeUpdateResponse
        if err := s.Agent.RPC("Node.Register", &args, &resp); err != nil {
            t.Fatalf("err: %v", err)
        }

        // Directly manipulate the state
        state := s.Agent.server.State()
        alloc1 := mock.Alloc()
        alloc1.NodeID = node.ID
        if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil {
            t.Fatal(err)
        }
        err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1})
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Make the HTTP request
        req, err := http.NewRequest("GET", "/v1/node/"+node.ID+"/allocations", nil)
        if err != nil {
            t.Fatalf("err: %v", err)
        }
        respW := httptest.NewRecorder()

        // Make the request
        obj, err := s.Server.NodeSpecificRequest(respW, req)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Check for the index
        if respW.HeaderMap.Get("X-Nomad-Index") == "" {
            t.Fatalf("missing index")
        }
        if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
            t.Fatalf("missing known leader")
        }
        if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
            t.Fatalf("missing last contact")
        }

        // Check the node's allocations
        allocs := obj.([]*structs.Allocation)
        if len(allocs) != 1 || allocs[0].ID != alloc1.ID {
            t.Fatalf("bad: %#v", allocs)
        }
    })
}

func TestAllocEndpoint_GetAllocs(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the allocs
    alloc := mock.Alloc()
    alloc2 := mock.Alloc()
    state := s1.fsm.State()
    state.UpsertJobSummary(998, mock.JobSummary(alloc.JobID))
    state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
    err := state.UpsertAllocs(1000, []*structs.Allocation{alloc, alloc2})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Lookup the allocs
    get := &structs.AllocsGetRequest{
        AllocIDs:     []string{alloc.ID, alloc2.ID},
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    var resp structs.AllocsGetResponse
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.GetAllocs", get, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp.Index != 1000 {
        t.Fatalf("Bad index: %d %d", resp.Index, 1000)
    }
    if len(resp.Allocs) != 2 {
        t.Fatalf("bad: %#v", resp.Allocs)
    }

    // Lookup non-existent allocs.
    get = &structs.AllocsGetRequest{
        AllocIDs:     []string{"foo"},
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    if err := msgpackrpc.CallWithCodec(codec, "Alloc.GetAllocs", get, &resp); err == nil {
        t.Fatalf("expect error")
    }
}

func TestFSM_UpsertAllocs(t *testing.T) {
    fsm := testFSM(t)

    alloc := mock.Alloc()
    fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
    req := structs.AllocUpdateRequest{
        Alloc: []*structs.Allocation{alloc},
    }
    buf, err := structs.Encode(structs.AllocUpdateRequestType, req)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    resp := fsm.Apply(makeLog(buf))
    if resp != nil {
        t.Fatalf("resp: %v", resp)
    }

    // Verify we are registered
    out, err := fsm.State().AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    alloc.CreateIndex = out.CreateIndex
    alloc.ModifyIndex = out.ModifyIndex
    alloc.AllocModifyIndex = out.AllocModifyIndex
    if !reflect.DeepEqual(alloc, out) {
        t.Fatalf("bad: %#v %#v", alloc, out)
    }

    evictAlloc := new(structs.Allocation)
    *evictAlloc = *alloc
    evictAlloc.DesiredStatus = structs.AllocDesiredStatusEvict
    req2 := structs.AllocUpdateRequest{
        Alloc: []*structs.Allocation{evictAlloc},
    }
    buf, err = structs.Encode(structs.AllocUpdateRequestType, req2)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    resp = fsm.Apply(makeLog(buf))
    if resp != nil {
        t.Fatalf("resp: %v", resp)
    }

    // Verify we are evicted
    out, err = fsm.State().AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if out.DesiredStatus != structs.AllocDesiredStatusEvict {
        t.Fatalf("alloc not evicted: %#v", out)
    }
}

func TestClientEndpoint_BatchUpdate(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the register request
    node := mock.Node()
    reg := &structs.NodeRegisterRequest{
        Node:         node,
        WriteRequest: structs.WriteRequest{Region: "global"},
    }

    // Fetch the response
    var resp structs.GenericResponse
    if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Inject a fake allocation on the node
    alloc := mock.Alloc()
    alloc.NodeID = node.ID
    state := s1.fsm.State()
    state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
    err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Attempt update
    clientAlloc := new(structs.Allocation)
    *clientAlloc = *alloc
    clientAlloc.ClientStatus = structs.AllocClientStatusFailed

    // Call to do the batch update
    bf := NewBatchFuture()
    endpoint := s1.endpoints.Node
    endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc})
    if err := bf.Wait(); err != nil {
        t.Fatalf("err: %v", err)
    }
    if bf.Index() == 0 {
        t.Fatalf("Bad index: %d", bf.Index())
    }

    // Lookup the alloc
    out, err := state.AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if out.ClientStatus != structs.AllocClientStatusFailed {
        t.Fatalf("Bad: %#v", out)
    }
}

func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
    s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

    // Insert "dead" node
    state := s1.fsm.State()
    node := mock.Node()
    node.Status = structs.NodeStatusDown
    err := state.UpsertNode(1000, node)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Insert a running alloc on that node
    alloc := mock.Alloc()
    alloc.NodeID = node.ID
    alloc.DesiredStatus = structs.AllocDesiredStatusRun
    alloc.ClientStatus = structs.AllocClientStatusRunning
    state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
    if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc}); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Update the time tables to make this work
    tt := s1.fsm.TimeTable()
    tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold))

    // Create a core scheduler
    snap, err := state.Snapshot()
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    core := NewCoreScheduler(s1, snap)

    // Attempt the GC
    gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000)
    err = core.Process(gc)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Should still be here
    out, err := state.NodeByID(node.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if out == nil {
        t.Fatalf("bad: %v", out)
    }
}

func TestHTTP_NodeDrain(t *testing.T) {
    httpTest(t, nil, func(s *TestServer) {
        // Create the node
        node := mock.Node()
        args := structs.NodeRegisterRequest{
            Node:         node,
            WriteRequest: structs.WriteRequest{Region: "global"},
        }
        var resp structs.NodeUpdateResponse
        if err := s.Agent.RPC("Node.Register", &args, &resp); err != nil {
            t.Fatalf("err: %v", err)
        }

        // Directly manipulate the state
        state := s.Agent.server.State()
        alloc1 := mock.Alloc()
        alloc1.NodeID = node.ID
        if err := state.UpsertJobSummary(999, mock.JobSummary(alloc1.JobID)); err != nil {
            t.Fatal(err)
        }
        err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1})
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Make the HTTP request
        req, err := http.NewRequest("POST", "/v1/node/"+node.ID+"/drain?enable=1", nil)
        if err != nil {
            t.Fatalf("err: %v", err)
        }
        respW := httptest.NewRecorder()

        // Make the request
        obj, err := s.Server.NodeSpecificRequest(respW, req)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Check for the index
        if respW.HeaderMap.Get("X-Nomad-Index") == "" {
            t.Fatalf("missing index")
        }

        // Check the response
        upd := obj.(structs.NodeDrainUpdateResponse)
        if len(upd.EvalIDs) == 0 {
            t.Fatalf("bad: %v", upd)
        }
    })
}

func TestJobEndpoint_Allocations(t *testing.T) {
    s1 := testServer(t, nil)
    defer s1.Shutdown()
    codec := rpcClient(t, s1)
    testutil.WaitForLeader(t, s1.RPC)

    // Create the allocations
    alloc1 := mock.Alloc()
    alloc2 := mock.Alloc()
    alloc2.JobID = alloc1.JobID
    state := s1.fsm.State()
    state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
    state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
    err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Lookup the job's allocations
    get := &structs.JobSpecificRequest{
        JobID:        alloc1.JobID,
        QueryOptions: structs.QueryOptions{Region: "global"},
    }
    var resp2 structs.JobAllocationsResponse
    if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp2); err != nil {
        t.Fatalf("err: %v", err)
    }
    if resp2.Index != 1000 {
        t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
    }
    if len(resp2.Allocations) != 2 {
        t.Fatalf("bad: %#v", resp2.Allocations)
    }
}

func TestClient_UpdateAllocStatus(t *testing.T) {
    s1, _ := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    c1 := testClient(t, func(c *config.Config) {
        c.RPCHandler = s1
    })
    defer c1.Shutdown()

    // Wait til the node is ready
    waitTilNodeReady(c1, t)

    job := mock.Job()
    alloc := mock.Alloc()
    alloc.NodeID = c1.Node().ID
    alloc.Job = job
    alloc.JobID = job.ID
    originalStatus := "foo"
    alloc.ClientStatus = originalStatus

    // Insert at zero so they are pulled
    state := s1.State()
    if err := state.UpsertJob(0, job); err != nil {
        t.Fatal(err)
    }
    if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
        t.Fatal(err)
    }
    state.UpsertAllocs(101, []*structs.Allocation{alloc})

    testutil.WaitForResult(func() (bool, error) {
        out, err := state.AllocByID(alloc.ID)
        if err != nil {
            return false, err
        }
        if out == nil {
            return false, fmt.Errorf("no such alloc")
        }
        if out.ClientStatus == originalStatus {
            return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })
}

func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
    state, ctx := testContext(t)
    eval := mock.Eval()
    job := mock.Job()

    node := mock.Node()
    noErr(t, state.UpsertNode(900, node))

    // Register an alloc
    alloc := &structs.Allocation{
        ID:     structs.GenerateUUID(),
        EvalID: eval.ID,
        NodeID: node.ID,
        JobID:  job.ID,
        Job:    job,
        Resources: &structs.Resources{
            CPU:      2048,
            MemoryMB: 2048,
        },
        DesiredStatus: structs.AllocDesiredStatusRun,
        TaskGroup:     "web",
    }
    alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
    noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
    noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

    // Create a new task group that prevents in-place updates.
    tg := &structs.TaskGroup{}
    *tg = *job.TaskGroups[0]
    task := &structs.Task{Name: "FOO"}
    tg.Tasks = nil
    tg.Tasks = append(tg.Tasks, task)

    updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
    stack := NewGenericStack(false, ctx)

    // Do the inplace update.
    unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

    if len(unplaced) != 1 || len(inplace) != 0 {
        t.Fatal("inplaceUpdate incorrectly did an inplace update")
    }
    if len(ctx.plan.NodeAllocation) != 0 {
        t.Fatal("inplaceUpdate incorrectly did an inplace update")
    }
}

func TestHTTP_AllocQuery(t *testing.T) {
    httpTest(t, nil, func(s *TestServer) {
        // Directly manipulate the state
        state := s.Agent.server.State()
        alloc := mock.Alloc()
        if err := state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)); err != nil {
            t.Fatal(err)
        }
        err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Make the HTTP request
        req, err := http.NewRequest("GET", "/v1/allocation/"+alloc.ID, nil)
        if err != nil {
            t.Fatalf("err: %v", err)
        }
        respW := httptest.NewRecorder()

        // Make the request
        obj, err := s.Server.AllocSpecificRequest(respW, req)
        if err != nil {
            t.Fatalf("err: %v", err)
        }

        // Check for the index
        if respW.HeaderMap.Get("X-Nomad-Index") == "" {
            t.Fatalf("missing index")
        }
        if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
            t.Fatalf("missing known leader")
        }
        if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
            t.Fatalf("missing last contact")
        }

        // Check the alloc
        a := obj.(*structs.Allocation)
        if a.ID != alloc.ID {
            t.Fatalf("bad: %#v", a)
        }
    })
}

func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
    fsm := testFSM(t)

    alloc := mock.Alloc()
    fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
    job := alloc.Job
    resources := alloc.Resources
    alloc.Resources = nil
    req := structs.AllocUpdateRequest{
        Job:   job,
        Alloc: []*structs.Allocation{alloc},
    }
    buf, err := structs.Encode(structs.AllocUpdateRequestType, req)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    resp := fsm.Apply(makeLog(buf))
    if resp != nil {
        t.Fatalf("resp: %v", resp)
    }

    // Verify we are registered
    out, err := fsm.State().AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    alloc.CreateIndex = out.CreateIndex
    alloc.ModifyIndex = out.ModifyIndex
    alloc.AllocModifyIndex = out.AllocModifyIndex

    // Resources should be recomputed
    resources.DiskMB = alloc.Job.TaskGroups[0].LocalDisk.DiskMB
    alloc.Resources = resources
    if !reflect.DeepEqual(alloc, out) {
        t.Fatalf("bad: %#v %#v", alloc, out)
    }
}

func TestFSM_UpdateAllocFromClient(t *testing.T) {
    fsm := testFSM(t)
    state := fsm.State()

    alloc := mock.Alloc()
    state.UpsertJobSummary(9, mock.JobSummary(alloc.JobID))
    state.UpsertAllocs(10, []*structs.Allocation{alloc})

    clientAlloc := new(structs.Allocation)
    *clientAlloc = *alloc
    clientAlloc.ClientStatus = structs.AllocClientStatusFailed

    req := structs.AllocUpdateRequest{
        Alloc: []*structs.Allocation{clientAlloc},
    }
    buf, err := structs.Encode(structs.AllocClientUpdateRequestType, req)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    resp := fsm.Apply(makeLog(buf))
    if resp != nil {
        t.Fatalf("resp: %v", resp)
    }

    // Verify we are registered
    out, err := fsm.State().AllocByID(alloc.ID)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    clientAlloc.CreateIndex = out.CreateIndex
    clientAlloc.ModifyIndex = out.ModifyIndex
    if !reflect.DeepEqual(clientAlloc, out) {
        t.Fatalf("err: %#v,%#v", clientAlloc, out)
    }
}

func TestClient_WatchAllocs(t *testing.T) {
    ctestutil.ExecCompatible(t)
    s1, _ := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    c1 := testClient(t, func(c *config.Config) {
        c.RPCHandler = s1
    })
    defer c1.Shutdown()

    // Wait til the node is ready
    waitTilNodeReady(c1, t)

    // Create mock allocations
    job := mock.Job()
    alloc1 := mock.Alloc()
    alloc1.JobID = job.ID
    alloc1.Job = job
    alloc1.NodeID = c1.Node().ID
    alloc2 := mock.Alloc()
    alloc2.NodeID = c1.Node().ID
    alloc2.JobID = job.ID
    alloc2.Job = job

    // Insert at zero so they are pulled
    state := s1.State()
    if err := state.UpsertJob(100, job); err != nil {
        t.Fatal(err)
    }
    if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
        t.Fatal(err)
    }
    err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Both allocations should get registered
    testutil.WaitForResult(func() (bool, error) {
        c1.allocLock.RLock()
        num := len(c1.allocs)
        c1.allocLock.RUnlock()
        return num == 2, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Delete one allocation
    err = state.DeleteEval(103, nil, []string{alloc1.ID})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Update the other allocation. Have to make a copy because the allocs are
    // shared in memory in the test and the modify index would be updated in the
    // alloc runner.
    alloc2_2 := new(structs.Allocation)
    *alloc2_2 = *alloc2
    alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
    err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // One allocation should get de-registered
    testutil.WaitForResult(func() (bool, error) {
        c1.allocLock.RLock()
        num := len(c1.allocs)
        c1.allocLock.RUnlock()
        return num == 1, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // One allocation should get updated
    testutil.WaitForResult(func() (bool, error) {
        c1.allocLock.RLock()
        ar := c1.allocs[alloc2.ID]
        c1.allocLock.RUnlock()
        return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })
}

func TestWorker_ReblockEval(t *testing.T) {
    s1 := testServer(t, func(c *Config) {
        c.NumSchedulers = 0
        c.EnabledSchedulers = []string{structs.JobTypeService}
    })
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    // Create the blocked eval
    eval1 := mock.Eval()
    eval1.Status = structs.EvalStatusBlocked
    eval1.QueuedAllocations = map[string]int{"cache": 100}

    // Insert it into the state store
    if err := s1.fsm.State().UpsertEvals(1000, []*structs.Evaluation{eval1}); err != nil {
        t.Fatal(err)
    }

    // Create the job summary
    js := mock.JobSummary(eval1.JobID)
    tg := js.Summary["web"]
    tg.Queued = 100
    js.Summary["web"] = tg
    if err := s1.fsm.State().UpsertJobSummary(1001, js); err != nil {
        t.Fatal(err)
    }

    // Enqueue the eval and then dequeue
    s1.evalBroker.Enqueue(eval1)
    evalOut, token, err := s1.evalBroker.Dequeue([]string{eval1.Type}, time.Second)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if evalOut != eval1 {
        t.Fatalf("Bad eval")
    }

    eval2 := evalOut.Copy()
    eval2.QueuedAllocations = map[string]int{"web": 50}

    // Attempt to reblock eval
    w := &Worker{srv: s1, logger: s1.logger, evalToken: token}
    err = w.ReblockEval(eval2)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    // Ack the eval
    w.sendAck(evalOut.ID, token, true)

    // Check that it is blocked
    bStats := s1.blockedEvals.Stats()
    if bStats.TotalBlocked+bStats.TotalEscaped != 1 {
        t.Fatalf("ReblockEval didn't insert eval into the blocked eval tracker: %#v", bStats)
    }

    // Check that the eval was updated
    eval, err := s1.fsm.State().EvalByID(eval2.ID)
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(eval.QueuedAllocations, eval2.QueuedAllocations) {
        t.Fatalf("expected: %#v, actual: %#v", eval2.QueuedAllocations, eval.QueuedAllocations)
    }

    // Check that the snapshot index was set properly by unblocking the eval and
    // then dequeuing.
    s1.blockedEvals.Unblock("foobar", 1000)

    reblockedEval, _, err := s1.evalBroker.Dequeue([]string{eval1.Type}, 1*time.Second)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    if reblockedEval == nil {
        t.Fatalf("Nil eval")
    }
    if reblockedEval.ID != eval1.ID {
        t.Fatalf("Bad eval")
    }

    // Check that the SnapshotIndex is set
    if reblockedEval.SnapshotIndex != w.snapshotIndex {
        t.Fatalf("incorrect snapshot index; got %d; want %d",
            reblockedEval.SnapshotIndex, w.snapshotIndex)
    }
}

func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
    state, ctx := testContext(t)
    nodes := []*RankedNode{
        &RankedNode{
            Node: &structs.Node{
                // Perfect fit
                ID: structs.GenerateUUID(),
                Resources: &structs.Resources{
                    CPU:      2048,
                    MemoryMB: 2048,
                },
            },
        },
        &RankedNode{
            Node: &structs.Node{
                // Perfect fit
                ID: structs.GenerateUUID(),
                Resources: &structs.Resources{
                    CPU:      2048,
                    MemoryMB: 2048,
                },
            },
        },
    }
    static := NewStaticRankIterator(ctx, nodes)

    // Add existing allocations
    alloc1 := &structs.Allocation{
        ID:     structs.GenerateUUID(),
        EvalID: structs.GenerateUUID(),
        NodeID: nodes[0].Node.ID,
        JobID:  structs.GenerateUUID(),
        Resources: &structs.Resources{
            CPU:      2048,
            MemoryMB: 2048,
        },
        DesiredStatus: structs.AllocDesiredStatusRun,
        ClientStatus:  structs.AllocClientStatusPending,
        TaskGroup:     "web",
    }
    alloc2 := &structs.Allocation{
        ID:     structs.GenerateUUID(),
        EvalID: structs.GenerateUUID(),
        NodeID: nodes[1].Node.ID,
        JobID:  structs.GenerateUUID(),
        Resources: &structs.Resources{
            CPU:      1024,
            MemoryMB: 1024,
        },
        DesiredStatus: structs.AllocDesiredStatusRun,
        ClientStatus:  structs.AllocClientStatusPending,
        TaskGroup:     "web",
    }
    noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
    noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
    noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))

    // Add a planned eviction to alloc1
    plan := ctx.Plan()
    plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{alloc1}

    task := &structs.Task{
        Name: "web",
        Resources: &structs.Resources{
            CPU:      1024,
            MemoryMB: 1024,
        },
    }

    binp := NewBinPackIterator(ctx, static, false, 0)
    binp.SetTasks([]*structs.Task{task})

    out := collectRanked(binp)
    if len(out) != 2 {
        t.Fatalf("Bad: %#v", out)
    }
    if out[0] != nodes[0] || out[1] != nodes[1] {
        t.Fatalf("Bad: %v", out)
    }
    if out[0].Score < 10 || out[0].Score > 16 {
        t.Fatalf("Bad: %v", out[0])
    }
    if out[1].Score != 18 {
        t.Fatalf("Bad: %v", out[1])
    }
}

func TestClient_SaveRestoreState(t *testing.T) {
    ctestutil.ExecCompatible(t)
    s1, _ := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    c1 := testClient(t, func(c *config.Config) {
        c.DevMode = false
        c.RPCHandler = s1
    })
    defer c1.Shutdown()

    // Wait til the node is ready
    waitTilNodeReady(c1, t)

    // Create mock allocations
    job := mock.Job()
    alloc1 := mock.Alloc()
    alloc1.NodeID = c1.Node().ID
    alloc1.Job = job
    alloc1.JobID = job.ID
    alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    task := alloc1.Job.TaskGroups[0].Tasks[0]
    task.Config["run_for"] = "10s"

    state := s1.State()
    if err := state.UpsertJob(100, job); err != nil {
        t.Fatal(err)
    }
    if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
        t.Fatal(err)
    }
    if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Allocations should get registered
    testutil.WaitForResult(func() (bool, error) {
        c1.allocLock.RLock()
        ar := c1.allocs[alloc1.ID]
        c1.allocLock.RUnlock()
        if ar == nil {
            return false, fmt.Errorf("nil alloc runner")
        }
        if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
            return false, fmt.Errorf("client status: got %v; want %v",
                ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Shutdown the client, saves state
    if err := c1.Shutdown(); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Create a new client
    shutdownCh := make(chan struct{})
    logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
    consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    c2, err := NewClient(c1.config, consulSyncer, logger)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    defer c2.Shutdown()

    // Ensure the allocation is running
    testutil.WaitForResult(func() (bool, error) {
        c2.allocLock.RLock()
        ar := c2.allocs[alloc1.ID]
        c2.allocLock.RUnlock()
        status := ar.Alloc().ClientStatus

        // The restored alloc is alive if it is still running or pending
        alive := status == structs.AllocClientStatusRunning ||
            status == structs.AllocClientStatusPending
        if !alive {
            return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Destroy all the allocations
    c2.allocLock.Lock()
    for _, ar := range c2.allocs {
        ar.Destroy()
        <-ar.WaitCh()
    }
    c2.allocLock.Unlock()
}

func TestClient_BlockedAllocations(t *testing.T) {
    s1, _ := testServer(t, nil)
    defer s1.Shutdown()
    testutil.WaitForLeader(t, s1.RPC)

    c1 := testClient(t, func(c *config.Config) {
        c.RPCHandler = s1
    })
    defer c1.Shutdown()

    // Wait for the node to be ready
    state := s1.State()
    testutil.WaitForResult(func() (bool, error) {
        out, err := state.NodeByID(c1.Node().ID)
        if err != nil {
            return false, err
        }
        if out == nil || out.Status != structs.NodeStatusReady {
            return false, fmt.Errorf("bad node: %#v", out)
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Add an allocation
    alloc := mock.Alloc()
    alloc.NodeID = c1.Node().ID
    alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
        "kill_after":  "1s",
        "run_for":     "100s",
        "exit_code":   0,
        "exit_signal": 0,
        "exit_err":    "",
    }
    state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
    state.UpsertAllocs(100, []*structs.Allocation{alloc})

    // Wait until the client downloads and starts the allocation
    testutil.WaitForResult(func() (bool, error) {
        out, err := state.AllocByID(alloc.ID)
        if err != nil {
            return false, err
        }
        if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
            return false, fmt.Errorf("bad alloc: %#v", out)
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Add a new chained alloc
    alloc2 := alloc.Copy()
    alloc2.ID = structs.GenerateUUID()
    alloc2.Job = alloc.Job
    alloc2.JobID = alloc.JobID
    alloc2.PreviousAllocation = alloc.ID
    if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Ensure that the chained allocation is being tracked as blocked
    testutil.WaitForResult(func() (bool, error) {
        alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
        if ok && alloc.ID == alloc2.ID {
            return true, nil
        }
        return false, fmt.Errorf("no blocked allocations")
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Change the desired state of the parent alloc to stop
    alloc1 := alloc.Copy()
    alloc1.DesiredStatus = structs.AllocDesiredStatusStop
    if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Ensure that there are no blocked allocations
    testutil.WaitForResult(func() (bool, error) {
        _, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
        if ok {
            return false, fmt.Errorf("blocked allocations present")
        }
        return true, nil
    }, func(err error) {
        t.Fatalf("err: %v", err)
    })

    // Destroy all the allocations
    c1.allocLock.Lock()
    for _, ar := range c1.allocs {
        ar.Destroy()
        <-ar.WaitCh()
    }
    c1.allocLock.Unlock()
}

func TestInplaceUpdate_Success(t *testing.T) {
    state, ctx := testContext(t)
    eval := mock.Eval()
    job := mock.Job()

    node := mock.Node()
    noErr(t, state.UpsertNode(900, node))

    // Register an alloc
    alloc := &structs.Allocation{
        ID:        structs.GenerateUUID(),
        EvalID:    eval.ID,
        NodeID:    node.ID,
        JobID:     job.ID,
        Job:       job,
        TaskGroup: job.TaskGroups[0].Name,
        Resources: &structs.Resources{
            CPU:      2048,
            MemoryMB: 2048,
        },
        DesiredStatus: structs.AllocDesiredStatusRun,
    }
    alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
    noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
    noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

    // Create a new task group that updates the resources.
    tg := &structs.TaskGroup{}
    *tg = *job.TaskGroups[0]
    resource := &structs.Resources{CPU: 737}
    tg.Tasks[0].Resources = resource
    newServices := []*structs.Service{
        {
            Name:      "dummy-service",
            PortLabel: "http",
        },
        {
            Name:      "dummy-service2",
            PortLabel: "http",
        },
    }

    // Delete service 2
    tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

    // Add the new services
    tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

    updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
    stack := NewGenericStack(false, ctx)
    stack.SetJob(job)

    // Do the inplace update.
    unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

    if len(unplaced) != 0 || len(inplace) != 1 {
        t.Fatal("inplaceUpdate did not do an inplace update")
    }
    if len(ctx.plan.NodeAllocation) != 1 {
        t.Fatal("inplaceUpdate did not do an inplace update")
    }
    if inplace[0].Alloc.ID != alloc.ID {
        t.Fatalf("inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)
    }

    // Get the alloc we inserted.
    a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
    if a.Job == nil {
        t.Fatalf("bad")
    }
    if len(a.Job.TaskGroups) != 1 {
        t.Fatalf("bad")
    }
    if len(a.Job.TaskGroups[0].Tasks) != 1 {
        t.Fatalf("bad")
    }
    if len(a.Job.TaskGroups[0].Tasks[0].Services) != 3 {
        t.Fatalf("Expected number of services: %v, Actual: %v", 3,
            len(a.Job.TaskGroups[0].Tasks[0].Services))
    }

    serviceNames := make(map[string]struct{}, 3)
    for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
        serviceNames[consulService.Name] = struct{}{}
    }
    if len(serviceNames) != 3 {
        t.Fatalf("bad")
    }
    for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
        if _, found := serviceNames[name]; !found {
            t.Errorf("Expected consul service name missing: %v", name)
        }
    }
}