func TestFSM_SnapshotRestore_JobSummary(t *testing.T) {
	// Add some state
	fsm := testFSM(t)
	state := fsm.State()
	job1 := mock.Job()
	state.UpsertJob(1000, job1)
	js1, _ := state.JobSummaryByID(job1.ID)
	job2 := mock.Job()
	state.UpsertJob(1001, job2)
	js2, _ := state.JobSummaryByID(job2.ID)

	// Verify the contents
	fsm2 := testSnapshotRestore(t, fsm)
	state2 := fsm2.State()
	out1, _ := state2.JobSummaryByID(job1.ID)
	out2, _ := state2.JobSummaryByID(job2.ID)
	if !reflect.DeepEqual(js1, out1) {
		t.Fatalf("bad: \n%#v\n%#v", js1, out1)
	}
	if !reflect.DeepEqual(js2, out2) {
		t.Fatalf("bad: \n%#v\n%#v", js2, out2)
	}
}
func TestStateStore_JobsByGC(t *testing.T) {
	state := testStateStore(t)
	var gc, nonGc []*structs.Job

	for i := 0; i < 10; i++ {
		job := mock.Job()
		nonGc = append(nonGc, job)

		if err := state.UpsertJob(1000+uint64(i), job); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	for i := 0; i < 10; i++ {
		job := mock.Job()
		job.GC = true
		gc = append(gc, job)

		if err := state.UpsertJob(2000+uint64(i), job); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	iter, err := state.JobsByGC(true)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	var outGc []*structs.Job
	for i := iter.Next(); i != nil; i = iter.Next() {
		outGc = append(outGc, i.(*structs.Job))
	}

	iter, err = state.JobsByGC(false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	var outNonGc []*structs.Job
	for i := iter.Next(); i != nil; i = iter.Next() {
		outNonGc = append(outNonGc, i.(*structs.Job))
	}

	sort.Sort(JobIDSort(gc))
	sort.Sort(JobIDSort(nonGc))
	sort.Sort(JobIDSort(outGc))
	sort.Sort(JobIDSort(outNonGc))

	if !reflect.DeepEqual(gc, outGc) {
		t.Fatalf("bad: %#v %#v", gc, outGc)
	}
	if !reflect.DeepEqual(nonGc, outNonGc) {
		t.Fatalf("bad: %#v %#v", nonGc, outNonGc)
	}
}
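// Several tests here sort with JobIDSort before comparing slices. A minimal
// sketch of such a helper, assuming it is a plain sort.Interface ordering
// jobs lexicographically by ID; this is a hypothetical reconstruction, the
// actual helper is defined elsewhere in the state store test package:
type JobIDSort []*structs.Job

func (s JobIDSort) Len() int           { return len(s) }
func (s JobIDSort) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s JobIDSort) Less(i, j int) bool { return s[i].ID < s[j].ID }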
func TestStateStore_DeleteJob_Job(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()

	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = state.DeleteJob(1001, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.JobByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1001 {
		t.Fatalf("bad: %d", index)
	}
}
func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f

	// Replace the Vault Client on the server
	s1.vault = &TestVaultClient{}

	// Create the register request with a job asking for a vault policy but
	// don't send a Vault token
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "missing Vault Token") {
		t.Fatalf("expected missing Vault token error: %v", err)
	}
}
func TestJobEndpoint_ListJobs(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create a job and insert it directly into state
	job := mock.Job()
	state := s1.fsm.State()
	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the jobs
	get := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 1000 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
	}

	if len(resp2.Jobs) != 1 {
		t.Fatalf("bad: %#v", resp2.Jobs)
	}
	if resp2.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", resp2.Jobs[0])
	}
}
func TestStateStore_RestorePeriodicLaunch(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()
	launch := &structs.PeriodicLaunch{ID: job.ID, Launch: time.Now()}

	notify := setupNotifyTest(
		state,
		watch.Item{Table: "periodic_launch"},
		watch.Item{Job: job.ID})

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.PeriodicLaunchRestore(launch)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	out, err := state.PeriodicLaunchByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, launch) {
		t.Fatalf("Bad: %#v %#v", out, launch)
	}

	notify.verify(t)
}
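// setupNotifyTest and notify.verify recur throughout these state store tests.
// A rough sketch of how such a helper could be shaped, assuming the state
// store exposes a Watch(items, ch) registration method; this is a
// hypothetical reconstruction, not the suite's actual helper:
type notifyTestCase struct {
	item watch.Item
	ch   chan struct{}
}

type notifyTest []*notifyTestCase

func setupNotifyTest(state *StateStore, items ...watch.Item) notifyTest {
	var n notifyTest
	for _, item := range items {
		// Buffered so the state store can notify without blocking.
		ch := make(chan struct{}, 1)
		state.Watch(watch.NewItems(item), ch)
		n = append(n, &notifyTestCase{item, ch})
	}
	return n
}

// verify fails the test for any watched item that never fired.
func (n notifyTest) verify(t *testing.T) {
	for _, c := range n {
		if len(c.ch) != 1 {
			t.Fatalf("should notify %#v", c.item)
		}
	}
}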
func TestStateStore_UpsertJob_Job(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()

	notify := setupNotifyTest(
		state,
		watch.Item{Table: "jobs"},
		watch.Item{Job: job.ID})

	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.JobByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(job, out) {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}

	notify.verify(t)
}
func TestStateStore_SetJobStatus(t *testing.T) {
	state := testStateStore(t)
	watcher := watch.NewItems()
	txn := state.db.Txn(true)

	// Create and insert a mock job that should be pending but has an incorrect
	// status.
	job := mock.Job()
	job.Status = "foobar"
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index := uint64(1000)
	if err := state.setJobStatus(index, watcher, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != structs.JobStatusPending {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, structs.JobStatusPending)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}
func TestServiceStack_Select_DriverFilter(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	zero.Attributes["driver.foo"] = "1"
	if err := zero.ComputeClass(); err != nil {
		t.Fatalf("ComputeClass() failed: %v", err)
	}

	stack := NewGenericStack(false, ctx)
	stack.SetNodes(nodes)

	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Driver = "foo"
	stack.SetJob(job)

	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}
}
func TestServiceStack_Select_PreferringNodes(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx)
	stack.SetNodes(nodes)
	job := mock.Job()
	stack.SetJob(job)

	// Create a preferred node
	preferredNode := mock.Node()
	option, _ := stack.SelectPreferringNodes(job.TaskGroups[0], []*structs.Node{preferredNode})
	if option == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}
	if option.Node.ID != preferredNode.ID {
		t.Fatalf("expected: %v, actual: %v", preferredNode.ID, option.Node.ID)
	}

	// Change the preferred node's kernel to windows and ensure the allocations
	// are placed elsewhere
	preferredNode1 := preferredNode.Copy()
	preferredNode1.Attributes["kernel.name"] = "windows"
	preferredNode1.ComputeClass()
	option, _ = stack.SelectPreferringNodes(job.TaskGroups[0], []*structs.Node{preferredNode1})
	if option == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if option.Node.ID != nodes[0].ID {
		t.Fatalf("expected: %#v, actual: %#v", nodes[0], option.Node)
	}
}
func BenchmarkHTTPRequests(b *testing.B) {
	s := makeHTTPServerNoLogs(b, func(c *Config) {
		c.Client.Enabled = false
	})
	defer s.Cleanup()

	job := mock.Job()
	var allocs []*structs.Allocation
	count := 1000
	for i := 0; i < count; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}

	handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
		return allocs[:count], nil
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			resp := httptest.NewRecorder()
			req, _ := http.NewRequest("GET", "/v1/kv/key", nil)
			s.Server.wrap(handler)(resp, req)
		}
	})
}
func TestSystemStack_Select_DriverFilter(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}
	zero := nodes[0]
	zero.Attributes["driver.foo"] = "1"

	stack := NewSystemStack(ctx)
	stack.SetNodes(nodes)

	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Driver = "foo"
	stack.SetJob(job)

	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}

	zero.Attributes["driver.foo"] = "0"
	stack = NewSystemStack(ctx)
	stack.SetNodes(nodes)
	stack.SetJob(job)
	node, _ = stack.Select(job.TaskGroups[0])
	if node != nil {
		t.Fatalf("node not filtered %#v", node)
	}
}
// This test just ensures the scheduler handles the eval type to avoid
// regressions.
func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) {
	h := NewHarness(t)

	// Create a job and set the task group count to zero.
	job := mock.Job()
	job.TaskGroups[0].Count = 0
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerMaxPlans,
		JobID:       job.ID,
	}

	// Insert it into the state store
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
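// The harness tests lean on a noErr helper to keep setup assertions terse. A
// minimal sketch, assuming it simply fails the test on any non-nil error
// (hypothetical reconstruction of the scheduler test helper):
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}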
func TestStateStore_RestoreJob(t *testing.T) {
	state := testStateStore(t)

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	job := mock.Job()
	err = restore.JobRestore(job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	out, err := state.JobByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, job) {
		t.Fatalf("Bad: %#v %#v", out, job)
	}
}
func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) {
	state := testStateStore(t)
	watcher := watch.NewItems()
	txn := state.db.Txn(true)

	// Create and insert a mock job.
	job := mock.Job()
	job.Status = ""
	job.ModifyIndex = 0
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	exp := "foobar"
	index := uint64(1000)
	if err := state.setJobStatus(index, watcher, txn, job, false, exp); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.Status != exp {
		t.Fatalf("setJobStatus() set %v; expected %v", updated.Status, exp)
	}

	if updated.ModifyIndex != index {
		t.Fatalf("setJobStatus() set %d; expected %d", updated.ModifyIndex, index)
	}
}
// benchmarkServiceStack_MetaKeyConstraint creates the passed number of nodes
// and sets the meta data key to have nodePartitions number of values. It then
// benchmarks the stack by selecting a job that constrains against one of the
// partitions.
func benchmarkServiceStack_MetaKeyConstraint(b *testing.B, key string, numNodes, nodePartitions int) {
	_, ctx := testContext(b)
	stack := NewGenericStack(false, ctx)

	// Create nodePartitions classes of nodes.
	nodes := make([]*structs.Node, numNodes)
	for i := 0; i < numNodes; i++ {
		n := mock.Node()
		n.Meta[key] = fmt.Sprintf("%d", i%nodePartitions)
		nodes[i] = n
	}
	stack.SetNodes(nodes)

	// Create a job whose constraint matches only the first partition
	// (meta value "0" is the only value lexicographically less than "1").
	job := mock.Job()
	job.Constraints[0] = &structs.Constraint{
		LTarget: fmt.Sprintf("${meta.%v}", key),
		RTarget: "1",
		Operand: "<",
	}
	stack.SetJob(job)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		stack.Select(job.TaskGroups[0])
	}
}
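// A hypothetical pair of Benchmark entry points showing how the helper above
// would be driven; the key, node counts, and partition sizes here are
// illustrative, not taken from the original suite:
func BenchmarkServiceStack_MetaKeyConstraint_FewPartitions(b *testing.B) {
	benchmarkServiceStack_MetaKeyConstraint(b, "rack", 1000, 4)
}

func BenchmarkServiceStack_MetaKeyConstraint_ManyPartitions(b *testing.B) {
	benchmarkServiceStack_MetaKeyConstraint(b, "rack", 1000, 100)
}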
func TestStateStore_SetJobStatus_NoOp(t *testing.T) {
	state := testStateStore(t)
	watcher := watch.NewItems()
	txn := state.db.Txn(true)

	// Create and insert a mock job that should be pending.
	job := mock.Job()
	job.Status = structs.JobStatusPending
	job.ModifyIndex = 10
	if err := txn.Insert("jobs", job); err != nil {
		t.Fatalf("job insert failed: %v", err)
	}

	index := uint64(1000)
	if err := state.setJobStatus(index, watcher, txn, job, false, ""); err != nil {
		t.Fatalf("setJobStatus() failed: %v", err)
	}

	i, err := txn.First("jobs", "id", job.ID)
	if err != nil {
		t.Fatalf("job lookup failed: %v", err)
	}
	updated := i.(*structs.Job)

	if updated.ModifyIndex == index {
		t.Fatalf("setJobStatus() should have been a no-op")
	}
}
func TestServiceStack_Select_Size(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx)
	stack.SetNodes(nodes)

	job := mock.Job()
	stack.SetJob(job)
	node, size := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}
	if size == nil {
		t.Fatalf("missing size")
	}

	if size.CPU != 500 || size.MemoryMB != 256 {
		t.Fatalf("bad: %#v", size)
	}

	// Note: On Windows time.Now currently has a best case granularity of 1ms.
	// We skip the following assertion on Windows because this test usually
	// runs too fast to measure an allocation time on Windows.
	met := ctx.Metrics()
	if runtime.GOOS != "windows" && met.AllocationTime == 0 {
		t.Fatalf("missing time")
	}
}
func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()

	// Create a mock alloc that is dead.
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusFailed
	if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a mock eval that is complete
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete
	if err := state.UpsertEvals(1001, []*structs.Evaluation{eval}); err != nil {
		t.Fatalf("err: %v", err)
	}

	txn := state.db.Txn(false)
	status, err := state.getJobStatus(txn, job, false)
	if err != nil {
		t.Fatalf("getJobStatus() failed: %v", err)
	}

	if status != structs.JobStatusDead {
		t.Fatalf("getJobStatus() returned %v; expected %v", status, structs.JobStatusDead)
	}
}
func TestServiceStack_Select_ConstraintFilter(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	zero.Attributes["kernel.name"] = "freebsd"

	stack := NewGenericStack(false, ctx, nodes)

	job := mock.Job()
	job.Constraints[0].RTarget = "freebsd"
	stack.SetJob(job)
	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}

	met := ctx.Metrics()
	if met.NodesFiltered != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ClassFiltered["linux-medium-pci"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ConstraintFiltered["$attr.kernel.name = freebsd"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
}
func TestStateStore_Jobs(t *testing.T) {
	state := testStateStore(t)
	var jobs []*structs.Job

	for i := 0; i < 10; i++ {
		job := mock.Job()
		jobs = append(jobs, job)

		err := state.UpsertJob(1000+uint64(i), job)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	iter, err := state.Jobs()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out []*structs.Job
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		out = append(out, raw.(*structs.Job))
	}

	sort.Sort(JobIDSort(jobs))
	sort.Sort(JobIDSort(out))

	if !reflect.DeepEqual(jobs, out) {
		t.Fatalf("bad: %#v %#v", jobs, out)
	}
}
func TestServiceStack_Select_BinPack_Overflow(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	zero := nodes[0]
	one := nodes[1]
	one.Reserved = one.Resources

	stack := NewGenericStack(false, ctx, nodes)

	job := mock.Job()
	stack.SetJob(job)
	node, _ := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}

	if node.Node != zero {
		t.Fatalf("bad")
	}

	met := ctx.Metrics()
	if met.NodesExhausted != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if met.ClassExhausted["linux-medium-pci"] != 1 {
		t.Fatalf("bad: %#v", met)
	}
	if len(met.Scores) != 1 {
		t.Fatalf("bad: %#v", met)
	}
}
func TestStateStore_RestoreJob(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()

	notify := setupNotifyTest(
		state,
		watch.Item{Table: "jobs"},
		watch.Item{Job: job.ID})

	restore, err := state.Restore()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	err = restore.JobRestore(job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	restore.Commit()

	out, err := state.JobByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(out, job) {
		t.Fatalf("Bad: %#v %#v", out, job)
	}

	notify.verify(t)
}
func TestServiceStack_Select_Size(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx, nodes)

	job := mock.Job()
	stack.SetJob(job)
	node, size := stack.Select(job.TaskGroups[0])
	if node == nil {
		t.Fatalf("missing node %#v", ctx.Metrics())
	}
	if size == nil {
		t.Fatalf("missing size")
	}

	if size.CPU != 500 || size.MemoryMB != 256 {
		t.Fatalf("bad: %#v", size)
	}

	met := ctx.Metrics()
	if met.AllocationTime == 0 {
		t.Fatalf("missing time")
	}
}
func TestHTTP_JobPlan(t *testing.T) {
	httpTest(t, nil, func(s *TestServer) {
		// Create the job
		job := mock.Job()
		args := structs.JobPlanRequest{
			Job:          job,
			Diff:         true,
			WriteRequest: structs.WriteRequest{Region: "global"},
		}
		buf := encodeReq(args)

		// Build the HTTP request
		req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/plan", buf)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		respW := httptest.NewRecorder()

		// Make the request
		obj, err := s.Server.JobSpecificRequest(respW, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		// Check the response
		plan := obj.(structs.JobPlanResponse)
		if plan.Annotations == nil {
			t.Fatalf("bad: %v", plan)
		}
		if plan.Diff == nil {
			t.Fatalf("bad: %v", plan)
		}
	})
}
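// encodeReq above turns a request struct into an HTTP request body. A minimal
// sketch, assuming JSON encoding into an in-memory buffer; this is a
// hypothetical reconstruction of the agent test helper:
func encodeReq(obj interface{}) io.ReadCloser {
	buf := bytes.NewBuffer(nil)
	enc := json.NewEncoder(buf)
	enc.Encode(obj)
	return ioutil.NopCloser(buf)
}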
func TestServiceStack_Select_MetricsReset(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	stack := NewGenericStack(false, ctx, nodes)

	job := mock.Job()
	stack.SetJob(job)
	n1, _ := stack.Select(job.TaskGroups[0])
	m1 := ctx.Metrics()
	if n1 == nil {
		t.Fatalf("missing node %#v", m1)
	}

	if m1.NodesEvaluated != 2 {
		t.Fatalf("should only be 2")
	}

	n2, _ := stack.Select(job.TaskGroups[0])
	m2 := ctx.Metrics()
	if n2 == nil {
		t.Fatalf("missing node %#v", m2)
	}

	// If we don't reset, this would be 4
	if m2.NodesEvaluated != 2 {
		t.Fatalf("should only be 2")
	}
}
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
		f := false
		c.VaultConfig.Enabled = &f
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request with a job asking for a vault policy
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "Vault not enabled") {
		t.Fatalf("expected Vault not enabled error: %v", err)
	}
}
func TestFSM_RegisterJob(t *testing.T) {
	fsm := testFSM(t)

	req := structs.JobRegisterRequest{
		Job: mock.Job(),
	}
	buf, err := structs.Encode(structs.JobRegisterRequestType, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := fsm.Apply(makeLog(buf))
	if resp != nil {
		t.Fatalf("resp: %v", resp)
	}

	// Verify we are registered
	job, err := fsm.State().JobByID(req.Job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if job == nil {
		t.Fatalf("not found!")
	}
	if job.CreateIndex != 1 {
		t.Fatalf("bad index: %d", job.CreateIndex)
	}
}
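// fsm.Apply above takes a raft log entry wrapping the encoded request. A
// minimal sketch of the makeLog helper, assuming it wraps the buffer in a
// raft.Log command at index 1 (which is what makes the CreateIndex assertion
// above hold); hypothetical reconstruction of the FSM test helper:
func makeLog(buf []byte) *raft.Log {
	return &raft.Log{
		Index: 1,
		Term:  1,
		Type:  raft.LogCommand,
		Data:  buf,
	}
}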
func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request with a job containing an invalid driver
	// config
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Config["foo"] = 1
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil {
		t.Fatalf("expected a validation error")
	}

	if !strings.Contains(err.Error(), "-> config:") {
		t.Fatalf("expected a driver config validation error but got: %v", err)
	}
}
func TestStateStore_UpsertJob_Job(t *testing.T) {
	state := testStateStore(t)
	job := mock.Job()

	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err := state.JobByID(job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(job, out) {
		t.Fatalf("bad: %#v %#v", job, out)
	}

	index, err := state.Index("jobs")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if index != 1000 {
		t.Fatalf("bad: %d", index)
	}
}