func TestDeleteService(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)
	defer store.Close()

	watch, cancel := state.Watch(store.WatchQueue())
	defer cancel()

	SetupCluster(t, store, watch)

	deleteService(t, store, service1)

	// task should be deleted
	observedTask := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask.NodeID, "id1")
}
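// deleteService and SetupCluster are helpers defined elsewhere in this
// package. As a rough sketch of the behavior deleteService is assumed to
// have (hypothetical; the real helper may differ), it removes the service
// inside a store transaction so the orchestrator can observe the deletion
// event and shut down the service's tasks:
func deleteServiceExample(t *testing.T, s *store.MemoryStore, service *api.Service) {
	err := s.Update(func(tx store.Tx) error {
		// Deleting the service record emits the event the orchestrator
		// reacts to by deleting the associated tasks.
		assert.NoError(t, store.DeleteService(tx, service.ID))
		return nil
	})
	assert.NoError(t, err)
}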
func TestReplicatedOrchestrator(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		s1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, s1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	observedTask1 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Create a second service.
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 1,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedTask3 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name2")

	// Update a service to scale it out to 3 instances
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 3,
					},
				},
			},
		}
		assert.NoError(t, store.UpdateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedTask4 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name2")

	observedTask5 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask5.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name2")

	// Now scale it back down to 1 instance
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 1,
					},
				},
			},
		}
		assert.NoError(t, store.UpdateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedDeletion1 := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, observedDeletion1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedDeletion1.ServiceAnnotations.Name, "name2")

	observedDeletion2 := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, observedDeletion2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedDeletion2.ServiceAnnotations.Name, "name2")

	// There should be one remaining task attached to service id2/name2.
	var liveTasks []*api.Task
	s.View(func(readTx store.ReadTx) {
		var tasks []*api.Task
		tasks, err = store.FindTasks(readTx, store.ByServiceID("id2"))
		for _, t := range tasks {
			if t.DesiredState == api.TaskStateRunning {
				liveTasks = append(liveTasks, t)
			}
		}
	})
	assert.NoError(t, err)
	assert.Len(t, liveTasks, 1)

	// Delete the remaining task directly. It should be recreated by the
	// orchestrator.
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.DeleteTask(tx, liveTasks[0].ID))
		return nil
	})
	assert.NoError(t, err)

	observedTask6 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask6.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name2")

	// Delete the service. Its remaining task should go away.
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.DeleteService(tx, "id2"))
		return nil
	})
	assert.NoError(t, err)

	deletedTask := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, deletedTask.Status.State, api.TaskStateNew)
	assert.Equal(t, deletedTask.ServiceAnnotations.Name, "name2")
}
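// testutils.WatchTaskCreate and testutils.WatchTaskDelete block until the
// watch channel delivers the corresponding task event and return the
// affected task. A rough sketch of the assumed behavior, under the
// assumption that the channel carries events.Event values from
// github.com/docker/go-events and that time is imported (the name
// watchTaskCreateExample is hypothetical; the real implementation lives in
// the testutils package and may differ):
func watchTaskCreateExample(t *testing.T, watch chan events.Event) *api.Task {
	for {
		select {
		case event := <-watch:
			// Skip unrelated events (commits, updates, deletions)
			// until a task creation arrives.
			if e, ok := event.(state.EventCreateTask); ok {
				return e.Task
			}
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for task creation event")
		}
	}
}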
func TestReplicatedScaleDown(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{}, state.EventDeleteTask{})
	defer cancel()

	s1 := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 6,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, s1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
			{
				ID: "node2",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name2",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
			{
				ID: "node3",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name3",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// task1 is assigned to node1
		// task2 - task3 are assigned to node2
		// task4 - task6 are assigned to node3
		// task7 is unassigned
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateStarting,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "id1",
				NodeID:    "node1",
			},
			{
				ID:           "task2",
				Slot:         2,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task2",
				},
				ServiceID: "id1",
				NodeID:    "node2",
			},
			{
				ID:           "task3",
				Slot:         3,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task3",
				},
				ServiceID: "id1",
				NodeID:    "node2",
			},
			{
				ID:           "task4",
				Slot:         4,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task4",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task5",
				Slot:         5,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task5",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task6",
				Slot:         6,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task6",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task7",
				Slot:         7,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateNew,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task7",
				},
				ServiceID: "id1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// Replicas was set to 6, but we started with 7 tasks. task7 should
	// be the one the orchestrator chose to shut down because it was not
	// assigned yet.
	observedShutdown := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, "task7", observedShutdown.ID)

	// Now scale down to 2 instances.
	err = s.Update(func(tx store.Tx) error {
		s1.Spec.Mode = &api.ServiceSpec_Replicated{
			Replicated: &api.ReplicatedService{
				Replicas: 2,
			},
		}
		assert.NoError(t, store.UpdateService(tx, s1))
		return nil
	})
	assert.NoError(t, err)

	// Tasks should be shut down in a way that balances the remaining tasks.
	// node2 and node3 should be preferred over node1 because node1's task
	// is not running yet.
	shutdowns := make(map[string]int)
	for i := 0; i != 4; i++ {
		observedShutdown := testutils.WatchTaskDelete(t, watch)
		shutdowns[observedShutdown.NodeID]++
	}

	assert.Equal(t, 1, shutdowns["node1"])
	assert.Equal(t, 1, shutdowns["node2"])
	assert.Equal(t, 2, shutdowns["node3"])

	// There should be remaining tasks on node2 and node3.
	s.View(func(readTx store.ReadTx) {
		tasks, err := store.FindTasks(readTx, store.ByDesiredState(api.TaskStateRunning))
		require.NoError(t, err)
		require.Len(t, tasks, 2)
		if tasks[0].NodeID == "node2" {
			assert.Equal(t, "node3", tasks[1].NodeID)
		} else {
			assert.Equal(t, "node3", tasks[0].NodeID)
			assert.Equal(t, "node2", tasks[1].NodeID)
		}
	})
}
func TestTaskHistory(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	assert.NoError(t, s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateCluster(tx, &api.Cluster{
			ID: identity.NewID(),
			Spec: api.ClusterSpec{
				Annotations: api.Annotations{
					Name: store.DefaultClusterName,
				},
				Orchestration: api.OrchestrationConfig{
					TaskHistoryRetentionLimit: 2,
				},
			},
		}))
		return nil
	}))

	taskReaper := taskreaper.New(s)
	defer taskReaper.Stop()
	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
				Task: api.TaskSpec{
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnAny,
						Delay:     ptypes.DurationProto(0),
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()
	go taskReaper.Run()

	observedTask1 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Fail both tasks. They should both get restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status.State = api.TaskStateFailed
	updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"}
	updatedTask2 := observedTask2.Copy()
	updatedTask2.Status.State = api.TaskStateFailed
	updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"}
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
		return nil
	})
	assert.NoError(t, err)

	testutils.Expect(t, watch, state.EventCommit{})
	testutils.Expect(t, watch, state.EventUpdateTask{})
	testutils.Expect(t, watch, state.EventUpdateTask{})
	testutils.Expect(t, watch, state.EventCommit{})
	testutils.Expect(t, watch, state.EventUpdateTask{})
	observedTask3 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")

	testutils.Expect(t, watch, state.EventUpdateTask{})
	observedTask4 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")

	// Fail these replacement tasks. Since TaskHistoryRetentionLimit is set
	// to 2, this should cause the oldest tasks for each instance to get
	// deleted.
	updatedTask3 := observedTask3.Copy()
	updatedTask3.Status.State = api.TaskStateFailed
	updatedTask4 := observedTask4.Copy()
	updatedTask4.Status.State = api.TaskStateFailed
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask3))
		assert.NoError(t, store.UpdateTask(tx, updatedTask4))
		return nil
	})
	assert.NoError(t, err)

	deletedTask1 := testutils.WatchTaskDelete(t, watch)
	deletedTask2 := testutils.WatchTaskDelete(t, watch)

	assert.Equal(t, api.TaskStateFailed, deletedTask1.Status.State)
	assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name)
	assert.Equal(t, api.TaskStateFailed, deletedTask2.Status.State)
	assert.Equal(t, "original", deletedTask2.ServiceAnnotations.Name)

	var foundTasks []*api.Task
	s.View(func(tx store.ReadTx) {
		foundTasks, err = store.FindTasks(tx, store.All)
	})
	assert.NoError(t, err)
	assert.Len(t, foundTasks, 4)
}
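// testutils.Expect, used above to consume commit and update events, is
// assumed to read exactly one event from the watch channel and fail the
// test unless it has the same dynamic type as the expected event. A rough
// sketch under that assumption (the name expectEventExample is
// hypothetical; assumes reflect, time, and github.com/docker/go-events
// are imported):
func expectEventExample(t *testing.T, watch chan events.Event, expected events.Event) {
	select {
	case event := <-watch:
		// Compare dynamic types only; the event payloads are not
		// expected to match.
		if reflect.TypeOf(event) != reflect.TypeOf(expected) {
			t.Fatalf("expected event of type %T, got %T", expected, event)
		}
	case <-time.After(time.Second):
		t.Fatalf("timed out waiting for event of type %T", expected)
	}
}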