func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
	apiStubResponses["/arvados/v1/api_client_authorizations/current"] =
		arvadostest.StubResponse{200, `{"uuid": "` + arvadostest.Dispatch1AuthUUID + `", "api_token": "xyz"}`}

	apiStub := arvadostest.ServerStub{apiStubResponses}

	api := httptest.NewServer(&apiStub)
	defer api.Close()

	arv := arvadosclient.ArvadosClient{
		Scheme:    "http",
		ApiServer: api.URL[7:], // strip the leading "http://"
		ApiToken:  "abc123",
		Client:    &http.Client{Transport: &http.Transport{}},
		Retries:   0,
	}

	buf := bytes.NewBuffer(nil)
	log.SetOutput(io.MultiWriter(buf, os.Stderr))
	defer log.SetOutput(os.Stderr)

	*crunchRunCommand = crunchCmd

	doneProcessing := make(chan struct{})
	dispatcher := dispatch.Dispatcher{
		Arv:          arv,
		PollInterval: time.Second,
		RunContainer: func(dispatcher *dispatch.Dispatcher,
			container dispatch.Container,
			status chan dispatch.Container) {
			run(dispatcher, container, status)
			doneProcessing <- struct{}{}
		},
		DoneProcessing: doneProcessing}

	startCmd = func(container dispatch.Container, cmd *exec.Cmd) error {
		dispatcher.UpdateState(container.UUID, "Running")
		dispatcher.UpdateState(container.UUID, "Complete")
		return cmd.Start()
	}

	go func() {
		for i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {
			time.Sleep(100 * time.Millisecond)
		}
		dispatcher.DoneProcessing <- struct{}{}
	}()

	err := dispatcher.RunDispatcher()
	c.Assert(err, IsNil)

	// Wait for all running crunch jobs to complete / terminate
	waitGroup.Wait()

	c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
}
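
// A hypothetical caller, sketched here to show how the stub map is
// assembled. The suite type, stubbed endpoint, response body, and expected
// log text are illustrative assumptions, not taken from the actual suite.
func (s *MockArvadosServerSuite) TestSubmitFailureExample(c *C) {
	apiStubResponses := make(map[string]arvadostest.StubResponse)
	// Assumed stub: the container listing returns one Locked container so
	// the dispatcher attempts a submission.
	apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{200,
		`{"items":[{"uuid":"zzzzz-dz642-queuedcontainer","state":"Locked","priority":1}]}`}
	testWithServerStub(c, apiStubResponses, "echo",
		"About to submit queued container zzzzz-dz642-queuedcontainer")
}
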
// If the container is marked as Locked, check if it is already in the slurm
// queue.  If not, submit it.
//
// If the container is marked as Running, check if it is in the slurm queue.
// If not, mark it as Cancelled.
func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container dispatch.Container, monitorDone *bool) {
	submitted := false
	for !*monitorDone {
		if squeueUpdater.CheckSqueue(container.UUID) {
			// Found in the queue, so continue monitoring
			submitted = true
		} else if container.State == dispatch.Locked && !submitted {
			// Not in queue but in Locked state and we haven't
			// submitted it yet, so submit it.

			log.Printf("About to submit queued container %v", container.UUID)

			if _, err := submit(dispatcher, container, *crunchRunCommand); err != nil {
				log.Printf("Error submitting container %s to slurm: %v",
					container.UUID, err)
				// sbatch may be failing; return the container to the queue
				dispatcher.UpdateState(container.UUID, dispatch.Queued)
			}
			submitted = true
		} else {
			// Not in the queue and we are not going to submit it.
			// Refresh the container state: if it is
			// Complete/Cancelled, do nothing; if it is Locked,
			// release it back to Queued; if it is Running, mark it
			// Cancelled.

			var con dispatch.Container
			err := dispatcher.Arv.Get("containers", container.UUID, nil, &con)
			if err != nil {
				log.Printf("Error getting final container state: %v", err)
			}

			var st string
			switch con.State {
			case dispatch.Locked:
				st = dispatch.Queued
			case dispatch.Running:
				st = dispatch.Cancelled
			default:
				// Container state is Queued, Complete or Cancelled so stop monitoring it.
				return
			}

			log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
				container.UUID, con.State, st)
			dispatcher.UpdateState(container.UUID, st)
		}
	}
}
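
// monitorSubmitOrCancel and run (below) rely on a package-level
// squeueUpdater whose definition is outside this excerpt. A minimal sketch
// of the shape these calls assume -- a periodically refreshed squeue
// snapshot guarded by a mutex. The type name and unexported field are
// hypothetical; only SlurmLock and CheckSqueue are used above.
type squeueChecker struct {
	SlurmLock sync.Mutex          // serializes squeue polling against sbatch/scancel
	uuids     map[string]struct{} // job names seen in the most recent squeue poll
}

// CheckSqueue reports whether the container UUID appeared in the most recent
// squeue listing.
func (s *squeueChecker) CheckSqueue(uuid string) bool {
	s.SlurmLock.Lock()
	defer s.SlurmLock.Unlock()
	_, ok := s.uuids[uuid]
	return ok
}
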
func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
	apiStub := arvadostest.ServerStub{apiStubResponses}

	api := httptest.NewServer(&apiStub)
	defer api.Close()

	arv := arvadosclient.ArvadosClient{
		Scheme:    "http",
		ApiServer: api.URL[7:], // strip the leading "http://"
		ApiToken:  "abc123",
		Client:    &http.Client{Transport: &http.Transport{}},
		Retries:   0,
	}

	buf := bytes.NewBuffer(nil)
	log.SetOutput(io.MultiWriter(buf, os.Stderr))
	defer log.SetOutput(os.Stderr)

	crunchRunCommand = &crunchCmd

	doneProcessing := make(chan struct{})
	dispatcher := dispatch.Dispatcher{
		Arv:          arv,
		PollInterval: time.Second,
		RunContainer: func(dispatcher *dispatch.Dispatcher,
			container dispatch.Container,
			status chan dispatch.Container) {
			go func() {
				time.Sleep(1 * time.Second)
				dispatcher.UpdateState(container.UUID, dispatch.Running)
				dispatcher.UpdateState(container.UUID, dispatch.Complete)
			}()
			run(dispatcher, container, status)
			doneProcessing <- struct{}{}
		},
		DoneProcessing: doneProcessing}

	go func() {
		for i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {
			time.Sleep(100 * time.Millisecond)
		}
		dispatcher.DoneProcessing <- struct{}{}
	}()

	err := dispatcher.RunDispatcher()
	c.Assert(err, IsNil)

	c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
}
func (s *TestSuite) TestIntegration(c *C) {
	arv, err := arvadosclient.MakeArvadosClient()
	c.Assert(err, IsNil)

	echo := "echo"
	crunchRunCommand = &echo

	doneProcessing := make(chan struct{})
	dispatcher := dispatch.Dispatcher{
		Arv:          arv,
		PollInterval: time.Second,
		RunContainer: func(dispatcher *dispatch.Dispatcher,
			container dispatch.Container,
			status chan dispatch.Container) {
			run(dispatcher, container, status)
			doneProcessing <- struct{}{}
		},
		DoneProcessing: doneProcessing}

	startCmd = func(container dispatch.Container, cmd *exec.Cmd) error {
		dispatcher.UpdateState(container.UUID, "Running")
		dispatcher.UpdateState(container.UUID, "Complete")
		return cmd.Start()
	}

	err = dispatcher.RunDispatcher()
	c.Assert(err, IsNil)

	// Wait for all running crunch jobs to complete / terminate
	waitGroup.Wait()

	// There should be no queued containers now
	params := arvadosclient.Dict{
		"filters": [][]string{[]string{"state", "=", "Queued"}},
	}
	var containers dispatch.ContainerList
	err = arv.List("containers", params, &containers)
	c.Check(err, IsNil)
	c.Assert(len(containers.Items), Equals, 0)

	// Previously "Queued" container should now be in "Complete" state
	var container dispatch.Container
	err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
	c.Check(err, IsNil)
	c.Check(container.State, Equals, "Complete")
}
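
// Both tests above override a package-level startCmd hook so that no real
// crunch-run process is needed. A minimal sketch of the assumed default,
// which simply starts the prepared command; this declaration is an
// illustration, not the package's actual definition.
var startCmd = func(container dispatch.Container, cmd *exec.Cmd) error {
	return cmd.Start()
}
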
// Run or monitor a container.
//
// Monitor status updates.  If the priority changes to zero, cancel the
// container using scancel.
func run(dispatcher *dispatch.Dispatcher,
	container dispatch.Container,
	status chan dispatch.Container) {

	log.Printf("Monitoring container %v started", container.UUID)
	defer log.Printf("Monitoring container %v finished", container.UUID)

	monitorDone := false
	go monitorSubmitOrCancel(dispatcher, container, &monitorDone)

	for container = range status {
		if container.State == dispatch.Locked || container.State == dispatch.Running {
			if container.Priority == 0 {
				log.Printf("Canceling container %s", container.UUID)

				// Mutex between squeue sync and running sbatch or scancel.
				squeueUpdater.SlurmLock.Lock()
				err := scancelCmd(container).Run()
				squeueUpdater.SlurmLock.Unlock()

				if err != nil {
					log.Printf("Error stopping container %s with scancel: %v",
						container.UUID, err)
					if squeueUpdater.CheckSqueue(container.UUID) {
						log.Printf("Container %s is still in squeue after scancel.",
							container.UUID)
						continue
					}
				}

				if err = dispatcher.UpdateState(container.UUID, dispatch.Cancelled); err != nil {
					log.Printf("Error updating container %s to state %q: %v",
						container.UUID, dispatch.Cancelled, err)
				}
			}
		}
	}
	monitorDone = true
}
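
// scancelCmd is taken to be a package-level hook so tests can stub out the
// slurm CLI. A plausible sketch, assuming submit names each sbatch job after
// the container UUID; the --name flag here is that assumption, not a
// confirmed detail of the real dispatcher.
var scancelCmd = func(container dispatch.Container) *exec.Cmd {
	return exec.Command("scancel", "--name="+container.UUID)
}
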
// Run a container.
//
// If the container is Locked, start a new crunch-run process and wait until
// crunch-run completes.  If the priority is set to zero, send an interrupt
// signal to the crunch-run process.
//
// If the container is in any other state, or is not Complete/Cancelled after
// crunch-run terminates, mark the container as Cancelled.
func run(dispatcher *dispatch.Dispatcher,
	container dispatch.Container,
	status chan dispatch.Container) {

	uuid := container.UUID

	if container.State == dispatch.Locked {
		waitGroup.Add(1)

		cmd := exec.Command(*crunchRunCommand, uuid)
		cmd.Stdin = nil
		cmd.Stderr = os.Stderr
		cmd.Stdout = os.Stderr

		log.Printf("Starting container %v", uuid)

		// Add this crunch job to the list of runningCmds only if we
		// succeed in starting crunch-run.

		runningCmdsMutex.Lock()
		if err := startCmd(container, cmd); err != nil {
			runningCmdsMutex.Unlock()
			log.Printf("Error starting %v for %v: %q", *crunchRunCommand, uuid, err)
			dispatcher.UpdateState(uuid, dispatch.Cancelled)
		} else {
			runningCmds[uuid] = cmd
			runningCmdsMutex.Unlock()

			// Need to wait for crunch-run to exit
			done := make(chan struct{})

			go func() {
				if err := cmd.Wait(); err != nil {
					log.Printf("Error while waiting for crunch job to finish for %v: %q", uuid, err)
				}
				done <- struct{}{}
			}()

		Loop:
			for {
				select {
				case <-done:
					break Loop
				case c := <-status:
					// Interrupt the child process if priority changes to 0
					if (c.State == dispatch.Locked || c.State == dispatch.Running) && c.Priority == 0 {
						log.Printf("Sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
						cmd.Process.Signal(os.Interrupt)
					}
				}
			}
			close(done)

			log.Printf("Finished container run for %v", uuid)

			// Remove the crunch job from runningCmds
			runningCmdsMutex.Lock()
			delete(runningCmds, uuid)
			runningCmdsMutex.Unlock()
		}
		waitGroup.Done()
	}

	// If the container is not finalized, then change it to "Cancelled".
	err := dispatcher.Arv.Get("containers", uuid, nil, &container)
	if err != nil {
		log.Printf("Error getting final container state: %v", err)
	}
	if container.LockedByUUID == dispatcher.Auth.UUID &&
		(container.State == dispatch.Locked || container.State == dispatch.Running) {
		log.Printf("After %s process termination, container state for %v is %q.  Updating it to %q",
			*crunchRunCommand, uuid, container.State, dispatch.Cancelled)
		dispatcher.UpdateState(uuid, dispatch.Cancelled)
	}

	// drain any subsequent status changes
	for range status {
	}

	log.Printf("Finalized container %v", uuid)
}
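
// The crunch-run variant of run depends on shared package state that is not
// part of this excerpt. A minimal sketch of the assumed declarations; the
// names match the references above, but the definitions are inferred.
var (
	crunchRunCommand *string                      // path to the crunch-run binary, typically set via a flag
	runningCmds      = make(map[string]*exec.Cmd) // running crunch-run processes, keyed by container UUID
	runningCmdsMutex sync.Mutex                   // guards runningCmds
	waitGroup        sync.WaitGroup               // tracks in-flight crunch-run processes
)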