Example #1
func (s *GitreceiveSuite) SetUpSuite(t *c.C) {
	// Unencrypted SSH private key for the flynn-test GitHub account.
	// The PEM header/footer are omitted so GitHub's automatic key-revocation
	// crawlers don't flag the key.
	sshKey := `MIIEpAIBAAKCAQEA2UnQ/17TfzQRt4HInuP1SYz/tSNaCGO3NDIPLydVu8mmxuKT
zlJtH3pz3uWpMEKdZtSjV+QngJL8OFzanQVZtRBJjF2m+cywHJoZA5KsplMon+R+
QmVqu92WlcRdkcft1F1CLoTXTmHHfvuhOkG6GgJONNLP9Z14EsQ7MbBh5guafWOX
kdGFajyd+T2aj27yIkK44WjWqiLjxRIAtgOJrmd/3H0w3E+O1cgNrA2gkFEUhvR1
OHz8SmugYva0VZWKvxZ6muZvn26L1tajYsCntCRR3/a74cAnVFAXjqSatL6YTbSH
sdtE91kEC73/U4SL3OFdDiCrAvXpJ480C2/GQQIDAQABAoIBAHNQNVYRIPS00WIt
wiZwm8/4wAuFQ1aIdMWCe4Ruv5T1I0kRHZe1Lqwx9CQqhWtTLu1Pk5AlSMF3P9s5
i9sg58arahzP5rlS43OKZBP9Vxq9ryWLwWXDJK2mny/EElQ3YgP9qg29+fVi9thw
+dNM5lK/PnnSFwMmGn77HN712D6Yl3CCJJjsAunTfPzR9hyEqX5YvUB5eq/TNhXe
sqrKcGORIoNfv7WohlFSkTAXIvoMxmFWXg8piZ9/b1W4NwvO4wup3ZSErIk0AQ97
HtyXJIXgtj6pLkPqvPXPGvS3quYAddNxvGIdvge7w5LHnrxOzdqbeDAVmJLVwVlv
oo+7aQECgYEA8ZliUuA8q86SWE0N+JZUqbTvE6VzyWG0/u0BJYDkH7yHkbpFOIEy
KTw048WOZLQ6/wPwL8Hb090Cas/6pmRFMgCedarzXc9fvGEwW95em7jA4AyOVBMC
KIAmaYkm6LcUFeyR6ektZeCkT0MNoi4irjBC3/hMRyZu+6RL4jXxHLkCgYEA5j13
2nkbV99GtRRjyGB7uMkrhMere2MekANXEm4dW+LZFZUda4YCqdzfjDfBTxsuyGqi
DnvI7bZFzIQPiiEzvL2Mpiy7JqxmPLGmwzxDp3z75T5vOrGs4g9IQ7yDjp5WPzjz
KCJJHn8Qt9tNZb5h0hBM+NWLT0c1XxtTIVFfgckCgYAfNpTYZjYQcFDB7bqXWjy3
7DNTE3YhF2l94fra8IsIep/9ONaGlVJ4t1mR780Uv6A7oDOgx+fxuET+rb4RTzUN
X70ZMKvee9M/kELiK5mHftgUWirtO8N0nhHYYqrPOA/1QSoc0U5XMi2oO96ADHvY
i02oh/i63IFMK47OO+/ZqQKBgQCY8bY/Y/nc+o4O1hee0TD+xGvrTXRFh8eSpRVf
QdSw6FWKt76OYbw9OGMr0xHPyd/e9K7obiRAfLeLLyLfgETNGSFodghwnU9g/CYq
RUsv5J+0XjAnTkXo+Xvouz6tK9NhNiSYwYXPA1uItt6IOtriXz+ygLCFHml+3zju
xg5quQKBgQCEL95Di6WD+155gEG2NtqeAOWhgxqAbGjFjfpV+pVBksBCrWOHcBJp
QAvAdwDIZpqRWWMcLS7zSDrzn3ZscuHCMxSOe40HbrVdDUee24/I4YQ+R8EcuzcA
3IV9ai+Bxs6PvklhXmarYxJl62LzPLyv0XFscGRes/2yIIxNfNzFug==`

	t.Assert(flynn(t, "/", "-a", "gitreceive", "env", "set",
		"SSH_CLIENT_HOSTS=github.com,192.30.252.131 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==",
		fmt.Sprintf("SSH_CLIENT_KEY=-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----\n", sshKey)),
		Succeeds)

}
Example #2
func (s *SchedulerSuite) TestJobMeta(t *c.C) {
	app, release := s.createApp(t)

	events := make(chan *ct.JobEvent)
	stream, err := s.controllerClient(t).StreamJobEvents(app.ID, 0, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// start a one-off job
	_, err = s.controllerClient(t).RunJobDetached(app.ID, &ct.NewJob{
		ReleaseID: release.ID,
		Cmd:       []string{"sh", "-c", "while true; do echo one-off-job; sleep 1; done"},
		Meta: map[string]string{
			"foo": "baz",
		},
	})
	t.Assert(err, c.IsNil)
	waitForJobEvents(t, stream, events, jobEvents{"": {"up": 1}})

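	// the job listed by the controller should carry the metadata we set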
	list, err := s.controllerClient(t).JobList(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(list, c.HasLen, 1)
	t.Assert(list[0].Meta, c.DeepEquals, map[string]string{
		"foo": "baz",
	})
}
Example #3
func (s *SchedulerSuite) TestOmniJobs(t *c.C) {
	if args.ClusterAPI == "" {
		t.Skip("cannot boot new hosts")
	}

	app, release := s.createApp(t)

	stream, err := s.controllerClient(t).StreamJobEvents(app.ID, 0)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	formation := &ct.Formation{
		AppID:     app.ID,
		ReleaseID: release.ID,
		Processes: make(map[string]int),
	}

	current := make(map[string]int)
	updates := []map[string]int{
		{"printer": 2},
		{"printer": 3, "omni": 2},
		{"printer": 1, "omni": 1},
	}

	for _, procs := range updates {
		debugf(t, "scaling formation to %v", procs)
		formation.Processes = procs
		t.Assert(s.controllerClient(t).PutFormation(formation), c.IsNil)

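		// compute the expected up/down events for this change, counting omni
		// processes once per cluster host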
		expected := make(jobEvents)
		for typ, count := range procs {
			diff := count - current[typ]
			if typ == "omni" {
				diff *= testCluster.Size()
			}
			if diff > 0 {
				expected[typ] = map[string]int{"up": diff}
			} else {
				expected[typ] = map[string]int{"down": -diff}
			}
		}
		for typ, count := range current {
			if _, ok := procs[typ]; !ok {
				diff := count
				if typ == "omni" {
					diff *= testCluster.Size()
				}
				expected[typ] = map[string]int{"down": diff}
			}
		}
		waitForJobEvents(t, stream.Events, expected)

		current = procs
	}

	// Check that new hosts get omni jobs
	newHosts := s.addHosts(t, 2)
	defer s.removeHosts(t, newHosts)
	waitForJobEvents(t, stream.Events, jobEvents{"omni": {"up": 2}})
}
Example #4
func (s *CLISuite) TestLogStderr(t *c.C) {
	app := s.newCliTestApp(t)
	t.Assert(app.flynn("run", "-d", "sh", "-c", "echo hello && echo world >&2"), Succeeds)
	app.waitFor(ct.JobEvents{"": {"up": 1, "down": 1}})
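	// runLog runs `flynn log --raw-output`, optionally with --split-stderr,
	// and returns the command's stdout and stderr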
	runLog := func(split bool) (stdout, stderr bytes.Buffer) {
		args := []string{"log", "--raw-output"}
		if split {
			args = append(args, "--split-stderr")
		}
		log := app.flynnCmd(args...)
		log.Stdout = &stdout
		log.Stderr = &stderr
		t.Assert(log.Run(), c.IsNil, c.Commentf("STDERR = %q", stderr.String()))
		return
	}
	stdout, stderr := runLog(false)
	// non-deterministic order
	t.Assert(stdout.String(), Matches, "hello")
	t.Assert(stdout.String(), Matches, "world")
	t.Assert(stderr.String(), c.Equals, "")
	stdout, stderr = runLog(true)
	t.Assert(stdout.String(), c.Equals, "hello\n")
	t.Assert(stderr.String(), c.Equals, "world\n")
}
Example #5
func (s *CLISuite) TestLog(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("run", "-d", "echo", "hello", "world"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})
	t.Assert(app.flynn("log", "--raw-output"), Outputs, "hello world\n")
}
Example #6
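// waitForDeploymentEvents consumes events from the stream until a terminal
// "complete" or "failed" event arrives (or the 60s timeout fires), then
// asserts the received events match the expected sequence in order.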
func waitForDeploymentEvents(t *c.C, stream chan *ct.DeploymentEvent, expected []*ct.DeploymentEvent) {
	debugf(t, "waiting for %d deployment events", len(expected))
	actual := make([]*ct.DeploymentEvent, 0, len(expected))
loop:
	for {
		select {
		case e, ok := <-stream:
			if !ok {
				t.Fatal("unexpected close of deployment event stream")
			}
			actual = append(actual, e)
			if e.Status == "complete" || e.Status == "failed" {
				debugf(t, "got deployment event: %s", e.Status)
				break loop
			}
			debugf(t, "got deployment event: %s %s", e.JobType, e.JobState)
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for deployment event")
		}
	}
	compare := func(t *c.C, i *ct.DeploymentEvent, j *ct.DeploymentEvent) {
		t.Assert(i.ReleaseID, c.Equals, j.ReleaseID)
		t.Assert(i.JobType, c.Equals, j.JobType)
		t.Assert(i.JobState, c.Equals, j.JobState)
		t.Assert(i.Status, c.Equals, j.Status)
		t.Assert(i.Error, c.Equals, j.Error)
	}

	for i, e := range expected {
		compare(t, actual[i], e)
	}
}
Example #7
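// newSlugrunnerArtifact builds a docker artifact from the SLUGRUNNER_IMAGE_URI
// configured on the gitreceive app's current release.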
func (h *Helper) newSlugrunnerArtifact(t *c.C) *ct.Artifact {
	r, err := h.controllerClient(t).GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	slugrunnerURI := r.Processes["app"].Env["SLUGRUNNER_IMAGE_URI"]
	t.Assert(slugrunnerURI, c.Not(c.Equals), "")
	return &ct.Artifact{Type: "docker", URI: slugrunnerURI}
}
Example #8
func (s *ControllerSuite) TestExampleOutput(t *c.C) {
	examples := s.generateControllerExamples(t)
	exampleKeys := make([]string, 0, len(examples))
	skipExamples := []string{"migrate_cluster_domain"}
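	// collect the example keys to validate, skipping any listed in skipExamples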
examplesLoop:
	for key := range examples {
		for _, skipKey := range skipExamples {
			if key == skipKey {
				continue examplesLoop
			}
		}
		exampleKeys = append(exampleKeys, key)
	}
	sort.Strings(exampleKeys)
	for _, key := range exampleKeys {
		cacheKey := "https://flynn.io/schema/examples/controller/" + key
		schema := s.schemaCache[cacheKey]
		if schema == nil {
			continue
		}
		data := examples[key]
		errs := schema.Validate(nil, data)
		var jsonData []byte
		if len(errs) > 0 {
			jsonData, _ = json.MarshalIndent(data, "", "\t")
		}
		t.Assert(errs, c.HasLen, 0, c.Commentf("%s validation errors: %v\ndata: %v\n", cacheKey, errs, string(jsonData)))
	}
}
Example #9
func (s *HostSuite) TestResourceLimits(t *c.C) {
	cmd := exec.JobUsingCluster(
		s.clusterClient(t),
		exec.DockerImage(imageURIs["test-apps"]),
		&host.Job{
			Config:    host.ContainerConfig{Cmd: []string{"sh", "-c", resourceCmd}},
			Resources: testResources(),
		},
	)
	var out bytes.Buffer
	cmd.Stdout = &out

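	// run the job in a goroutine so the test can time out if it never exits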
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for resource limits job")
	}

	assertResourceLimits(t, out.String())
}
Example #10
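// removeHosts removes the given instances from the test cluster, failing the
// test if any removal returns an error.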
func (h *Helper) removeHosts(t *c.C, hosts []*tc.Instance) {
	debugf(t, "removing %d hosts", len(hosts))
	for _, host := range hosts {
		t.Assert(testCluster.RemoveHost(host), c.IsNil)
		debugf(t, "host removed: %s", host.ID)
	}
}
Example #11
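// discoverdClient lazily creates a discoverd client pointed at the router IP
// and caches it for later calls.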
func (h *Helper) discoverdClient(t *c.C) *discoverd.Client {
	if h.disc == nil {
		var err error
		h.disc, err = discoverd.NewClientWithAddr(routerIP + ":1111")
		t.Assert(err, c.IsNil)
	}
	return h.disc
}
Example #12
func (s *VolumeSuite) TestInterhostVolumeTransmitAPI(t *c.C) {
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	if len(hosts) < 2 {
		t.Skip("need multiple hosts for this test")
	}
	s.doVolumeTransmitAPI(hosts[0], hosts[1], t)
}
Example #13
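// clusterClient lazily creates a cluster client that dials hosts via the
// discoverd service set, caching it on the helper.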
func (h *Helper) clusterClient(t *c.C) *cluster.Client {
	if h.cluster == nil {
		var err error
		h.cluster, err = cluster.NewClientWithDial(nil, h.discoverdClient(t).NewServiceSet)
		t.Assert(err, c.IsNil)
	}
	return h.cluster
}
Example #14
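// clusterConf reads the flynnrc file on first use and returns its single
// configured cluster.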
func (h *Helper) clusterConf(t *c.C) *config.Cluster {
	if h.config == nil {
		conf, err := config.ReadFile(flynnrc)
		t.Assert(err, c.IsNil)
		t.Assert(conf.Clusters, c.HasLen, 1)
		h.config = conf.Clusters[0]
	}
	return h.config
}
Example #15
func (s *HostSuite) TestAttachNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Attaching to a non-existent job should error
	_, err = hosts[0].Attach(&host.AttachReq{JobID: "none", Flags: host.AttachFlagLogs}, false)
	t.Assert(err, c.NotNil)
}
Example #16
func (s *HostSuite) TestGetNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Getting a non-existent job should error
	_, err = hosts[0].GetJob("i-dont-exist")
	t.Assert(hh.IsObjectNotFoundError(err), c.Equals, true)
}
Example #17
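// sshKeys generates an SSH key pair on first use and caches it; the mutex
// keeps concurrent callers from generating duplicate keys.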
func (h *Helper) sshKeys(t *c.C) *sshData {
	h.sshMtx.Lock()
	defer h.sshMtx.Unlock()
	if h.ssh == nil {
		var err error
		h.ssh, err = genSSHKey()
		t.Assert(err, c.IsNil)
	}
	return h.ssh
}
Example #18
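// controllerClient lazily creates a controller client pinned to the cluster's
// TLS certificate.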
func (h *Helper) controllerClient(t *c.C) *controller.Client {
	if h.controller == nil {
		conf := h.clusterConf(t)
		pin, err := base64.StdEncoding.DecodeString(conf.TLSPin)
		t.Assert(err, c.IsNil)
		h.controller, err = controller.NewClientWithPin(conf.URL, conf.Key, pin)
		t.Assert(err, c.IsNil)
	}
	return h.controller
}
Example #19
func (s *GitDeploySuite) TestSlugbuilderLimit(t *c.C) {
	r := s.newGitRepo(t, "slugbuilder-limit")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.flynn("env", "set", "BUILDPACK_URL=git@github.com:kr/heroku-buildpack-inline.git"), Succeeds)
	t.Assert(r.flynn("limit", "set", "slugbuilder", "memory=500MB"), Succeeds)

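	// the push output should include the 500MB limit in bytes (524288000)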
	push := r.git("push", "flynn", "master")
	t.Assert(push, Succeeds)
	t.Assert(push, OutputContains, "524288000")
}
Example #20
// TestAppEvents checks that streaming events for an app only receives events
// for that particular app.
func (s *ControllerSuite) TestAppEvents(t *c.C) {
	client := s.controllerClient(t)
	app1, release1 := s.createApp(t)
	app2, release2 := s.createApp(t)

	// stream events for app1
	events := make(chan *ct.Job)
	stream, err := client.StreamJobEvents(app1.ID, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	runJob := func(appID, releaseID string) {
		rwc, err := client.RunJobAttached(appID, &ct.NewJob{
			ReleaseID:  releaseID,
			Cmd:        []string{"/bin/true"},
			DisableLog: true,
		})
		t.Assert(err, c.IsNil)
		rwc.Close()
	}

	// generate events for app2 and wait for them
	watcher, err := client.WatchJobEvents(app2.ID, release2.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	runJob(app2.ID, release2.ID)
	t.Assert(watcher.WaitFor(
		ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}},
		10*time.Second,
		func(e *ct.Job) error {
			debugf(t, "got %s job event for app2", e.State)
			return nil
		},
	), c.IsNil)

	// generate events for app1
	runJob(app1.ID, release1.ID)

	// check the stream only gets events for app1
	for {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatal("unexpected close of job event stream")
			}
			t.Assert(e.AppID, c.Equals, app1.ID)
			debugf(t, "got %s job event for app1", e.State)
			if e.State == ct.JobStateDown {
				return
			}
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for job events for app1")
		}
	}
}
Example #21
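// controllerClient lazily creates a controller client from the cluster
// config; the mutex makes it safe for concurrent use.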
func (h *Helper) controllerClient(t *c.C) *controller.Client {
	h.controllerMtx.Lock()
	defer h.controllerMtx.Unlock()
	if h.controller == nil {
		conf := h.clusterConf(t)
		var err error
		h.controller, err = conf.Client()
		t.Assert(err, c.IsNil)
	}
	return h.controller
}
Example #22
func (s *HealthcheckSuite) TestChecker(t *c.C) {
	// start app with ping service, register with checker
	app, _ := s.createAppWithService(t, "ping", &host.Service{
		Name:   "ping-checker",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds)
	_, err := s.discoverdClient(t).Instances("ping-checker", 10*time.Second)
	t.Assert(err, c.IsNil)
}
Example #23
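// newCliTestApp creates an app and release and starts watching the app's job
// events, returning a handle for use in CLI tests.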
func (s *CLISuite) newCliTestApp(t *c.C) *cliTestApp {
	app, release := s.createApp(t)
	watcher, err := s.controllerClient(t).WatchJobEvents(app.Name, release.ID)
	t.Assert(err, c.IsNil)
	return &cliTestApp{
		name:    app.Name,
		disc:    s.discoverdClient(t),
		t:       t,
		watcher: watcher,
	}
}
Example #24
func (s *CLISuite) TestLogFollow(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()

	t.Assert(app.flynn("run", "-d", "sh", "-c", "sleep 2 && for i in 1 2 3 4 5; do echo \"line $i\"; done"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1}})

	log := app.flynnCmd("log", "--raw-output", "--follow")
	logStdout, err := log.StdoutPipe()
	t.Assert(err, c.IsNil)
	var stderr bytes.Buffer
	log.Stderr = &stderr
	t.Assert(log.Start(), c.IsNil)
	defer log.Process.Kill()

	// use a goroutine + channel so we can timeout the stdout read
	type line struct {
		text string
		err  error
	}
	lines := make(chan line)
	go func() {
		buf := bufio.NewReader(logStdout)
		for {
			text, err := buf.ReadBytes('\n')
			if err != nil {
				if err != io.EOF {
					lines <- line{"", err}
				}
				break
			}
			lines <- line{string(text), nil}
		}
	}()
	readline := func() (string, error) {
		select {
		case l := <-lines:
			if l.err != nil {
				return "", fmt.Errorf("could not read log output: %s", l.err)
			}
			return l.text, nil
		case <-time.After(5 * time.Second):
			return "", errors.New("timed out waiting for log output")
		}
	}
	for i := 1; i < 6; i++ {
		expected := fmt.Sprintf("line %d\n", i)
		actual, err := readline()
		if err != nil {
			t.Logf("STDERR = %q", stderr.String())
		}
		t.Assert(err, c.IsNil)
		t.Assert(actual, c.Equals, expected)
	}
}
Example #25
func (s *CLISuite) TestRunLimits(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	cmd := app.flynn("run", "sh", "-c", resourceCmd)
	t.Assert(cmd, Succeeds)
	defaults := resource.Defaults()
	limits := strings.Split(strings.TrimSpace(cmd.Output), "\n")
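	// the job prints one limit per line: memory, a fixed 1024, then max open files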
	t.Assert(limits, c.HasLen, 3)
	t.Assert(limits[0], c.Equals, strconv.FormatInt(*defaults[resource.TypeMemory].Limit, 10))
	t.Assert(limits[1], c.Equals, strconv.FormatInt(1024, 10))
	t.Assert(limits[2], c.Equals, strconv.FormatInt(*defaults[resource.TypeMaxFD].Limit, 10))
}
Example #26
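// hostClient returns a cached client for the given host ID, dialing the host
// through the cluster client on first use.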
func (h *Helper) hostClient(t *c.C, hostID string) cluster.Host {
	if h.hosts == nil {
		h.hosts = make(map[string]cluster.Host)
	}
	if client, ok := h.hosts[hostID]; ok {
		return client
	}
	client, err := h.clusterClient(t).DialHost(hostID)
	t.Assert(err, c.IsNil)
	h.hosts[hostID] = client
	return client
}
Example #27
func (s *HealthcheckSuite) TestFailure(t *c.C) {
	// start an app that is failing checks
	app, _ := s.createAppWithService(t, "printer", &host.Service{
		Name:   "healthcheck-failure",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "printer=1"), Succeeds)
	// confirm that it's never registered
	_, err := s.discoverdClient(t).Instances("healthcheck-failure", 5*time.Second)
	t.Assert(err, c.NotNil)
}
Example #28
func (s *GitDeploySuite) TestEmptyRelease(t *c.C) {
	r := s.newGitRepo(t, "empty-release")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.flynn("env", "set", "BUILDPACK_URL=https://github.com/kr/heroku-buildpack-inline"), Succeeds)

	push := r.git("push", "flynn", "master")
	t.Assert(push, Succeeds)

	run := r.flynn("run", "echo", "foo")
	t.Assert(run, Succeeds)
	t.Assert(run, Outputs, "foo\n")
}
Example #29
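// newCliTestApp creates an app and release, starts a job event watcher and
// returns a cliTestApp wrapping them.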
func (h *Helper) newCliTestApp(t *c.C) *cliTestApp {
	app, release := h.createApp(t)
	watcher, err := h.controllerClient(t).WatchJobEvents(app.Name, release.ID)
	t.Assert(err, c.IsNil)
	return &cliTestApp{
		id:      app.ID,
		name:    app.Name,
		release: release,
		disc:    h.discoverdClient(t),
		t:       t,
		watcher: watcher,
	}
}
Example #30
func (s *HostSuite) TestExecCrashingJob(t *c.C) {
	cluster := s.clusterClient(t)

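	// run the crashing job both attached and detached; it should report exit
	// status 1 either way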
	for _, attach := range []bool{true, false} {
		t.Logf("attach = %v", attach)
		cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "exit 1")
		if attach {
			cmd.Stdout = ioutil.Discard
			cmd.Stderr = ioutil.Discard
		}
		t.Assert(cmd.Run(), c.DeepEquals, exec.ExitError(1))
	}
}