func (s *HostSuite) TestAttachFinishedInteractiveJob(t *c.C) { cluster := s.clusterClient(t) // run a quick interactive job cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "/bin/true") cmd.TTY = true runErr := make(chan error) go func() { runErr <- cmd.Run() }() select { case err := <-runErr: t.Assert(err, c.IsNil) case <-time.After(30 * time.Second): t.Fatal("timed out waiting for interactive job") } h, err := cluster.Host(cmd.HostID) t.Assert(err, c.IsNil) // Getting the logs for the job should fail, as it has none because it was // interactive attachErr := make(chan error) go func() { _, err = h.Attach(&host.AttachReq{JobID: cmd.Job.ID, Flags: host.AttachFlagLogs}, false) attachErr <- err }() select { case err := <-attachErr: t.Assert(err, c.NotNil) case <-time.After(time.Second): t.Error("timed out waiting for attach") } }
func (s *HostSuite) TestDevStdout(t *c.C) { cmd := exec.CommandUsingCluster( s.clusterClient(t), s.createArtifact(t, "test-apps"), "sh", ) cmd.Stdin = strings.NewReader(` echo foo > /dev/stdout echo bar > /dev/stderr echo "SUBSHELL: $(echo baz > /dev/stdout)" echo "SUBSHELL: $(echo qux 2>&1 > /dev/stderr)" >&2`) var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr runErr := make(chan error) go func() { runErr <- cmd.Run() }() select { case err := <-runErr: t.Assert(err, c.IsNil) case <-time.After(30 * time.Second): t.Fatal("timed out waiting for /dev/stdout job") } t.Assert(stdout.String(), c.Equals, "foo\nSUBSHELL: baz\n") t.Assert(stderr.String(), c.Equals, "bar\nSUBSHELL: qux\n") }
func (s *ControllerSuite) generateControllerExamples(t *c.C) map[string]interface{} { cmd := exec.CommandUsingCluster( s.clusterClient(t), s.createArtifact(t, "controller-examples"), "/bin/flynn-controller-examples", ) cmd.Env = map[string]string{ "CONTROLLER_KEY": s.clusterConf(t).Key, "SKIP_MIGRATE_DOMAIN": "true", } var stdout bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err := cmd.Run() t.Logf("stdout: %q", stdout.String()) t.Logf("stderr: %q", stderr.String()) t.Assert(err, c.IsNil) var controllerExamples map[string]json.RawMessage t.Assert(json.Unmarshal(stdout.Bytes(), &controllerExamples), c.IsNil) examples := make(map[string]interface{}, len(controllerExamples)) for key, data := range controllerExamples { example, err := unmarshalControllerExample(data) t.Assert(err, c.IsNil) examples[key] = example } return examples }
func (s *HostSuite) TestExecCrashingJob(t *c.C) { cluster := s.clusterClient(t) for _, attach := range []bool{true, false} { t.Logf("attach = %v", attach) cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "exit 1") if attach { cmd.Stdout = ioutil.Discard cmd.Stderr = ioutil.Discard } t.Assert(cmd.Run(), c.DeepEquals, exec.ExitError(1)) } }
func (s *HostSuite) TestDevSHM(t *c.C) { cmd := exec.CommandUsingCluster( s.clusterClient(t), exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "df -h /dev/shm && echo foo > /dev/shm/asdf", ) var out bytes.Buffer cmd.Stdout = &out cmd.Stderr = &out runErr := make(chan error) go func() { runErr <- cmd.Run() }() select { case err := <-runErr: t.Assert(err, c.IsNil) case <-time.After(30 * time.Second): t.Fatal("timed out waiting for /dev/shm job") } t.Assert(out.String(), c.Equals, "Filesystem Size Used Available Use% Mounted on\nshm 64.0M 0 64.0M 0% /dev/shm\n") }
func (s *HostSuite) TestDevStdout(t *c.C) { cmd := exec.CommandUsingCluster( s.clusterClient(t), exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "echo foo > /dev/stdout; echo bar > /dev/stderr", ) var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr runErr := make(chan error) go func() { runErr <- cmd.Run() }() select { case err := <-runErr: t.Assert(err, c.IsNil) case <-time.After(30 * time.Second): t.Fatal("timed out waiting for /dev/stdout job") } t.Assert(stdout.String(), c.Equals, "foo\n") t.Assert(stderr.String(), c.Equals, "bar\n") }
// TestNotifyOOM runs a job that exhausts its memory and asserts that the
// host emits an OOM notification on the app's init log stream via the
// logaggregator.
func (s *HostSuite) TestNotifyOOM(t *c.C) {
	appID := random.UUID()

	// subscribe to init log messages from the logaggregator
	client, err := logaggc.New("")
	t.Assert(err, c.IsNil)
	opts := logagg.LogOpts{
		Follow:      true,
		StreamTypes: []logagg.StreamType{logagg.StreamTypeInit},
	}
	rc, err := client.GetLog(appID, &opts)
	t.Assert(err, c.IsNil)
	defer rc.Close()

	// Decode the log stream in a background goroutine, forwarding each
	// message on msgs. NOTE: the local variable deliberately shadows the
	// stream package; all later uses refer to this stream.Stream value.
	msgs := make(chan *logaggc.Message)
	stream := stream.New()
	defer stream.Close()
	go func() {
		// closing msgs signals the consumer loop below that decoding stopped
		defer close(msgs)
		dec := json.NewDecoder(rc)
		for {
			var msg logaggc.Message
			if err := dec.Decode(&msg); err != nil {
				// record the decode error so the consumer can report it
				// via stream.Err() when it sees msgs closed
				stream.Error = err
				return
			}
			select {
			case msgs <- &msg:
			case <-stream.StopCh:
				// stream.Close() was called (via defer); stop decoding
				return
			}
		}
	}()

	// run the OOM job
	cmd := exec.CommandUsingCluster(
		s.clusterClient(t),
		s.createArtifact(t, "test-apps"),
		"/bin/oom",
	)
	// tag the job with the app ID so its logs land on the stream we follow
	cmd.Meta = map[string]string{"flynn-controller.app": appID}
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()

	// wait for the OOM notification; the loop keeps consuming messages even
	// after the job itself exits, until the FATAL line arrives or we time out
	for {
		select {
		case err := <-runErr:
			t.Assert(err, c.IsNil)
		case msg, ok := <-msgs:
			if !ok {
				t.Fatalf("message stream closed unexpectedly: %s", stream.Err())
			}
			t.Log(msg.Msg)
			if strings.Contains(msg.Msg, "FATAL: a container process was killed due to lack of available memory") {
				return
			}
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for OOM notification")
		}
	}
}