func (s *ControllerSuite) TestExampleOutput(t *c.C) { examples := s.generateControllerExamples(t) exampleKeys := make([]string, 0, len(examples)) skipExamples := []string{"migrate_cluster_domain"} examplesLoop: for key := range examples { for _, skipKey := range skipExamples { if key == skipKey { continue examplesLoop } } exampleKeys = append(exampleKeys, key) } sort.Strings(exampleKeys) for _, key := range exampleKeys { cacheKey := "https://flynn.io/schema/examples/controller/" + key schema := s.schemaCache[cacheKey] if schema == nil { continue } data := examples[key] errs := schema.Validate(nil, data) var jsonData []byte if len(errs) > 0 { jsonData, _ = json.MarshalIndent(data, "", "\t") } t.Assert(errs, c.HasLen, 0, c.Commentf("%s validation errors: %v\ndata: %v\n", cacheKey, errs, string(jsonData))) } }
// TestDevStdout runs a shell job in the cluster and checks that writes to
// /dev/stdout and /dev/stderr — both directly and from inside subshells —
// are routed to the job's stdout and stderr streams respectively.
func (s *HostSuite) TestDevStdout(t *c.C) {
	cmd := exec.CommandUsingCluster(
		s.clusterClient(t),
		s.createArtifact(t, "test-apps"),
		"sh",
	)
	// shell script fed via stdin; the expected interleaving is asserted below
	cmd.Stdin = strings.NewReader(`
echo foo > /dev/stdout
echo bar > /dev/stderr
echo "SUBSHELL: $(echo baz > /dev/stdout)"
echo "SUBSHELL: $(echo qux 2>&1 > /dev/stderr)" >&2`)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// run in a goroutine so the wait can be bounded by a timeout
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for /dev/stdout job")
	}
	t.Assert(stdout.String(), c.Equals, "foo\nSUBSHELL: baz\n")
	t.Assert(stderr.String(), c.Equals, "bar\nSUBSHELL: qux\n")
}
func waitForDeploymentEvents(t *c.C, stream chan *ct.DeploymentEvent, expected []*ct.DeploymentEvent) { debugf(t, "waiting for %d deployment events", len(expected)) actual := make([]*ct.DeploymentEvent, 0, len(expected)) loop: for { select { case e, ok := <-stream: if !ok { t.Fatal("unexpected close of deployment event stream") } actual = append(actual, e) if e.Status == "complete" || e.Status == "failed" { debugf(t, "got deployment event: %s", e.Status) break loop } debugf(t, "got deployment event: %s %s", e.JobType, e.JobState) case <-time.After(60 * time.Second): t.Fatal("timed out waiting for deployment event") } } compare := func(t *c.C, i *ct.DeploymentEvent, j *ct.DeploymentEvent) { t.Assert(i.ReleaseID, c.Equals, j.ReleaseID) t.Assert(i.JobType, c.Equals, j.JobType) t.Assert(i.JobState, c.Equals, j.JobState) t.Assert(i.Status, c.Equals, j.Status) t.Assert(i.Error, c.Equals, j.Error) } for i, e := range expected { compare(t, actual[i], e) } }
func (s *HostSuite) TestResourceLimits(t *c.C) { cmd := exec.JobUsingCluster( s.clusterClient(t), exec.DockerImage(imageURIs["test-apps"]), &host.Job{ Config: host.ContainerConfig{Args: []string{"sh", "-c", resourceCmd}}, Resources: testResources(), }, ) var out bytes.Buffer cmd.Stdout = &out runErr := make(chan error) go func() { runErr <- cmd.Run() }() select { case err := <-runErr: t.Assert(err, c.IsNil) case <-time.After(30 * time.Second): t.Fatal("timed out waiting for resource limits job") } assertResourceLimits(t, out.String()) }
func (s *CLISuite) TestLogStderr(t *c.C) { app := s.newCliTestApp(t) defer app.cleanup() t.Assert(app.flynn("run", "-d", "sh", "-c", "echo hello && echo world >&2"), Succeeds) app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}}) runLog := func(split bool) (stdout, stderr bytes.Buffer) { args := []string{"log", "--raw-output"} if split { args = append(args, "--split-stderr") } args = append(args) log := app.flynnCmd(args...) log.Stdout = &stdout log.Stderr = &stderr t.Assert(log.Run(), c.IsNil, c.Commentf("STDERR = %q", stderr.String())) return } stdout, stderr := runLog(false) // non-deterministic order t.Assert(stdout.String(), Matches, "hello") t.Assert(stdout.String(), Matches, "world") t.Assert(stderr.String(), c.Equals, "") stdout, stderr = runLog(true) t.Assert(stdout.String(), c.Equals, "hello\n") t.Assert(stderr.String(), c.Equals, "world\n") }
func (s *CLISuite) TestLog(t *c.C) { app := s.newCliTestApp(t) defer app.cleanup() t.Assert(app.flynn("run", "-d", "echo", "hello", "world"), Succeeds) app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}}) t.Assert(app.flynn("log", "--raw-output"), Outputs, "hello world\n") }
func (s *VolumeSuite) TestInterhostVolumeTransmitAPI(t *c.C) { hosts, err := s.clusterClient(t).Hosts() t.Assert(err, c.IsNil) if len(hosts) < 2 { t.Skip("need multiple hosts for this test") } s.doVolumeTransmitAPI(hosts[0], hosts[1], t) }
func (s *HostSuite) TestGetNonExistentJob(t *c.C) { cluster := s.clusterClient(t) hosts, err := cluster.Hosts() t.Assert(err, c.IsNil) // Getting a non-existent job should error _, err = hosts[0].GetJob("i-dont-exist") t.Assert(hh.IsObjectNotFoundError(err), c.Equals, true) }
func (s *HostSuite) TestAttachNonExistentJob(t *c.C) { cluster := s.clusterClient(t) hosts, err := cluster.Hosts() t.Assert(err, c.IsNil) // Attaching to a non-existent job should error _, err = hosts[0].Attach(&host.AttachReq{JobID: "none", Flags: host.AttachFlagLogs}, false) t.Assert(err, c.NotNil) }
func (s *CLISuite) TestDeployTimeout(t *c.C) { timeout := flynn(t, "/", "-a", "status", "deployment", "timeout") t.Assert(timeout, Succeeds) t.Assert(timeout.Output, c.Equals, "120\n") t.Assert(flynn(t, "/", "-a", "status", "deployment", "timeout", "150"), Succeeds) timeout = flynn(t, "/", "-a", "status", "deployment", "timeout") t.Assert(timeout, Succeeds) t.Assert(timeout.Output, c.Equals, "150\n") }
// TestAppEvents checks that streaming events for an app only receives events
// for that particular app.
func (s *ControllerSuite) TestAppEvents(t *c.C) {
	client := s.controllerClient(t)
	app1, release1 := s.createApp(t)
	app2, release2 := s.createApp(t)

	// stream events for app1
	events := make(chan *ct.Job)
	stream, err := client.StreamJobEvents(app1.ID, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// runJob starts a short-lived attached job for the given app/release;
	// DisableLog avoids log collection for these throwaway jobs
	runJob := func(appID, releaseID string) {
		rwc, err := client.RunJobAttached(appID, &ct.NewJob{
			ReleaseID:  releaseID,
			Args:       []string{"/bin/true"},
			DisableLog: true,
		})
		t.Assert(err, c.IsNil)
		rwc.Close()
	}

	// generate events for app2 and wait for them
	watcher, err := client.WatchJobEvents(app2.ID, release2.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	runJob(app2.ID, release2.ID)
	t.Assert(watcher.WaitFor(
		ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}},
		10*time.Second,
		func(e *ct.Job) error {
			debugf(t, "got %s job event for app2", e.State)
			return nil
		},
	), c.IsNil)

	// generate events for app1
	runJob(app1.ID, release1.ID)

	// check the stream only gets events for app1: any app2 event leaking
	// into the stream would fail the AppID assertion below
	for {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatal("unexpected close of job event stream")
			}
			t.Assert(e.AppID, c.Equals, app1.ID)
			debugf(t, "got %s job event for app1", e.State)
			if e.State == ct.JobStateDown {
				return
			}
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for job events for app1")
		}
	}
}
func (s *HealthcheckSuite) TestChecker(t *c.C) { // start app with ping service, register with checker app, _ := s.createAppWithService(t, "ping", &host.Service{ Name: "ping-checker", Create: true, Check: &host.HealthCheck{Type: "tcp"}, }) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds) _, err := s.discoverdClient(t).Instances("ping-checker", 10*time.Second) t.Assert(err, c.IsNil) }
func (h *Helper) clusterConf(t *c.C) *config.Cluster { h.configMtx.Lock() defer h.configMtx.Unlock() if h.config == nil { conf, err := config.ReadFile(flynnrc) t.Assert(err, c.IsNil) t.Assert(conf.Clusters, c.HasLen, 1) h.config = conf.Clusters[0] } return h.config }
func (s *CLISuite) TestLogFollow(t *c.C) { app := s.newCliTestApp(t) defer app.cleanup() t.Assert(app.flynn("run", "-d", "sh", "-c", "sleep 2 && for i in 1 2 3 4 5; do echo \"line $i\"; done"), Succeeds) app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1}}) log := app.flynnCmd("log", "--raw-output", "--follow") logStdout, err := log.StdoutPipe() t.Assert(err, c.IsNil) t.Assert(log.Start(), c.IsNil) defer log.Process.Kill() // use a goroutine + channel so we can timeout the stdout read type line struct { text string err error } lines := make(chan line) go func() { buf := bufio.NewReader(logStdout) for { text, err := buf.ReadBytes('\n') if err != nil { if err != io.EOF { lines <- line{"", err} } break } lines <- line{string(text), nil} } }() readline := func() (string, error) { select { case l := <-lines: if l.err != nil { return "", fmt.Errorf("could not read log output: %s", l.err) } return l.text, nil case <-time.After(5 * time.Second): return "", errors.New("timed out waiting for log output") } } var stderr bytes.Buffer for i := 1; i < 6; i++ { expected := fmt.Sprintf("line %d\n", i) actual, err := readline() if err != nil { t.Logf("STDERR = %q", stderr.String()) } t.Assert(err, c.IsNil) t.Assert(actual, c.Equals, expected) } }
func (h *Helper) controllerClient(t *c.C) controller.Client { h.controllerMtx.Lock() defer h.controllerMtx.Unlock() if h.controller == nil { conf := h.clusterConf(t) var err error h.controller, err = conf.Client() t.Assert(err, c.IsNil) } return h.controller }
func (s *HealthcheckSuite) TestFailure(t *c.C) { // start an app that is failing checks app, _ := s.createAppWithService(t, "printer", &host.Service{ Name: "healthcheck-failure", Create: true, Check: &host.HealthCheck{Type: "tcp"}, }) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "printer=1"), Succeeds) // confirm that it's never registered _, err := s.discoverdClient(t).Instances("healthcheck-failure", 5*time.Second) t.Assert(err, c.NotNil) }
func (s *CLISuite) TestRunLimits(t *c.C) { app := s.newCliTestApp(t) defer app.cleanup() cmd := app.flynn("run", "sh", "-c", resourceCmd) t.Assert(cmd, Succeeds) defaults := resource.Defaults() limits := strings.Split(strings.TrimSpace(cmd.Output), "\n") t.Assert(limits, c.HasLen, 3) t.Assert(limits[0], c.Equals, strconv.FormatInt(*defaults[resource.TypeMemory].Limit, 10)) t.Assert(limits[1], c.Equals, strconv.FormatInt(1024, 10)) t.Assert(limits[2], c.Equals, strconv.FormatInt(*defaults[resource.TypeMaxFD].Limit, 10)) }
func (s *GitDeploySuite) TestEmptyRelease(t *c.C) { r := s.newGitRepo(t, "empty-release") t.Assert(r.flynn("create"), Succeeds) t.Assert(r.flynn("env", "set", "BUILDPACK_URL=https://github.com/kr/heroku-buildpack-inline"), Succeeds) push := r.git("push", "flynn", "master") t.Assert(push, Succeeds) run := r.flynn("run", "echo", "foo") t.Assert(run, Succeeds) t.Assert(run, Outputs, "foo\n") }
func (s *HostSuite) TestExecCrashingJob(t *c.C) { cluster := s.clusterClient(t) for _, attach := range []bool{true, false} { t.Logf("attach = %v", attach) cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "exit 1") if attach { cmd.Stdout = ioutil.Discard cmd.Stderr = ioutil.Discard } t.Assert(cmd.Run(), c.DeepEquals, exec.ExitError(1)) } }
func (h *Helper) createArtifact(t *c.C, name string) *ct.Artifact { path := fmt.Sprintf("../image/%s.json", name) manifest, err := ioutil.ReadFile(path) t.Assert(err, c.IsNil) artifact := &ct.Artifact{ Type: ct.ArtifactTypeFlynn, URI: fmt.Sprintf("https://example.com?target=/images/%s.json", name), RawManifest: manifest, LayerURLTemplate: "file:///var/lib/flynn/layer-cache/{id}.squashfs", } t.Assert(h.controllerClient(t).CreateArtifact(artifact), c.IsNil) return artifact }
func (h *Helper) newCliTestApp(t *c.C) *cliTestApp { app, release := h.createApp(t) watcher, err := h.controllerClient(t).WatchJobEvents(app.Name, release.ID) t.Assert(err, c.IsNil) return &cliTestApp{ id: app.ID, name: app.Name, release: release, disc: h.discoverdClient(t), t: t, watcher: watcher, } }
func (h *Helper) hostClient(t *c.C, hostID string) *cluster.Host { h.hostsMtx.Lock() defer h.hostsMtx.Unlock() if h.hosts == nil { h.hosts = make(map[string]*cluster.Host) } if client, ok := h.hosts[hostID]; ok { return client } client, err := h.clusterClient(t).Host(hostID) t.Assert(err, c.IsNil) h.hosts[hostID] = client return client }
func (s *SchedulerSuite) TestTCPApp(t *c.C) { app, _ := s.createApp(t) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "echoer=1"), Succeeds) newRoute := flynn(t, "/", "-a", app.Name, "route", "add", "tcp", "-s", "echo-service") t.Assert(newRoute, Succeeds) t.Assert(newRoute.Output, Matches, `.+ on port \d+`) str := strings.Split(strings.TrimSpace(string(newRoute.Output)), " ") port := str[len(str)-1] // use Attempts to give the processes time to start if err := Attempts.Run(func() error { servAddr := routerIP + ":" + port conn, err := net.Dial("tcp", servAddr) if err != nil { return err } defer conn.Close() msg := []byte("hello there!\n") _, err = conn.Write(msg) if err != nil { return err } reply := make([]byte, len(msg)) _, err = conn.Read(reply) if err != nil { return err } t.Assert(reply, c.DeepEquals, msg) return nil }); err != nil { t.Fatal(err) } }
func (s *HealthcheckSuite) TestWithoutChecker(t *c.C) { // start app with a service but no checker app, _ := s.createAppWithService(t, "ping", &host.Service{ Name: "ping-without-checker", Create: true, }) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds) // make sure app is registered and unregistered when the process terminates _, err := s.discoverdClient(t).Instances("ping-without-checker", 3*time.Second) t.Assert(err, c.IsNil) events := make(chan *discoverd.Event) stream, err := s.discoverdClient(t).Service("ping-without-checker").Watch(events) defer stream.Close() t.Assert(err, c.IsNil) t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=0"), Succeeds) outer: for { select { case e := <-events: if e.Kind != discoverd.EventKindDown { continue } break outer case <-time.After(time.Second * 30): t.Error("Timed out waiting for a down event!") } } }
func (s *ControllerSuite) SetUpSuite(t *c.C) { var schemaPaths []string walkFn := func(path string, info os.FileInfo, err error) error { if !info.IsDir() && filepath.Ext(path) == ".json" { schemaPaths = append(schemaPaths, path) } return nil } schemaRoot, err := filepath.Abs(filepath.Join("..", "schema")) t.Assert(err, c.IsNil) t.Assert(filepath.Walk(schemaRoot, walkFn), c.IsNil) s.schemaCache = make(map[string]*jsonschema.Schema, len(schemaPaths)) for _, path := range schemaPaths { file, err := os.Open(path) t.Assert(err, c.IsNil) schema := &jsonschema.Schema{Cache: s.schemaCache} err = schema.ParseWithoutRefs(file) t.Assert(err, c.IsNil) cacheKey := "https://flynn.io/schema" + strings.TrimSuffix(strings.TrimPrefix(path, schemaRoot), ".json") s.schemaCache[cacheKey] = schema file.Close() } for _, schema := range s.schemaCache { schema.ResolveRefs(false) } }
func assertResourceLimits(t *c.C, out string) { limits := strings.Split(strings.TrimSpace(out), "\n") t.Assert(limits, c.HasLen, 3) t.Assert(limits[0], c.Equals, strconv.FormatInt(resourceMem, 10)) t.Assert(limits[1], c.Equals, strconv.FormatInt(768, 10)) t.Assert(limits[2], c.Equals, strconv.FormatInt(resourceMaxFD, 10)) }
func (s *GitDeploySuite) TestPushTwice(t *c.C) { r := s.newGitRepo(t, "https://github.com/flynn-examples/nodejs-flynn-example") t.Assert(r.flynn("create"), Succeeds) t.Assert(r.git("push", "flynn", "master"), Succeeds) t.Assert(r.git("commit", "-m", "second", "--allow-empty"), Succeeds) t.Assert(r.git("push", "flynn", "master"), Succeeds) }
func (s *HostSuite) TestUpdateTags(t *c.C) { events := make(chan *discoverd.Event) stream, err := s.discoverdClient(t).Service("flynn-host").Watch(events) t.Assert(err, c.IsNil) defer stream.Close() nextEvent := func() *discoverd.Event { select { case e, ok := <-events: if !ok { t.Fatal("unexpected close of discoverd stream") } return e case <-time.After(10 * time.Second): t.Fatal("timed out waiting for discoverd event") } return nil } var client *cluster.Host for { e := nextEvent() if e.Kind == discoverd.EventKindUp && client == nil { client = cluster.NewHost(e.Instance.Meta["id"], e.Instance.Addr, nil, nil) } if e.Kind == discoverd.EventKindCurrent { break } } if client == nil { t.Fatal("did not initialize flynn-host client") } t.Assert(client.UpdateTags(map[string]string{"foo": "bar"}), c.IsNil) var meta map[string]string for { e := nextEvent() if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() { meta = e.Instance.Meta break } } t.Assert(meta["tag:foo"], c.Equals, "bar") // setting to empty string should delete the tag t.Assert(client.UpdateTags(map[string]string{"foo": ""}), c.IsNil) for { e := nextEvent() if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() { meta = e.Instance.Meta break } } if _, ok := meta["tag:foo"]; ok { t.Fatal("expected tag to be deleted but is still present") } }
func (s *DeployerSuite) assertRolledBack(t *c.C, deployment *ct.Deployment, processes map[string]int) { client := s.controllerClient(t) // check that we're running the old release release, err := client.GetAppRelease(deployment.AppID) t.Assert(err, c.IsNil) t.Assert(release.ID, c.Equals, deployment.OldReleaseID) // check that the old formation is the same and there's no new formation formation, err := client.GetFormation(deployment.AppID, deployment.OldReleaseID) t.Assert(err, c.IsNil) t.Assert(formation.Processes, c.DeepEquals, processes) _, err = client.GetFormation(deployment.AppID, deployment.NewReleaseID) t.Assert(err, c.NotNil) }
// TestBlobstoreBackendGCS exercises the blobstore's GCS backend, configured
// from the BLOBSTORE_GCS_CONFIG environment variable.
func (s *BlobstoreSuite) TestBlobstoreBackendGCS(t *c.C) {
	gcsConfig := os.Getenv("BLOBSTORE_GCS_CONFIG")
	if gcsConfig == "" {
		// BLOBSTORE_GCS_CONFIG should be set to a JSON-encoded Google Cloud
		// Service Account key that includes an extra field named "bucket" that
		// specifies the bucket to use
		// (comment previously said BLOBSTORE_S3_CONFIG, which was wrong)
		t.Skip("missing BLOBSTORE_GCS_CONFIG env var")
	}
	// extract just the bucket name from the service account JSON
	var data struct{ Bucket string }
	err := json.Unmarshal([]byte(gcsConfig), &data)
	t.Assert(err, c.IsNil)
	s.testBlobstoreBackend(t, "gcs", ".+google.+", fmt.Sprintf(`"BACKEND_GCS=backend=gcs bucket=%s"`, data.Bucket), `"BACKEND_GCS_KEY=$BLOBSTORE_GCS_CONFIG"`)
}