func (s *DeployerSuite) waitForJobEvents(t *c.C, jobType string, events chan *ct.Job, expected []*ct.Job) {
	debugf(t, "waiting for %d job events", len(expected))
	actual := make([]*ct.Job, 0, len(expected))
loop:
	for {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatal("unexpected close of job event stream")
			}
			// only track up and down events as we can't always
			// predict the order of pending / starting / stopping
			// events when scaling multiple jobs
			if e.State != ct.JobStateUp && e.State != ct.JobStateDown {
				continue
			}
			actual = append(actual, e)
			if len(actual) == len(expected) {
				break loop
			}
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for job events")
		}
	}
	for i, event := range expected {
		t.Assert(actual[i].ReleaseID, c.Equals, event.ReleaseID)
		t.Assert(actual[i].State, c.Equals, event.State)
		t.Assert(actual[i].Type, c.Equals, jobType)
	}
}
func (s *HostSuite) TestAttachFinishedInteractiveJob(t *c.C) {
	cluster := s.clusterClient(t)

	// run a quick interactive job
	cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "/bin/true")
	cmd.TTY = true
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for interactive job")
	}

	h, err := cluster.Host(cmd.HostID)
	t.Assert(err, c.IsNil)

	// Getting the logs for the job should fail, as it has none because it was
	// interactive
	attachErr := make(chan error)
	go func() {
		_, err = h.Attach(&host.AttachReq{JobID: cmd.Job.ID, Flags: host.AttachFlagLogs}, false)
		attachErr <- err
	}()
	select {
	case err := <-attachErr:
		t.Assert(err, c.NotNil)
	case <-time.After(time.Second):
		t.Error("timed out waiting for attach")
	}
}
func (s *HostSuite) TestResourceLimits(t *c.C) {
	cmd := exec.JobUsingCluster(
		s.clusterClient(t),
		exec.DockerImage(imageURIs["test-apps"]),
		&host.Job{
			Config:    host.ContainerConfig{Args: []string{"sh", "-c", resourceCmd}},
			Resources: testResources(),
		},
	)
	var out bytes.Buffer
	cmd.Stdout = &out

	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for resource limits job")
	}

	assertResourceLimits(t, out.String())
}
func (s *ControllerSuite) TestExampleOutput(t *c.C) {
	examples := s.generateControllerExamples(t)
	exampleKeys := make([]string, 0, len(examples))
	skipExamples := []string{"migrate_cluster_domain"}
examplesLoop:
	for key := range examples {
		for _, skipKey := range skipExamples {
			if key == skipKey {
				continue examplesLoop
			}
		}
		exampleKeys = append(exampleKeys, key)
	}
	sort.Strings(exampleKeys)
	for _, key := range exampleKeys {
		cacheKey := "https://flynn.io/schema/examples/controller/" + key
		schema := s.schemaCache[cacheKey]
		if schema == nil {
			continue
		}
		data := examples[key]
		errs := schema.Validate(nil, data)
		var jsonData []byte
		if len(errs) > 0 {
			jsonData, _ = json.MarshalIndent(data, "", "\t")
		}
		t.Assert(errs, c.HasLen, 0, c.Commentf("%s validation errors: %v\ndata: %v\n", cacheKey, errs, string(jsonData)))
	}
}
func (s *CLISuite) TestLog(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("run", "-d", "echo", "hello", "world"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})
	t.Assert(app.flynn("log", "--raw-output"), Outputs, "hello world\n")
}
func (s *HealthcheckSuite) TestWithoutChecker(t *c.C) {
	// start app with a service but no checker
	app, _ := s.createAppWithService(t, "ping", &host.Service{
		Name:   "ping-without-checker",
		Create: true,
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds)

	// make sure app is registered and unregistered when the process terminates
	_, err := s.discoverdClient(t).Instances("ping-without-checker", 3*time.Second)
	t.Assert(err, c.IsNil)

	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("ping-without-checker").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=0"), Succeeds)

outer:
	for {
		select {
		case e := <-events:
			if e.Kind != discoverd.EventKindDown {
				continue
			}
			break outer
		case <-time.After(time.Second * 30):
			t.Fatal("timed out waiting for a down event")
		}
	}
}
func (s *SchedulerSuite) TestTCPApp(t *c.C) {
	app, _ := s.createApp(t)

	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "echoer=1"), Succeeds)

	newRoute := flynn(t, "/", "-a", app.Name, "route", "add", "tcp", "-s", "echo-service")
	t.Assert(newRoute, Succeeds)
	t.Assert(newRoute.Output, Matches, `.+ on port \d+`)
	str := strings.Split(strings.TrimSpace(string(newRoute.Output)), " ")
	port := str[len(str)-1]

	// use Attempts to give the processes time to start
	if err := Attempts.Run(func() error {
		servAddr := routerIP + ":" + port
		conn, err := net.Dial("tcp", servAddr)
		if err != nil {
			return err
		}
		defer conn.Close()
		msg := []byte("hello there!\n")
		_, err = conn.Write(msg)
		if err != nil {
			return err
		}
		reply := make([]byte, len(msg))
		_, err = conn.Read(reply)
		if err != nil {
			return err
		}
		t.Assert(reply, c.DeepEquals, msg)
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
func (s *CLISuite) TestLogStderr(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("run", "-d", "sh", "-c", "echo hello && echo world >&2"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})

	runLog := func(split bool) (stdout, stderr bytes.Buffer) {
		args := []string{"log", "--raw-output"}
		if split {
			args = append(args, "--split-stderr")
		}
		log := app.flynnCmd(args...)
		log.Stdout = &stdout
		log.Stderr = &stderr
		t.Assert(log.Run(), c.IsNil, c.Commentf("STDERR = %q", stderr.String()))
		return
	}

	// without --split-stderr, both lines go to stdout but their order is
	// non-deterministic, so just check each line is present
	stdout, stderr := runLog(false)
	t.Assert(stdout.String(), Matches, "hello")
	t.Assert(stdout.String(), Matches, "world")
	t.Assert(stderr.String(), c.Equals, "")

	stdout, stderr = runLog(true)
	t.Assert(stdout.String(), c.Equals, "hello\n")
	t.Assert(stderr.String(), c.Equals, "world\n")
}
func (s *VolumeSuite) TestInterhostVolumeTransmitAPI(t *c.C) {
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	if len(hosts) < 2 {
		t.Skip("need multiple hosts for this test")
	}
	s.doVolumeTransmitAPI(hosts[0], hosts[1], t)
}
func (s *HostSuite) TestGetNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Getting a non-existent job should error
	_, err = hosts[0].GetJob("i-dont-exist")
	t.Assert(hh.IsObjectNotFoundError(err), c.Equals, true)
}
func (s *HostSuite) TestAttachNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Attaching to a non-existent job should error
	_, err = hosts[0].Attach(&host.AttachReq{JobID: "none", Flags: host.AttachFlagLogs}, false)
	t.Assert(err, c.NotNil)
}
func (s *CLISuite) TestDeployTimeout(t *c.C) {
	timeout := flynn(t, "/", "-a", "status", "deployment", "timeout")
	t.Assert(timeout, Succeeds)
	t.Assert(timeout.Output, c.Equals, "120\n")

	t.Assert(flynn(t, "/", "-a", "status", "deployment", "timeout", "150"), Succeeds)
	timeout = flynn(t, "/", "-a", "status", "deployment", "timeout")
	t.Assert(timeout, Succeeds)
	t.Assert(timeout.Output, c.Equals, "150\n")
}
func (s *BlobstoreSuite) TestBlobstoreBackendAzure(t *c.C) {
	azureConfig := os.Getenv("BLOBSTORE_AZURE_CONFIG")
	if azureConfig == "" {
		// BLOBSTORE_AZURE_CONFIG should be set to a valid configuration like:
		// backend=azure account_name=xxx account_key=xxx container=blobstore-ci
		t.Skip("missing BLOBSTORE_AZURE_CONFIG env var")
	}
	s.testBlobstoreBackend(t, "azure", ".+blob.core.windows.net.+", `"BACKEND_AZURE=$BLOBSTORE_AZURE_CONFIG"`)
}
func (s *BlobstoreSuite) TestBlobstoreBackendS3(t *c.C) {
	s3Config := os.Getenv("BLOBSTORE_S3_CONFIG")
	if s3Config == "" {
		// BLOBSTORE_S3_CONFIG should be set to a valid configuration like:
		// backend=s3 access_key_id=xxx secret_access_key=xxx bucket=blobstore-ci region=us-east-1
		t.Skip("missing BLOBSTORE_S3_CONFIG env var")
	}
	s.testBlobstoreBackend(t, "s3", ".+s3.amazonaws.com.+", `"BACKEND_S3=$BLOBSTORE_S3_CONFIG"`)
}
func (s *HealthcheckSuite) TestChecker(t *c.C) {
	// start app with ping service, register with checker
	app, _ := s.createAppWithService(t, "ping", &host.Service{
		Name:   "ping-checker",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds)

	_, err := s.discoverdClient(t).Instances("ping-checker", 10*time.Second)
	t.Assert(err, c.IsNil)
}
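// controllerClient returns a controller client for the test cluster,
// creating it on first use and caching it for subsequent calls.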
func (h *Helper) controllerClient(t *c.C) controller.Client {
	h.controllerMtx.Lock()
	defer h.controllerMtx.Unlock()
	if h.controller == nil {
		conf := h.clusterConf(t)
		var err error
		h.controller, err = conf.Client()
		t.Assert(err, c.IsNil)
	}
	return h.controller
}
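// clusterConf reads the cluster configuration from flynnrc on first use,
// asserts that exactly one cluster is configured, and caches the result.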
func (h *Helper) clusterConf(t *c.C) *config.Cluster {
	h.configMtx.Lock()
	defer h.configMtx.Unlock()
	if h.config == nil {
		conf, err := config.ReadFile(flynnrc)
		t.Assert(err, c.IsNil)
		t.Assert(conf.Clusters, c.HasLen, 1)
		h.config = conf.Clusters[0]
	}
	return h.config
}
func (s *CLISuite) TestRunLimits(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	cmd := app.flynn("run", "sh", "-c", resourceCmd)
	t.Assert(cmd, Succeeds)

	defaults := resource.Defaults()
	limits := strings.Split(strings.TrimSpace(cmd.Output), "\n")
	t.Assert(limits, c.HasLen, 3)
	t.Assert(limits[0], c.Equals, strconv.FormatInt(*defaults[resource.TypeMemory].Limit, 10))
	t.Assert(limits[1], c.Equals, strconv.FormatInt(1024, 10))
	t.Assert(limits[2], c.Equals, strconv.FormatInt(*defaults[resource.TypeMaxFD].Limit, 10))
}
func (s *HealthcheckSuite) TestFailure(t *c.C) {
	// start an app that is failing checks
	app, _ := s.createAppWithService(t, "printer", &host.Service{
		Name:   "healthcheck-failure",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "printer=1"), Succeeds)

	// confirm that it's never registered
	_, err := s.discoverdClient(t).Instances("healthcheck-failure", 5*time.Second)
	t.Assert(err, c.NotNil)
}
func (s *GitDeploySuite) TestEmptyRelease(t *c.C) {
	r := s.newGitRepo(t, "empty-release")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.flynn("env", "set", "BUILDPACK_URL=https://github.com/kr/heroku-buildpack-inline"), Succeeds)

	push := r.git("push", "flynn", "master")
	t.Assert(push, Succeeds)

	run := r.flynn("run", "echo", "foo")
	t.Assert(run, Succeeds)
	t.Assert(run, Outputs, "foo\n")
}
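// createArtifact reads the image manifest at ../image/<name>.json and
// registers it with the controller as a Flynn artifact.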
func (h *Helper) createArtifact(t *c.C, name string) *ct.Artifact {
	path := fmt.Sprintf("../image/%s.json", name)
	manifest, err := ioutil.ReadFile(path)
	t.Assert(err, c.IsNil)
	artifact := &ct.Artifact{
		Type:             ct.ArtifactTypeFlynn,
		URI:              fmt.Sprintf("https://example.com?target=/images/%s.json", name),
		RawManifest:      manifest,
		LayerURLTemplate: "file:///var/lib/flynn/layer-cache/{id}.squashfs",
	}
	t.Assert(h.controllerClient(t).CreateArtifact(artifact), c.IsNil)
	return artifact
}
func (s *HostSuite) TestExecCrashingJob(t *c.C) {
	cluster := s.clusterClient(t)

	for _, attach := range []bool{true, false} {
		t.Logf("attach = %v", attach)
		cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "exit 1")
		if attach {
			cmd.Stdout = ioutil.Discard
			cmd.Stderr = ioutil.Discard
		}
		t.Assert(cmd.Run(), c.DeepEquals, exec.ExitError(1))
	}
}
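// newCliTestApp creates an app and release and returns a cliTestApp wired
// up with a discoverd client and a job event watcher for the release.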
func (h *Helper) newCliTestApp(t *c.C) *cliTestApp {
	app, release := h.createApp(t)
	watcher, err := h.controllerClient(t).WatchJobEvents(app.Name, release.ID)
	t.Assert(err, c.IsNil)
	return &cliTestApp{
		id:      app.ID,
		name:    app.Name,
		release: release,
		disc:    h.discoverdClient(t),
		t:       t,
		watcher: watcher,
	}
}
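// generateControllerExamples runs the controller-examples job inside the
// cluster and returns its JSON output decoded into a map keyed by example name.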
func (s *ControllerSuite) generateControllerExamples(t *c.C) map[string]interface{} {
	cmd := exec.CommandUsingCluster(
		s.clusterClient(t),
		s.createArtifact(t, "controller-examples"),
		"/bin/flynn-controller-examples",
	)
	cmd.Env = map[string]string{
		"CONTROLLER_KEY":      s.clusterConf(t).Key,
		"SKIP_MIGRATE_DOMAIN": "true",
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	t.Logf("stdout: %q", stdout.String())
	t.Logf("stderr: %q", stderr.String())
	t.Assert(err, c.IsNil)

	var controllerExamples map[string]json.RawMessage
	t.Assert(json.Unmarshal(stdout.Bytes(), &controllerExamples), c.IsNil)

	examples := make(map[string]interface{}, len(controllerExamples))
	for key, data := range controllerExamples {
		example, err := unmarshalControllerExample(data)
		t.Assert(err, c.IsNil)
		examples[key] = example
	}
	return examples
}
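// SetUpSuite parses every JSON schema under ../schema into the schema cache
// (keyed by its https://flynn.io/schema URL) and then resolves schema refs,
// so controller examples can be validated against them.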
func (s *ControllerSuite) SetUpSuite(t *c.C) {
	var schemaPaths []string
	walkFn := func(path string, info os.FileInfo, err error) error {
		if !info.IsDir() && filepath.Ext(path) == ".json" {
			schemaPaths = append(schemaPaths, path)
		}
		return nil
	}
	schemaRoot, err := filepath.Abs(filepath.Join("..", "schema"))
	t.Assert(err, c.IsNil)
	t.Assert(filepath.Walk(schemaRoot, walkFn), c.IsNil)

	s.schemaCache = make(map[string]*jsonschema.Schema, len(schemaPaths))
	for _, path := range schemaPaths {
		file, err := os.Open(path)
		t.Assert(err, c.IsNil)
		schema := &jsonschema.Schema{Cache: s.schemaCache}
		err = schema.ParseWithoutRefs(file)
		t.Assert(err, c.IsNil)
		cacheKey := "https://flynn.io/schema" + strings.TrimSuffix(strings.TrimPrefix(path, schemaRoot), ".json")
		s.schemaCache[cacheKey] = schema
		file.Close()
	}
	for _, schema := range s.schemaCache {
		schema.ResolveRefs(false)
	}
}
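// assertResourceLimits checks the three limit values printed by resourceCmd
// against resourceMem, 768 and resourceMaxFD.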
func assertResourceLimits(t *c.C, out string) {
	limits := strings.Split(strings.TrimSpace(out), "\n")
	t.Assert(limits, c.HasLen, 3)
	t.Assert(limits[0], c.Equals, strconv.FormatInt(resourceMem, 10))
	t.Assert(limits[1], c.Equals, strconv.FormatInt(768, 10))
	t.Assert(limits[2], c.Equals, strconv.FormatInt(resourceMaxFD, 10))
}
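// hostClient returns a client for the host with the given ID, caching
// clients so each host is only looked up once.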
func (h *Helper) hostClient(t *c.C, hostID string) *cluster.Host {
	h.hostsMtx.Lock()
	defer h.hostsMtx.Unlock()
	if h.hosts == nil {
		h.hosts = make(map[string]*cluster.Host)
	}
	if client, ok := h.hosts[hostID]; ok {
		return client
	}
	client, err := h.clusterClient(t).Host(hostID)
	t.Assert(err, c.IsNil)
	h.hosts[hostID] = client
	return client
}
func (s *GitDeploySuite) TestPushTwice(t *c.C) {
	r := s.newGitRepo(t, "https://github.com/flynn-examples/nodejs-flynn-example")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.git("push", "flynn", "master"), Succeeds)
	t.Assert(r.git("commit", "-m", "second", "--allow-empty"), Succeeds)
	t.Assert(r.git("push", "flynn", "master"), Succeeds)
}
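// assertRolledBack verifies that a deployment was rolled back: the app is
// running the old release, the old formation still has the expected
// processes, and no formation exists for the new release.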
func (s *DeployerSuite) assertRolledBack(t *c.C, deployment *ct.Deployment, processes map[string]int) {
	client := s.controllerClient(t)

	// check that we're running the old release
	release, err := client.GetAppRelease(deployment.AppID)
	t.Assert(err, c.IsNil)
	t.Assert(release.ID, c.Equals, deployment.OldReleaseID)

	// check that the old formation is the same and there's no new formation
	formation, err := client.GetFormation(deployment.AppID, deployment.OldReleaseID)
	t.Assert(err, c.IsNil)
	t.Assert(formation.Processes, c.DeepEquals, processes)
	_, err = client.GetFormation(deployment.AppID, deployment.NewReleaseID)
	t.Assert(err, c.NotNil)
}
func (s *BlobstoreSuite) TestBlobstoreBackendGCS(t *c.C) {
	gcsConfig := os.Getenv("BLOBSTORE_GCS_CONFIG")
	if gcsConfig == "" {
		// BLOBSTORE_GCS_CONFIG should be set to a JSON-encoded Google Cloud
		// Service Account key that includes an extra field named "bucket" that
		// specifies the bucket to use
		t.Skip("missing BLOBSTORE_GCS_CONFIG env var")
	}
	var data struct{ Bucket string }
	err := json.Unmarshal([]byte(gcsConfig), &data)
	t.Assert(err, c.IsNil)
	s.testBlobstoreBackend(t, "gcs", ".+google.+", fmt.Sprintf(`"BACKEND_GCS=backend=gcs bucket=%s"`, data.Bucket), `"BACKEND_GCS_KEY=$BLOBSTORE_GCS_CONFIG"`)
}