func (s *GitreceiveSuite) SetUpSuite(t *c.C) {
	// Unencrypted SSH private key for the flynn-test GitHub account.
	// Omits header/footer to avoid any GitHub auto-revoke key crawlers
	sshKey := `MIIEpAIBAAKCAQEA2UnQ/17TfzQRt4HInuP1SYz/tSNaCGO3NDIPLydVu8mmxuKT
zlJtH3pz3uWpMEKdZtSjV+QngJL8OFzanQVZtRBJjF2m+cywHJoZA5KsplMon+R+
QmVqu92WlcRdkcft1F1CLoTXTmHHfvuhOkG6GgJONNLP9Z14EsQ7MbBh5guafWOX
kdGFajyd+T2aj27yIkK44WjWqiLjxRIAtgOJrmd/3H0w3E+O1cgNrA2gkFEUhvR1
OHz8SmugYva0VZWKvxZ6muZvn26L1tajYsCntCRR3/a74cAnVFAXjqSatL6YTbSH
sdtE91kEC73/U4SL3OFdDiCrAvXpJ480C2/GQQIDAQABAoIBAHNQNVYRIPS00WIt
wiZwm8/4wAuFQ1aIdMWCe4Ruv5T1I0kRHZe1Lqwx9CQqhWtTLu1Pk5AlSMF3P9s5
i9sg58arahzP5rlS43OKZBP9Vxq9ryWLwWXDJK2mny/EElQ3YgP9qg29+fVi9thw
+dNM5lK/PnnSFwMmGn77HN712D6Yl3CCJJjsAunTfPzR9hyEqX5YvUB5eq/TNhXe
sqrKcGORIoNfv7WohlFSkTAXIvoMxmFWXg8piZ9/b1W4NwvO4wup3ZSErIk0AQ97
HtyXJIXgtj6pLkPqvPXPGvS3quYAddNxvGIdvge7w5LHnrxOzdqbeDAVmJLVwVlv
oo+7aQECgYEA8ZliUuA8q86SWE0N+JZUqbTvE6VzyWG0/u0BJYDkH7yHkbpFOIEy
KTw048WOZLQ6/wPwL8Hb090Cas/6pmRFMgCedarzXc9fvGEwW95em7jA4AyOVBMC
KIAmaYkm6LcUFeyR6ektZeCkT0MNoi4irjBC3/hMRyZu+6RL4jXxHLkCgYEA5j13
2nkbV99GtRRjyGB7uMkrhMere2MekANXEm4dW+LZFZUda4YCqdzfjDfBTxsuyGqi
DnvI7bZFzIQPiiEzvL2Mpiy7JqxmPLGmwzxDp3z75T5vOrGs4g9IQ7yDjp5WPzjz
KCJJHn8Qt9tNZb5h0hBM+NWLT0c1XxtTIVFfgckCgYAfNpTYZjYQcFDB7bqXWjy3
7DNTE3YhF2l94fra8IsIep/9ONaGlVJ4t1mR780Uv6A7oDOgx+fxuET+rb4RTzUN
X70ZMKvee9M/kELiK5mHftgUWirtO8N0nhHYYqrPOA/1QSoc0U5XMi2oO96ADHvY
i02oh/i63IFMK47OO+/ZqQKBgQCY8bY/Y/nc+o4O1hee0TD+xGvrTXRFh8eSpRVf
QdSw6FWKt76OYbw9OGMr0xHPyd/e9K7obiRAfLeLLyLfgETNGSFodghwnU9g/CYq
RUsv5J+0XjAnTkXo+Xvouz6tK9NhNiSYwYXPA1uItt6IOtriXz+ygLCFHml+3zju
xg5quQKBgQCEL95Di6WD+155gEG2NtqeAOWhgxqAbGjFjfpV+pVBksBCrWOHcBJp
QAvAdwDIZpqRWWMcLS7zSDrzn3ZscuHCMxSOe40HbrVdDUee24/I4YQ+R8EcuzcA
3IV9ai+Bxs6PvklhXmarYxJl62LzPLyv0XFscGRes/2yIIxNfNzFug==`

	t.Assert(flynn(t, "/", "-a", "gitreceive", "env", "set",
		"SSH_CLIENT_HOSTS=github.com,192.30.252.131 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==",
		fmt.Sprintf("SSH_CLIENT_KEY=-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----\n", sshKey),
	), Succeeds)
}
func (s *ControllerSuite) TestExampleOutput(t *c.C) {
	examples := s.generateControllerExamples(t)
	exampleKeys := make([]string, 0, len(examples))
	skipExamples := []string{"migrate_cluster_domain"}
examplesLoop:
	for key := range examples {
		for _, skipKey := range skipExamples {
			if key == skipKey {
				continue examplesLoop
			}
		}
		exampleKeys = append(exampleKeys, key)
	}
	sort.Strings(exampleKeys)
	for _, key := range exampleKeys {
		cacheKey := "https://flynn.io/schema/examples/controller/" + key
		schema := s.schemaCache[cacheKey]
		if schema == nil {
			continue
		}
		data := examples[key]
		errs := schema.Validate(nil, data)
		var jsonData []byte
		if len(errs) > 0 {
			jsonData, _ = json.MarshalIndent(data, "", "\t")
		}
		t.Assert(errs, c.HasLen, 0, c.Commentf("%s validation errors: %v\ndata: %v\n", cacheKey, errs, string(jsonData)))
	}
}
func (s *HostSuite) TestAttachFinishedInteractiveJob(t *c.C) {
	cluster := s.clusterClient(t)

	// run a quick interactive job
	cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "/bin/true")
	cmd.TTY = true
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for interactive job")
	}

	h, err := cluster.Host(cmd.HostID)
	t.Assert(err, c.IsNil)

	// Getting the logs for the job should fail, as it has none because it was
	// interactive
	attachErr := make(chan error)
	go func() {
		_, err = h.Attach(&host.AttachReq{JobID: cmd.Job.ID, Flags: host.AttachFlagLogs}, false)
		attachErr <- err
	}()
	select {
	case err := <-attachErr:
		t.Assert(err, c.NotNil)
	case <-time.After(time.Second):
		t.Error("timed out waiting for attach")
	}
}
func (s *HostSuite) TestResourceLimits(t *c.C) {
	cmd := exec.JobUsingCluster(
		s.clusterClient(t),
		exec.DockerImage(imageURIs["test-apps"]),
		&host.Job{
			Config:    host.ContainerConfig{Cmd: []string{"sh", "-c", resourceCmd}},
			Resources: testResources(),
		},
	)
	var out bytes.Buffer
	cmd.Stdout = &out

	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for resource limits job")
	}

	assertResourceLimits(t, out.String())
}
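// resourceCmd and testResources are defined elsewhere in the suite. A
// minimal sketch of what they could look like, grounded in how TestRunLimits
// below reads the output (three lines: memory limit, CPU shares, max FDs);
// the cgroup paths and the 256MiB figure are assumptions, not the canonical
// definitions:
const resourceCmd = "cat /sys/fs/cgroup/memory/memory.limit_in_bytes; cat /sys/fs/cgroup/cpu/cpu.shares; ulimit -n"

func testResources() resource.Resources {
	// start from the suite-wide defaults (resource.Defaults is used the
	// same way in TestRunLimits) and override memory so the job's cgroup
	// values are distinguishable from the defaults
	memLimit := int64(256 * 1024 * 1024)
	r := resource.Defaults()
	r[resource.TypeMemory] = resource.Spec{Limit: &memLimit}
	return r
}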
func (s *SchedulerSuite) TestTCPApp(t *c.C) {
	app, _ := s.createApp(t)

	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "echoer=1"), Succeeds)

	newRoute := flynn(t, "/", "-a", app.Name, "route", "add", "tcp", "-s", "echo-service")
	t.Assert(newRoute, Succeeds)
	t.Assert(newRoute.Output, Matches, `.+ on port \d+`)
	str := strings.Split(strings.TrimSpace(string(newRoute.Output)), " ")
	port := str[len(str)-1]

	// use Attempts to give the processes time to start
	if err := Attempts.Run(func() error {
		servAddr := routerIP + ":" + port
		conn, err := net.Dial("tcp", servAddr)
		if err != nil {
			return err
		}
		defer conn.Close()
		msg := []byte("hello there!\n")
		_, err = conn.Write(msg)
		if err != nil {
			return err
		}
		reply := make([]byte, len(msg))
		_, err = conn.Read(reply)
		if err != nil {
			return err
		}
		t.Assert(reply, c.DeepEquals, msg)
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
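// Attempts is the suite-wide retry strategy used above; a sketch of its
// likely shape using the flynn/go-attempt package conventions (the exact
// Total and Delay values are assumptions):
var Attempts = attempt.Strategy{
	Total: 60 * time.Second,
	Delay: 500 * time.Millisecond,
}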
func waitForJobEvents(t *c.C, events chan *ct.JobEvent, expected jobEvents) (lastID int64, jobID string) {
	debugf(t, "waiting for job events: %v", expected)
	actual := make(jobEvents)
	for {
	inner:
		select {
		case event := <-events:
			debug(t, "got job event: ", event.Type, event.JobID, event.State)
			lastID = event.ID
			jobID = event.JobID
			if _, ok := actual[event.Type]; !ok {
				actual[event.Type] = make(map[string]int)
			}
			switch event.State {
			case "up":
				actual[event.Type]["up"] += 1
			case "down", "crashed":
				actual[event.Type]["down"] += 1
			default:
				// ignore other states, skip the equality check
				break inner
			}
			if jobEventsEqual(expected, actual) {
				return
			}
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for job events: ", expected)
		}
	}
}
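// jobEvents and jobEventsEqual are defined elsewhere in the suite. Given how
// waitForJobEvents uses them, a minimal sketch (assumed, not the canonical
// definitions) is a map of process type -> state -> count, with equality
// treated as "every expected count has been reached":
type jobEvents map[string]map[string]int

func jobEventsEqual(expected, actual jobEvents) bool {
	for procType, wanted := range expected {
		got, ok := actual[procType]
		if !ok {
			return false
		}
		for state, count := range wanted {
			if got[state] != count {
				return false
			}
		}
	}
	return true
}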
func (s *CLISuite) TestLogStderr(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("run", "-d", "sh", "-c", "echo hello && echo world >&2"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})
	runLog := func(split bool) (stdout, stderr bytes.Buffer) {
		args := []string{"log", "--raw-output"}
		if split {
			args = append(args, "--split-stderr")
		}
		log := app.flynnCmd(args...)
		log.Stdout = &stdout
		log.Stderr = &stderr
		t.Assert(log.Run(), c.IsNil, c.Commentf("STDERR = %q", stderr.String()))
		return
	}

	stdout, stderr := runLog(false)
	// stdout and stderr are interleaved in a non-deterministic order, so
	// just check that both lines made it to stdout
	t.Assert(stdout.String(), Matches, "hello")
	t.Assert(stdout.String(), Matches, "world")
	t.Assert(stderr.String(), c.Equals, "")

	stdout, stderr = runLog(true)
	t.Assert(stdout.String(), c.Equals, "hello\n")
	t.Assert(stderr.String(), c.Equals, "world\n")
}
func (s *TaffyDeploySuite) deployWithTaffy(t *c.C, app *ct.App, github map[string]string) {
	client := s.controllerClient(t)

	taffyRelease, err := client.GetAppRelease("taffy")
	t.Assert(err, c.IsNil)

	rwc, err := client.RunJobAttached("taffy", &ct.NewJob{
		ReleaseID:  taffyRelease.ID,
		ReleaseEnv: true,
		Cmd: []string{
			app.Name,
			github["clone_url"],
			github["ref"],
			github["sha"],
		},
		Meta: map[string]string{
			"type":       "github",
			"user_login": github["user_login"],
			"repo_name":  github["repo_name"],
			"ref":        github["ref"],
			"sha":        github["sha"],
			"clone_url":  github["clone_url"],
			"app":        app.ID,
		},
	})
	t.Assert(err, c.IsNil)
	attachClient := cluster.NewAttachClient(rwc)
	var outBuf bytes.Buffer
	exit, err := attachClient.Receive(&outBuf, &outBuf)
	t.Log(outBuf.String())
	t.Assert(exit, c.Equals, 0)
	t.Assert(err, c.IsNil)
}
func (h *Helper) removeHosts(t *c.C, hosts []*tc.Instance) {
	debugf(t, "removing %d hosts", len(hosts))
	for _, host := range hosts {
		t.Assert(testCluster.RemoveHost(host), c.IsNil)
		debugf(t, "host removed: %s", host.ID)
	}
}
func (h *Helper) newSlugrunnerArtifact(t *c.C) *ct.Artifact {
	r, err := h.controllerClient(t).GetAppRelease("gitreceive")
	t.Assert(err, c.IsNil)
	slugrunnerURI := r.Processes["app"].Env["SLUGRUNNER_IMAGE_URI"]
	t.Assert(slugrunnerURI, c.Not(c.Equals), "")
	return &ct.Artifact{Type: "docker", URI: slugrunnerURI}
}
func (s *HealthcheckSuite) TestWithoutChecker(t *c.C) {
	// start app with a service but no checker
	app, _ := s.createAppWithService(t, "ping", &host.Service{
		Name:   "ping-without-checker",
		Create: true,
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds)

	// make sure app is registered and unregistered when the process terminates
	_, err := s.discoverdClient(t).Instances("ping-without-checker", 3*time.Second)
	t.Assert(err, c.IsNil)

	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("ping-without-checker").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=0"), Succeeds)

outer:
	for {
		select {
		case e := <-events:
			if e.Kind != discoverd.EventKindDown {
				continue
			}
			break outer
		case <-time.After(time.Second * 30):
			t.Fatal("timed out waiting for a down event")
		}
	}
}
func (s *CLISuite) TestLog(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	t.Assert(app.flynn("run", "-d", "echo", "hello", "world"), Succeeds)
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})
	t.Assert(app.flynn("log", "--raw-output"), Outputs, "hello world\n")
}
func (s *SchedulerSuite) TestJobMeta(t *c.C) {
	app, release := s.createApp(t)

	events := make(chan *ct.JobEvent)
	stream, err := s.controllerClient(t).StreamJobEvents(app.ID, 0, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// start a one-off job
	_, err = s.controllerClient(t).RunJobDetached(app.ID, &ct.NewJob{
		ReleaseID: release.ID,
		Cmd:       []string{"sh", "-c", "while true; do echo one-off-job; sleep 1; done"},
		Meta: map[string]string{
			"foo": "baz",
		},
	})
	t.Assert(err, c.IsNil)
	waitForJobEvents(t, events, jobEvents{"": {"up": 1}})

	list, err := s.controllerClient(t).JobList(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(list, c.HasLen, 1)
	t.Assert(list[0].Meta, c.DeepEquals, map[string]string{
		"foo": "baz",
	})
}
func (h *Helper) discoverdClient(t *c.C) *discoverd.Client {
	if h.disc == nil {
		var err error
		h.disc, err = discoverd.NewClientWithAddr(routerIP + ":1111")
		t.Assert(err, c.IsNil)
	}
	return h.disc
}
func (h *Helper) clusterClient(t *c.C) *cluster.Client {
	if h.cluster == nil {
		var err error
		h.cluster, err = cluster.NewClientWithDial(nil, h.discoverdClient(t).NewServiceSet)
		t.Assert(err, c.IsNil)
	}
	return h.cluster
}
func (s *VolumeSuite) TestInterhostVolumeTransmitAPI(t *c.C) {
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	if len(hosts) < 2 {
		t.Skip("need multiple hosts for this test")
	}
	s.doVolumeTransmitAPI(hosts[0], hosts[1], t)
}
func (s *HostSuite) TestGetNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Getting a non-existent job should error
	_, err = hosts[0].GetJob("i-dont-exist")
	t.Assert(hh.IsObjectNotFoundError(err), c.Equals, true)
}
func (s *HostSuite) TestAttachNonExistentJob(t *c.C) {
	cluster := s.clusterClient(t)
	hosts, err := cluster.Hosts()
	t.Assert(err, c.IsNil)

	// Attaching to a non-existent job should error
	_, err = hosts[0].Attach(&host.AttachReq{JobID: "none", Flags: host.AttachFlagLogs}, false)
	t.Assert(err, c.NotNil)
}
func (h *Helper) clusterConf(t *c.C) *config.Cluster {
	if h.config == nil {
		conf, err := config.ReadFile(flynnrc)
		t.Assert(err, c.IsNil)
		t.Assert(conf.Clusters, c.HasLen, 1)
		h.config = conf.Clusters[0]
	}
	return h.config
}
func (h *Helper) sshKeys(t *c.C) *sshData {
	h.sshMtx.Lock()
	defer h.sshMtx.Unlock()
	if h.ssh == nil {
		var err error
		h.ssh, err = genSSHKey()
		t.Assert(err, c.IsNil)
	}
	return h.ssh
}
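// sshData and genSSHKey are defined elsewhere. A sketch of a throwaway key
// generator consistent with this usage, assuming sshData carries the PEM
// private key and the authorized_keys line (the struct and its field names
// are assumptions); uses crypto/rsa, crypto/x509, encoding/pem, and
// golang.org/x/crypto/ssh:
type sshData struct {
	Key string // PEM-encoded private key
	Pub string // authorized_keys-format public key
}

func genSSHKey() (*sshData, error) {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	pemKey := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(rsaKey),
	})
	pub, err := ssh.NewPublicKey(&rsaKey.PublicKey)
	if err != nil {
		return nil, err
	}
	return &sshData{
		Key: string(pemKey),
		Pub: string(ssh.MarshalAuthorizedKey(pub)),
	}, nil
}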
func (s *GitDeploySuite) TestSlugbuilderLimit(t *c.C) {
	r := s.newGitRepo(t, "slugbuilder-limit")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.flynn("env", "set", "BUILDPACK_URL=git@github.com:kr/heroku-buildpack-inline.git"), Succeeds)
	t.Assert(r.flynn("limit", "set", "slugbuilder", "memory=500MB"), Succeeds)

	push := r.git("push", "flynn", "master")
	t.Assert(push, Succeeds)
	// 500MB = 500 * 2^20 = 524288000 bytes
	t.Assert(push, OutputContains, "524288000")
}
func (s *HealthcheckSuite) TestChecker(t *c.C) {
	// start app with ping service, register with checker
	app, _ := s.createAppWithService(t, "ping", &host.Service{
		Name:   "ping-checker",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "ping=1"), Succeeds)

	_, err := s.discoverdClient(t).Instances("ping-checker", 10*time.Second)
	t.Assert(err, c.IsNil)
}
func (h *Helper) controllerClient(t *c.C) *controller.Client {
	h.controllerMtx.Lock()
	defer h.controllerMtx.Unlock()
	if h.controller == nil {
		conf := h.clusterConf(t)
		var err error
		h.controller, err = conf.Client()
		t.Assert(err, c.IsNil)
	}
	return h.controller
}
func (s *CLISuite) newCliTestApp(t *c.C) *cliTestApp {
	app, release := s.createApp(t)
	watcher, err := s.controllerClient(t).WatchJobEvents(app.Name, release.ID)
	t.Assert(err, c.IsNil)
	return &cliTestApp{
		name:    app.Name,
		disc:    s.discoverdClient(t),
		t:       t,
		watcher: watcher,
	}
}
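// cliTestApp is defined elsewhere in the suite. A rough sketch of the shape
// implied by its use in these tests; the watcher type, the WaitFor signature,
// and the method bodies are assumptions (a companion flynnCmd, used by
// TestLogStderr, would build the same command without running it):
type cliTestApp struct {
	name    string
	disc    *discoverd.Client
	t       *c.C
	watcher ct.JobWatcher
}

func (a *cliTestApp) flynn(args ...string) *CmdResult {
	return flynn(a.t, "/", append([]string{"-a", a.name}, args...)...)
}

func (a *cliTestApp) waitFor(events ct.JobEvents) {
	a.t.Assert(a.watcher.WaitFor(events, 60*time.Second, nil), c.IsNil)
}

func (a *cliTestApp) cleanup() {
	a.watcher.Close()
}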
func (s *CLISuite) TestRunLimits(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()
	cmd := app.flynn("run", "sh", "-c", resourceCmd)
	t.Assert(cmd, Succeeds)
	defaults := resource.Defaults()
	limits := strings.Split(strings.TrimSpace(cmd.Output), "\n")
	t.Assert(limits, c.HasLen, 3)
	t.Assert(limits[0], c.Equals, strconv.FormatInt(*defaults[resource.TypeMemory].Limit, 10))
	t.Assert(limits[1], c.Equals, strconv.FormatInt(1024, 10))
	t.Assert(limits[2], c.Equals, strconv.FormatInt(*defaults[resource.TypeMaxFD].Limit, 10))
}
func (s *HealthcheckSuite) TestFailure(t *c.C) {
	// start an app that is failing checks
	app, _ := s.createAppWithService(t, "printer", &host.Service{
		Name:   "healthcheck-failure",
		Create: true,
		Check:  &host.HealthCheck{Type: "tcp"},
	})
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "printer=1"), Succeeds)

	// confirm that it's never registered
	_, err := s.discoverdClient(t).Instances("healthcheck-failure", 5*time.Second)
	t.Assert(err, c.NotNil)
}
func (h *Helper) hostClient(t *c.C, hostID string) cluster.Host {
	if h.hosts == nil {
		h.hosts = make(map[string]cluster.Host)
	}
	if client, ok := h.hosts[hostID]; ok {
		return client
	}
	client, err := h.clusterClient(t).DialHost(hostID)
	t.Assert(err, c.IsNil)
	h.hosts[hostID] = client
	return client
}
func (s *GitDeploySuite) TestEmptyRelease(t *c.C) {
	r := s.newGitRepo(t, "empty-release")
	t.Assert(r.flynn("create"), Succeeds)
	t.Assert(r.flynn("env", "set", "BUILDPACK_URL=https://github.com/kr/heroku-buildpack-inline"), Succeeds)

	push := r.git("push", "flynn", "master")
	t.Assert(push, Succeeds)

	run := r.flynn("run", "echo", "foo")
	t.Assert(run, Succeeds)
	t.Assert(run, Outputs, "foo\n")
}
func (s *HostSuite) TestExecCrashingJob(t *c.C) {
	cluster := s.clusterClient(t)

	for _, attach := range []bool{true, false} {
		t.Logf("attach = %v", attach)
		cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "sh", "-c", "exit 1")
		if attach {
			cmd.Stdout = ioutil.Discard
			cmd.Stderr = ioutil.Discard
		}
		t.Assert(cmd.Run(), c.DeepEquals, exec.ExitError(1))
	}
}