func runClusterMigrateDomain(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		shutdown.Fatal(err)
	}

	dm := &ct.DomainMigration{
		Domain: args.String["<domain>"],
	}

	release, err := client.GetAppRelease("controller")
	if err != nil {
		return err
	}
	dm.OldDomain = release.Env["DEFAULT_ROUTE_DOMAIN"]

	if !promptYesNo(fmt.Sprintf("Migrate cluster domain from %q to %q?", dm.OldDomain, dm.Domain)) {
		fmt.Println("Aborted")
		return nil
	}

	maxDuration := 2 * time.Minute
	fmt.Printf("Migrating cluster domain (this can take up to %s)...\n", maxDuration)

	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(controller.StreamEventsOptions{
		ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},
	}, events)
	if err != nil {
		return err
	}
	defer stream.Close()

	if err := client.PutDomain(dm); err != nil {
		return err
	}

	timeout := time.After(maxDuration)
	for {
		select {
		case event, ok := <-events:
			if !ok {
				return stream.Err()
			}
			var e *ct.DomainMigrationEvent
			if err := json.Unmarshal(event.Data, &e); err != nil {
				return err
			}
			if e.Error != "" {
				fmt.Println(e.Error)
			}
			if e.DomainMigration.FinishedAt != nil {
				fmt.Printf("Changed cluster domain from %q to %q\n", dm.OldDomain, dm.Domain)
				return nil
			}
		case <-timeout:
			return errors.New("timed out waiting for domain migration to complete")
		}
	}
}
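// The select loop above (receive an event, unmarshal it, check for
// completion, bail on timeout) recurs throughout this file. A minimal sketch
// of a reusable helper, assuming the pkg/stream Stream interface (which
// exposes Err()); waitForEvent is a hypothetical name, not part of the
// controller client API:
func waitForEvent(events <-chan *ct.Event, s stream.Stream, timeout time.Duration) (*ct.Event, error) {
	select {
	case event, ok := <-events:
		if !ok {
			// the stream closed early; surface its error, if any
			return nil, s.Err()
		}
		return event, nil
	case <-time.After(timeout):
		return nil, errors.New("timed out waiting for event")
	}
}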
func (s *ControllerSuite) TestRouteEvents(t *c.C) {
	app := "app-route-events-" + random.String(8)
	client := s.controllerClient(t)

	// create and push app
	r := s.newGitRepo(t, "http")
	t.Assert(r.flynn("create", app), Succeeds)
	t.Assert(r.flynn("key", "add", r.ssh.Pub), Succeeds)
	t.Assert(r.git("push", "flynn", "master"), Succeeds)

	// wait for it to start
	service := app + "-web"
	_, err := s.discoverdClient(t).Instances(service, 10*time.Second)
	t.Assert(err, c.IsNil)

	// stream events
	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(controller.StreamEventsOptions{
		AppID:       app,
		ObjectTypes: []ct.EventType{ct.EventTypeRoute, ct.EventTypeRouteDeletion},
		Past:        true,
	}, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	assertEventType := func(typ ct.EventType) {
		select {
		case event, ok := <-events:
			t.Assert(ok, c.Equals, true)
			t.Assert(event.ObjectType, c.Equals, typ, c.Commentf("event: %#v", event))
		case <-time.After(30 * time.Second):
			t.Fatalf("timed out waiting for %s event", string(typ))
		}
	}

	// default app route
	assertEventType(ct.EventTypeRoute)

	// create some routes
	routes := []string{"baz.example.com"}
	for _, route := range routes {
		t.Assert(r.flynn("route", "add", "http", route), Succeeds)
		assertEventType(ct.EventTypeRoute)
	}
	routeList, err := client.RouteList(app)
	t.Assert(err, c.IsNil)
	numRoutes := len(routes) + 1 // includes default app route
	t.Assert(routeList, c.HasLen, numRoutes)

	// delete app
	cmd := r.flynn("delete", "--yes")
	t.Assert(cmd, Succeeds)

	// check route deletion event
	assertEventType(ct.EventTypeRouteDeletion)
}
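// assertEventType above fails when an expected event never arrives. The
// inverse check, asserting that a stream stays quiet, uses the same select
// pattern with the branches swapped. A sketch for reference; assertNoEvent is
// a hypothetical helper, not part of the suite:
func assertNoEvent(t *c.C, events <-chan *ct.Event, grace time.Duration) {
	select {
	case event, ok := <-events:
		if ok {
			t.Fatalf("unexpected event: %#v", event)
		}
	case <-time.After(grace):
		// the stream stayed quiet for the grace period, as expected
	}
}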
func (s *DomainMigrationSuite) migrateDomain(t *c.C, dm *ct.DomainMigration) {
	debugf(t, "migrating domain from %s to %s", dm.OldDomain, dm.Domain)
	client := s.controllerClient(t)

	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(controller.StreamEventsOptions{
		ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},
	}, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	prevRouterRelease, err := client.GetAppRelease("router")
	t.Assert(err, c.IsNil)

	err = client.PutDomain(dm)
	t.Assert(err, c.IsNil)

	waitEvent := func(typ string, timeout time.Duration) (event ct.DomainMigrationEvent) {
		debugf(t, "waiting for %s domain migration event", typ)
		var e *ct.Event
		var ok bool
		select {
		case e, ok = <-events:
			if !ok {
				t.Fatal("event stream closed unexpectedly")
			}
			debugf(t, "got %s domain migration event", typ)
		case <-time.After(timeout):
			t.Fatalf("timed out waiting for %s domain migration event", typ)
		}
		t.Assert(e.Data, c.NotNil)
		t.Assert(json.Unmarshal(e.Data, &event), c.IsNil)
		return
	}

	// created
	event := waitEvent("initial", 2*time.Minute)
	t.Assert(event.Error, c.Equals, "")
	t.Assert(event.DomainMigration, c.NotNil)
	t.Assert(event.DomainMigration.ID, c.Equals, dm.ID)
	t.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)
	t.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)
	t.Assert(event.DomainMigration.TLSCert, c.IsNil)
	t.Assert(event.DomainMigration.OldTLSCert, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)
	t.Assert(event.DomainMigration.FinishedAt, c.IsNil)

	// complete
	event = waitEvent("final", 3*time.Minute)
	t.Assert(event.Error, c.Equals, "")
	t.Assert(event.DomainMigration, c.NotNil)
	t.Assert(event.DomainMigration.ID, c.Equals, dm.ID)
	t.Assert(event.DomainMigration.OldDomain, c.Equals, dm.OldDomain)
	t.Assert(event.DomainMigration.Domain, c.Equals, dm.Domain)
	t.Assert(event.DomainMigration.TLSCert, c.NotNil)
	t.Assert(event.DomainMigration.OldTLSCert, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt, c.NotNil)
	t.Assert(event.DomainMigration.CreatedAt.Equal(*dm.CreatedAt), c.Equals, true)
	t.Assert(event.DomainMigration.FinishedAt, c.NotNil)
	cert := event.DomainMigration.TLSCert

	controllerRelease, err := client.GetAppRelease("controller")
	t.Assert(err, c.IsNil)
	t.Assert(controllerRelease.Env["DEFAULT_ROUTE_DOMAIN"], c.Equals, dm.Domain)
	t.Assert(controllerRelease.Env["CA_CERT"], c.Equals, cert.CACert)

	routerRelease, err := client.GetAppRelease("router")
	t.Assert(err, c.IsNil)
	t.Assert(routerRelease.Env["TLSCERT"], c.Equals, cert.Cert)
	t.Assert(routerRelease.Env["TLSKEY"], c.Not(c.Equals), "")
	t.Assert(routerRelease.Env["TLSKEY"], c.Not(c.Equals), prevRouterRelease.Env["TLSKEY"])

	dashboardRelease, err := client.GetAppRelease("dashboard")
	t.Assert(err, c.IsNil)
	t.Assert(dashboardRelease.Env["DEFAULT_ROUTE_DOMAIN"], c.Equals, dm.Domain)
	t.Assert(dashboardRelease.Env["CONTROLLER_DOMAIN"], c.Equals, fmt.Sprintf("controller.%s", dm.Domain))
	t.Assert(dashboardRelease.Env["URL"], c.Equals, fmt.Sprintf("dashboard.%s", dm.Domain))
	t.Assert(dashboardRelease.Env["CA_CERT"], c.Equals, cert.CACert)

	var doPing func(string, int)
	doPing = func(component string, retriesRemaining int) {
		url := fmt.Sprintf("http://%s.%s/ping", component, dm.Domain)
		res, err := (&http.Client{}).Get(url)
		if (err != nil || res.StatusCode != 200) && retriesRemaining > 0 {
			time.Sleep(100 * time.Millisecond)
			doPing(component, retriesRemaining-1)
			return
		}
		t.Assert(err, c.IsNil)
		t.Assert(res.StatusCode, c.Equals, 200, c.Commentf("failed to ping %s", component))
	}
	doPing("controller", 3)
	doPing("dashboard", 3)
}
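// doPing above retries recursively with a fixed 100ms sleep between attempts.
// An equivalent iterative sketch that returns an error instead of asserting;
// pingWithRetry is a hypothetical name, not used by the suite:
func pingWithRetry(url string, attempts int, delay time.Duration) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		res, err := http.Get(url)
		if err == nil {
			res.Body.Close()
			if res.StatusCode == http.StatusOK {
				return nil
			}
			lastErr = fmt.Errorf("unexpected status %d from %s", res.StatusCode, url)
		} else {
			lastErr = err
		}
		time.Sleep(delay)
	}
	return lastErr
}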
func (s *CLISuite) TestSlugReleaseGarbageCollection(t *c.C) {
	client := s.controllerClient(t)

	// create app with gc.max_inactive_slug_releases=3
	maxInactiveSlugReleases := 3
	app := &ct.App{Meta: map[string]string{"gc.max_inactive_slug_releases": strconv.Itoa(maxInactiveSlugReleases)}}
	t.Assert(client.CreateApp(app), c.IsNil)

	// create an image artifact
	imageArtifact := s.createArtifact(t, "test-apps")

	// create 5 slug artifacts
	tmp, err := ioutil.TempFile("", "squashfs-")
	t.Assert(err, c.IsNil)
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	t.Assert(exec.Command("mksquashfs", t.MkDir(), tmp.Name(), "-noappend").Run(), c.IsNil)
	slug, err := ioutil.ReadAll(tmp)
	t.Assert(err, c.IsNil)
	slugHash := sha512.Sum512(slug)
	slugs := []string{
		"http://blobstore.discoverd/layer/1.squashfs",
		"http://blobstore.discoverd/layer/2.squashfs",
		"http://blobstore.discoverd/layer/3.squashfs",
		"http://blobstore.discoverd/layer/4.squashfs",
		"http://blobstore.discoverd/layer/5.squashfs",
	}
	slugArtifacts := make([]*ct.Artifact, len(slugs))
	put := func(url string, data []byte) {
		req, err := http.NewRequest("PUT", url, bytes.NewReader(data))
		t.Assert(err, c.IsNil)
		res, err := http.DefaultClient.Do(req)
		t.Assert(err, c.IsNil)
		res.Body.Close()
		t.Assert(res.StatusCode, c.Equals, http.StatusOK)
	}
	for i, layerURL := range slugs {
		manifest := &ct.ImageManifest{
			Type: ct.ImageManifestTypeV1,
			Rootfs: []*ct.ImageRootfs{{
				Layers: []*ct.ImageLayer{{
					ID:     strconv.Itoa(i + 1),
					Type:   ct.ImageLayerTypeSquashfs,
					Length: int64(len(slug)),
					Hashes: map[string]string{"sha512": hex.EncodeToString(slugHash[:])},
				}},
			}},
		}
		data := manifest.RawManifest()
		url := fmt.Sprintf("http://blobstore.discoverd/image/%s.json", manifest.ID())
		put(url, data)
		put(layerURL, slug)
		artifact := &ct.Artifact{
			Type:             ct.ArtifactTypeFlynn,
			URI:              url,
			Meta:             map[string]string{"blobstore": "true"},
			RawManifest:      data,
			Hashes:           manifest.Hashes(),
			Size:             int64(len(data)),
			LayerURLTemplate: "http://blobstore.discoverd/layer/{id}.squashfs",
		}
		t.Assert(client.CreateArtifact(artifact), c.IsNil)
		slugArtifacts[i] = artifact
	}

	// create 6 releases, the second being scaled up and having the
	// same slug as the third (which prevents that slug from being deleted)
	releases := make([]*ct.Release, 6)
	for i, r := range []struct {
		slug   *ct.Artifact
		active bool
	}{
		{slugArtifacts[0], false},
		{slugArtifacts[1], true},
		{slugArtifacts[1], false},
		{slugArtifacts[2], false},
		{slugArtifacts[3], false},
		{slugArtifacts[4], false},
	} {
		release := &ct.Release{
			ArtifactIDs: []string{imageArtifact.ID, r.slug.ID},
			Processes: map[string]ct.ProcessType{
				"app": {Args: []string{"/bin/pingserv"}, Ports: []ct.Port{{Proto: "tcp"}}},
			},
			Meta: map[string]string{"git": "true"},
		}
		t.Assert(client.CreateRelease(release), c.IsNil)
		procs := map[string]int{"app": 0}
		if r.active {
			procs["app"] = 1
		}
		t.Assert(client.PutFormation(&ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: procs,
		}), c.IsNil)
		releases[i] = release
	}

	// scale the last release so we can deploy it
	lastRelease := releases[len(releases)-1]
	watcher, err := client.WatchJobEvents(app.ID, lastRelease.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	t.Assert(client.PutFormation(&ct.Formation{
		AppID:     app.ID,
		ReleaseID: lastRelease.ID,
		Processes: map[string]int{"app": 1},
	}), c.IsNil)
	t.Assert(watcher.WaitFor(ct.JobEvents{"app": ct.JobUpEvents(1)}, scaleTimeout, nil), c.IsNil)
	t.Assert(client.SetAppRelease(app.ID, lastRelease.ID), c.IsNil)

	// subscribe to garbage collection events
	gcEvents := make(chan *ct.Event)
	stream, err := client.StreamEvents(ct.StreamEventsOptions{
		AppID:       app.ID,
		ObjectTypes: []ct.EventType{ct.EventTypeAppGarbageCollection},
	}, gcEvents)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// deploy a new release with the same slug as the last release
	timeoutCh := make(chan struct{})
	time.AfterFunc(5*time.Minute, func() { close(timeoutCh) })
	newRelease := *lastRelease
	newRelease.ID = ""
	t.Assert(client.CreateRelease(&newRelease), c.IsNil)
	t.Assert(client.DeployAppRelease(app.ID, newRelease.ID, timeoutCh), c.IsNil)

	// wait for garbage collection
	select {
	case event, ok := <-gcEvents:
		if !ok {
			t.Fatalf("event stream closed unexpectedly: %s", stream.Err())
		}
		var e ct.AppGarbageCollectionEvent
		t.Assert(json.Unmarshal(event.Data, &e), c.IsNil)
		if e.Error != "" {
			t.Fatalf("garbage collection failed: %s", e.Error)
		}
	case <-time.After(60 * time.Second):
		t.Fatal("timed out waiting for garbage collection")
	}

	// check we have 4 distinct slug releases (so 5 in total, only 3 of
	// which are inactive)
	list, err := client.AppReleaseList(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(list, c.HasLen, maxInactiveSlugReleases+2)
	distinctSlugs := make(map[string]struct{}, len(list))
	for _, release := range list {
		t.Assert(release.ArtifactIDs, c.HasLen, 2)
		distinctSlugs[release.ArtifactIDs[1]] = struct{}{}
	}
	t.Assert(distinctSlugs, c.HasLen, maxInactiveSlugReleases+1)

	// check the first and third releases got deleted, but the rest remain
	assertDeleted := func(release *ct.Release, deleted bool) {
		_, err := client.GetRelease(release.ID)
		if deleted {
			t.Assert(err, c.Equals, controller.ErrNotFound)
		} else {
			t.Assert(err, c.IsNil)
		}
	}
	assertDeleted(releases[0], true)
	assertDeleted(releases[1], false)
	assertDeleted(releases[2], true)
	assertDeleted(releases[3], false)
	assertDeleted(releases[4], false)
	assertDeleted(releases[5], false)
	assertDeleted(&newRelease, false)

	// check the first slug got deleted, but the rest remain
	s.assertURI(t, slugs[0], http.StatusNotFound)
	for i := 1; i < len(slugs); i++ {
		s.assertURI(t, slugs[i], http.StatusOK)
	}
}
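// s.assertURI above is provided by the test harness. If it were not, a
// minimal hypothetical stand-in that only checks the response status code
// might look like this (checkURIStatus is an illustrative name):
func checkURIStatus(t *c.C, uri string, want int) {
	res, err := http.Get(uri)
	t.Assert(err, c.IsNil)
	res.Body.Close()
	t.Assert(res.StatusCode, c.Equals, want, c.Commentf("GET %s", uri))
}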
func runClusterMigrateDomain(args *docopt.Args) error {
	cluster, err := getCluster()
	if err != nil {
		shutdown.Fatal(err)
	}
	client, err := cluster.Client()
	if err != nil {
		shutdown.Fatal(err)
	}

	dm := &ct.DomainMigration{
		Domain: args.String["<domain>"],
	}

	release, err := client.GetAppRelease("controller")
	if err != nil {
		return err
	}
	dm.OldDomain = release.Env["DEFAULT_ROUTE_DOMAIN"]

	if !promptYesNo(fmt.Sprintf("Migrate cluster domain from %q to %q?", dm.OldDomain, dm.Domain)) {
		fmt.Println("Aborted")
		return nil
	}

	maxDuration := 2 * time.Minute
	fmt.Printf("Migrating cluster domain (this can take up to %s)...\n", maxDuration)

	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(ct.StreamEventsOptions{
		ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration},
	}, events)
	if err != nil {
		return err
	}
	defer stream.Close()

	if err := client.PutDomain(dm); err != nil {
		return err
	}

	timeout := time.After(maxDuration)
	for {
		select {
		case event, ok := <-events:
			if !ok {
				return stream.Err()
			}
			var e *ct.DomainMigrationEvent
			if err := json.Unmarshal(event.Data, &e); err != nil {
				return err
			}
			if e.Error != "" {
				fmt.Println(e.Error)
			}
			if e.DomainMigration.FinishedAt != nil {
				dm = e.DomainMigration
				fmt.Printf("Changed cluster domain from %q to %q\n", dm.OldDomain, dm.Domain)

				// update flynnrc
				cluster.TLSPin = dm.TLSCert.Pin
				cluster.ControllerURL = fmt.Sprintf("https://controller.%s", dm.Domain)
				cluster.GitURL = fmt.Sprintf("https://git.%s", dm.Domain)
				cluster.DockerPushURL = fmt.Sprintf("https://docker.%s", dm.Domain)
				if err := config.SaveTo(configPath()); err != nil {
					return fmt.Errorf("Error saving config: %s", err)
				}

				// update git config
				caFile, err := cfg.CACertFile(cluster.Name)
				if err != nil {
					return err
				}
				defer caFile.Close()
				if _, err := caFile.Write([]byte(dm.TLSCert.CACert)); err != nil {
					return err
				}
				if err := cfg.WriteGlobalGitConfig(cluster.GitURL, caFile.Name()); err != nil {
					return err
				}
				cfg.RemoveGlobalGitConfig(fmt.Sprintf("https://git.%s", dm.OldDomain))

				// try to run "docker login" for the new domain, but just print a warning
				// if it fails so the user can fix it later
				if host, err := cluster.DockerPushHost(); err == nil {
					if err := dockerLogin(host, cluster.Key); err == ErrDockerTLSError {
						printDockerTLSWarning(host, caFile.Name())
					}
				}
				dockerLogout(dm.OldDomain)

				fmt.Println("Updated local CLI configuration")
				return nil
			}
		case <-timeout:
			return errors.New("timed out waiting for domain migration to complete")
		}
	}
}
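// The flynnrc rewrite above derives every endpoint URL from the new domain.
// That mapping, factored into a small sketch for reference (clusterURLs is a
// hypothetical helper, not part of the CLI):
func clusterURLs(domain string) (controllerURL, gitURL, dockerPushURL string) {
	controllerURL = fmt.Sprintf("https://controller.%s", domain)
	gitURL = fmt.Sprintf("https://git.%s", domain)
	dockerPushURL = fmt.Sprintf("https://docker.%s", domain)
	return
}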
func (s *CLISuite) TestSlugReleaseGarbageCollection(t *c.C) {
	client := s.controllerClient(t)

	// create app with gc.max_inactive_slug_releases=3
	maxInactiveSlugReleases := 3
	app := &ct.App{Meta: map[string]string{"gc.max_inactive_slug_releases": strconv.Itoa(maxInactiveSlugReleases)}}
	t.Assert(client.CreateApp(app), c.IsNil)

	// create an image artifact
	imageArtifact := &ct.Artifact{Type: host.ArtifactTypeDocker, URI: imageURIs["test-apps"]}
	t.Assert(client.CreateArtifact(imageArtifact), c.IsNil)

	// create 5 slug artifacts
	var slug bytes.Buffer
	gz := gzip.NewWriter(&slug)
	t.Assert(tar.NewWriter(gz).Close(), c.IsNil)
	t.Assert(gz.Close(), c.IsNil)
	slugs := []string{
		"http://blobstore.discoverd/1/slug.tgz",
		"http://blobstore.discoverd/2/slug.tgz",
		"http://blobstore.discoverd/3/slug.tgz",
		"http://blobstore.discoverd/4/slug.tgz",
		"http://blobstore.discoverd/5/slug.tgz",
	}
	slugArtifacts := make([]*ct.Artifact, len(slugs))
	for i, uri := range slugs {
		req, err := http.NewRequest("PUT", uri, bytes.NewReader(slug.Bytes()))
		t.Assert(err, c.IsNil)
		res, err := http.DefaultClient.Do(req)
		t.Assert(err, c.IsNil)
		res.Body.Close()
		t.Assert(res.StatusCode, c.Equals, http.StatusOK)
		artifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  uri,
			Meta: map[string]string{"blobstore": "true"},
		}
		t.Assert(client.CreateArtifact(artifact), c.IsNil)
		slugArtifacts[i] = artifact
	}

	// create 6 releases, the second being scaled up and having the
	// same slug as the third (which prevents that slug from being deleted)
	releases := make([]*ct.Release, 6)
	for i, r := range []struct {
		slug   *ct.Artifact
		active bool
	}{
		{slugArtifacts[0], false},
		{slugArtifacts[1], true},
		{slugArtifacts[1], false},
		{slugArtifacts[2], false},
		{slugArtifacts[3], false},
		{slugArtifacts[4], false},
	} {
		release := &ct.Release{
			ArtifactIDs: []string{imageArtifact.ID, r.slug.ID},
			Processes: map[string]ct.ProcessType{
				"app": {Args: []string{"/bin/pingserv"}, Ports: []ct.Port{{Proto: "tcp"}}},
			},
		}
		t.Assert(client.CreateRelease(release), c.IsNil)
		procs := map[string]int{"app": 0}
		if r.active {
			procs["app"] = 1
		}
		t.Assert(client.PutFormation(&ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: procs,
		}), c.IsNil)
		releases[i] = release
	}

	// scale the last release so we can deploy it
	lastRelease := releases[len(releases)-1]
	watcher, err := client.WatchJobEvents(app.ID, lastRelease.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	t.Assert(client.PutFormation(&ct.Formation{
		AppID:     app.ID,
		ReleaseID: lastRelease.ID,
		Processes: map[string]int{"app": 1},
	}), c.IsNil)
	t.Assert(watcher.WaitFor(ct.JobEvents{"app": ct.JobUpEvents(1)}, scaleTimeout, nil), c.IsNil)
	t.Assert(client.SetAppRelease(app.ID, lastRelease.ID), c.IsNil)

	// subscribe to garbage collection events
	gcEvents := make(chan *ct.Event)
	stream, err := client.StreamEvents(ct.StreamEventsOptions{
		AppID:       app.ID,
		ObjectTypes: []ct.EventType{ct.EventTypeAppGarbageCollection},
	}, gcEvents)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// deploy a new release with the same slug as the last release
	timeoutCh := make(chan struct{})
	time.AfterFunc(5*time.Minute, func() { close(timeoutCh) })
	newRelease := *lastRelease
	newRelease.ID = ""
	t.Assert(client.CreateRelease(&newRelease), c.IsNil)
	t.Assert(client.DeployAppRelease(app.ID, newRelease.ID, timeoutCh), c.IsNil)

	// wait for garbage collection
	select {
	case event, ok := <-gcEvents:
		if !ok {
			t.Fatalf("event stream closed unexpectedly: %s", stream.Err())
		}
		var e ct.AppGarbageCollectionEvent
		t.Assert(json.Unmarshal(event.Data, &e), c.IsNil)
		if e.Error != "" {
			t.Fatalf("garbage collection failed: %s", e.Error)
		}
	case <-time.After(60 * time.Second):
		t.Fatal("timed out waiting for garbage collection")
	}

	// check we have 4 distinct slug releases (so 5 in total, only 3 of
	// which are inactive)
	list, err := client.AppReleaseList(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(list, c.HasLen, maxInactiveSlugReleases+2)
	distinctSlugs := make(map[string]struct{}, len(list))
	for _, release := range list {
		files := release.FileArtifactIDs()
		t.Assert(files, c.HasLen, 1)
		distinctSlugs[files[0]] = struct{}{}
	}
	t.Assert(distinctSlugs, c.HasLen, maxInactiveSlugReleases+1)

	// check the first and third releases got deleted, but the rest remain
	assertDeleted := func(release *ct.Release, deleted bool) {
		_, err := client.GetRelease(release.ID)
		if deleted {
			t.Assert(err, c.Equals, controller.ErrNotFound)
		} else {
			t.Assert(err, c.IsNil)
		}
	}
	assertDeleted(releases[0], true)
	assertDeleted(releases[1], false)
	assertDeleted(releases[2], true)
	assertDeleted(releases[3], false)
	assertDeleted(releases[4], false)
	assertDeleted(releases[5], false)
	assertDeleted(&newRelease, false)

	// check the first slug got deleted, but the rest remain
	s.assertURI(t, slugs[0], http.StatusNotFound)
	for i := 1; i < len(slugs); i++ {
		s.assertURI(t, slugs[i], http.StatusOK)
	}
}