Example 1: delete an app and wait up to 60 seconds for its app-deletion event
// DeleteApp deletes an app.
func (c *Client) DeleteApp(appID string) (*ct.AppDeletion, error) {
	events := make(chan *ct.AppEvent)
	stream, err := c.ResumingStream("GET", fmt.Sprintf("/apps/%s/events?object_type=%s", appID, ct.EventTypeAppDeletion), events)
	if err != nil {
		return nil, err
	}
	defer stream.Close()

	if err := c.Delete(fmt.Sprintf("/apps/%s", appID)); err != nil {
		return nil, err
	}

	select {
	case event, ok := <-events:
		if !ok {
			return nil, stream.Err()
		}
		var e ct.AppDeletionEvent
		if err := json.Unmarshal(event.Data, &e); err != nil {
			return nil, err
		}
		if e.Error != "" {
			return nil, errors.New(e.Error)
		}
		return e.AppDeletion, nil
	case <-time.After(60 * time.Second):
		return nil, errors.New("timed out waiting for app deletion")
	}
}
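
For reference, a minimal caller sketch, assuming the controller client is configured from the environment as in Example 4; the app name is hypothetical.

package main

import (
	"log"
	"os"

	"github.com/flynn/flynn/controller/client"
)

func main() {
	// An empty URI resolves the controller via service discovery, as in
	// Example 4; AUTH_KEY is assumed to be set in the environment.
	client, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	// "my-app" is a hypothetical app name.
	deletion, err := client.DeleteApp("my-app")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("app deleted: %+v", deletion)
}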
Example 2: delete a release and wait for its release-deletion event
// DeleteRelease deletes a release and any associated file artifacts.
func (c *Client) DeleteRelease(appID, releaseID string) (*ct.ReleaseDeletion, error) {
	events := make(chan *ct.Event)
	stream, err := c.StreamEvents(ct.StreamEventsOptions{
		AppID:       appID,
		ObjectID:    releaseID,
		ObjectTypes: []ct.EventType{ct.EventTypeReleaseDeletion},
	}, events)
	if err != nil {
		return nil, err
	}
	defer stream.Close()

	if err := c.Delete(fmt.Sprintf("/apps/%s/releases/%s", appID, releaseID), nil); err != nil {
		return nil, err
	}

	select {
	case event, ok := <-events:
		if !ok {
			return nil, stream.Err()
		}
		var e ct.ReleaseDeletionEvent
		if err := json.Unmarshal(event.Data, &e); err != nil {
			return nil, err
		}
		if e.Error != "" {
			return nil, errors.New(e.Error)
		}
		return e.ReleaseDeletion, nil
	case <-time.After(60 * time.Second):
		return nil, errors.New("timed out waiting for release deletion")
	}
}
Example 3: HTTP handler that serves a build log as HTML, an SSE stream, or plain text
// getBuildLog serves the log for the build identified by the :build route parameter.
func (r *Runner) getBuildLog(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	id := ps.ByName("build")
	b := &Build{}
	if err := r.db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket(dbBucket).Get([]byte(id))
		if err := json.Unmarshal(v, b); err != nil {
			return fmt.Errorf("could not decode build %s: %s", v, err)
		}
		return nil
	}); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	// if it's a V1 build, redirect to the log in S3
	if b.Version == BuildVersion1 {
		http.Redirect(w, req, b.LogURL, http.StatusMovedPermanently)
		return
	}

	// if it's a browser, serve the build-log.html template
	if strings.Contains(req.Header.Get("Accept"), "text/html") {
		tpl, err := template.ParseFiles(path.Join(args.AssetsDir, "build-log.html"))
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		if err := tpl.Execute(w, b); err != nil {
			log.Printf("error executing build-log template: %s", err)
		}
		return
	}

	// serve the build log as either an SSE or plain text stream
	ch := make(chan string)
	stream, err := getBuildLogStream(b, ch)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	if cn, ok := w.(http.CloseNotifier); ok {
		go func() {
			<-cn.CloseNotify()
			stream.Close()
		}()
	} else {
		defer stream.Close()
	}

	if strings.Contains(req.Header.Get("Accept"), "text/event-stream") {
		sse.ServeStream(w, ch, nil)
	} else {
		servePlainStream(w, ch)
	}

	if err := stream.Err(); err != nil {
		log.Println("error serving build log stream:", err)
	}
}
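
A client-side sketch of the content negotiation above: the same endpoint yields HTML, an SSE stream, or plain text depending on the Accept header. The route pattern is assumed from the ":build" parameter, not taken from the project's actual router.

import (
	"io"
	"net/http"
)

// fetchBuildLog is a hypothetical helper, not part of the runner.
func fetchBuildLog(baseURL, buildID string) (io.ReadCloser, error) {
	req, err := http.NewRequest("GET", baseURL+"/builds/"+buildID+"/log", nil)
	if err != nil {
		return nil, err
	}
	// Ask for SSE framing; omit the header to get a plain text stream.
	req.Header.Set("Accept", "text/event-stream")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	return res.Body, nil
}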
Example 4: scheduler startup that registers with discoverd and blocks until elected leader
func main() {
	defer shutdown.Exit()

	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	go startHTTPServer()

	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			shutdown.Fatal(err)
		}
		grohl.Log(grohl.Data{"at": "backoff_period", "period": backoffPeriod.String()})
	}

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		shutdown.Fatal(err)
	}
	c := newContext(cc, cluster.NewClient())

	c.watchHosts()

	grohl.Log(grohl.Data{"at": "leaderwait"})
	hb, err := discoverd.AddServiceAndRegister("controller-scheduler", ":"+os.Getenv("PORT"))
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	leaders := make(chan *discoverd.Instance)
	stream, err := discoverd.NewService("controller-scheduler").Leaders(leaders)
	if err != nil {
		shutdown.Fatal(err)
	}
	for leader := range leaders {
		if leader.Addr == hb.Addr() {
			break
		}
	}
	if err := stream.Err(); err != nil {
		// TODO: handle discoverd errors
		shutdown.Fatal(err)
	}
	stream.Close()
	// TODO: handle demotion

	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations()
}
Example 5: discoverd HTTP handler that streams store events over SSE
// handleStream creates a subscription and streams out events in SSE format.
func (h *httpAPI) handleStream(w http.ResponseWriter, params httprouter.Params, kind discoverd.EventKind) {
	ch := make(chan *discoverd.Event, 64) // TODO: figure out how big this buffer should be
	stream := h.Store.Subscribe(params.ByName("service"), true, kind, ch)
	s := sse.NewStream(w, ch, nil)
	s.Serve()
	s.Wait()
	stream.Close()
	if err := stream.Err(); err != nil {
		s.CloseWithError(err)
	}
}
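
handleStream takes httprouter.Params and an event kind rather than matching httprouter's Handle signature, so registering it needs a small adapter. A hedged sketch follows; the route path and the EventKindUp filter are assumptions, not the project's actual routing table.

import (
	"net/http"

	"github.com/flynn/flynn/discoverd/client"
	"github.com/julienschmidt/httprouter"
)

func registerStreamRoute(router *httprouter.Router, api *httpAPI) {
	router.GET("/services/:service/events", func(w http.ResponseWriter, _ *http.Request, params httprouter.Params) {
		api.handleStream(w, params, discoverd.EventKindUp)
	})
}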
Example 6: test asserting that a job with an invalid partition yields a create event then an error event
func (s *HostSuite) TestAddFailingJob(t *c.C) {
	// get a host and watch events
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	t.Assert(hosts, c.Not(c.HasLen), 0)
	h := hosts[0]
	jobID := random.UUID()
	events := make(chan *host.Event)
	stream, err := h.StreamEvents(jobID, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// add a job with a non-existent partition
	job := &host.Job{
		ID:         jobID,
		Mountspecs: []*host.Mountspec{{}},
		Partition:  "nonexistent",
	}
	t.Assert(h.AddJob(job), c.IsNil)

	// check we get a create then error event
	actual := make(map[host.JobEventType]*host.Event, 2)
loop:
	for {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatalf("job event stream closed unexpectedly: %s", stream.Err())
			}
			if _, ok := actual[e.Event]; ok {
				t.Fatalf("unexpected event: %v", e)
			}
			actual[e.Event] = e
			if len(actual) >= 2 {
				break loop
			}
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for job event")
		}
	}
	t.Assert(actual[host.JobEventCreate], c.NotNil)
	e := actual[host.JobEventError]
	t.Assert(e, c.NotNil)
	t.Assert(e.Job, c.NotNil)
	t.Assert(e.Job.Error, c.NotNil)
	t.Assert(*e.Job.Error, c.Equals, `host: invalid job partition "nonexistent"`)
}
Example 7: create a deployment and consume its event stream until completion, failure, or timeout
// DeployAppRelease creates a deployment of the given release and blocks until
// it completes, fails, or hits the deploy timeout.
func (c *Client) DeployAppRelease(appID, releaseID string) error {
	d, err := c.CreateDeployment(appID, releaseID)
	if err != nil {
		return err
	}

	// if initial deploy, just stop here
	if d.FinishedAt != nil {
		return nil
	}

	events := make(chan *ct.DeploymentEvent)
	stream, err := c.StreamDeployment(d, events)
	if err != nil {
		return err
	}
	defer stream.Close()

	timeout := d.DeployTimeout
	if timeout == 0 {
		// although a non-zero timeout is set for all new apps, it
		// could still be zero in the case of updating a cluster which
		// doesn't have deploy timeouts set (as the controller
		// migration may not have run yet) so use the default
		timeout = ct.DefaultDeployTimeout
	}
outer:
	for {
		select {
		case e, ok := <-events:
			if !ok {
				return fmt.Errorf("unexpected close of deployment event stream: %s", stream.Err())
			}
			switch e.Status {
			case "complete":
				break outer
			case "failed":
				return e.Err()
			}
		case <-time.After(time.Duration(timeout) * time.Second):
			return errors.New("timed out waiting for deployment completion")
		}
	}
	return nil
}
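
A caller sketch for the method above, assuming a configured *controller.Client and hypothetical app and release IDs.

import (
	"log"

	"github.com/flynn/flynn/controller/client"
)

func deploy(c *controller.Client, appID, releaseID string) {
	// Blocks until the deployment completes, fails, or hits the deploy
	// timeout (ct.DefaultDeployTimeout when the app has none set).
	if err := c.DeployAppRelease(appID, releaseID); err != nil {
		log.Fatalf("deploy of release %s failed: %s", releaseID, err)
	}
	log.Printf("release %s deployed", releaseID)
}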
Example 8: a commented variant of Example 5
// serveStream creates a subscription and streams out events in SSE format.
func (h *Handler) serveStream(w http.ResponseWriter, params httprouter.Params, kind discoverd.EventKind) {
	// Create a buffered channel to receive events.
	ch := make(chan *discoverd.Event, StreamBufferSize)

	// Subscribe to events on the store.
	service := params.ByName("service")
	stream := h.Store.Subscribe(service, true, kind, ch)

	// Create and serve an SSE stream.
	s := sse.NewStream(w, ch, nil)
	s.Serve()
	s.Wait()
	stream.Close()

	// Check if there was an error while closing.
	if err := stream.Err(); err != nil {
		s.CloseWithError(err)
	}
}
Example 9: return a service's current instances, or watch until one comes up or the timeout elapses
// Instances returns the service's registered instances, watching the service
// for up to timeout until at least one instance comes up if none exist yet.
func (c *Client) Instances(service string, timeout time.Duration) ([]*Instance, error) {
	s := c.Service(service)
	instances, err := s.Instances()
	if len(instances) > 0 || err != nil && !IsNotFound(err) {
		return instances, err
	}

	events := make(chan *Event)
	stream, err := s.Watch(events)
	if err != nil {
		return nil, err
	}
	defer stream.Close()
	// get any current instances
outer:
	for event := range events {
		switch event.Kind {
		case EventKindCurrent:
			break outer
		case EventKindUp:
			instances = append(instances, event.Instance)
		}
	}
	if len(instances) > 0 {
		return instances, nil
	}
	// wait for an instance to come up
	for {
		select {
		case event, ok := <-events:
			if !ok {
				return nil, stream.Err()
			}
			if event.Kind != EventKindUp {
				continue
			}
			return []*Instance{event.Instance}, nil
		case <-time.After(timeout):
			return nil, ErrTimedOut
		}
	}
}
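
A small usage sketch, assuming a *discoverd.Client has already been constructed for the deployment; the service name and timeout are hypothetical.

import (
	"time"

	"github.com/flynn/flynn/discoverd/client"
)

func waitForRouter(c *discoverd.Client) ([]*discoverd.Instance, error) {
	// Returns immediately if "router" already has registered instances;
	// otherwise watches the service until one comes up or ten seconds pass.
	return c.Instances("router", 10*time.Second)
}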
Example 10: like Example 7, but the wait is cancelled by closing stopWait instead of a timeout
// DeployAppRelease creates a deployment of the given release and blocks until
// it completes or fails, aborting the wait when stopWait is closed.
func (c *Client) DeployAppRelease(appID, releaseID string, stopWait <-chan struct{}) error {
	d, err := c.CreateDeployment(appID, releaseID)
	if err != nil {
		return err
	}

	// if initial deploy, just stop here
	if d.FinishedAt != nil {
		return nil
	}

	events := make(chan *ct.DeploymentEvent)
	stream, err := c.StreamDeployment(d, events)
	if err != nil {
		return err
	}
	defer stream.Close()

outer:
	for {
		select {
		case e, ok := <-events:
			if !ok {
				return fmt.Errorf("unexpected close of deployment event stream: %s", stream.Err())
			}
			switch e.Status {
			case "complete":
				break outer
			case "failed":
				return e.Err()
			}
		case <-stopWait:
			return errors.New("deploy wait cancelled")
		}
	}
	return nil
}
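
A sketch of driving the cancellable variant, assuming a configured *controller.Client; wiring SIGINT to close stopWait is an illustrative choice, not something the source prescribes.

import (
	"os"
	"os/signal"

	"github.com/flynn/flynn/controller/client"
)

func deployWithCancel(c *controller.Client, appID, releaseID string) error {
	stop := make(chan struct{})
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	defer signal.Stop(sig)
	go func() {
		// On interrupt, close stop to unblock the deploy wait with
		// "deploy wait cancelled". In this sketch the goroutine simply
		// exits with the process if no signal arrives.
		<-sig
		close(stop)
	}()
	return c.DeployAppRelease(appID, releaseID, stop)
}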
Example 11: test that tails init logs via the log aggregator until an OOM notification arrives
func (s *HostSuite) TestNotifyOOM(t *c.C) {
	appID := random.UUID()

	// subscribe to init log messages from the logaggregator
	client, err := logaggc.New("")
	t.Assert(err, c.IsNil)
	opts := logagg.LogOpts{
		Follow:      true,
		StreamTypes: []logagg.StreamType{logagg.StreamTypeInit},
	}
	rc, err := client.GetLog(appID, &opts)
	t.Assert(err, c.IsNil)
	defer rc.Close()
	msgs := make(chan *logaggc.Message)
	// note: stream shadows the imported stream package here; all later
	// references are to this variable
	stream := stream.New()
	defer stream.Close()
	go func() {
		defer close(msgs)
		dec := json.NewDecoder(rc)
		for {
			var msg logaggc.Message
			if err := dec.Decode(&msg); err != nil {
				stream.Error = err
				return
			}
			select {
			case msgs <- &msg:
			case <-stream.StopCh:
				return
			}
		}
	}()

	// run the OOM job
	cmd := exec.CommandUsingCluster(
		s.clusterClient(t),
		s.createArtifact(t, "test-apps"),
		"/bin/oom",
	)
	cmd.Meta = map[string]string{"flynn-controller.app": appID}
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()

	// wait for the OOM notification
	for {
		select {
		case err := <-runErr:
			t.Assert(err, c.IsNil)
		case msg, ok := <-msgs:
			if !ok {
				t.Fatalf("message stream closed unexpectedly: %s", stream.Err())
			}
			t.Log(msg.Msg)
			if strings.Contains(msg.Msg, "FATAL: a container process was killed due to lack of available memory") {
				return
			}
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for OOM notification")
		}
	}
}