Example #1
func (s *S) TestGetAppLog(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "get-app-log-test"})

	tests := []struct {
		opts     *logagg.LogOpts
		expected []logaggc.Message
	}{
		{
			expected: sampleMessages,
		},
		{
			opts:     &logagg.LogOpts{Lines: typeconv.IntPtr(1)},
			expected: sampleMessages[2:],
		},
		{
			opts:     &logagg.LogOpts{ProcessType: typeconv.StringPtr("web")},
			expected: sampleMessages[1:2],
		},
		{
			opts:     &logagg.LogOpts{ProcessType: typeconv.StringPtr("")},
			expected: sampleMessages[:1],
		},
		{
			opts:     &logagg.LogOpts{JobID: "11111111111111111111111111111111"},
			expected: sampleMessages[1:2],
		},
	}

	for _, test := range tests {
		opts := test.opts
		if opts != nil {
			numLines := "<nil>"
			if opts.Lines != nil {
				numLines = strconv.Itoa(*opts.Lines)
			}
			processType := "<nil>"
			if opts.ProcessType != nil {
				processType = *opts.ProcessType
			}
			c.Logf("Follow=%t Lines=%q JobID=%q ProcessType=%q", opts.Follow, numLines, opts.JobID, processType)
		}
		rc, err := s.c.GetAppLog(app.Name, opts)
		c.Assert(err, IsNil)
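		// note: these defers run when the test function returns, not at the
		// end of each loop iteration, which is fine for this small fixed
		// set of readers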
		defer rc.Close()

		msgs := make([]logaggc.Message, 0)
		dec := json.NewDecoder(rc)
		for {
			var msg logaggc.Message
			err := dec.Decode(&msg)
			if err == io.EOF {
				break
			}
			c.Assert(err, IsNil)
			msgs = append(msgs, msg)
		}

		c.Assert(msgs, DeepEquals, test.expected)
	}
}
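
The slice expressions in the table above pin down the ordering of the sampleMessages fixture, which is defined elsewhere in the suite. A plausible reconstruction, assuming logaggc.Message exposes JobID, ProcessType and Msg fields (the values here are hypothetical; only the ordering and the "111..." job ID are implied by the test):

// Hypothetical fixture consistent with the expectations above: index 0 has
// an empty process type, index 1 is the only "web" message and carries the
// job ID matched by the JobID test case, and index 2 is the newest message
// (the one returned when Lines=1).
var sampleMessages = []logaggc.Message{
	{JobID: "00000000000000000000000000000000", ProcessType: "", Msg: "app log 1"},
	{JobID: "11111111111111111111111111111111", ProcessType: "web", Msg: "app log 2"},
	{JobID: "22222222222222222222222222222222", ProcessType: "worker", Msg: "app log 3"},
}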
Example #2
func (s *LogAggregatorTestSuite) TestAPIGetLogBuffer(c *C) {
	appID := "test-app"
	msg1 := newMessageForApp(appID, "web.1", "log message 1")
	msg2 := newMessageForApp(appID, "web.2", "log message 2")
	msg3 := newMessageForApp(appID, "worker.3", "log message 3")
	msg4 := newMessageForApp(appID, "web.1", "log message 4")
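	// a ProcID of ".5" parses as an empty process type with job ID "5"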
	msg5 := newMessageForApp(appID, ".5", "log message 5")

	s.agg.feed(msg1)
	s.agg.feed(msg2)
	s.agg.feed(msg3)
	s.agg.feed(msg4)
	s.agg.feed(msg5)

	runtest := func(opts client.LogOpts, expected string) {
		// log the Lines value as a string so a nil pointer is not conflated
		// with the legitimate -1 value used by several test cases below
		numLines := "<nil>"
		if opts.Lines != nil {
			numLines = strconv.Itoa(*opts.Lines)
		}
		processType := "<nil>"
		if opts.ProcessType != nil {
			processType = *opts.ProcessType
		}
		c.Logf("Follow=%t Lines=%s JobID=%q ProcessType=%q", opts.Follow, numLines, opts.JobID, processType)
		logrc, err := s.client.GetLog(appID, &opts)
		c.Assert(err, IsNil)
		defer logrc.Close()

		assertAllLogsEquals(c, logrc, expected)
	}

	tests := []struct {
		numLogs     *int
		jobID       string
		processType *string
		expected    []*rfc5424.Message
	}{
		{
			numLogs:  typeconv.IntPtr(-1),
			expected: []*rfc5424.Message{msg1, msg2, msg3, msg4, msg5},
		},
		{
			numLogs:  typeconv.IntPtr(1),
			expected: []*rfc5424.Message{msg5},
		},
		{
			numLogs:  typeconv.IntPtr(1),
			jobID:    "3",
			expected: []*rfc5424.Message{msg3},
		},
		{
			numLogs:  typeconv.IntPtr(-1),
			jobID:    "1",
			expected: []*rfc5424.Message{msg1, msg4},
		},
		{
			numLogs:     typeconv.IntPtr(-1),
			processType: typeconv.StringPtr("web"),
			expected:    []*rfc5424.Message{msg1, msg2, msg4},
		},
		{
			numLogs:     typeconv.IntPtr(-1),
			processType: typeconv.StringPtr(""),
			expected:    []*rfc5424.Message{msg5},
		},
	}
	for _, test := range tests {
		// assigning a nil pointer is the same as leaving the field unset,
		// so the fields can be copied directly without nil checks
		opts := client.LogOpts{
			Follow:      false,
			JobID:       test.jobID,
			ProcessType: test.processType,
			Lines:       test.numLogs,
		}
		expected := ""
		for _, msg := range test.expected {
			expected += marshalMessage(msg)
		}
		runtest(opts, expected)
	}
}
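
The newMessageForApp and marshalMessage helpers are defined elsewhere in the suite. A minimal sketch of newMessageForApp, assuming the rfc5424 package exposes NewMessage(*Header, []byte) and []byte header fields (both assumptions about its API):

// Hypothetical reconstruction: the aggregator derives the process type and
// job ID by splitting the syslog ProcID on ".", so "web.1" means process
// type "web" with job ID "1", and ".5" means an empty process type with
// job ID "5".
func newMessageForApp(appID, procID, text string) *rfc5424.Message {
	return rfc5424.NewMessage(&rfc5424.Header{
		AppName: []byte(appID),
		ProcID:  []byte(procID),
	}, []byte(text))
}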
Example #3
func (s *SchedulerSuite) TestScaleTags(t *c.C) {
	// ensure we have more than 1 host to test with
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	if len(hosts) <= 1 {
		t.Skip("not enough hosts to test tagged based scheduling")
	}

	// stream the scheduler leader log so we can synchronize tag changes
	leader, err := s.discoverdClient(t).Service("controller-scheduler").Leader()
	t.Assert(err, c.IsNil)
	client := s.controllerClient(t)
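	// Lines=0 skips any buffered backlog so the stream only carries log
	// lines written after this point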
	res, err := client.GetAppLog("controller", &ct.LogOpts{
		Follow:      true,
		JobID:       leader.Meta["FLYNN_JOB_ID"],
		ProcessType: typeconv.StringPtr("scheduler"),
		Lines:       typeconv.IntPtr(0),
	})
	t.Assert(err, c.IsNil)
	defer res.Close()
	tagChange := make(chan struct{})
	go func() {
		dec := json.NewDecoder(res)
		for {
			var msg logaggc.Message
			if err := dec.Decode(&msg); err != nil {
				return
			}
			if strings.Contains(msg.Msg, "host tags changed") {
				tagChange <- struct{}{}
			}
		}
	}()
	waitSchedulerTagChange := func() {
		select {
		case <-tagChange:
			return
		case <-time.After(10 * time.Second):
			t.Fatalf("timed out waiting for scheduler leader to see tag change")
		}
	}

	// watch service events so we can wait for tag changes
	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("flynn-host").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()
	waitServiceEvent := func(kind discoverd.EventKind) *discoverd.Event {
		for {
			select {
			case event, ok := <-events:
				if !ok {
					t.Fatalf("service event stream closed unexpectedly: %s", stream.Err())
				}
				if event.Kind == kind {
					return event
				}
			case <-time.After(10 * time.Second):
				t.Fatalf("timed out waiting for service %s event", kind)
			}
		}
	}

	// wait for the watch to be current before changing tags
	waitServiceEvent(discoverd.EventKindCurrent)

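	// updateTags applies tags to a host, then blocks until both discoverd
	// and the scheduler leader have observed the change, so subsequent
	// assertions don't race the scheduler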
	updateTags := func(host *cluster.Host, tags map[string]string) {
		debugf(t, "setting host tags: %s => %v", host.ID(), tags)
		t.Assert(host.UpdateTags(tags), c.IsNil)
		event := waitServiceEvent(discoverd.EventKindUpdate)
		t.Assert(event.Instance.Meta["id"], c.Equals, host.ID())
		for key, val := range tags {
			t.Assert(event.Instance.Meta["tag:"+key], c.Equals, val)
		}
		waitSchedulerTagChange()
	}

	// create an app with a tagged process and watch job events
	app, release := s.createApp(t)
	formation := &ct.Formation{
		AppID:     app.ID,
		ReleaseID: release.ID,
		Tags:      map[string]map[string]string{"printer": {"active": "true"}},
	}
	watcher, err := client.WatchJobEvents(app.ID, release.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()

	// add tag to host 1
	host1 := hosts[0]
	updateTags(host1, map[string]string{"active": "true"})

	// start jobs
	debug(t, "scaling printer=2")
	formation.Processes = map[string]int{"printer": 2}
	t.Assert(client.PutFormation(formation), c.IsNil)
	t.Assert(watcher.WaitFor(ct.JobEvents{"printer": ct.JobUpEvents(2)}, scaleTimeout, nil), c.IsNil)

	assertHostJobCounts := func(expected map[string]int) {
		jobs, err := client.JobList(app.ID)
		t.Assert(err, c.IsNil)
		actual := make(map[string]int)
		for _, job := range jobs {
			if job.State == ct.JobStateUp {
				actual[job.HostID]++
			}
		}
		t.Assert(actual, c.DeepEquals, expected)
	}

	// check all jobs on host 1
	assertHostJobCounts(map[string]int{host1.ID(): 2})

	// add tag to host 2
	host2 := hosts[1]
	updateTags(host2, map[string]string{"active": "true"})

	// scale up
	debug(t, "scaling printer=4")
	formation.Processes["printer"] = 4
	t.Assert(client.PutFormation(formation), c.IsNil)
	t.Assert(watcher.WaitFor(ct.JobEvents{"printer": ct.JobUpEvents(2)}, scaleTimeout, nil), c.IsNil)

	// check jobs distributed across hosts 1 and 2
	assertHostJobCounts(map[string]int{host1.ID(): 2, host2.ID(): 2})

	// remove tag from host 2
	updateTags(host2, map[string]string{"active": ""})

	// check jobs are moved to host 1
	jobEvents := ct.JobEvents{"printer": map[ct.JobState]int{
		ct.JobStateDown: 2,
		ct.JobStateUp:   2,
	}}
	t.Assert(watcher.WaitFor(jobEvents, scaleTimeout, nil), c.IsNil)
	assertHostJobCounts(map[string]int{host1.ID(): 4})

	// remove tag from host 1
	updateTags(host1, map[string]string{"active": ""})

	assertStateCounts := func(expected map[ct.JobState]int) {
		jobs, err := client.JobList(app.ID)
		t.Assert(err, c.IsNil)
		actual := make(map[ct.JobState]int)
		for _, job := range jobs {
			actual[job.State]++
		}
		t.Assert(actual, c.DeepEquals, expected)
	}

	// check the 4 jobs go pending (no host matches the tag), the rest are stopped
	t.Assert(watcher.WaitFor(ct.JobEvents{"printer": ct.JobDownEvents(4)}, scaleTimeout, nil), c.IsNil)
	assertStateCounts(map[ct.JobState]int{ct.JobStatePending: 4, ct.JobStateDown: 6})

	// re-add tag to host 1
	updateTags(host1, map[string]string{"active": "true"})

	// check pending jobs are started on host 1
	t.Assert(watcher.WaitFor(ct.JobEvents{"printer": ct.JobUpEvents(4)}, scaleTimeout, nil), c.IsNil)
	assertHostJobCounts(map[string]int{host1.ID(): 4})
	assertStateCounts(map[ct.JobState]int{ct.JobStateUp: 4, ct.JobStateDown: 6})

	// add different tag to host 2
	updateTags(host2, map[string]string{"disk": "ssd"})

	// update formation tags, check jobs are moved to host 2
	debug(t, "updating formation tags to disk=ssd")
	formation.Tags["printer"] = map[string]string{"disk": "ssd"}
	t.Assert(client.PutFormation(formation), c.IsNil)
	jobEvents = ct.JobEvents{"printer": map[ct.JobState]int{
		ct.JobStateDown: 4,
		ct.JobStateUp:   4,
	}}
	t.Assert(watcher.WaitFor(jobEvents, scaleTimeout, nil), c.IsNil)
	assertHostJobCounts(map[string]int{host2.ID(): 4})
	assertStateCounts(map[ct.JobState]int{ct.JobStateUp: 4, ct.JobStateDown: 10})

	// scale down stops the jobs
	debug(t, "scaling printer=0")
	formation.Processes = nil
	t.Assert(client.PutFormation(formation), c.IsNil)
	t.Assert(watcher.WaitFor(ct.JobEvents{"printer": ct.JobDownEvents(4)}, scaleTimeout, nil), c.IsNil)
	assertStateCounts(map[ct.JobState]int{ct.JobStateDown: 14})
}
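
The behavior this test drives reduces to a subset match between formation tags and host tags: a host is only eligible to run a process type if every tag the formation sets for that type is present on the host with the same value, and updateTags clears a tag by setting it to "". A minimal sketch of that predicate (hypothetical; the real logic lives in the scheduler):

// hostMatchesTags reports whether a host satisfies a formation's tag
// constraints for one process type: every formation tag must be present
// on the host with the same value (a tag cleared with "" counts as absent,
// since a missing map key also yields "").
func hostMatchesTags(hostTags, formationTags map[string]string) bool {
	for k, v := range formationTags {
		if hostTags[k] != v {
			return false
		}
	}
	return true
}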