Example #1
// run executes the application.
func (a *application) run(ctx context.Context) error {
	// Setup common context parameters.
	ctx, cancelFunc := context.WithCancel(ctx)
	defer cancelFunc()

	// Monitor our shutdown signal. Cancel our context when it is closed.
	go func() {
		<-a.shutdownC

		log.Infof(ctx, "Shutdown signal received; cancelling context.")
		cancelFunc()
	}()

	wg := sync.WaitGroup{}
	for i := 0; i < a.config.numWorkers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			a.process(log.SetField(ctx, "worker", id))
		}(i)
	}

	wg.Wait()
	return nil
}
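The pattern here — a shutdown channel that cancels a shared context feeding a fixed pool of workers — can be reproduced with the standard library alone. The following is a minimal, self-contained sketch, not part of the original package: runWorkers, its parameters, and the placeholder worker body are illustrative stand-ins for application.run and a.process.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runWorkers fans out numWorkers goroutines and cancels their shared
// context as soon as shutdownC is closed, mirroring run() above.
func runWorkers(ctx context.Context, numWorkers int, shutdownC <-chan struct{}) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	go func() {
		<-shutdownC
		cancel()
	}()

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Placeholder for a.process: block until the context is cancelled.
			<-ctx.Done()
			fmt.Printf("worker %d stopping: %v\n", id, ctx.Err())
		}(i)
	}
	wg.Wait()
}

func main() {
	shutdownC := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(shutdownC)
	}()
	runWorkers(context.Background(), 4, shutdownC)
}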
Example #2
// Main buffering function, which runs in a goroutine.
func (m *meter) consumeWork() {
	// Acknowledge when this goroutine finishes.
	defer close(m.closeAckC)

	ctx := log.SetFilter(m.ctx, "meter")

	timerRunning := false
	flushTimer := clock.NewTimer(ctx)
	defer func() {
		flushTimer.Stop()
	}()

	flushCount := m.Count
	flushTime := m.Delay

	// Build our work buffer.
	workBufferSize := initialWorkBufferSize
	if flushCount > 0 {
		// Will never buffer more than this much Work.
		workBufferSize = flushCount
	}
	bufferedWork := make([]interface{}, 0, workBufferSize)

	log.Debugf(ctx, "Starting work loop.")
	active := true
	for active {
		flushRound := false

		select {
		case work, ok := <-m.workC:
			if !ok {
				log.Debugf(ctx, "Received instance close signal; terminating.")

				// Don't immediately exit the loop, since there may still be buffered
				// Work to flush.
				active = false
				flushRound = true
				break
			}

			// Buffer the received work item.
			bufferedWork = append(bufferedWork, work)

			// Handle work count threshold. We do this first, since it's trivial to
			// setup/compute.
			if flushCount > 0 && len(bufferedWork) >= flushCount {
				flushRound = true
			}
			// Start our flush timer if it's not already ticking. Only bother when
			// we're not already flushing this round, since flushing stops the
			// timer anyway.
			if !flushRound && flushTime > 0 && !timerRunning {
				log.Infof(log.SetFields(ctx, log.Fields{
					"flushInterval": flushTime,
				}), "Starting flush timer.")
				flushTimer.Reset(flushTime)
				timerRunning = true
			}

			// Invoke work callback, if one is set.
			if m.IngestCallback != nil {
				flushRound = m.IngestCallback(work) || flushRound
			}

		case <-m.flushNowC:
			flushRound = true

		case <-flushTimer.GetC():
			log.Infof(ctx, "Flush timer has triggered.")
			timerRunning = false
			flushRound = true
		}

		// Should we flush?
		if flushRound {
			flushTimer.Stop()
			timerRunning = false

			if len(bufferedWork) > 0 {
				// Clone bufferedWork, since we re-use it.
				workToSend := make([]interface{}, len(bufferedWork))
				copy(workToSend, bufferedWork)

				// Zero the entries so their contents can be garbage collected, then
				// truncate for re-use. The underlying array is kept, since it's
				// likely to fill again.
				for idx := range bufferedWork {
					bufferedWork[idx] = nil
				}
				bufferedWork = bufferedWork[:0]

				// Callback with this work.
				m.Callback(workToSend)
			}
		}
	}
}
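A stripped-down version of the same count-or-delay flush policy can be written against the standard library's time.Timer. The sketch below is illustrative only: batchLoop, its parameters, and the int work type are hypothetical stand-ins for the meter's workC, Count, Delay, and Callback fields, and the clock/log packages are dropped.

package main

import (
	"fmt"
	"time"
)

// batchLoop drains workC into a buffer and flushes either when the buffer
// reaches maxCount or when maxDelay elapses after the first buffered item.
func batchLoop(workC <-chan int, maxCount int, maxDelay time.Duration, flush func([]int)) {
	var buf []int
	timer := time.NewTimer(maxDelay)
	if !timer.Stop() {
		<-timer.C // drain so the timer starts out idle
	}
	timerRunning := false

	doFlush := func() {
		if timerRunning {
			if !timer.Stop() {
				<-timer.C
			}
			timerRunning = false
		}
		if len(buf) > 0 {
			flush(buf)
			buf = nil
		}
	}

	for {
		select {
		case w, ok := <-workC:
			if !ok {
				doFlush() // flush whatever is left, then exit
				return
			}
			buf = append(buf, w)
			if len(buf) >= maxCount {
				doFlush()
			} else if !timerRunning {
				timer.Reset(maxDelay)
				timerRunning = true
			}
		case <-timer.C:
			timerRunning = false
			doFlush()
		}
	}
}

func main() {
	workC := make(chan int)
	go func() {
		for i := 0; i < 7; i++ {
			workC <- i
		}
		close(workC)
	}()
	batchLoop(workC, 3, 50*time.Millisecond, func(batch []int) {
		fmt.Println("flush:", batch)
	})
}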
Example #3
// start runs the event flushing loop. It does not return until ctx is done;
// flush errors are logged rather than returned.
func (l *evtLogger) start(ctx context.Context) error {
	defer close(l.evtCh)

	evtBuf := []*crit_event.LogRequestLite_LogEventLite{}
	clck := clock.Get(ctx)

	flush := func(evts []*crit_event.LogRequestLite_LogEventLite) error {
		// TODO: Auth, retry/backoff etc.
		req := &crit_event.LogRequestLite{
			RequestTimeMs: proto.Int64(clck.Now().Unix()),
			LogSourceName: proto.String("CHROME_INFRA"),
			LogEvent:      evts,
		}

		logging.Infof(ctx, "Flushing %d events", len(req.LogEvent))

		b, err := proto.Marshal(req)
		if err != nil {
			return err
		}

		resp, err := l.cfg.Client.Post(l.cfg.Endpoint, "application/octet-stream", bytes.NewBuffer(b))
		if err != nil {
			return err
		}

		defer resp.Body.Close()
		if resp.StatusCode >= 400 {
			b, err := httputil.DumpResponse(resp, true)
			logging.Errorf(ctx, "Error body: %v %v", string(b), err)
			return fmt.Errorf("http status %d: %s", resp.StatusCode, l.cfg.Endpoint)
		}

		return nil
	}

	// Loop:
	// - If an event can be read off l.evtCh, append it to the event buffer.
	// - When the ticker fires (initially after l.cfg.Interval, then every
	//   second), flush any buffered events.
	// - If ctx.Done() is closed, attempt one final flush and exit.

	// Ways this can behave poorly:
	// - If l.evtCh is at capacity and a client attempts to log an event, that
	//   client will block. This is unlikely, because the select statement spends
	//   very little time on anything other than reading from the channel.
	// - If flush operations take longer than the ticker interval, multiple
	//   simultaneous flush operations can stack up.
	// - If a flush operation fails, there is no retry mechanism (yet); the error
	//   is just logged locally.
	ticker := clck.After(l.cfg.Interval)
	for {
		select {
		case evt := <-l.evtCh:
			evtBuf = append(evtBuf, evt)
		case <-ticker:
			if len(evtBuf) > 0 {
				// Flush in a separate goroutine so we can free this
				// one up to keep reading off of l.evtCh and not block
				// clients.  TODO(seanmccullough): add retry logic on
				// POST failure, but make sure to not leak memory. This
				// may mean dropping events or persisting them to disk.
				go func(evts []*crit_event.LogRequestLite_LogEventLite) {
					if err := flush(evts); err != nil {
						logging.Errorf(ctx, "Error flushing: %v", err.Error())
					}
				}(evtBuf)
				// The above goroutine has a copy of evtBuf, so
				// we can now reset it safely.
				evtBuf = []*crit_event.LogRequestLite_LogEventLite{}
			}
			ticker = clck.After(time.Second)
		case <-ctx.Done():
			if len(evtBuf) > 0 {
				if err := flush(evtBuf); err != nil {
					logging.Errorf(ctx, "Error flushing: %v", err.Error())
				}
			}
			logging.Infof(ctx, "Event logger exiting.")
			return nil
		}
	}
}
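The core shape of start — buffer events from a channel, flush asynchronously on a periodic tick so the reader never blocks, and flush synchronously one last time when the context is cancelled — can be sketched with only the standard library. This is an assumption-laden simplification: it uses a time.Ticker in place of the clock package's re-armed After, and a plain callback in place of the proto-marshalled HTTP POST; eventLoop and its parameters are illustrative names.

package main

import (
	"context"
	"fmt"
	"time"
)

// eventLoop buffers events from evtC, flushes asynchronously on each tick,
// and flushes synchronously one last time when ctx is cancelled.
func eventLoop(ctx context.Context, evtC <-chan string, interval time.Duration, flush func([]string) error) {
	var buf []string
	tick := time.NewTicker(interval)
	defer tick.Stop()

	for {
		select {
		case evt := <-evtC:
			buf = append(buf, evt)
		case <-tick.C:
			if len(buf) > 0 {
				// Flush in a separate goroutine so the loop keeps draining evtC.
				go func(evts []string) {
					if err := flush(evts); err != nil {
						fmt.Println("flush error:", err)
					}
				}(buf)
				buf = nil // the goroutine owns the old slice now
			}
		case <-ctx.Done():
			if len(buf) > 0 {
				if err := flush(buf); err != nil {
					fmt.Println("final flush error:", err)
				}
			}
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	evtC := make(chan string)
	go func() {
		for i := 0; ; i++ {
			select {
			case evtC <- fmt.Sprintf("event-%d", i):
				time.Sleep(20 * time.Millisecond)
			case <-ctx.Done():
				return
			}
		}
	}()
	eventLoop(ctx, evtC, 100*time.Millisecond, func(evts []string) error {
		fmt.Println("flushing", len(evts), "events")
		return nil
	})
}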
Example #4
// mainImpl is the main execution function.
func mainImpl(args []string) int {
	// Use all of the cores.
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Install a console logger by default.
	ctx := context.Background()
	ctx = gologger.Use(ctx)

	loggerConfig := newLoggerConfig() // Internal logging config (cloud logging).
	logConfig := log.Config{Level: log.Debug}
	config := config{}

	fs := flag.CommandLine
	config.addFlags(fs)
	loggerConfig.addFlags(fs)
	logConfig.AddFlags(fs)
	fs.Parse(args)

	// TODO(dnj): Fix this once LUCI logging CL lands.
	ctx = log.SetLevel(ctx, logConfig.Level)
	ctx = logConfig.Set(ctx)

	// Load authenticated client.
	client, err := config.createAuthenticatedClient(ctx)
	if err != nil {
		log.Errorf(log.SetError(ctx, err), "Failed to create authenticated service client.")
		return 1
	}

	// Setup local logging configuration.
	ctx, logFlushFunc, err := loggerConfig.use(ctx, client)
	if err == nil {
		defer logFlushFunc()
	} else {
		log.Warningf(log.SetError(ctx, err), "Failed to setup cloud logging.")
	}

	app := newApplication(config)
	if err := app.loadServices(ctx, client); err != nil {
		log.Errorf(log.SetError(ctx, err), "Failed to initialize services.")
		return 1
	}

	// Set up interrupt handler.
	signalC := make(chan os.Signal, 1)
	go func() {
		triggered := false
		for sig := range signalC {
			if !triggered {
				triggered = true

				log.Infof(log.SetField(ctx, "signal", sig),
					"Received signal; starting shutdown.")
				app.shutdown()
			} else {
				// Triggered multiple times; immediately shut down.
				os.Exit(1)
			}
		}
	}()
	signal.Notify(signalC, os.Interrupt, os.Kill)
	defer func() {
		signal.Stop(signalC)
		close(signalC)
	}()

	log.Infof(ctx, "Starting application execution...")
	if err := app.run(ctx); err != nil {
		log.Errorf(log.SetError(ctx, err), "Error during application execution.")
		return 1
	}

	return 0
}
Example #5
func TestBasicDatastore(t *testing.T) {
	t.Parallel()

	Convey("basic", t, func() {
		inst, err := aetest.NewInstance(&aetest.Options{
			StronglyConsistentDatastore: true,
		})
		So(err, ShouldBeNil)
		defer inst.Close()

		req, err := inst.NewRequest("GET", "/", nil)
		So(err, ShouldBeNil)

		ctx := Use(context.Background(), req)
		ds := datastore.Get(ctx)
		mc := memcache.Get(ctx)
		inf := info.Get(ctx)

		Convey("logging allows you to tweak the level", func() {
			// You have to visually confirm that this actually happens in the stdout
			// of the test... yeah I know.
			logging.Debugf(ctx, "SHOULD NOT SEE")
			logging.Infof(ctx, "SHOULD SEE")

			ctx = logging.SetLevel(ctx, logging.Debug)
			logging.Debugf(ctx, "SHOULD SEE")
			logging.Infof(ctx, "SHOULD SEE (2)")
		})

		Convey("Can probe/change Namespace", func() {
			So(inf.GetNamespace(), ShouldEqual, "")
			ctx, err = inf.Namespace("wat")
			So(err, ShouldBeNil)
			inf = info.Get(ctx)
			So(inf.GetNamespace(), ShouldEqual, "wat")
			ds = datastore.Get(ctx)
			So(ds.MakeKey("Hello", "world").Namespace(), ShouldEqual, "wat")
		})

		Convey("Can get non-transactional context", func() {
			ctx, err := inf.Namespace("foo")
			So(err, ShouldBeNil)
			ds = datastore.Get(ctx)
			inf = info.Get(ctx)

			ds.RunInTransaction(func(ctx context.Context) error {
				So(ds.MakeKey("Foo", "bar").Namespace(), ShouldEqual, "foo")

				So(ds.Put(&TestStruct{ValueI: []int64{100}}), ShouldBeNil)

				err = datastore.GetNoTxn(ctx).RunInTransaction(func(ctx context.Context) error {
					ds = datastore.Get(ctx)
					So(ds.MakeKey("Foo", "bar").Namespace(), ShouldEqual, "foo")
					So(ds.Put(&TestStruct{ValueI: []int64{100}}), ShouldBeNil)
					return nil
				}, nil)
				So(err, ShouldBeNil)

				return nil
			}, nil)
		})

		Convey("Can Put/Get", func() {
			orig := TestStruct{
				ValueI: []int64{1, 7, 946688461000000, 996688461000000},
				ValueB: []bool{true, false},
				ValueS: []string{"hello", "world"},
				ValueF: []float64{1.0, 7.0, 946688461000000.0, 996688461000000.0},
				ValueBS: [][]byte{
					[]byte("allo"),
					[]byte("hello"),
					[]byte("world"),
					[]byte("zurple"),
				},
				ValueK: []*datastore.Key{
					ds.NewKey("Something", "Cool", 0, nil),
					ds.NewKey("Something", "", 1, nil),
					ds.NewKey("Something", "Recursive", 0,
						ds.NewKey("Parent", "", 2, nil)),
				},
				ValueBK: []blobstore.Key{"bellow", "hello"},
				ValueGP: []datastore.GeoPoint{
					{Lat: 120.7, Lng: 95.5},
				},
			}
			So(ds.Put(&orig), ShouldBeNil)

			ret := TestStruct{ID: orig.ID}
			So(ds.Get(&ret), ShouldBeNil)
			So(ret, ShouldResemble, orig)

			// can't be sure the indexes have caught up... so sleep
			time.Sleep(time.Second)

			Convey("Can query", func() {
				q := datastore.NewQuery("TestStruct")
				ds.Run(q, func(ts *TestStruct) {
					So(*ts, ShouldResemble, orig)
				})
				count, err := ds.Count(q)
				So(err, ShouldBeNil)
				So(count, ShouldEqual, 1)
			})

			Convey("Can project", func() {
				q := datastore.NewQuery("TestStruct").Project("ValueS")
				rslts := []datastore.PropertyMap{}
				So(ds.GetAll(q, &rslts), ShouldBeNil)
				So(rslts, ShouldResemble, []datastore.PropertyMap{
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueS": {mp("hello")},
					},
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueS": {mp("world")},
					},
				})

				q = datastore.NewQuery("TestStruct").Project("ValueBS")
				rslts = []datastore.PropertyMap{}
				So(ds.GetAll(q, &rslts), ShouldBeNil)
				So(rslts, ShouldResemble, []datastore.PropertyMap{
					{
						"$key":    {mpNI(ds.KeyForObj(&orig))},
						"ValueBS": {mp("allo")},
					},
					{
						"$key":    {mpNI(ds.KeyForObj(&orig))},
						"ValueBS": {mp("hello")},
					},
					{
						"$key":    {mpNI(ds.KeyForObj(&orig))},
						"ValueBS": {mp("world")},
					},
					{
						"$key":    {mpNI(ds.KeyForObj(&orig))},
						"ValueBS": {mp("zurple")},
					},
				})

				count, err := ds.Count(q)
				So(err, ShouldBeNil)
				So(count, ShouldEqual, 4)

				q = datastore.NewQuery("TestStruct").Lte("ValueI", 7).Project("ValueS").Distinct(true)
				rslts = []datastore.PropertyMap{}
				So(ds.GetAll(q, &rslts), ShouldBeNil)
				So(rslts, ShouldResemble, []datastore.PropertyMap{
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueI": {mp(1)},
						"ValueS": {mp("hello")},
					},
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueI": {mp(1)},
						"ValueS": {mp("world")},
					},
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueI": {mp(7)},
						"ValueS": {mp("hello")},
					},
					{
						"$key":   {mpNI(ds.KeyForObj(&orig))},
						"ValueI": {mp(7)},
						"ValueS": {mp("world")},
					},
				})

				count, err = ds.Count(q)
				So(err, ShouldBeNil)
				So(count, ShouldEqual, 4)
			})
		})

		Convey("Can Put/Get (time)", func() {
			// time comparisons in Go are wonky, so this is pulled out
			pm := datastore.PropertyMap{
				"$key": {mpNI(ds.NewKey("Something", "value", 0, nil))},
				"Time": {
					mp(time.Date(1938, time.January, 1, 1, 1, 1, 1, time.UTC)),
					mp(time.Time{}),
				},
			}
			So(ds.Put(&pm), ShouldBeNil)

			rslt := datastore.PropertyMap{}
			rslt.SetMeta("key", ds.KeyForObj(pm))
			So(ds.Get(&rslt), ShouldBeNil)

			So(pm["Time"][0].Value(), ShouldResemble, rslt["Time"][0].Value())

			q := datastore.NewQuery("Something").Project("Time")
			all := []datastore.PropertyMap{}
			So(ds.GetAll(q, &all), ShouldBeNil)
			So(len(all), ShouldEqual, 2)
			prop := all[0]["Time"][0]
			So(prop.Type(), ShouldEqual, datastore.PTInt)

			tval, err := prop.Project(datastore.PTTime)
			So(err, ShouldBeNil)
			So(tval, ShouldResemble, time.Time{}.UTC())

			tval, err = all[1]["Time"][0].Project(datastore.PTTime)
			So(err, ShouldBeNil)
			So(tval, ShouldResemble, pm["Time"][0].Value())

			ent := datastore.PropertyMap{
				"$key": {mpNI(ds.MakeKey("Something", "value"))},
			}
			So(ds.Get(&ent), ShouldBeNil)
			So(ent["Time"], ShouldResemble, pm["Time"])
		})

		Convey("memcache: Set (nil) is the same as Set ([]byte{})", func() {
			So(mc.Set(mc.NewItem("bob")), ShouldBeNil) // normally would panic because Value is nil

			bob, err := mc.Get("bob")
			So(err, ShouldBeNil)
			So(bob.Value(), ShouldResemble, []byte{})
		})
	})
}
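The nested Convey/So structure used throughout this test comes from the goconvey package (github.com/smartystreets/goconvey/convey). A minimal, standalone example of that structure, independent of the aetest/datastore setup above, also shows why sibling blocks can safely re-derive state: each leaf Convey re-runs the enclosing setup closure.

package example

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestConveyStructure(t *testing.T) {
	Convey("Given a slice with one element", t, func() {
		s := []int{42}

		Convey("Appending grows it", func() {
			s = append(s, 7)
			So(len(s), ShouldEqual, 2)
		})

		Convey("The sibling block sees the original slice again", func() {
			// Each leaf Convey re-runs the enclosing setup, so len is 1 here.
			So(len(s), ShouldEqual, 1)
		})
	})
}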
Example #6
func TestGoLogger(t *testing.T) {
	Convey(`A new Go Logger instance`, t, func() {
		// lre (defined elsewhere in this package) extracts the source file, log
		// level, and message from each formatted log line.

		buf := bytes.Buffer{}
		l := New(&buf, gol.DEBUG)

		for _, entry := range []struct {
			L logging.Level
			F func(string, ...interface{})
			T string
		}{
			{logging.Debug, l.Debugf, "DEBU"},
			{logging.Info, l.Infof, "INFO"},
			{logging.Warning, l.Warningf, "WARN"},
			{logging.Error, l.Errorf, "ERRO"},
		} {
			Convey(fmt.Sprintf("Can log to: %s", entry.L), func() {
				entry.F("Test logging %s", entry.L)
				matches := lre.FindAllStringSubmatch(normalizeLog(buf.String()), -1)
				So(len(matches), ShouldEqual, 1)
				So(len(matches[0]), ShouldEqual, 4)
				So(matches[0][1], ShouldEqual, "gologger_test.go")
				So(matches[0][2], ShouldEqual, entry.T)
				So(matches[0][3], ShouldEqual, fmt.Sprintf("Test logging %s", entry.L))
			})
		}
	})

	Convey(`A Go Logger instance installed in a Context at Info.`, t, func() {
		buf := bytes.Buffer{}
		lc := &LoggerConfig{
			Format: standardConfig.Format,
			Out:    &buf,
			Level:  gol.DEBUG,
		}
		c := logging.SetLevel(lc.Use(context.Background()), logging.Info)

		Convey(`Should log through top-level Context methods.`, func() {
			for _, entry := range []struct {
				L logging.Level
				F func(context.Context, string, ...interface{})
				T string
			}{
				{logging.Info, logging.Infof, "INFO"},
				{logging.Warning, logging.Warningf, "WARN"},
				{logging.Error, logging.Errorf, "ERRO"},
			} {
				Convey(fmt.Sprintf("Can log to: %s", entry.L), func() {
					entry.F(c, "Test logging %s", entry.L)
					matches := lre.FindAllStringSubmatch(normalizeLog(buf.String()), -1)
					So(len(matches), ShouldEqual, 1)
					So(len(matches[0]), ShouldEqual, 4)
					So(matches[0][1], ShouldEqual, "gologger_test.go")
					So(matches[0][2], ShouldEqual, entry.T)
					So(matches[0][3], ShouldEqual, fmt.Sprintf("Test logging %s", entry.L))
				})
			}
		})

		Convey(`With Fields installed in the Context`, func() {
			c = logging.SetFields(c, logging.Fields{
				logging.ErrorKey: "An error!",
				"reason":         "test",
			})

			Convey(`Should log Fields.`, func() {
				logging.Infof(c, "Here is a %s", "log")
				matches := lre.FindAllStringSubmatch(normalizeLog(buf.String()), -1)
				So(len(matches), ShouldEqual, 1)
				So(len(matches[0]), ShouldEqual, 4)
				So(matches[0][1], ShouldEqual, "gologger_test.go")
				So(matches[0][2], ShouldEqual, "INFO")
				So(matches[0][3], ShouldEqual,
					`Here is a log                               {"error":"An error!", "reason":"test"}`)
			})

			Convey(`Should log fields installed immediately`, func() {
				logging.Fields{
					"foo":    "bar",
					"reason": "override",
				}.Infof(c, "Here is another %s", "log")

				matches := lre.FindAllStringSubmatch(normalizeLog(buf.String()), -1)
				So(len(matches), ShouldEqual, 1)
				So(len(matches[0]), ShouldEqual, 4)
				So(matches[0][1], ShouldEqual, "gologger_test.go")
				So(matches[0][2], ShouldEqual, "INFO")
				So(matches[0][3], ShouldEqual,
					`Here is another log                         {"error":"An error!", "foo":"bar", "reason":"override"}`)
			})
		})

		Convey(`Will not log to Debug, as it's below level.`, func() {
			logging.Debugf(c, "Hello!")
			So(buf.Len(), ShouldEqual, 0)
		})
	})
}
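The capture-and-assert approach above — point the logger at a bytes.Buffer and match on the formatted output — also works with nothing but the standard library. A minimal sketch, independent of the gologger and logging packages:

package example

import (
	"bytes"
	"log"
	"strings"
	"testing"
)

func TestLogCapture(t *testing.T) {
	// Direct the logger's output into an in-memory buffer.
	buf := bytes.Buffer{}
	logger := log.New(&buf, "", 0)

	logger.Printf("Test logging %s", "info")

	// Assert on the captured output rather than on stdout.
	got := strings.TrimSpace(buf.String())
	if got != "Test logging info" {
		t.Fatalf("unexpected log line: %q", got)
	}
}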