// SendMetrics will send any unsent metrics onto the metric collection service.
func (api *MetricsManagerAPI) SendMetrics(args params.Entities) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	canAccess, err := api.accessEnviron()
	if err != nil {
		return result, err
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseEnvironTag(arg.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		if !canAccess(tag) {
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		err = metricsender.SendMetrics(api.state, sender, maxBatchesPerSend)
		if err != nil {
			err = errors.Annotate(err, "failed to send metrics")
			logger.Warningf("%v", err)
			result.Results[i].Error = common.ServerError(err)
			continue
		}
	}
	return result, nil
}
// TestErrorCodes checks that for a set of error codes SendMetrics returns an
// error and metrics are marked as not being sent.
func (s *SenderSuite) TestErrorCodes(c *gc.C) {
	tests := []struct {
		errorCode   int
		expectedErr string
	}{
		{http.StatusBadRequest, "failed to send metrics http 400"},
		{http.StatusServiceUnavailable, "failed to send metrics http 503"},
		{http.StatusMovedPermanently, "failed to send metrics http 301"},
	}
	for _, test := range tests {
		killServer := s.startServer(c, errorHandler(c, test.errorCode))

		now := time.Now()
		batches := make([]*state.MetricBatch, 3)
		for i := range batches {
			batches[i] = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now})
		}

		var sender metricsender.HttpSender
		err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true)
		c.Assert(err, gc.ErrorMatches, test.expectedErr)

		for _, batch := range batches {
			m, err := s.State.MetricBatch(batch.UUID())
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(m.Sent(), jc.IsFalse)
		}
		killServer()
	}
}
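// The errorHandler helper passed to startServer above is not part of this
// excerpt. A minimal sketch, assuming it does nothing more than reply with
// the requested status code (the name and signature come from the call site;
// the body is an assumption):

import (
	"net/http"

	gc "gopkg.in/check.v1"
)

// errorHandler (hypothetical sketch): an http.HandlerFunc that always replies
// with the given status code, so SendMetrics observes failures such as
// "http 400" or "http 503".
func errorHandler(c *gc.C, status int) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(status)
	}
}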
// TestHttpSender checks that metrics are sent when the default HTTP sender
// is in use.
func (s *SenderSuite) TestHttpSender(c *gc.C) {
	metricCount := 3
	expectedCharmUrl, _ := s.unit.CharmURL()

	receiverChan := make(chan wireformat.MetricBatch, metricCount)
	cleanup := s.startServer(c, testHandler(c, receiverChan, nil, 0))
	defer cleanup()

	now := time.Now()
	metrics := make([]*state.MetricBatch, metricCount)
	for i := range metrics {
		metrics[i] = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now})
	}

	var sender metricsender.HttpSender
	err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(receiverChan, gc.HasLen, metricCount)
	close(receiverChan)
	for batch := range receiverChan {
		c.Assert(batch.CharmUrl, gc.Equals, expectedCharmUrl.String())
	}

	for _, metric := range metrics {
		m, err := s.State.MetricBatch(metric.UUID())
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(m.Sent(), jc.IsTrue)
	}
}
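// testHandler, used throughout the sender suite, is also not included in
// this excerpt. A simplified, hypothetical sketch of its recording half
// follows; it assumes the sender POSTs the batches as a JSON array
// (encoding is an assumption, as is the wireformat import path) and forwards
// each decoded batch on the supplied channel so the test can inspect what
// was transmitted. The real helper also returns meter statuses and a grace
// period in its response; that part is omitted here.

import (
	"encoding/json"
	"net/http"

	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"

	"github.com/juju/juju/apiserver/metricsender/wireformat"
)

func recordingHandler(c *gc.C, received chan<- wireformat.MetricBatch) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var batches []wireformat.MetricBatch
		err := json.NewDecoder(r.Body).Decode(&batches)
		c.Assert(err, jc.ErrorIsNil)
		for _, batch := range batches {
			received <- batch
		}
		w.WriteHeader(http.StatusOK)
	}
}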
func (s *SenderSuite) TestGracePeriodResponse(c *gc.C) {
	_ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false})
	cleanup := s.startServer(c, testHandler(c, nil, nil, 47*time.Hour))
	defer cleanup()

	var sender metricsender.HttpSender
	err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true)
	c.Assert(err, jc.ErrorIsNil)

	mm, err := s.State.MetricsManager()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mm.GracePeriod(), gc.Equals, 47*time.Hour)
}
// TestDontSendWithNopSender checks that when a NopSender is used nothing is
// transmitted, but the metrics are still marked as sent.
func (s *MetricSenderSuite) TestDontSendWithNopSender(c *gc.C) {
	now := time.Now()
	for i := 0; i < 3; i++ {
		s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now})
	}
	err := metricsender.SendMetrics(s.State, metricsender.NopSender{}, 10)
	c.Assert(err, jc.ErrorIsNil)

	sent, err := s.State.CountOfSentMetrics()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(sent, gc.Equals, 3)
}
func (s *MetricSenderSuite) TestFailureIncrementsConsecutiveFailures(c *gc.C) {
	sender := &testing.ErrorSender{Err: errors.New("something went wrong")}
	now := time.Now()
	for i := 0; i < 3; i++ {
		s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now})
	}
	err := metricsender.SendMetrics(s.State, sender, 1)
	c.Assert(err, gc.ErrorMatches, "something went wrong")

	mm, err := s.State.MetricsManager()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mm.ConsecutiveErrors(), gc.Equals, 1)
}
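// NopSender ships with the metricsender package and ErrorSender with its
// testing helpers; neither definition appears in this excerpt. The following
// stand-alone sketch shows a plausible shape for both, using hypothetical
// stand-in types rather than the real wireformat ones.

// MetricBatch and Response stand in for wireformat.MetricBatch and
// wireformat.Response, whose fields are not shown in this excerpt.
type MetricBatch struct{ UUID string }
type Response struct{}

// NopSender accepts every batch and reports success without transmitting
// anything, which lets SendMetrics mark the batches as sent.
type NopSender struct{}

func (NopSender) Send(batches []*MetricBatch) (*Response, error) {
	return &Response{}, nil
}

// ErrorSender always fails with the configured error, which is what drives
// the consecutive-errors counter in the test above.
type ErrorSender struct{ Err error }

func (s *ErrorSender) Send(batches []*MetricBatch) (*Response, error) {
	return nil, s.Err
}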
// TestMeterStatusInvalid checks that the metric sender deals with invalid
// meter status data properly.
func (s *SenderSuite) TestMeterStatusInvalid(c *gc.C) {
	unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.meteredService, SetCharmURL: true})
	unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.meteredService, SetCharmURL: true})
	unit3 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.meteredService, SetCharmURL: true})

	statusFunc := func(unitName string) (string, string, string) {
		switch unitName {
		case unit1.Name():
			// valid meter status
			return unitName, "GREEN", ""
		case unit2.Name():
			// invalid meter status
			return unitName, "blah", ""
		case unit3.Name():
			// invalid unit name
			return "no-such-unit", "GREEN", ""
		default:
			return unitName, "GREEN", ""
		}
	}

	cleanup := s.startServer(c, testHandler(c, nil, statusFunc, 0))
	defer cleanup()

	_ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit1, Sent: false})
	_ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Sent: false})
	_ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit3, Sent: false})

	for _, unit := range []*state.Unit{unit1, unit2, unit3} {
		status, err := unit.GetMeterStatus()
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(status.Code, gc.Equals, state.MeterNotSet)
	}

	var sender metricsender.HttpSender
	err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true)
	c.Assert(err, jc.ErrorIsNil)

	status, err := unit1.GetMeterStatus()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(status.Code, gc.Equals, state.MeterGreen)

	status, err = unit2.GetMeterStatus()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(status.Code, gc.Equals, state.MeterNotSet)

	status, err = unit3.GetMeterStatus()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(status.Code, gc.Equals, state.MeterNotSet)
}
// TestSendBulkMetrics tests that the logic of splitting sends into batches
// works correctly. The batch size is set to 10 metrics, so creating 100
// metrics results in 10 calls to the sender.
func (s *MetricSenderSuite) TestSendBulkMetrics(c *gc.C) {
	var sender testing.MockSender
	now := time.Now()
	for i := 0; i < 100; i++ {
		s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now})
	}
	err := metricsender.SendMetrics(s.State, &sender, 10)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(sender.Data, gc.HasLen, 10)
	for i := 0; i < 10; i++ {
		c.Assert(sender.Data[i], gc.HasLen, 10)
	}
}
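// The batch-splitting behaviour exercised above can be illustrated with a
// small, self-contained sketch (this is not the juju implementation): unsent
// metrics are sent in slices of at most batchSize per call to the sender, so
// 100 metrics with batchSize 10 produce exactly 10 calls.
func chunk[T any](items []T, batchSize int) [][]T {
	var chunks [][]T
	for len(items) > 0 {
		n := batchSize
		if len(items) < n {
			n = len(items)
		}
		chunks = append(chunks, items[:n])
		items = items[n:]
	}
	return chunks
}

// Example: len(chunk(make([]int, 100), 10)) == 10, matching the assertions
// on sender.Data above.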
func (s *MetricSenderSuite) TestFailuresResetOnSuccessfulSend(c *gc.C) {
	mm, err := s.State.MetricsManager()
	c.Assert(err, jc.ErrorIsNil)
	err = mm.IncrementConsecutiveErrors()
	c.Assert(err, jc.ErrorIsNil)

	now := time.Now()
	for i := 0; i < 3; i++ {
		s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now})
	}
	err = metricsender.SendMetrics(s.State, metricsender.NopSender{}, 10)
	c.Assert(err, jc.ErrorIsNil)

	mm, err = s.State.MetricsManager()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mm.ConsecutiveErrors(), gc.Equals, 0)
}
// SendMetrics will send any unsent metrics onto the metric collection service.
func (api *MetricsManagerAPI) SendMetrics(args params.Entities) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	canAccess, err := api.accessEnviron()
	if err != nil {
		return result, err
	}
	for i, arg := range args.Entities {
		tag, err := names.ParseModelTag(arg.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		if !canAccess(tag) {
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		modelState := api.state
		if tag != api.state.ModelTag() {
			modelState, err = api.state.ForModel(tag)
			if err != nil {
				err = errors.Annotatef(err, "failed to access state for %s", tag)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
		}
		txVendorMetrics, err := transmitVendorMetrics(modelState)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		err = metricsender.SendMetrics(modelState, sender, api.clock, maxBatchesPerSend, txVendorMetrics)
		if err != nil {
			err = errors.Annotatef(err, "failed to send metrics for %s", tag)
			logger.Warningf("%v", err)
			result.Results[i].Error = common.ServerError(err)
			continue
		}
	}
	return result, nil
}
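// transmitVendorMetrics is a helper that is not shown in this excerpt. A
// plausible sketch, assuming the model state exposes ModelConfig and
// ModelTag in the same way as the sendMetrics wrapper further below, is to
// read the model's TransmitVendorMetrics setting and hand it to SendMetrics
// as the final argument (signature and body are assumptions):
func transmitVendorMetrics(st metricsender.ModelBackend) (bool, error) {
	cfg, err := st.ModelConfig()
	if err != nil {
		return false, errors.Annotatef(err, "failed to get model config for %s", st.ModelTag())
	}
	return cfg.TransmitVendorMetrics(), nil
}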
// TestSendMetrics creates 2 unsent metrics and a sent metric
// and checks that the 2 unsent metrics get sent and have their
// sent field set to true.
func (s *MetricSenderSuite) TestSendMetrics(c *gc.C) {
	var sender testing.MockSender
	now := time.Now()
	unsent1 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now})
	unsent2 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now})
	s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, Time: &now})

	err := metricsender.SendMetrics(s.State, &sender, 10)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(sender.Data, gc.HasLen, 1)
	c.Assert(sender.Data[0], gc.HasLen, 2)

	sent1, err := s.State.MetricBatch(unsent1.UUID())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(sent1.Sent(), jc.IsTrue)

	sent2, err := s.State.MetricBatch(unsent2.UUID())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(sent2.Sent(), jc.IsTrue)
}
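// MockSender, used here and in TestSendBulkMetrics, is a recording test
// double from the metricsender testing helpers; its definition is not part
// of this excerpt. A hypothetical sketch of its shape, reusing the
// MetricBatch and Response stand-ins from the NopSender/ErrorSender sketch
// above: each call to Send appends the slice of batches it received, so
// sender.Data has one entry per send and each entry holds that send's
// batches, which is why the test asserts Data has length 1 and Data[0] has
// length 2.
type MockSender struct {
	Data [][]*MetricBatch // one element per Send call
}

func (m *MockSender) Send(batches []*MetricBatch) (*Response, error) {
	m.Data = append(m.Data, batches)
	return &Response{}, nil
}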
// TestMeterStatus checks that the meter status information returned
// by the collector service is propagated to the unit.
func (s *SenderSuite) TestMeterStatus(c *gc.C) {
	statusFunc := func(unitName string) (string, string, string) {
		return unitName, "GREEN", ""
	}
	cleanup := s.startServer(c, testHandler(c, nil, statusFunc, 0))
	defer cleanup()

	_ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false})

	status, err := s.unit.GetMeterStatus()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(status.Code, gc.Equals, state.MeterNotSet)

	var sender metricsender.HttpSender
	err = metricsender.SendMetrics(s.State, &sender, s.clock, 10, true)
	c.Assert(err, jc.ErrorIsNil)

	status, err = s.unit.GetMeterStatus()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(status.Code, gc.Equals, state.MeterGreen)
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package common

import (
	"github.com/juju/errors"
	"github.com/juju/names"

	"github.com/juju/juju/apiserver/metricsender"
	"github.com/juju/juju/state"
)

var sendMetrics = func(st *state.State) error {
	err := metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), metricsender.DefaultMaxBatchesPerSend())
	return errors.Trace(err)
}

// DestroyEnvironmentIncludingHosted sets the environment to dying. Cleanup
// jobs then destroy all services and non-manager, non-manual machine
// instances in the specified environment. This function assumes that all
// necessary authentication checks have been done. If the environment is a
// controller hosting other environments, they will also be destroyed.
func DestroyEnvironmentIncludingHosted(st *state.State, environTag names.EnvironTag) error {
	return destroyEnvironment(st, environTag, true)
}

// DestroyEnvironment sets the environment to dying. Cleanup jobs then destroy
// all services and non-manager, non-manual machine instances in the specified
// environment. This function assumes that all necessary authentication checks
// have been done. An error will be returned if this environment is a
import (
	"github.com/juju/errors"
	"github.com/juju/utils/clock"
	"gopkg.in/juju/names.v2"

	"github.com/juju/juju/apiserver/metricsender"
)

var sendMetrics = func(st metricsender.ModelBackend) error {
	cfg, err := st.ModelConfig()
	if err != nil {
		return errors.Annotatef(err, "failed to get model config for %s", st.ModelTag())
	}
	err = metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), clock.WallClock, metricsender.DefaultMaxBatchesPerSend(), cfg.TransmitVendorMetrics())
	return errors.Trace(err)
}

// DestroyModelIncludingHosted sets the model to dying. Cleanup jobs then
// destroy all services and non-manager, non-manual machine instances in the
// specified model. This function assumes that all necessary authentication
// checks have been done. If the model is a controller hosting other models,
// they will also be destroyed.
func DestroyModelIncludingHosted(st ModelManagerBackend, systemTag names.ModelTag) error {
	return destroyModel(st, systemTag, true)
}

// DestroyModel sets the model to dying. Cleanup jobs then destroy
// all services and non-manager, non-manual machine instances in the specified
// model. This function assumes that all necessary authentication checks