Example #1
0
// TestPruneStatusHistoryBySize checks that size-based pruning (max size 1 MB,
// no age limit) removes a substantial portion of a unit's status history.
func (s *StatusHistorySuite) TestPruneStatusHistoryBySize(c *gc.C) {
	clock := testing.NewClock(time.Now())
	c.Assert(s.State.SetClockForTesting(clock), jc.ErrorIsNil)

	app := s.Factory.MakeApplication(c, nil)
	unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: app})
	// Seed 20000 history entries for the unit.
	state.PrimeUnitStatusHistory(c, clock, unit, status.Active, 20000, 1000, nil)

	history, err := unit.StatusHistory(status.StatusHistoryFilter{Size: 25000})
	c.Assert(err, jc.ErrorIsNil)
	c.Logf("%d\n", len(history))
	// 20000 primed entries plus the unit's initial status.
	c.Assert(history, gc.HasLen, 20001)

	c.Assert(state.PruneStatusHistory(s.State, 0, 1), jc.ErrorIsNil)

	history, err = unit.StatusHistory(status.StatusHistoryFilter{Size: 25000})
	c.Assert(err, jc.ErrorIsNil)
	// When this test was written the pruned size was 6670, roughly
	// 0.00015 MB per entry, but that figure is likely to change. Rather
	// than risk a flaky test, it is enough to say that if the count is
	// suddenly no longer below half the original, there is good reason
	// for suspicion.
	c.Assert(len(history), jc.LessThan, 10000)
}
Example #2
0
File: worker.go  Project: Pankov404/juju
// TODO(perrito666) Add a comprehensive test for the worker features

// loop is the pruner worker's main loop: every PruneInterval it prunes
// the status history collection down to MaxLogsPerState entries, until
// the stop channel is closed.
func (w *pruneWorker) loop(stopCh <-chan struct{}) error {
	p := w.params
	for {
		select {
		case <-stopCh:
			// Conventional shutdown signal for tomb-managed workers.
			return tomb.ErrDying
		case <-time.After(p.PruneInterval):
			// NOTE(review): time.After allocates a fresh timer each
			// iteration; acceptable here given the long interval.
			err := state.PruneStatusHistory(w.st, p.MaxLogsPerState)
			if err != nil {
				return errors.Trace(err)
			}
		}
	}
}
Example #3
0
// TestPruneStatusHistory verifies that PruneStatusHistory trims the
// statuses-history collection down to the requested maximum number of
// entries, keeping only the newest ones.
func (s *statusSuite) TestPruneStatusHistory(c *gc.C) {
	var oldDoc state.StatusDoc
	var err error
	st := s.State
	globalKey := "BogusKey"
	// Insert 200 history documents with distinguishable messages so we
	// can later tell exactly which entries survived pruning.
	for changeno := 1; changeno <= 200; changeno++ {
		oldDoc = state.StatusDoc{
			Status:     "AGivenStatus",
			StatusInfo: fmt.Sprintf("Status change %d", changeno),
			StatusData: nil,
		}
		timestamp := state.NowToTheSecond()
		oldDoc.Updated = &timestamp

		hDoc := state.NewHistoricalStatusDoc(oldDoc, globalKey)

		h := txn.Op{
			C:      state.StatusesHistoryC,
			Id:     changeno,
			Insert: hDoc,
		}

		err = state.RunTransaction(st, []txn.Op{h})
		c.Logf("Adding a history entry attempt n: %d", changeno)
		c.Assert(err, jc.ErrorIsNil)
	}
	history, err := state.StatusHistory(500, globalKey, st)
	// Fix: this error was previously never checked before inspecting the
	// result; assert it here, as the post-prune query below already does.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 200)
	// History is returned newest-first.
	c.Assert(history[0].Message, gc.Equals, "Status change 200")
	c.Assert(history[199].Message, gc.Equals, "Status change 1")

	// Prune down to 100 entries and verify only the newest remain.
	err = state.PruneStatusHistory(st, 100)
	c.Assert(err, jc.ErrorIsNil)
	history, err = state.StatusHistory(500, globalKey, st)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 100)
	c.Assert(history[0].Message, gc.Equals, "Status change 200")
	c.Assert(history[99].Message, gc.Equals, "Status change 101")
}
Example #4
0
// TestPruneStatusHistory verifies that PruneStatusHistory trims the
// statuses-history collection down to the requested maximum number of
// entries, keeping only the newest ones.
func (s *statusSuite) TestPruneStatusHistory(c *gc.C) {
	var oldDoc state.StatusDoc
	var err error
	st := s.State
	globalKey := "BogusKey"
	// Insert 200 history documents with distinguishable messages so we
	// can later tell exactly which entries survived pruning.
	for changeno := 1; changeno <= 200; changeno++ {
		oldDoc = state.StatusDoc{
			EnvUUID:    st.EnvironUUID(),
			Status:     "AGivenStatus",
			StatusInfo: fmt.Sprintf("Status change %d", changeno),
			StatusData: nil,
		}
		timestamp := state.NowToTheSecond()
		oldDoc.Updated = &timestamp

		hDoc := state.NewHistoricalStatusDoc(changeno, oldDoc, globalKey)

		// Write directly to the collection; close the session as soon
		// as the insert completes.
		history, closer := state.GetCollection(st, state.StatusesHistoryC)
		historyW := history.Writeable()
		err = historyW.Insert(hDoc)
		closer()

		c.Logf("Adding a history entry attempt n: %d", changeno)
		c.Assert(err, jc.ErrorIsNil)
	}
	history, err := state.StatusHistory(500, globalKey, st)
	// Fix: this error was previously never checked before inspecting the
	// result; assert it here, as the post-prune query below already does.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 200)
	// History is returned newest-first.
	c.Assert(history[0].Message, gc.Equals, "Status change 200")
	c.Assert(history[199].Message, gc.Equals, "Status change 1")

	// Prune down to 100 entries and verify only the newest remain.
	err = state.PruneStatusHistory(st, 100)
	c.Assert(err, jc.ErrorIsNil)
	history, err = state.StatusHistory(500, globalKey, st)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 100)
	c.Assert(history[0].Message, gc.Equals, "Status change 200")
	c.Assert(history[99].Message, gc.Equals, "Status change 101")
}
Example #5
0
File: pruner.go  Project: pmatulis/juju
// Prune removes status history entries until only the newest
// MaxLogsPerEntity records remain for each entity. Callers without
// model-manager permission get common.ErrPerm.
func (api *API) Prune(p params.StatusHistoryPruneArgs) error {
	if api.authorizer.AuthModelManager() {
		return state.PruneStatusHistory(api.st, p.MaxLogsPerEntity)
	}
	return common.ErrPerm
}
Example #6
0
// TestPruneStatusHistoryByDate checks that age-based pruning (10h cutoff,
// generous size limit) drops the 24h-old primed entries while keeping the
// fresh ones, across several units and their agents.
func (s *StatusHistorySuite) TestPruneStatusHistoryByDate(c *gc.C) {

	// NOTE: the behaviour is bad, and the test is ugly. I'm just verifying
	// the existing logic here.
	//
	// If you get the opportunity to fix this, you'll want a better shape of
	// test (that injects a usable clock dependency, apart from anything else,
	// and checks that we do our best to maintain a usable span of history
	// rather than an arbitrary limit per entity. And isn't O(N) on status
	// count in the model).

	const count = 3
	units := make([]*state.Unit, count)
	agents := make([]*state.UnitAgent, count)
	service := s.Factory.MakeApplication(c, nil)
	for i := 0; i < count; i++ {
		units[i] = s.Factory.MakeUnit(c, &factory.UnitParams{Application: service})
		agents[i] = units[i].Agent()
	}

	// Each entity gets two batches of primed entries: one "fresh" batch
	// (zero offset) and one aged 24 hours into the past. Only the aged
	// batch should be removed by the 10-hour prune below.
	primeUnitStatusHistory(c, units[0], 10, 0)
	primeUnitStatusHistory(c, units[0], 10, 24*time.Hour)
	primeUnitStatusHistory(c, units[1], 50, 0)
	primeUnitStatusHistory(c, units[1], 50, 24*time.Hour)
	primeUnitStatusHistory(c, units[2], 100, 0)
	primeUnitStatusHistory(c, units[2], 100, 24*time.Hour)
	primeUnitAgentStatusHistory(c, agents[0], 100, 0)
	primeUnitAgentStatusHistory(c, agents[0], 100, 24*time.Hour)
	primeUnitAgentStatusHistory(c, agents[1], 50, 0)
	primeUnitAgentStatusHistory(c, agents[1], 50, 24*time.Hour)
	primeUnitAgentStatusHistory(c, agents[2], 10, 0)
	primeUnitAgentStatusHistory(c, agents[2], 10, 24*time.Hour)

	// Pre-prune sanity check for units[0]: 10 fresh + initial + 10 aged,
	// returned newest-first.
	history, err := units[0].StatusHistory(status.StatusHistoryFilter{Size: 50})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 21)
	checkInitialWorkloadStatus(c, history[10])
	for i, statusInfo := range history[:10] {
		checkPrimedUnitStatus(c, statusInfo, 9-i, 0)
	}
	// NOTE(review): history[11:20] covers indices 11-19 only — 9 of the
	// 10 aged entries; index 20 is never checked. Possibly an off-by-one
	// in the original test; confirm before "fixing".
	for i, statusInfo := range history[11:20] {
		checkPrimedUnitStatus(c, statusInfo, 9-i, 24*time.Hour)
	}

	// Prune everything older than 10 hours (size limit 1024 MB is large
	// enough to be a no-op here).
	err = state.PruneStatusHistory(s.State, 10*time.Hour, 1024)
	c.Assert(err, jc.ErrorIsNil)

	// units[0]: aged batch gone; fresh batch plus initial status remain.
	history, err = units[0].StatusHistory(status.StatusHistoryFilter{Size: 50})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 11)
	checkInitialWorkloadStatus(c, history[10])
	for i, statusInfo := range history[:10] {
		checkPrimedUnitStatus(c, statusInfo, 9-i, 0)
	}

	history, err = units[1].StatusHistory(status.StatusHistoryFilter{Size: 100})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 51)
	for i, statusInfo := range history[:50] {
		checkPrimedUnitStatus(c, statusInfo, 49-i, 0)
	}

	history, err = units[2].StatusHistory(status.StatusHistoryFilter{Size: 200})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 101)
	for i, statusInfo := range history[:100] {
		checkPrimedUnitStatus(c, statusInfo, 99-i, 0)
	}

	history, err = agents[0].StatusHistory(status.StatusHistoryFilter{Size: 200})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 101)
	for i, statusInfo := range history[:100] {
		checkPrimedUnitAgentStatus(c, statusInfo, 99-i, 0)
	}

	history, err = agents[1].StatusHistory(status.StatusHistoryFilter{Size: 100})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 51)
	for i, statusInfo := range history[:50] {
		checkPrimedUnitAgentStatus(c, statusInfo, 49-i, 0)
	}

	history, err = agents[2].StatusHistory(status.StatusHistoryFilter{Size: 50})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(history, gc.HasLen, 11)
	checkInitialUnitAgentStatus(c, history[10])
	for i, statusInfo := range history[:10] {
		checkPrimedUnitAgentStatus(c, statusInfo, 9-i, 0)
	}
}