Example #1
// NewUndertaker returns a worker which processes a dying environment.
func NewUndertaker(client apiundertaker.UndertakerClient, clock uc.Clock) worker.Worker {
	f := func(stopCh <-chan struct{}) error {
		result, err := client.EnvironInfo()
		if err != nil {
			return errors.Trace(err)
		}
		if result.Error != nil {
			return errors.Trace(result.Error)
		}
		envInfo := result.Result

		if envInfo.Life == params.Alive {
			return errors.Errorf("undertaker worker should not be started for an alive environment: %q", envInfo.GlobalName)
		}

		if envInfo.Life == params.Dying {
			// Process the dying environment. This blocks until the environment
			// is dead or the worker is stopped.
			if err := processDyingEnv(client, clock, stopCh); err != nil {
				return errors.Trace(err)
			}
		}

		// At this point the environment is dead: it either started out dead,
		// or processDyingEnv blocked until it died.

		if envInfo.IsSystem {
			// Nothing to do. We don't remove environment docs for a state server
			// environment.
			return nil
		}

		cfg, err := client.EnvironConfig()
		if err != nil {
			return errors.Trace(err)
		}

		env, err := environs.New(cfg)
		if err != nil {
			return errors.Trace(err)
		}

		err = env.Destroy()
		if err != nil {
			return errors.Trace(err)
		}

		tod := clock.Now()
		if envInfo.TimeOfDeath != nil {
			// If TimeOfDeath is not nil, the environment was already dead
			// before the worker was started. So we use the recorded time of
			// death. This may happen if the system is rebooted after an
			// environment is set to dead, but before the environ docs are
			// removed.
			tod = *envInfo.TimeOfDeath
		}

		// Process the dead environment
		return processDeadEnv(client, clock, tod, stopCh)
	}
	return worker.NewSimpleWorker(f)
}
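The time-of-death fallback at the end is worth isolating. Below is a minimal, runnable sketch of the idea using only the standard library; the types are stand-ins for the juju ones, not the real API:

package main

import (
	"fmt"
	"time"
)

// timeOfDeath prefers a previously recorded timestamp over "now", so a
// reboot between marking the environment dead and removing its docs
// does not reset the clock on the removal grace period.
func timeOfDeath(recorded *time.Time, now time.Time) time.Time {
	if recorded != nil {
		return *recorded
	}
	return now
}

func main() {
	earlier := time.Now().Add(-time.Hour)
	fmt.Println(timeOfDeath(&earlier, time.Now())) // recorded time wins
	fmt.Println(timeOfDeath(nil, time.Now()))      // freshly dead: use now
}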
Example #2
func processDeadEnv(client apiundertaker.UndertakerClient, clock uc.Clock, tod time.Time, stopCh <-chan struct{}) error {
	timeDead := clock.Now().Sub(tod)
	wait := ripTime - timeDead
	if wait < 0 {
		wait = 0
	}

	select {
	case <-clock.After(wait):
		err := client.RemoveEnviron()
		return errors.Annotate(err, "could not remove all docs for dead environment")
	case <-stopCh:
		return tomb.ErrDying
	}
}
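The clamp-then-select shape of processDeadEnv is reusable on its own: compute the remaining grace period, clamp it at zero, then race the timer against the stop channel. A runnable sketch with standard-library stand-ins (ripTime, the client, and tomb are replaced with hypothetical equivalents):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitRemainder sleeps out whatever is left of a grace period that
// began at tod, acting immediately if the period has already elapsed,
// and aborting cleanly if stopCh closes first.
func waitRemainder(tod time.Time, grace time.Duration, stopCh <-chan struct{}) error {
	wait := grace - time.Since(tod)
	if wait < 0 {
		wait = 0
	}
	select {
	case <-time.After(wait):
		fmt.Println("grace period over: remove the environment docs")
		return nil
	case <-stopCh:
		return errors.New("stopped before the grace period elapsed")
	}
}

func main() {
	// One second of the two-second grace period has already passed.
	_ = waitRemainder(time.Now().Add(-time.Second), 2*time.Second, make(chan struct{}))
}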
Example #3
func (eph logStreamEndpointHandler) newTailer(source logStreamSource, cfg params.LogStreamConfig, clock clock.Clock) (state.LogTailer, error) {
	start, err := source.getStart(cfg.Sink, cfg.AllModels)
	if err != nil {
		return nil, errors.Annotate(err, "getting log start position")
	}
	if cfg.MaxLookbackDuration != "" {
		d, err := time.ParseDuration(cfg.MaxLookbackDuration)
		if err != nil {
			return nil, errors.Annotate(err, "invalid lookback duration")
		}
		now := clock.Now()
		if now.Sub(start) > d {
			start = now.Add(-1 * d)
		}
	}

	tailerArgs := &state.LogTailerParams{
		StartTime:    start,
		InitialLines: cfg.MaxLookbackRecords,
		AllModels:    cfg.AllModels,
	}
	tailer, err := source.newTailer(tailerArgs)
	if err != nil {
		return nil, errors.Annotate(err, "tailing logs")
	}
	return tailer, nil
}
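The lookback clamp is the interesting part: the start position from the source can be arbitrarily old, and MaxLookbackDuration bounds it relative to the injected clock. A standalone sketch of just that clamp (function and parameter names are hypothetical):

package main

import (
	"fmt"
	"time"
)

// clampStart caps how far back a log stream may start: if start is
// older than maxLookback relative to now, it is pulled forward to the
// edge of the window.
func clampStart(start time.Time, maxLookback string, now time.Time) (time.Time, error) {
	if maxLookback == "" {
		return start, nil
	}
	d, err := time.ParseDuration(maxLookback)
	if err != nil {
		return time.Time{}, fmt.Errorf("invalid lookback duration: %v", err)
	}
	if now.Sub(start) > d {
		start = now.Add(-d)
	}
	return start, nil
}

func main() {
	now := time.Now()
	s, _ := clampStart(now.Add(-48*time.Hour), "24h", now)
	fmt.Println(s.Equal(now.Add(-24 * time.Hour))) // true: clamped to the window
}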
Example #4
File: user.go Project: bac/juju
// CreateLocalLoginMacaroon creates a macaroon that may be provided to a
// user as proof that they have logged in with a valid username and password.
// This macaroon may then be used to obtain a discharge macaroon so that
// the user can log in without presenting their password for a set amount
// of time.
func CreateLocalLoginMacaroon(
	tag names.UserTag,
	service BakeryService,
	clock clock.Clock,
) (*macaroon.Macaroon, error) {
	// We create the macaroon with a random ID and random root key, which
	// enables multiple clients to login as the same user and obtain separate
	// macaroons without having them use the same root key.
	return service.NewMacaroon("", nil, []checkers.Caveat{
		{Condition: "is-authenticated-user " + tag.Id()},
		checkers.TimeBeforeCaveat(clock.Now().Add(LocalLoginInteractionTimeout)),
	})
}
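The expiry comes from the injected clock rather than time.Now, which is what makes the function testable. For orientation, a time-before caveat boils down to a condition string checked at discharge time; the sketch below shows its general shape (the timeout value is a stand-in for LocalLoginInteractionTimeout, and the exact format is owned by the macaroon-bakery checkers package):

package main

import (
	"fmt"
	"time"
)

func main() {
	interactionTimeout := 2 * time.Minute // stand-in value
	expiry := time.Now().Add(interactionTimeout)
	// checkers.TimeBeforeCaveat produces a first-party caveat of this
	// general shape, rejected once the clock passes the timestamp.
	fmt.Println("time-before " + expiry.UTC().Format(time.RFC3339Nano))
}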
Example #5
// GetTriggers returns the signal channels for state transitions based on the current state.
// It controls the transitions of the inactive meter status worker.
//
// In a simple case, the transitions are trivial:
//
// D------------------A----------------------R--------------------->
//
// D - disconnect time
// A - amber status triggered
// R - red status triggered
//
// The complication is that the worker may be stopped and restarted partway
// through, so significant portions of the grace periods may already have
// elapsed by the time it computes its triggers.
func GetTriggers(
	wst WorkerState,
	status string,
	disconnectedAt time.Time,
	clk clock.Clock,
	amberGracePeriod time.Duration,
	redGracePeriod time.Duration) (<-chan time.Time, <-chan time.Time) {

	now := clk.Now()

	if wst == Done {
		return nil, nil
	}

	if wst <= WaitingAmber && status == "RED" {
		// If the current status is already RED, we don't want to deescalate.
		wst = WaitingRed
	} else if wst < Done && now.Sub(disconnectedAt) >= redGracePeriod {
		// If we missed the transition to amber and it's time to transition to RED, go straight to RED.
		wst = WaitingRed
	}

	if wst == WaitingRed {
		redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt))
		return nil, redSignal
	}
	if wst == WaitingAmber || wst == Uninitialized {
		amberSignal := clk.After(amberGracePeriod - now.Sub(disconnectedAt))
		redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt))
		return amberSignal, redSignal
	}
	return nil, nil
}
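Note that the remaining-duration arithmetic can go negative after a late restart. Assuming the injected clock behaves like the standard library's time.After, which fires immediately for non-positive durations, a missed deadline is signalled on the first select with no special-casing. A runnable sketch of the arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The worker restarts 90 minutes after the disconnect.
	disconnectedAt := time.Now().Add(-90 * time.Minute)
	amberGrace, redGrace := time.Hour, 2*time.Hour

	elapsed := time.Since(disconnectedAt)
	fmt.Println("amber in:", amberGrace-elapsed) // negative: fires at once
	fmt.Println("red in:", redGrace-elapsed)     // roughly 30m out

	// time.After with a non-positive duration fires immediately, so the
	// missed amber transition is delivered straight away.
	<-time.After(amberGrace - elapsed)
	fmt.Println("amber signal received")
}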
Example #6
File: user.go Project: bac/juju
// CheckLocalLoginRequest checks that the given HTTP request contains at least
// one valid local login macaroon minted by the given service using
// CreateLocalLoginMacaroon. It returns an error with a
// *bakery.VerificationError cause if the macaroon verification failed. If the
// macaroon is valid, CheckLocalLoginRequest returns a list of caveats to add
// to the discharge macaroon.
func CheckLocalLoginRequest(
	service *bakery.Service,
	req *http.Request,
	tag names.UserTag,
	clock clock.Clock,
) ([]checkers.Caveat, error) {
	_, err := httpbakery.CheckRequest(service, req, nil, checkers.CheckerFunc{
		// Having a macaroon with an is-authenticated-user
		// caveat is proof that the user is "logged in".
		"is-authenticated-user",
		func(cond, arg string) error { return nil },
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	firstPartyCaveats := []checkers.Caveat{
		checkers.DeclaredCaveat("username", tag.Id()),
		checkers.TimeBeforeCaveat(clock.Now().Add(localLoginExpiryTime)),
	}
	return firstPartyCaveats, nil
}
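Note the deliberately empty check function: any argument to is-authenticated-user passes, because the caveat's presence in a macaroon signed by this service is itself the proof of login. The real authentication happened when CreateLocalLoginMacaroon minted the macaroon; here the service only verifies the macaroon (including its expiry), then binds the declared username and a fresh clock-derived expiry into the discharge.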
Example #7
func preparePasswordCredential(
	clock clock.Clock,
	newUUID func() (utils.UUID, error),
) (ad.PasswordCredential, error) {
	password, err := newUUID()
	if err != nil {
		return ad.PasswordCredential{}, errors.Annotate(err, "generating password")
	}
	passwordKeyUUID, err := newUUID()
	if err != nil {
		return ad.PasswordCredential{}, errors.Annotate(err, "generating password key ID")
	}
	startDate := clock.Now().UTC()
	endDate := startDate.Add(passwordExpiryDuration)
	return ad.PasswordCredential{
		CustomKeyIdentifier: []byte("juju-" + startDate.Format("20060102")),
		KeyId:               passwordKeyUUID.String(),
		Value:               password.String(),
		StartDate:           startDate,
		EndDate:             endDate,
	}, nil
}
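Both sources of nondeterminism, time and randomness, are injected here, so a test can pin the output exactly. A sketch with a hypothetical fixed-clock stand-in, showing how the date-stamped key identifier falls out:

package main

import (
	"fmt"
	"time"
)

// fixedClock is a hypothetical stand-in for clock.Clock in tests.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time { return c.t }

func main() {
	clk := fixedClock{time.Date(2016, 5, 1, 0, 0, 0, 0, time.UTC)}
	start := clk.Now().UTC()
	// CustomKeyIdentifier embeds the creation date, making stale
	// credentials easy to identify at a glance.
	fmt.Println("juju-" + start.Format("20060102")) // juju-20160501
}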
Example #8
// SendMetrics will send any unsent metrics
// over the MetricSender interface in batches
// no larger than batchSize.
func SendMetrics(st ModelBackend, sender MetricSender, clock clock.Clock, batchSize int, transmitVendorMetrics bool) error {
	metricsManager, err := st.MetricsManager()
	if err != nil {
		return errors.Trace(err)
	}
	sent := 0
	held := 0
	for {
		metrics, err := st.MetricsToSend(batchSize)
		if err != nil {
			return errors.Trace(err)
		}
		lenM := len(metrics)
		if lenM == 0 {
			if sent == 0 {
				logger.Infof("nothing to send")
			} else {
				logger.Infof("done sending")
			}
			break
		}

		var wireData []*wireformat.MetricBatch
		var heldBatches []string
		heldBatchUnits := map[string]bool{}
		for _, m := range metrics {
			if !transmitVendorMetrics && len(m.Credentials()) == 0 {
				heldBatches = append(heldBatches, m.UUID())
				heldBatchUnits[m.Unit()] = true
			} else {
				wireData = append(wireData, ToWire(m))
			}
		}
		response, err := sender.Send(wireData)
		if err != nil {
			logger.Errorf("%+v", err)
			if incErr := metricsManager.IncrementConsecutiveErrors(); incErr != nil {
				logger.Errorf("failed to increment error count %v", incErr)
				return errors.Trace(errors.Wrap(err, incErr))
			}
			return errors.Trace(err)
		}
		if response != nil {
			// TODO (mattyw) We are currently ignoring errors during response handling.
			acknowledged := handleResponse(metricsManager, st, *response)
			// Stop sending if there are no acknowledged batches.
			if acknowledged == 0 {
				logger.Debugf("got 0 acks, ending send loop")
				break
			}
			if err := metricsManager.SetLastSuccessfulSend(clock.Now()); err != nil {
				err = errors.Annotate(err, "failed to set successful send time")
				logger.Warningf("%v", err)
				return errors.Trace(err)
			}
		}
		// Mark held metric batches as sent so that they can be cleaned up later.
		if len(heldBatches) > 0 {
			err := st.SetMetricBatchesSent(heldBatches)
			if err != nil {
				return errors.Annotatef(err, "failed to mark metric batches as sent for %s", st.ModelTag())
			}
		}

		setHeldBatchUnitMeterStatus(st, heldBatchUnits)

		sent += len(wireData)
		held += len(heldBatches)
	}

	unsent, err := st.CountOfUnsentMetrics()
	if err != nil {
		return errors.Trace(err)
	}
	sentStored, err := st.CountOfSentMetrics()
	if err != nil {
		return errors.Trace(err)
	}
	logger.Infof("metrics collection summary for %s: sent:%d unsent:%d held:%d (%d sent metrics stored)", st.ModelTag(), sent, unsent, held, sentStored)

	return nil
}
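The hold/transmit split inside the loop is the piece most worth lifting out: batches without credentials are withheld (and later marked sent so they can be cleaned up) unless vendor metrics may be transmitted. A runnable sketch with stand-in types:

package main

import "fmt"

// batch is a stand-in for the state metric batch used above.
type batch struct{ uuid, unit, credentials string }

// partition splits batches into those to put on the wire and those to
// hold back, mirroring the credentials check in SendMetrics.
func partition(batches []batch, transmitVendor bool) (wire, held []string) {
	for _, b := range batches {
		if !transmitVendor && b.credentials == "" {
			held = append(held, b.uuid)
		} else {
			wire = append(wire, b.uuid)
		}
	}
	return wire, held
}

func main() {
	bs := []batch{{"a", "unit/0", ""}, {"b", "unit/1", "creds"}}
	wire, held := partition(bs, false)
	fmt.Println("send:", wire, "hold:", held) // send: [b] hold: [a]
}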