Example #1
File: undertaker.go Project: imoapps/juju
// NewUndertaker returns a worker which processes a dying environment.
func NewUndertaker(client apiundertaker.UndertakerClient, clock uc.Clock) worker.Worker {
	f := func(stopCh <-chan struct{}) error {
		result, err := client.EnvironInfo()
		if err != nil {
			return errors.Trace(err)
		}
		if result.Error != nil {
			return errors.Trace(result.Error)
		}
		envInfo := result.Result

		if envInfo.Life == params.Alive {
			return errors.Errorf("undertaker worker should not be started for an alive environment: %q", envInfo.GlobalName)
		}

		if envInfo.Life == params.Dying {
			// Process the dying environment. This blocks until the
			// environment is dead or the worker is asked to stop.
			if err := processDyingEnv(client, clock, stopCh); err != nil {
				return errors.Trace(err)
			}
		}

		// The environment is neither alive nor dying, so it must be dead.

		if envInfo.IsSystem {
			// Nothing to do. We don't remove environment docs for a state server
			// environment.
			return nil
		}

		cfg, err := client.EnvironConfig()
		if err != nil {
			return errors.Trace(err)
		}

		env, err := environs.New(cfg)
		if err != nil {
			return errors.Trace(err)
		}

		err = env.Destroy()
		if err != nil {
			return errors.Trace(err)
		}

		tod := clock.Now()
		if envInfo.TimeOfDeath != nil {
			// If TimeOfDeath is not nil, the environment was already dead
			// before the worker was started. So we use the recorded time of
			// death. This may happen if the system is rebooted after an
			// environment is set to dead, but before the environ docs are
			// removed.
			tod = *envInfo.TimeOfDeath
		}

		// Process the dead environment
		return processDeadEnv(client, clock, tod, stopCh)
	}
	return worker.NewSimpleWorker(f)
}
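
A pattern shared by every example on this page: the clock is injected as a parameter instead of calling time.Now or time.After directly, which is what makes these workers testable. A minimal sketch of a fake (the type and its fields are hypothetical, not juju code; later revisions of the clock.Clock interface may require more methods, in which case juju's own testing clock is the better fit):

// fakeClock is a hand-rolled stub covering the two methods these examples
// call: Now and After.
type fakeClock struct {
	now   time.Time      // returned by Now
	after chan time.Time // returned by After; the test fires it explicitly
}

func (c *fakeClock) Now() time.Time                         { return c.now }
func (c *fakeClock) After(d time.Duration) <-chan time.Time { return c.after }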
Example #2
File: undertaker.go Project: imoapps/juju
func processDeadEnv(client apiundertaker.UndertakerClient, clock uc.Clock, tod time.Time, stopCh <-chan struct{}) error {
	timeDead := clock.Now().Sub(tod)
	wait := ripTime - timeDead
	if wait < 0 {
		wait = 0
	}

	select {
	case <-clock.After(wait):
		err := client.RemoveEnviron()
		return errors.Annotate(err, "could not remove all docs for dead environment")
	case <-stopCh:
		return tomb.ErrDying
	}
}
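
ripTime (a package constant in the original source) acts as a grace period after the recorded time of death, and the clamp makes removal immediate when that period has already elapsed, e.g. when the worker restarts long after the environment died. The same computation factored into a hypothetical helper:

// graceWait returns how long to wait so that removal happens no earlier
// than ripTime after the time of death, and zero if that point has passed.
func graceWait(clk uc.Clock, tod time.Time, ripTime time.Duration) time.Duration {
	wait := ripTime - clk.Now().Sub(tod)
	if wait < 0 {
		wait = 0
	}
	return wait
}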
Example #3
File: logstream.go Project: bac/juju
func (eph logStreamEndpointHandler) newTailer(source logStreamSource, cfg params.LogStreamConfig, clock clock.Clock) (state.LogTailer, error) {
	start, err := source.getStart(cfg.Sink, cfg.AllModels)
	if err != nil {
		return nil, errors.Annotate(err, "getting log start position")
	}
	if cfg.MaxLookbackDuration != "" {
		d, err := time.ParseDuration(cfg.MaxLookbackDuration)
		if err != nil {
			return nil, errors.Annotate(err, "invalid lookback duration")
		}
		now := clock.Now()
		if now.Sub(start) > d {
			start = now.Add(-1 * d)
		}
	}

	tailerArgs := &state.LogTailerParams{
		StartTime:    start,
		InitialLines: cfg.MaxLookbackRecords,
		AllModels:    cfg.AllModels,
	}
	tailer, err := source.newTailer(tailerArgs)
	if err != nil {
		return nil, errors.Annotate(err, "tailing logs")
	}
	return tailer, nil
}
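
The clamp keeps the start position within MaxLookbackDuration of the present even when the sink last consumed logs much earlier. The same logic as a hypothetical standalone helper:

// clampStart bounds start so the tailer never looks back further than the
// configured duration (e.g. "1h").
func clampStart(clk clock.Clock, start time.Time, maxLookback string) (time.Time, error) {
	d, err := time.ParseDuration(maxLookback)
	if err != nil {
		return start, errors.Annotate(err, "invalid lookback duration")
	}
	if now := clk.Now(); now.Sub(start) > d {
		start = now.Add(-1 * d) // stream from an hour ago, not from further back
	}
	return start, nil
}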
Example #4
File: user.go Project: bac/juju
// CreateLocalLoginMacaroon creates a macaroon that may be provided to a
// user as proof that they have logged in with a valid username and password.
// This macaroon may then be used to obtain a discharge macaroon so that
// the user can log in without presenting their password for a set amount
// of time.
func CreateLocalLoginMacaroon(
	tag names.UserTag,
	service BakeryService,
	clock clock.Clock,
) (*macaroon.Macaroon, error) {
	// We create the macaroon with a random ID and a random root key, which
	// lets multiple clients log in as the same user and obtain separate
	// macaroons without sharing a root key.
	return service.NewMacaroon("", nil, []checkers.Caveat{
		{Condition: "is-authenticated-user " + tag.Id()},
		checkers.TimeBeforeCaveat(clock.Now().Add(LocalLoginInteractionTimeout)),
	})
}
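
The TimeBeforeCaveat bounds how long the macaroon can be used to obtain a discharge. Conceptually the caveat reduces to a clock comparison; a hedged sketch (the real verification is done by the bakery's checkers, not by code like this):

// checkTimeBefore is an illustrative stand-in for the bakery's time-before
// check: the caveat fails once now reaches the embedded deadline.
func checkTimeBefore(deadline, now time.Time) error {
	if !now.Before(deadline) {
		return errors.New("macaroon expired")
	}
	return nil
}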
Example #5
File: txnpruner.go Project: bac/juju
// New returns a worker which periodically prunes the data for
// completed transactions.
func New(tp TransactionPruner, interval time.Duration, clock clock.Clock) worker.Worker {
	return worker.NewSimpleWorker(func(stopCh <-chan struct{}) error {
		for {
			select {
			case <-clock.After(interval):
				err := tp.MaybePruneTransactions()
				if err != nil {
					return errors.Annotate(err, "pruning failed, txnpruner stopping")
				}
			case <-stopCh:
				return nil
			}
		}
	})
}
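
Because the interval wait goes through clock.After, a test can drive the loop without real sleeps. A sketch assuming the testable clock from github.com/juju/testing (its NewClock/Advance/Alarms API; the fake pruner is hypothetical):

type fakePruner struct{ pruned chan struct{} }

func (p fakePruner) MaybePruneTransactions() error {
	p.pruned <- struct{}{}
	return nil
}

func testPrunerTick(clk *testing.Clock) {
	p := fakePruner{pruned: make(chan struct{}, 1)}
	w := New(p, time.Hour, clk)
	defer worker.Stop(w)
	// A real test would first wait on clk.Alarms() so the worker is known
	// to be blocked in clock.After before advancing.
	clk.Advance(time.Hour) // satisfies clock.After(interval) without sleeping
	<-p.pruned             // observe one prune
}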
Example #6
File: killstatus.go Project: bac/juju
// newTimedStatusUpdater returns a function which waits a given period of time
// before querying the apiserver for updated data.
func newTimedStatusUpdater(ctx *cmd.Context, api destroyControllerAPI, controllerModelUUID string, clock clock.Clock) func(time.Duration) (ctrData, []modelData) {
	return func(wait time.Duration) (ctrData, []modelData) {
		if wait > 0 {
			<-clock.After(wait)
		}

		// If we hit an error, ctrStatus.HostedModelCount will be 0, the
		// polling loop will stop, and we'll go directly to destroying the
		// model.
		ctrStatus, modelsStatus, err := newData(api, controllerModelUUID)
		if err != nil {
			ctx.Infof("Unable to get the controller summary from the API: %s.", err)
		}

		return ctrStatus, modelsStatus
	}
}
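
The wait parameter lets a caller poll immediately the first time and insert a delay on later iterations. A hypothetical loop over the returned function (HostedModelCount is inferred from the comment in the code above; the real destroy command's loop may differ):

// pollUntilNoHostedModels drives the updater until no hosted models remain.
func pollUntilNoHostedModels(update func(time.Duration) (ctrData, []modelData)) {
	ctrStatus, _ := update(0) // first poll: no delay
	for ctrStatus.HostedModelCount > 0 {
		ctrStatus, _ = update(2 * time.Second) // later polls: wait first
	}
}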
Example #7
File: user.go Project: bac/juju
// CheckLocalLoginRequest checks that the given HTTP request contains at least
// one valid local login macaroon minted by the given service using
// CreateLocalLoginMacaroon. It returns an error with a
// *bakery.VerificationError cause if the macaroon verification failed. If the
// macaroon is valid, CheckLocalLoginRequest returns a list of caveats to add
// to the discharge macaroon.
func CheckLocalLoginRequest(
	service *bakery.Service,
	req *http.Request,
	tag names.UserTag,
	clock clock.Clock,
) ([]checkers.Caveat, error) {
	_, err := httpbakery.CheckRequest(service, req, nil, checkers.CheckerFunc{
		// Having a macaroon with an is-authenticated-user
		// caveat is proof that the user is "logged in".
		"is-authenticated-user",
		func(cond, arg string) error { return nil },
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	firstPartyCaveats := []checkers.Caveat{
		checkers.DeclaredCaveat("username", tag.Id()),
		checkers.TimeBeforeCaveat(clock.Now().Add(localLoginExpiryTime)),
	}
	return firstPartyCaveats, nil
}
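
Taken together with CreateLocalLoginMacaroon above, the flow is: mint a macaroon at login, then have the discharger validate it and stamp the discharge with the returned first-party caveats. An abbreviated, hypothetical discharge step (clock.WallClock is the wall-clock implementation from juju's clock package):

func dischargeCaveats(service *bakery.Service, req *http.Request, tag names.UserTag) ([]checkers.Caveat, error) {
	// Validate the login macaroon presented with the request...
	caveats, err := CheckLocalLoginRequest(service, req, tag, clock.WallClock)
	if err != nil {
		return nil, errors.Trace(err) // carries a *bakery.VerificationError cause
	}
	// ...and attach the returned caveats to the discharge macaroon.
	return caveats, nil
}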
Example #8
File: interactive.go Project: bac/juju
func preparePasswordCredential(
	clock clock.Clock,
	newUUID func() (utils.UUID, error),
) (ad.PasswordCredential, error) {
	password, err := newUUID()
	if err != nil {
		return ad.PasswordCredential{}, errors.Annotate(err, "generating password")
	}
	passwordKeyUUID, err := newUUID()
	if err != nil {
		return ad.PasswordCredential{}, errors.Annotate(err, "generating password key ID")
	}
	startDate := clock.Now().UTC()
	endDate := startDate.Add(passwordExpiryDuration)
	return ad.PasswordCredential{
		CustomKeyIdentifier: []byte("juju-" + startDate.Format("20060102")),
		KeyId:               passwordKeyUUID.String(),
		Value:               password.String(),
		StartDate:           startDate,
		EndDate:             endDate,
	}, nil
}
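
Both the key identifier and the validity window derive from clock.Now(), so the whole credential is reproducible under a fake clock. A small worked example with an assumed expiry (the real passwordExpiryDuration is defined elsewhere in the package):

func exampleWindow() {
	const passwordExpiryDuration = 365 * 24 * time.Hour // assumed value
	startDate := time.Date(2016, 5, 1, 0, 0, 0, 0, time.UTC)
	endDate := startDate.Add(passwordExpiryDuration)
	id := "juju-" + startDate.Format("20060102") // "juju-20160501"
	_, _ = endDate, id                           // valid until 2017-05-01
}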
Example #9
File: triggers.go Project: howbazaar/juju
// GetTriggers returns the signal channels for state transitions based on the current state.
// It controls the transitions of the inactive meter status worker.
//
// In a simple case, the transitions are trivial:
//
// D------------------A----------------------R--------------------->
//
// D - disconnect time
// A - amber status triggered
// R - red status triggered
//
// The problem arises from the fact that the lifetime of the worker can
// be interrupted, possibly with significant portions of the duration missing.
func GetTriggers(
	wst WorkerState,
	status string,
	disconnectedAt time.Time,
	clk clock.Clock,
	amberGracePeriod time.Duration,
	redGracePeriod time.Duration) (<-chan time.Time, <-chan time.Time) {

	now := clk.Now()

	if wst == Done {
		return nil, nil
	}

	if wst <= WaitingAmber && status == "RED" {
		// If the current status is already RED, we don't want to deescalate.
		wst = WaitingRed
	} else if wst < Done && now.Sub(disconnectedAt) >= redGracePeriod {
		// If we missed the transition to amber and it's time to transition to RED, go straight to RED.
		wst = WaitingRed
	}

	if wst == WaitingRed {
		redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt))
		return nil, redSignal
	}
	if wst == WaitingAmber || wst == Uninitialized {
		amberSignal := clk.After(amberGracePeriod - now.Sub(disconnectedAt))
		redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt))
		return amberSignal, redSignal
	}
	return nil, nil
}
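
Both signals are anchored to the disconnect time rather than to when the worker (re)starts, which is how interrupted lifetimes are handled. A worked example with assumed durations:

// Assume the worker restarts 10 minutes after disconnection, with a
// 15-minute amber and a 60-minute red grace period.
func exampleTriggers(clk clock.Clock, disconnectedAt time.Time) {
	now := clk.Now()
	amberSignal := clk.After(15*time.Minute - now.Sub(disconnectedAt)) // ~5m out
	redSignal := clk.After(60*time.Minute - now.Sub(disconnectedAt))   // ~50m out
	_, _ = amberSignal, redSignal
}

If a grace period has already expired, the computed duration is non-positive; standard-library timers (which a wall clock typically wraps) fire immediately in that case, so a worker restarted late still escalates right away.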
Example #10
// runCommandsWithTimeout is a helper to abstract common code between run commands and
// juju-run as an action
func (runner *runner) runCommandsWithTimeout(commands string, timeout time.Duration, clock clock.Clock) (*utilexec.ExecResponse, error) {
	srv, err := runner.startJujucServer()
	if err != nil {
		return nil, err
	}
	defer srv.Close()

	env, err := runner.context.HookVars(runner.paths)
	if err != nil {
		return nil, errors.Trace(err)
	}
	command := utilexec.RunParams{
		Commands:    commands,
		WorkingDir:  runner.paths.GetCharmDir(),
		Environment: env,
		Clock:       clock,
	}

	err = command.Run()
	if err != nil {
		return nil, err
	}
	runner.context.SetProcess(hookProcess{command.Process()})

	var cancel chan struct{}
	if timeout != 0 {
		cancel = make(chan struct{})
		go func() {
			// Close cancel once the timeout elapses. This goroutine runs to
			// completion even if the command finishes first; when timeout is
			// zero, cancel stays nil and the wait below is never cancelled.
			<-clock.After(timeout)
			close(cancel)
		}()
	}

	// Block and wait for process to finish
	return command.WaitWithCancel(cancel)
}
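
The goroutine adapts a one-shot clock.After into the close-on-cancel channel that WaitWithCancel expects; when timeout is zero the channel stays nil, and a receive from a nil channel blocks forever, so the command is never cancelled. The same shape as a small hypothetical helper:

// cancelAfter returns a channel closed once timeout elapses, or nil (never
// closed) when timeout is zero.
func cancelAfter(clk clock.Clock, timeout time.Duration) chan struct{} {
	if timeout == 0 {
		return nil
	}
	cancel := make(chan struct{})
	go func() {
		<-clk.After(timeout)
		close(cancel)
	}()
	return cancel
}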
Example #11
File: metricsender.go Project: kat-co/juju
// SendMetrics will send any unsent metrics
// over the MetricSender interface in batches
// no larger than batchSize.
func SendMetrics(st ModelBackend, sender MetricSender, clock clock.Clock, batchSize int, transmitVendorMetrics bool) error {
	metricsManager, err := st.MetricsManager()
	if err != nil {
		return errors.Trace(err)
	}
	sent := 0
	held := 0
	for {
		metrics, err := st.MetricsToSend(batchSize)
		if err != nil {
			return errors.Trace(err)
		}
		lenM := len(metrics)
		if lenM == 0 {
			if sent == 0 {
				logger.Infof("nothing to send")
			} else {
				logger.Infof("done sending")
			}
			break
		}

		var wireData []*wireformat.MetricBatch
		var heldBatches []string
		heldBatchUnits := map[string]bool{}
		for _, m := range metrics {
			if !transmitVendorMetrics && len(m.Credentials()) == 0 {
				heldBatches = append(heldBatches, m.UUID())
				heldBatchUnits[m.Unit()] = true
			} else {
				wireData = append(wireData, ToWire(m))
			}
		}
		response, err := sender.Send(wireData)
		if err != nil {
			logger.Errorf("%+v", err)
			if incErr := metricsManager.IncrementConsecutiveErrors(); incErr != nil {
				logger.Errorf("failed to increment error count %v", incErr)
				return errors.Trace(errors.Wrap(err, incErr))
			}
			return errors.Trace(err)
		}
		if response != nil {
			// TODO (mattyw) We are currently ignoring errors during response handling.
			acknowledged := handleResponse(metricsManager, st, *response)
			// Stop sending if there are no acknowledged batches.
			if acknowledged == 0 {
				logger.Debugf("got 0 acks, ending send loop")
				break
			}
			if err := metricsManager.SetLastSuccessfulSend(clock.Now()); err != nil {
				err = errors.Annotate(err, "failed to set successful send time")
				logger.Warningf("%v", err)
				return errors.Trace(err)
			}
		}
		// Mark held metric batches as sent so that they can be cleaned up later.
		if len(heldBatches) > 0 {
			err := st.SetMetricBatchesSent(heldBatches)
			if err != nil {
				return errors.Annotatef(err, "failed to mark metric batches as sent for %s", st.ModelTag())
			}
		}

		setHeldBatchUnitMeterStatus(st, heldBatchUnits)

		sent += len(wireData)
		held += len(heldBatches)
	}

	unsent, err := st.CountOfUnsentMetrics()
	if err != nil {
		return errors.Trace(err)
	}
	sentStored, err := st.CountOfSentMetrics()
	if err != nil {
		return errors.Trace(err)
	}
	logger.Infof("metrics collection summary for %s: sent:%d unsent:%d held:%d (%d sent metrics stored)", st.ModelTag(), sent, unsent, held, sentStored)

	return nil
}
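
Injecting the clock means a test can assert the exact timestamp recorded by SetLastSuccessfulSend. A sketch assuming the testable clock from github.com/juju/testing and hypothetical fakes for the backend and sender:

func testLastSend(backend ModelBackend, sender MetricSender) error {
	t0 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
	clk := testing.NewClock(t0)
	if err := SendMetrics(backend, sender, clk, 10, false); err != nil {
		return err
	}
	// On a successful send the manager's last-send time is exactly t0,
	// because SendMetrics stamps it with clock.Now().
	return nil
}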
Example #12
File: updater.go Project: bac/juju
func machineLoop(context machineContext, m machine, lifeChanged <-chan struct{}, clock clock.Clock) error {
	// Use a short poll interval when initially waiting for
	// a machine's address and machine agent to start, and a long one when it already
	// has an address and the machine agent is started.
	pollInterval := ShortPoll
	pollInstance := func() error {
		instInfo, err := pollInstanceInfo(context, m)
		if err != nil {
			return err
		}

		// err is nil at this point, so query the machine's current status.
		machineStatus := status.Pending
		if statusInfo, err := m.Status(); err != nil {
			logger.Warningf("cannot get current machine status for machine %v: %v", m.Id(), err)
		} else {
			// TODO(perrito666) add status validation.
			machineStatus = status.Status(statusInfo.Status)
		}

		// The extra condition below (checking for allocating/pending) improves
		// the user experience: without it the instance status would read
		// "pending" for ten minutes or more after the agent reaches "started".
		if instInfo.status.Status != status.Allocating && instInfo.status.Status != status.Pending {
			if len(instInfo.addresses) > 0 && machineStatus == status.Started {
				// We've got at least one address and a status and instance is started, so poll infrequently.
				pollInterval = LongPoll
			} else if pollInterval < LongPoll {
				// We have no addresses yet, or the machine has not started;
				// poll increasingly rarely until it does.
				pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff)
			}
		}
		return nil
	}

	shouldPollInstance := true
	for {
		if shouldPollInstance {
			if err := pollInstance(); err != nil {
				if !params.IsCodeNotProvisioned(err) {
					return errors.Trace(err)
				}
			}
			shouldPollInstance = false
		}
		select {
		case <-context.dying():
			return context.errDying()
		case <-clock.After(pollInterval):
			shouldPollInstance = true
		case <-lifeChanged:
			if err := m.Refresh(); err != nil {
				return err
			}
			if m.Life() == params.Dead {
				return nil
			}
		}
	}
}
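
The multiplicative backoff applies only while the machine lacks addresses or has not started. With assumed stand-ins for the package constants (the real ShortPoll, LongPoll and ShortPollBackoff are defined elsewhere):

const (
	shortPoll        = 3 * time.Second  // assumed ShortPoll
	longPoll         = 15 * time.Minute // assumed LongPoll
	shortPollBackoff = 2.0              // assumed ShortPollBackoff
)

// nextInterval sketches the growth: 3s, 6s, 12s, 24s, ... capped at longPoll.
func nextInterval(cur time.Duration) time.Duration {
	if cur >= longPoll {
		return longPoll
	}
	return time.Duration(float64(cur) * shortPollBackoff)
}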