func (t *localServerSuite) TestAllocateAddressFailureToFindNetworkInterface(c *gc.C) {
	env := t.prepareEnviron(c)
	err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)

	instanceIds, err := env.StateServerInstances()
	c.Assert(err, jc.ErrorIsNil)
	instId := instanceIds[0]
	addr := network.Address{Value: "8.0.0.4"}

	// Invalid instance found
	err = env.AllocateAddress(instId+"foo", "", &addr, "foo", "bar")
	c.Assert(err, gc.ErrorMatches, ".*InvalidInstanceID.NotFound.*")

	// No network interface
	err = env.AllocateAddress(instId, "", &addr, "foo", "bar")
	c.Assert(errors.Cause(err), gc.ErrorMatches, "unexpected AWS response: network interface not found")

	// Nil or empty address given.
	err = env.AllocateAddress(instId, "", nil, "foo", "bar")
	c.Assert(errors.Cause(err), gc.ErrorMatches, "invalid address: nil or empty")

	err = env.AllocateAddress(instId, "", &network.Address{Value: ""}, "foo", "bar")
	c.Assert(errors.Cause(err), gc.ErrorMatches, "invalid address: nil or empty")
}
func (s *uniterResolver) nextOp(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (operation.Operation, error) {
	switch remoteState.Life {
	case params.Alive:
	case params.Dying:
		// Normally we handle relations last, but if we're dying we
		// must ensure that all relations are broken first.
		op, err := s.config.Relations.NextOp(localState, remoteState, opFactory)
		if errors.Cause(err) != resolver.ErrNoOperation {
			return op, err
		}

		// We're not in a hook error and the unit is Dying,
		// so we should proceed to tear down.
		//
		// TODO(axw) move logic for cascading destruction of
		// subordinates, relation units and storage
		// attachments into state, via cleanups.
		if localState.Started {
			return opFactory.NewRunHook(hook.Info{Kind: hooks.Stop})
		}
		fallthrough

	case params.Dead:
		// The unit is dying/dead and stopped, so tell the uniter
		// to terminate.
		return nil, resolver.ErrTerminate
	}

	// Now that storage hooks have run at least once, before anything else,
	// we need to run the install hook.
	// TODO(cmars): remove !localState.Started. It's here as a temporary
	// measure because unit agent upgrades aren't being performed yet.
	if !localState.Installed && !localState.Started {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.Install})
	}

	if charmModified(localState, remoteState) {
		return opFactory.NewUpgrade(remoteState.CharmURL)
	}

	if localState.ConfigVersion != remoteState.ConfigVersion {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.ConfigChanged})
	}

	op, err := s.config.Relations.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	// UpdateStatus hook runs if nothing else needs to.
	if localState.UpdateStatusVersion != remoteState.UpdateStatusVersion {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.UpdateStatus})
	}

	return nil, resolver.ErrNoOperation
}
// newState returns a new State that uses the given environment.
// The environment must have already been bootstrapped.
func newState(environ environs.Environ, mongoInfo *mongo.MongoInfo) (*state.State, error) {
	config := environ.Config()
	password := config.AdminSecret()
	if password == "" {
		return nil, fmt.Errorf("cannot connect without admin-secret")
	}
	modelTag := names.NewModelTag(config.UUID())

	mongoInfo.Password = password
	opts := mongo.DefaultDialOpts()
	st, err := state.Open(modelTag, mongoInfo, opts, environs.NewStatePolicy())
	if errors.IsUnauthorized(errors.Cause(err)) {
		// We try for a while because we might succeed in
		// connecting to mongo before the state has been
		// initialized and the initial password set.
		for a := redialStrategy.Start(); a.Next(); {
			st, err = state.Open(modelTag, mongoInfo, opts, environs.NewStatePolicy())
			if !errors.IsUnauthorized(errors.Cause(err)) {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}

	if err := updateSecrets(environ, st); err != nil {
		st.Close()
		return nil, fmt.Errorf("unable to push secrets: %v", err)
	}
	return st, nil
}
func (s *loginSuite) TestLoginAsDeactivatedUser(c *gc.C) {
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	info.Tag = nil
	info.Password = ""
	st, err := api.Open(info, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()
	password := "******"
	u := s.Factory.MakeUser(c, &factory.UserParams{Password: password, Disabled: true})

	_, err = st.Client().Status([]string{})
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Client"`,
		Code:    "not implemented",
	})

	// Since these are user login tests, the nonce is empty.
	err = st.Login(u.Tag(), password, "")
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: "invalid entity name or password",
		Code:    "unauthorized access",
	})

	_, err = st.Client().Status([]string{})
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Client"`,
		Code:    "not implemented",
	})
}
func (s *loginSuite) TestLoginAsDeactivatedUser(c *gc.C) {
	info, srv := newServer(c, s.State)
	defer assertStop(c, srv)
	info.ModelTag = s.State.ModelTag()

	st := s.openAPIWithoutLogin(c, info)
	password := "******"
	u := s.Factory.MakeUser(c, &factory.UserParams{Password: password, Disabled: true})

	_, err := st.Client().Status([]string{})
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Client"`,
		Code:    "not implemented",
	})

	// Since these are user login tests, the nonce is empty.
	err = st.Login(u.Tag(), password, "", nil)
	assertInvalidEntityPassword(c, err)

	_, err = st.Client().Status([]string{})
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Client"`,
		Code:    "not implemented",
	})
}
// Run implements Command.Run
func (c *killCommand) Run(ctx *cmd.Context) error {
	controllerName := c.ControllerName()
	store := c.ClientStore()
	if !c.assumeYes {
		if err := confirmDestruction(ctx, controllerName); err != nil {
			return err
		}
	}

	// Attempt to connect to the API.
	api, err := c.getControllerAPI()
	switch {
	case err == nil:
		defer api.Close()
	case errors.Cause(err) == common.ErrPerm:
		return errors.Annotate(err, "cannot destroy controller")
	default:
		if errors.Cause(err) != modelcmd.ErrConnTimedOut {
			logger.Debugf("unable to open api: %s", err)
		}
		ctx.Infof("Unable to open API: %s\n", err)
		api = nil
	}

	// Obtain controller environ so we can clean up afterwards.
	controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api)
	if err != nil {
		return errors.Annotate(err, "getting controller environ")
	}

	// If we were unable to connect to the API, just destroy the controller through
	// the environs interface.
	if api == nil {
		ctx.Infof("Unable to connect to the API server. Destroying through provider.")
		return environs.Destroy(controllerName, controllerEnviron, store)
	}

	// Attempt to destroy the controller and all environments.
	err = api.DestroyController(true)
	if err != nil {
		ctx.Infof("Unable to destroy controller through the API: %s. Destroying through provider.", err)
		return environs.Destroy(controllerName, controllerEnviron, store)
	}

	ctx.Infof("Destroying controller %q\nWaiting for resources to be reclaimed", controllerName)

	updateStatus := newTimedStatusUpdater(ctx, api, controllerEnviron.Config().UUID())
	for ctrStatus, envsStatus := updateStatus(0); hasUnDeadModels(envsStatus); ctrStatus, envsStatus = updateStatus(2 * time.Second) {
		ctx.Infof(fmtCtrStatus(ctrStatus))
		for _, envStatus := range envsStatus {
			ctx.Verbosef(fmtModelStatus(envStatus))
		}
	}

	ctx.Infof("All hosted models reclaimed, cleaning up controller machines")

	return environs.Destroy(controllerName, controllerEnviron, store)
}
func (s *loginSuite) TestBadLogin(c *gc.C) {
	// Start our own server so we can control when the first login
	// happens. Otherwise in JujuConnSuite.SetUpTest api.Open is
	// called with user-admin permissions automatically.
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	adminUser := s.AdminUserTag(c)

	for i, t := range []struct {
		tag      names.Tag
		password string
		err      error
		code     string
	}{{
		tag:      adminUser,
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}, {
		tag:      names.NewUserTag("unknown"),
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}} {
		c.Logf("test %d; entity %q; password %q", i, t.tag, t.password)
		func() {
			// Open the API without logging in, so we can perform
			// operations on the connection before calling Login.
			st := s.openAPIWithoutLogin(c, info)
			defer st.Close()

			_, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})

			// Since these are user login tests, the nonce is empty.
			err = st.Login(t.tag, t.password, "", nil)
			c.Assert(errors.Cause(err), gc.DeepEquals, t.err)
			c.Assert(params.ErrCode(err), gc.Equals, t.code)

			_, err = apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})
		}()
	}
}
func rebootstrap(cfg *config.Config, ctx *cmd.Context, cons constraints.Value) (environs.Environ, error) {
	progress("re-bootstrapping environment")
	// Turn on safe mode so that the newly bootstrapped instance
	// will not destroy all the instances it does not know about.
	cfg, err := cfg.Apply(map[string]interface{}{
		"provisioner-safe-mode": true,
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot enable provisioner-safe-mode")
	}
	env, err := environs.New(cfg)
	if err != nil {
		return nil, err
	}
	instanceIds, err := env.StateServerInstances()
	switch errors.Cause(err) {
	case nil, environs.ErrNoInstances:
		// Some providers will return a nil error even
		// if there are no live state server instances.
		break
	case environs.ErrNotBootstrapped:
		return nil, errors.Trace(err)
	default:
		return nil, errors.Annotate(err, "cannot determine state server instances")
	}
	if len(instanceIds) > 0 {
		instances, err := env.Instances(instanceIds)
		switch errors.Cause(err) {
		case nil, environs.ErrPartialInstances:
			return nil, fmt.Errorf("old bootstrap instances %q still seem to exist; will not replace", instances)
		case environs.ErrNoInstances:
			// No state server instances, so keep running.
			break
		default:
			return nil, errors.Annotate(err, "cannot detect whether old instance is still running")
		}
	}
	// Remove the storage so that we can bootstrap without the provider complaining.
	if env, ok := env.(environs.EnvironStorage); ok {
		if err := env.Storage().Remove(common.StateFile); err != nil {
			return nil, errors.Annotate(err, fmt.Sprintf("cannot remove %q from storage", common.StateFile))
		}
	}

	// TODO If we fail beyond here, then we won't have a state file and
	// we won't be able to re-run this script because it fails without it.
	// We could either try to recreate the file if we fail (which is itself
	// error-prone) or we could provide a --no-check flag to make
	// it go ahead anyway without the check.

	args := bootstrap.BootstrapParams{Constraints: cons}
	if err := bootstrap.Bootstrap(envcmd.BootstrapContextNoVerify(ctx), env, args); err != nil {
		return nil, errors.Annotate(err, "cannot bootstrap new instance")
	}
	return env, nil
}
// WaitForAgentInitialisation polls the bootstrapped controller with a read-only
// command which will fail until the controller is fully initialised.
// TODO(wallyworld) - add a bespoke command to maybe the admin facade for this purpose.
func WaitForAgentInitialisation(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName, hostedModelName string) error {
	// TODO(katco): 2016-08-09: lp:1611427
	attempts := utils.AttemptStrategy{
		Min:   bootstrapReadyPollCount,
		Delay: bootstrapReadyPollDelay,
	}
	var (
		apiAttempts int
		err         error
	)

	// Make a best effort to find the new controller address so we can print it.
	addressInfo := ""
	controller, err := c.ClientStore().ControllerByName(controllerName)
	if err == nil && len(controller.APIEndpoints) > 0 {
		addr, err := network.ParseHostPort(controller.APIEndpoints[0])
		if err == nil {
			addressInfo = fmt.Sprintf(" at %s", addr.Address.Value)
		}
	}

	ctx.Infof("Contacting Juju controller%s to verify accessibility...", addressInfo)
	apiAttempts = 1
	for attempt := attempts.Start(); attempt.Next(); apiAttempts++ {
		err = tryAPI(c)
		if err == nil {
			ctx.Infof("Bootstrap complete, %q controller now available.", controllerName)
			ctx.Infof("Controller machines are in the %q model.", bootstrap.ControllerModelName)
			ctx.Infof("Initial model %q added.", hostedModelName)
			break
		}

		// As the API server is coming up, it goes through a number of steps.
		// Initially the upgrade steps run, but the api server allows some
		// calls to be processed during the upgrade, but not the list blocks.
		// Logins are also blocked during space discovery.
		// It is also possible that the underlying database causes connections
		// to be dropped as it is initialising, or reconfiguring. These can
		// lead to EOF or "connection is shut down" error messages. We skip
		// these too, hoping that things come back up before the end of the
		// retry poll count.
		errorMessage := errors.Cause(err).Error()
		switch {
		case errors.Cause(err) == io.EOF,
			strings.HasSuffix(errorMessage, "connection is shut down"),
			strings.HasSuffix(errorMessage, "no api connection available"),
			strings.Contains(errorMessage, "spaces are still being discovered"):
			ctx.Verbosef("Still waiting for API to become available")
			continue
		case params.ErrCode(err) == params.CodeUpgradeInProgress:
			ctx.Verbosef("Still waiting for API to become available: %v", err)
			continue
		}
		break
	}
	return errors.Annotatef(err, "unable to contact api server after %d attempts", apiAttempts)
}
// Run implements Command.Run
func (c *killCommand) Run(ctx *cmd.Context) error {
	controllerName := c.ControllerName()
	store := c.ClientStore()
	if !c.assumeYes {
		if err := confirmDestruction(ctx, controllerName); err != nil {
			return err
		}
	}

	// Attempt to connect to the API.
	api, err := c.getControllerAPI()
	switch {
	case err == nil:
		defer api.Close()
	case errors.Cause(err) == common.ErrPerm:
		return errors.Annotate(err, "cannot destroy controller")
	default:
		if errors.Cause(err) != modelcmd.ErrConnTimedOut {
			logger.Debugf("unable to open api: %s", err)
		}
		ctx.Infof("Unable to open API: %s\n", err)
		api = nil
	}

	// Obtain controller environ so we can clean up afterwards.
	controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api)
	if err != nil {
		return errors.Annotate(err, "getting controller environ")
	}

	// If we were unable to connect to the API, just destroy the controller through
	// the environs interface.
	if api == nil {
		ctx.Infof("Unable to connect to the API server, destroying through provider")
		return environs.Destroy(controllerName, controllerEnviron, store)
	}

	// Attempt to destroy the controller and all environments.
	err = api.DestroyController(true)
	if err != nil {
		ctx.Infof("Unable to destroy controller through the API: %s\nDestroying through provider", err)
		return environs.Destroy(controllerName, controllerEnviron, store)
	}

	ctx.Infof("Destroying controller %q\nWaiting for resources to be reclaimed", controllerName)

	uuid := controllerEnviron.Config().UUID()
	if err := c.WaitForModels(ctx, api, uuid); err != nil {
		c.DirectDestroyRemaining(ctx, api)
	}
	return environs.Destroy(controllerName, controllerEnviron, store)
}
// TODO: The following functions can be moved later.

// ErrorEqual returns a boolean indicating whether err1 is equal to err2.
func ErrorEqual(err1, err2 error) bool {
	e1 := errors.Cause(err1)
	e2 := errors.Cause(err2)

	if e1 == e2 {
		return true
	}

	if e1 == nil || e2 == nil {
		return e1 == e2
	}

	return e1.Error() == e2.Error()
}
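// ExampleErrorEqual is an illustrative usage sketch, not part of the original
// source: it assumes "fmt" and the juju errors package ("github.com/juju/errors",
// which provides New, Trace and Cause) are imported. It exercises the three
// branches above: identical causes, equal messages, and exactly one nil error.
func ExampleErrorEqual() {
	base := errors.New("boom")
	fmt.Println(ErrorEqual(base, errors.Trace(base)))          // same underlying cause
	fmt.Println(ErrorEqual(errors.New("x"), errors.New("x")))  // different values, same message
	fmt.Println(ErrorEqual(errors.New("x"), nil))              // only one side is nil
	// Output:
	// true
	// true
	// false
}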
// filesystemParamsBySource separates the filesystem parameters by filesystem source.
func filesystemParamsBySource(
	environConfig *config.Config,
	baseStorageDir string,
	params []storage.FilesystemParams,
	managedFilesystemSource storage.FilesystemSource,
) (map[string][]storage.FilesystemParams, map[string]storage.FilesystemSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	filesystemSources := make(map[string]storage.FilesystemSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := filesystemSources[sourceName]; ok {
			continue
		}
		if params.Volume != (names.VolumeTag{}) {
			filesystemSources[sourceName] = managedFilesystemSource
			continue
		}
		filesystemSource, err := filesystemSource(
			environConfig, baseStorageDir, sourceName, params.Provider,
		)
		if errors.Cause(err) == errNonDynamic {
			filesystemSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting filesystem source")
		}
		filesystemSources[sourceName] = filesystemSource
	}

	paramsBySource := make(map[string][]storage.FilesystemParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		filesystemSource := filesystemSources[sourceName]
		if filesystemSource == nil {
			// Ignore nil filesystem sources; this means that the
			// filesystem should be created by the machine-provisioner.
			continue
		}
		err := filesystemSource.ValidateFilesystemParams(params)
		switch errors.Cause(err) {
		case nil:
			paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
		default:
			return nil, nil, errors.Annotatef(err, "invalid parameters for filesystem %s", params.Tag.Id())
		}
	}
	return paramsBySource, filesystemSources, nil
}
func (t *localServerSuite) TestAllocateAddressIPAddressInUseOrEmpty(c *gc.C) {
	env, instId := t.setUpInstanceWithDefaultVpc(c)
	addr := network.Address{Value: "8.0.0.4"}

	mockAssign := func(ec2Inst *amzec2.EC2, netId string, addr network.Address) error {
		return &amzec2.Error{Code: "InvalidParameterValue"}
	}
	t.PatchValue(&ec2.AssignPrivateIPAddress, mockAssign)

	err := env.AllocateAddress(instId, "", addr, "foo", "bar")
	c.Assert(errors.Cause(err), gc.Equals, environs.ErrIPAddressUnavailable)

	err = env.AllocateAddress(instId, "", network.Address{}, "foo", "bar")
	c.Assert(errors.Cause(err), gc.Equals, environs.ErrIPAddressUnavailable)
}
func (*rpcSuite) TestTransformErrors(c *gc.C) {
	root := &Root{
		errorInst: &ErrorMethods{&codedError{"message", "code"}},
	}
	tfErr := func(err error) error {
		c.Check(err, gc.NotNil)
		if e, ok := err.(*codedError); ok {
			return &codedError{
				m:    "transformed: " + e.m,
				code: "transformed: " + e.code,
			}
		}
		return fmt.Errorf("transformed: %v", err)
	}
	client, srvDone, _, _ := newRPCClientServer(c, root, tfErr, false)
	defer closeClient(c, client, srvDone)

	// First, we don't transform methods we can't find.
	err := client.Call(rpc.Request{"foo", 0, "", "bar"}, nil, nil)
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "foo"`,
		Code:    rpc.CodeNotImplemented,
	})

	err = client.Call(rpc.Request{"ErrorMethods", 0, "", "NoMethod"}, nil, nil)
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: "no such request - method ErrorMethods.NoMethod is not implemented",
		Code:    rpc.CodeNotImplemented,
	})

	// We do transform any errors that happen from calling the RootMethod
	// and beyond.
	err = client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil)
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: "transformed: message",
		Code:    "transformed: code",
	})

	root.errorInst.err = nil
	err = client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	root.errorInst = nil
	err = client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil)
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: "transformed: no error methods",
	})
}
// setStatus interprets the supplied params as documented on the type.
func setStatus(st *State, params setStatusParams) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set status")

	// TODO(fwereade): this can/should probably be recording the time the
	// status was *set*, not the time it happened to arrive in state.
	// We should almost certainly be accepting StatusInfo in the exposed
	// SetStatus methods, for symmetry with the Status methods.
	now := time.Now().UnixNano()
	doc := statusDoc{
		Status:     params.status,
		StatusInfo: params.message,
		StatusData: escapeKeys(params.rawData),
		Updated:    now,
	}
	probablyUpdateStatusHistory(st, params.globalKey, doc)

	// Set the authoritative status document, or fail trying.
	buildTxn := updateStatusSource(st, params.globalKey, doc)
	if params.token != nil {
		buildTxn = buildTxnWithLeadership(buildTxn, params.token)
	}
	err = st.run(buildTxn)
	if cause := errors.Cause(err); cause == mgo.ErrNotFound {
		return errors.NotFoundf(params.badge)
	}
	return errors.Trace(err)
}
func (s *relationsSuite) TestNextOpNothing(c *gc.C) {
	unitTag := names.NewUnitTag("wordpress/0")
	abort := make(chan struct{})

	var numCalls int32
	unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}}
	apiCaller := mockAPICaller(c, &numCalls,
		uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil),
		uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil),
		uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil),
	)
	st := uniter.NewState(apiCaller, unitTag)
	r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort)
	c.Assert(err, jc.ErrorIsNil)
	assertNumCalls(c, &numCalls, 2)

	localState := resolver.LocalState{
		State: operation.State{
			Kind: operation.Continue,
		},
	}
	remoteState := remotestate.Snapshot{}
	relationsResolver := relation.NewRelationsResolver(r)
	_, err = relationsResolver.NextOp(localState, remoteState, &mockOperations{})
	c.Assert(errors.Cause(err), gc.Equals, resolver.ErrNoOperation)
}
// APICall places a call to the remote machine.
//
// This fills out the rpc.Request on the given facade, version for a given
// object id, and the specific RPC method. It marshals the Arguments, and will
// unmarshal the result into the response object that is supplied.
func (s *state) APICall(facade string, version int, id, method string, args, response interface{}) error {
	retrySpec := retry.CallArgs{
		Func: func() error {
			return s.client.Call(rpc.Request{
				Type:    facade,
				Version: version,
				Id:      id,
				Action:  method,
			}, args, response)
		},
		IsFatalError: func(err error) bool {
			err = errors.Cause(err)
			ec, ok := err.(hasErrorCode)
			if !ok {
				return true
			}
			return ec.ErrorCode() != params.CodeRetry
		},
		Delay:       100 * time.Millisecond,
		MaxDelay:    1500 * time.Millisecond,
		MaxDuration: 10 * time.Second,
		BackoffFunc: retry.DoubleDelay,
		Clock:       s.clock,
	}
	err := retry.Call(retrySpec)
	return errors.Trace(err)
}
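// fullStatus is a hypothetical caller sketch, not part of the original source:
// the facade name, version, and result type are illustrative only. It shows the
// retry contract of APICall above: transient errors carrying params.CodeRetry
// are retried with backoff for up to ten seconds, while any other error (or a
// retry timeout) is returned, traced, to the caller.
func (s *state) fullStatus() (params.FullStatus, error) {
	var result params.FullStatus
	err := s.APICall("Client", 1, "", "FullStatus", nil, &result)
	return result, errors.Trace(err)
}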
func (s *LogReaderSuite) TestNextError(c *gc.C) {
	cUUID := "feebdaed-2f18-4fd2-967d-db9663db7bea"
	stub := &testing.Stub{}
	conn := &mockConnector{stub: stub}
	jsonReader := mockStream{stub: stub}
	conn.ReturnConnectStream = jsonReader
	failure := errors.New("an error")
	stub.SetErrors(nil, failure)
	var cfg params.LogStreamConfig
	stream, err := logstream.Open(conn, cfg, cUUID)
	c.Assert(err, gc.IsNil)

	var nextErr error
	done := make(chan struct{})
	go func() {
		_, nextErr = stream.Next()
		c.Check(errors.Cause(nextErr), gc.Equals, failure)
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(coretesting.LongWait):
		c.Errorf("timed out waiting for record")
	}

	stub.CheckCallNames(c, "ConnectStream", "ReadJSON")
}
func (s *machineSuite) TestEntitySetPassword(c *gc.C) {
	entity, err := apiagent.NewState(s.st).Entity(s.machine.Tag())
	c.Assert(err, jc.ErrorIsNil)

	err = entity.SetPassword("foo")
	c.Assert(err, gc.ErrorMatches, "password is only 3 bytes long, and is not a valid Agent password")
	err = entity.SetPassword("foo-12345678901234567890")
	c.Assert(err, jc.ErrorIsNil)
	err = entity.ClearReboot()
	c.Assert(err, jc.ErrorIsNil)

	err = s.machine.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.machine.PasswordValid("bar"), jc.IsFalse)
	c.Assert(s.machine.PasswordValid("foo-12345678901234567890"), jc.IsTrue)

	// Check that we cannot log in to mongo with the correct password.
	// This is because there's no mongo password set for s.machine,
	// which has JobHostUnits
	info := s.MongoInfo(c)
	// TODO(dfc) this entity.Tag should return a Tag
	tag, err := names.ParseTag(entity.Tag())
	c.Assert(err, jc.ErrorIsNil)
	info.Tag = tag
	info.Password = "******"
	err = tryOpenState(s.State.ModelTag(), info)
	c.Assert(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized)
}
// FingerprintMatches determines whether or not the identified file's
// fingerprint matches the expected fingerprint.
func (fpm FingerprintMatcher) FingerprintMatches(filename string, expected charmresource.Fingerprint) (bool, error) {
	open := fpm.Open
	if open == nil {
		open = func(filename string) (io.ReadCloser, error) { return os.Open(filename) }
	}
	generateFingerprint := fpm.GenerateFingerprint
	if generateFingerprint == nil {
		generateFingerprint = charmresource.GenerateFingerprint
	}

	file, err := open(filename)
	if os.IsNotExist(errors.Cause(err)) {
		return false, nil
	}
	if err != nil {
		return false, errors.Trace(err)
	}
	defer file.Close()

	fp, err := generateFingerprint(file)
	if err != nil {
		return false, errors.Trace(err)
	}
	matches := (fp.String() == expected.String())
	return matches, nil
}
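// ExampleFingerprintMatcher_missingFile is a hypothetical test-style sketch,
// not part of the original source; it assumes "fmt", "io", and "os" are
// imported. It shows why the Open hook exists: callers can stub out the
// filesystem, and a missing file is reported as a non-match with a nil error
// rather than as a failure.
func ExampleFingerprintMatcher_missingFile() {
	var fpm FingerprintMatcher
	fpm.Open = func(string) (io.ReadCloser, error) {
		// Simulate the file not being on disk.
		return nil, os.ErrNotExist
	}
	matches, err := fpm.FingerprintMatches("spam.tgz", charmresource.Fingerprint{})
	fmt.Println(matches, err)
	// Output:
	// false <nil>
}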
// Prepare prepares a new environment based on the provided configuration.
// If the environment is already prepared, it behaves like New.
func Prepare(cfg *config.Config, ctx BootstrapContext, store configstore.Storage) (Environ, error) {
	if p, err := Provider(cfg.Type()); err != nil {
		return nil, err
	} else if info, err := store.ReadInfo(cfg.Name()); errors.IsNotFound(errors.Cause(err)) {
		info = store.CreateInfo(cfg.Name())
		if env, err := prepare(ctx, cfg, info, p); err == nil {
			return env, decorateAndWriteInfo(info, env.Config())
		} else {
			if err := info.Destroy(); err != nil {
				logger.Warningf("cannot destroy newly created environment info: %v", err)
			}
			return nil, err
		}
	} else if err != nil {
		return nil, errors.Annotatef(err, "error reading environment info %q", cfg.Name())
	} else if !info.Initialized() {
		return nil, errors.Errorf(
			"found uninitialized environment info for %q; environment preparation probably in progress or interrupted",
			cfg.Name(),
		)
	} else if len(info.BootstrapConfig()) == 0 {
		return nil, errors.New("found environment info but no bootstrap config")
	} else {
		cfg, err = config.New(config.NoDefaults, info.BootstrapConfig())
		if err != nil {
			return nil, errors.Annotate(err, "cannot parse bootstrap config")
		}
		return New(cfg)
	}
}
func (w *collect) do() error {
	logger.Tracef("recording metrics")

	config := w.agent.CurrentConfig()
	tag := config.Tag()
	unitTag, ok := tag.(names.UnitTag)
	if !ok {
		return errors.Errorf("expected a unit tag, got %v", tag)
	}
	paths := uniter.NewWorkerPaths(config.DataDir(), unitTag, "metrics-collect")

	recorder, err := newRecorder(unitTag, paths, w.unitCharmLookup, w.metricFactory)
	if errors.Cause(err) == errMetricsNotDefined {
		logger.Tracef("%v", err)
		return nil
	} else if err != nil {
		return errors.Annotate(err, "failed to instantiate metric recorder")
	}

	ctx := newHookContext(unitTag.String(), recorder)
	err = ctx.addJujuUnitsMetric()
	if err != nil {
		return errors.Annotatef(err, "error adding 'juju-units' metric")
	}

	r := runner.NewRunner(ctx, paths)
	err = r.RunHook(string(hooks.CollectMetrics))
	if err != nil {
		return errors.Annotatef(err, "error running 'collect-metrics' hook")
	}
	return nil
}
// ClaimLeadership is part of the leadership.Claimer interface.
func (m leadershipClaimer) ClaimLeadership(serviceName, unitName string, duration time.Duration) error {
	err := m.manager.Claim(serviceName, unitName, duration)
	if errors.Cause(err) == corelease.ErrClaimDenied {
		return leadership.ErrClaimDenied
	}
	return errors.Trace(err)
}
// Check is part of the leadership.Token interface.
func (t leadershipToken) Check(out interface{}) error {
	err := t.token.Check(out)
	if errors.Cause(err) == corelease.ErrNotHeld {
		return errors.Errorf("%q is not leader of %q", t.unitName, t.serviceName)
	}
	return errors.Trace(err)
}
// ChangeServicePassword can change the password of a service
// as long as it belongs to the user defined in this package.
func (s *SvcManager) ChangeServicePassword(svcName, newPassword string) error {
	currentConfig, err := s.Config(svcName)
	if err != nil {
		// If access is denied when accessing the service it means
		// we can't own it, so there's no reason to return an error
		// since we only want to change the password on services started
		// by us.
		if errors.Cause(err) == syscall.ERROR_ACCESS_DENIED {
			return nil
		}
		return errors.Trace(err)
	}
	if currentConfig.ServiceStartName == jujudUser {
		currentConfig.Password = newPassword
		service, err := s.getService(svcName)
		if err != nil {
			return errors.Trace(err)
		}
		defer service.Close()
		err = service.UpdateConfig(currentConfig)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
	debug.Log("Restorer.restoreNodeTo", "node %v, dir %v, dst %v", node.Name, dir, dst)
	dstPath := filepath.Join(dst, dir, node.Name)

	err := node.CreateAt(dstPath, res.repo)
	if err != nil {
		debug.Log("Restorer.restoreNodeTo", "node.CreateAt(%s) error %v", dstPath, err)
	}

	// Did it fail because of ENOENT?
	if err != nil && os.IsNotExist(errors.Cause(err)) {
		debug.Log("Restorer.restoreNodeTo", "create intermediate paths")

		// Create parent directories and retry
		err = os.MkdirAll(filepath.Dir(dstPath), 0700)
		if err == nil || err == os.ErrExist {
			err = node.CreateAt(dstPath, res.repo)
		}
	}

	if err != nil {
		debug.Log("Restorer.restoreNodeTo", "error %v", err)
		err = res.Error(dstPath, node, errors.Annotate(err, "create node"))
		if err != nil {
			return err
		}
	}

	debug.Log("Restorer.restoreNodeTo", "successfully restored %v", node.Name)
	return nil
}
func (s *httpSuite) TestHTTPClient(c *gc.C) {
	var handler http.HandlerFunc
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		handler(w, req)
	}))
	defer srv.Close()
	s.client.BaseURL = srv.URL
	for i, test := range httpClientTests {
		c.Logf("test %d: %s", i, test.about)
		handler = test.handler
		var resp interface{}
		if test.expectResponse != nil {
			resp = reflect.New(reflect.TypeOf(test.expectResponse).Elem()).Interface()
		}
		err := s.client.Get("/", resp)
		if test.expectError != "" {
			c.Check(err, gc.ErrorMatches, test.expectError)
			c.Check(params.ErrCode(err), gc.Equals, test.expectErrorCode)
			if err, ok := errors.Cause(err).(*params.Error); ok {
				c.Check(err.Info, jc.DeepEquals, test.expectErrorInfo)
			} else if test.expectErrorInfo != nil {
				c.Fatalf("no error info found in error")
			}
			continue
		}
		c.Check(err, gc.IsNil)
		c.Check(resp, jc.DeepEquals, test.expectResponse)
	}
}
func (helpersSuite) TestAPIResult2ServiceResourcesFailure(c *gc.C) {
	apiRes := api.Resource{
		CharmResource: api.CharmResource{
			Name:        "spam",
			Type:        "file",
			Path:        "spam.tgz",
			Origin:      "upload",
			Revision:    1,
			Fingerprint: []byte(fingerprint),
			Size:        10,
		},
		ID:        "a-service/spam",
		ServiceID: "a-service",
	}
	failure := errors.New("<failure>")

	_, err := api.APIResult2ServiceResources(api.ResourcesResult{
		ErrorResult: params.ErrorResult{
			Error: &params.Error{
				Message: failure.Error(),
			},
		},
		Resources: []api.Resource{
			apiRes,
		},
	})

	c.Check(err, gc.ErrorMatches, "<failure>")
	c.Check(errors.Cause(err), gc.Not(gc.Equals), failure)
}
func (s *managedStorageSuite) TestPutPendingUpload(c *gc.C) {
	// Manually set up a scenario where there's a resource recorded
	// but the upload has not occurred.
	rc := blobstore.GetResourceCatalog(s.managedStorage)
	hash := "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7"
	id, path, err := rc.Put(hash, 3)
	c.Assert(err, gc.IsNil)
	c.Assert(path, gc.Equals, "")

	managedResource := blobstore.ManagedResource{
		EnvUUID: "env",
		User:    "******",
		Path:    "environs/env/path/to/blob",
	}
	_, err = blobstore.PutManagedResource(s.managedStorage, managedResource, id)
	c.Assert(err, gc.IsNil)

	_, _, err = s.managedStorage.GetForEnvironment("env", "/path/to/blob")
	c.Assert(errors.Cause(err), gc.Equals, blobstore.ErrUploadPending)

	// Despite the upload being pending, a second concurrent upload will succeed.
	rdr := bytes.NewReader([]byte("abc"))
	err = s.managedStorage.PutForEnvironment("env", "/path/to/blob", rdr, 3)
	c.Assert(err, gc.IsNil)
	s.assertGet(c, "/path/to/blob", []byte("abc"))
}
// StoreCharmArchive stores a charm archive in environment storage.
func StoreCharmArchive(st *state.State, curl *charm.URL, ch charm.Charm, r io.Reader, size int64, sha256 string) error {
	storage := newStateStorage(st.EnvironUUID(), st.MongoSession())
	storagePath, err := charmArchiveStoragePath(curl)
	if err != nil {
		return errors.Annotate(err, "cannot generate charm archive name")
	}
	if err := storage.Put(storagePath, r, size); err != nil {
		return errors.Annotate(err, "cannot add charm to storage")
	}

	// Now update the charm data in state and mark it as no longer pending.
	_, err = st.UpdateUploadedCharm(ch, curl, storagePath, sha256)
	if err != nil {
		alreadyUploaded := err == state.ErrCharmRevisionAlreadyModified ||
			errors.Cause(err) == state.ErrCharmRevisionAlreadyModified ||
			state.IsCharmAlreadyUploadedError(err)
		if err := storage.Remove(storagePath); err != nil {
			if alreadyUploaded {
				logger.Errorf("cannot remove duplicated charm archive from storage: %v", err)
			} else {
				logger.Errorf("cannot remove unsuccessfully recorded charm archive from storage: %v", err)
			}
		}
		if alreadyUploaded {
			// Somebody else managed to upload and update the charm in
			// state before us. This is not an error.
			return nil
		}
		// Any other failure to record the charm in state is an error.
		return errors.Trace(err)
	}
	return nil
}