// addServiceUnits adds a given number of units to a service. func addServiceUnits(st *state.State, args params.AddServiceUnits) ([]*state.Unit, error) { service, err := st.Service(args.ServiceName) if err != nil { return nil, err } if args.NumUnits < 1 { return nil, fmt.Errorf("must add at least one unit") } // New API uses placement directives. if len(args.Placement) > 0 { return jjj.AddUnitsWithPlacement(st, service, args.NumUnits, args.Placement) } // Otherwise we use the older machine spec. if args.NumUnits > 1 && args.ToMachineSpec != "" { return nil, fmt.Errorf("cannot use NumUnits with ToMachineSpec") } if args.ToMachineSpec != "" && names.IsValidMachine(args.ToMachineSpec) { _, err = st.Machine(args.ToMachineSpec) if err != nil { return nil, errors.Annotatef(err, `cannot add units for service "%v" to machine %v`, args.ServiceName, args.ToMachineSpec) } } return jjj.AddUnits(st, service, args.NumUnits, args.ToMachineSpec) }
func currentInfo(st *state.State, db *mgo.Database) (*serverInfo, error) { var doc stateServersDoc err := db.C("stateServers").Find(bson.D{{"_id", "e"}}).One(&doc) if err != nil { return nil, fmt.Errorf("cannot get state server info: %v", err) } ms := make(map[string]*state.Machine) var all []string all = append(all, doc.MachineIds...) all = append(all, doc.VotingMachineIds...) for _, id := range all { if _, ok := ms[id]; ok { continue } m, err := st.Machine(id) if err != nil { return nil, fmt.Errorf("cannot get info on machine %s: %v", id, err) } ms[id] = m } return &serverInfo{ servers: &doc, machines: ms, }, nil }
// NewRebootAPI creates a new server-side RebootAPI facade. func NewRebootAPI(st *state.State, resources *common.Resources, auth common.Authorizer) (*RebootAPI, error) { if !auth.AuthMachineAgent() { return nil, common.ErrPerm } tag, ok := auth.GetAuthTag().(names.MachineTag) if !ok { return nil, errors.Errorf("Expected names.MachineTag, got %T", auth.GetAuthTag()) } machine, err := st.Machine(tag.Id()) if err != nil { return nil, errors.Trace(err) } canAccess := func() (common.AuthFunc, error) { return auth.AuthOwner, nil } return &RebootAPI{ RebootActionGetter: common.NewRebootActionGetter(st, canAccess), RebootRequester: common.NewRebootRequester(st, canAccess), RebootFlagClearer: common.NewRebootFlagClearer(st, canAccess), st: st, machine: machine, resources: resources, auth: auth, }, nil }
// MachineConfig returns information from the environment config that is
// needed for machine cloud-init (for non-state servers only).
// It is exposed for testing purposes.
// TODO(rog) fix environs/manual tests so they do not need to
// call this, or move this elsewhere.
//
// machineId identifies the machine to configure, nonce is the
// provisioning nonce, and a non-empty dataDir overrides the default
// agent data directory.
func MachineConfig(st *state.State, machineId, nonce, dataDir string) (*cloudinit.MachineConfig, error) {
	environConfig, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	// Get the machine so we can get its series and arch.
	// If the Arch is not set in hardware-characteristics,
	// an error is returned.
	machine, err := st.Machine(machineId)
	if err != nil {
		return nil, err
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		return nil, err
	}
	if hc.Arch == nil {
		return nil, fmt.Errorf("arch is not set for %q", machine.Tag())
	}
	// Find the appropriate tools information.
	env, err := environs.New(environConfig)
	if err != nil {
		return nil, err
	}
	tools, err := findInstanceTools(env, machine.Series(), *hc.Arch)
	if err != nil {
		return nil, err
	}
	// Find the API endpoints.
	apiInfo, err := environs.APIInfo(env)
	if err != nil {
		return nil, err
	}
	// Set up mongo and API credentials for this machine.
	auth := authentication.NewAuthenticator(st.MongoConnectionInfo(), apiInfo)
	mongoInfo, apiInfo, err := auth.SetupAuthentication(machine)
	if err != nil {
		return nil, err
	}
	// Find requested networks.
	networks, err := machine.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	mcfg := environs.NewMachineConfig(machineId, nonce, networks, mongoInfo, apiInfo)
	if dataDir != "" {
		mcfg.DataDir = dataDir
	}
	mcfg.Tools = tools
	// Fill in the remaining environment-derived settings; an empty
	// constraints value is passed since none apply here.
	err = environs.FinishMachineConfig(mcfg, environConfig, constraints.Value{})
	if err != nil {
		return nil, err
	}
	return mcfg, nil
}
// upgradeMongoWatcher watches the given machine and reacts to changes
// until stopch is closed. On each change it checks whether the machine
// is a manager flagged to hold mongo at a version newer than Mongo24;
// if so it invokes maybeStopMongo, and for non-master machines also
// removes the machine's mongo addresses from the replicaset and resets
// the stop-mongo flag back to Mongo24.
func upgradeMongoWatcher(st *state.State, stopch <-chan struct{}, machineID string, maybeStopMongo StopMongo) error {
	m, err := st.Machine(machineID)
	if err != nil {
		return errors.Annotatef(err, "cannot start watcher for machine %q", machineID)
	}
	watch := m.Watch()
	// Kill and wait on the watcher so its resources are released
	// whichever way we leave the loop.
	defer func() {
		watch.Kill()
		watch.Wait()
	}()
	for {
		select {
		case <-watch.Changes():
			if err := m.Refresh(); err != nil {
				return errors.Annotate(err, "cannot refresh machine information")
			}
			// Only manager (state server) machines run mongo.
			if !m.IsManager() {
				continue
			}
			expectedVersion, err := m.StopMongoUntilVersion()
			if err != nil {
				return errors.Annotate(err, "cannot obtain minimum version of mongo")
			}
			// Mongo24 means "nothing pending"; keep watching.
			if expectedVersion == mongo.Mongo24 {
				continue
			}
			var isMaster bool
			isMaster, err = mongo.IsMaster(st.MongoSession(), m)
			if err != nil {
				return errors.Annotatef(err, "cannot determine if machine %q is master", machineID)
			}
			err = maybeStopMongo(expectedVersion, isMaster)
			if err != nil {
				return errors.Annotate(err, "cannot determine if mongo must be stopped")
			}
			if !isMaster {
				// Non-masters are pulled from the replicaset before the
				// flag is reset; the state port comes from serving info.
				addrs := make([]string, len(m.Addresses()))
				ssi, err := st.StateServingInfo()
				if err != nil {
					return errors.Annotate(err, "cannot obtain state serving info to stop mongo")
				}
				for i, addr := range m.Addresses() {
					addrs[i] = net.JoinHostPort(addr.Value, strconv.Itoa(ssi.StatePort))
				}
				if err := replicaset.Remove(st.MongoSession(), addrs...); err != nil {
					return errors.Annotatef(err, "cannot remove %q from replicaset", m.Id())
				}
				if err := m.SetStopMongoUntilVersion(mongo.Mongo24); err != nil {
					return errors.Annotate(err, "cannot reset stop mongo flag")
				}
			}
		case <-stopch:
			// Caller asked us to stop; not an error.
			return nil
		}
	}
}
// getAllUnits returns a list of all principal and subordinate units // assigned to the given machine. func getAllUnits(st *state.State, tag names.Tag) ([]string, error) { machine, err := st.Machine(tag.Id()) if err != nil { return nil, err } // Start a watcher on machine's units, read the initial event and stop it. watch := machine.WatchUnits() defer watch.Stop() if units, ok := <-watch.Changes(); ok { return units, nil } return nil, fmt.Errorf("cannot obtain units of machine %q: %v", tag, watch.Err()) }
// addMachineForUnit returns a machine for the given unit according to
// the supplied placement directive: a new container inside an existing
// machine, a brand-new machine carrying an environment-scoped placement
// directive, or an existing machine looked up by ID.
func addMachineForUnit(st *state.State, unit *state.Unit, placement *instance.Placement, networks []string) (*state.Machine, error) {
	unitCons, err := unit.Constraints()
	if err != nil {
		return nil, err
	}
	var containerType instance.ContainerType
	var mid, placementDirective string
	// Extract container type and parent from container placement directives.
	if containerType, err = instance.ParseContainerType(placement.Scope); err == nil {
		mid = placement.Directive
	} else {
		switch placement.Scope {
		case st.EnvironUUID():
			// Environment-scoped: pass the directive through to the
			// provider via the machine template.
			placementDirective = placement.Directive
		case instance.MachineScope:
			// Machine-scoped: the directive is an existing machine ID.
			mid = placement.Directive
		default:
			return nil, errors.Errorf("invalid environment UUID %q", placement.Scope)
		}
	}
	// Create any new machine marked as dirty so that
	// nothing else will grab it before we assign the unit to it.

	// If a container is to be used, create it.
	if containerType != "" {
		template := state.MachineTemplate{
			Series:            unit.Series(),
			Jobs:              []state.MachineJob{state.JobHostUnits},
			Dirty:             true,
			Constraints:       *unitCons,
			RequestedNetworks: networks,
		}
		return st.AddMachineInsideMachine(template, mid, containerType)
	}
	// If a placement directive is to be used, do that here.
	if placementDirective != "" {
		template := state.MachineTemplate{
			Series:            unit.Series(),
			Jobs:              []state.MachineJob{state.JobHostUnits},
			Dirty:             true,
			Constraints:       *unitCons,
			RequestedNetworks: networks,
			Placement:         placementDirective,
		}
		return st.AddOneMachine(template)
	}
	// Otherwise use an existing machine.
	return st.Machine(mid)
}
// EnsureAvailabilitySingle applies a single StateServersSpec specification to the current environment.
// Exported so it can be called by the legacy client API in the client package.
//
// When spec.Series is empty, the series of the first voting state
// server machine is used so all state servers stay in sync.
func EnsureAvailabilitySingle(st *state.State, spec params.StateServersSpec) (params.StateServersChanges, error) {
	if !st.IsStateServer() {
		return params.StateServersChanges{}, errors.New("unsupported with hosted environments")
	}
	// Check if changes are allowed and the command may proceed.
	blockChecker := common.NewBlockChecker(st)
	if err := blockChecker.ChangeAllowed(); err != nil {
		return params.StateServersChanges{}, errors.Trace(err)
	}
	// Validate the environment tag if present.
	if spec.EnvironTag != "" {
		tag, err := names.ParseEnvironTag(spec.EnvironTag)
		if err != nil {
			return params.StateServersChanges{}, errors.Errorf("invalid environment tag: %v", err)
		}
		if _, err := st.FindEntity(tag); err != nil {
			return params.StateServersChanges{}, err
		}
	}
	series := spec.Series
	if series == "" {
		ssi, err := st.StateServerInfo()
		if err != nil {
			return params.StateServersChanges{}, err
		}
		// We should always have at least one voting machine
		// If we *really* wanted we could just pick whatever series is
		// in the majority, but really, if we always copy the value of
		// the first one, then they'll stay in sync.
		if len(ssi.VotingMachineIds) == 0 {
			// Better than a panic()?
			return params.StateServersChanges{}, fmt.Errorf("internal error, failed to find any voting machines")
		}
		templateMachine, err := st.Machine(ssi.VotingMachineIds[0])
		if err != nil {
			return params.StateServersChanges{}, err
		}
		series = templateMachine.Series()
	}
	changes, err := st.EnsureAvailability(spec.NumStateServers, spec.Constraints, series, spec.Placement)
	if err != nil {
		return params.StateServersChanges{}, err
	}
	return stateServersChanges(changes), nil
}
// AssertPrincipalServiceDeployed checks that the named service was
// deployed with the expected charm URL, force flag, charm metadata and
// config, and service constraints, and that each machine hosting one of
// its units carries those same constraints. It returns the service.
func AssertPrincipalServiceDeployed(c *gc.C, st *state.State, serviceName string, curl *charm.URL, forced bool, bundle charm.Charm, cons constraints.Value) *state.Service {
	service, err := st.Service(serviceName)
	c.Assert(err, jc.ErrorIsNil)
	charm, force, err := service.Charm()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(force, gc.Equals, forced)
	c.Assert(charm.URL(), gc.DeepEquals, curl)
	// When charms are read from state, storage properties are
	// always deserialised as empty slices if empty or nil, so
	// update bundle to match (bundle comes from parsing charm
	// metadata yaml where nil means nil).
	for name, bundleMeta := range bundle.Meta().Storage {
		if bundleMeta.Properties == nil {
			bundleMeta.Properties = []string{}
			bundle.Meta().Storage[name] = bundleMeta
		}
	}
	c.Assert(charm.Meta(), jc.DeepEquals, bundle.Meta())
	c.Assert(charm.Config(), jc.DeepEquals, bundle.Config())
	serviceCons, err := service.Constraints()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(serviceCons, gc.DeepEquals, cons)
	// NOTE(review): the unconditional break at the end of the loop body
	// means only one attempt ever runs, so LongAttempt never actually
	// retries — confirm whether the retry behaviour was intended.
	for a := coretesting.LongAttempt.Start(); a.Next(); {
		units, err := service.AllUnits()
		c.Assert(err, jc.ErrorIsNil)
		for _, unit := range units {
			mid, err := unit.AssignedMachineId()
			if !a.HasNext() {
				// Last attempt: assignment failures become test failures.
				c.Assert(err, jc.ErrorIsNil)
			} else if err != nil {
				continue
			}
			machine, err := st.Machine(mid)
			c.Assert(err, jc.ErrorIsNil)
			machineCons, err := machine.Constraints()
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(machineCons, gc.DeepEquals, cons)
		}
		break
	}
	return service
}
// Assign the unit to a provisioned machine with dummy addresses set. func assertAssignUnit(c *gc.C, st *state.State, u *state.Unit) { err := u.AssignToNewMachine() c.Assert(err, jc.ErrorIsNil) mid, err := u.AssignedMachineId() c.Assert(err, jc.ErrorIsNil) machine, err := st.Machine(mid) c.Assert(err, jc.ErrorIsNil) err = machine.SetProvisioned("i-exist", "fake_nonce", nil) c.Assert(err, jc.ErrorIsNil) err = machine.SetProviderAddresses(network.Address{ Type: network.IPv4Address, Scope: network.ScopeCloudLocal, Value: "private.address.example.com", }, network.Address{ Type: network.IPv4Address, Scope: network.ScopePublic, Value: "public.address.example.com", }) c.Assert(err, jc.ErrorIsNil) }
// environManagerInstances returns all environ manager instances. func environManagerInstances(st *state.State) ([]instance.Id, error) { info, err := st.ControllerInfo() if err != nil { return nil, err } instances := make([]instance.Id, 0, len(info.MachineIds)) for _, id := range info.MachineIds { machine, err := st.Machine(id) if err != nil { return nil, err } instanceId, err := machine.InstanceId() if err == nil { instances = append(instances, instanceId) } else if !errors.IsNotProvisioned(err) { return nil, err } } return instances, nil }
func isMachineMaster(st *state.State, tag names.MachineTag) (bool, error) { if st == nil { // If there is no state, we aren't a master. return false, nil } // Not calling the agent openState method as it does other checks // we really don't care about here. All we need here is the machine // so we can determine if we are the master or not. machine, err := st.Machine(tag.Id()) if err != nil { // This shouldn't happen, and if it does, the state worker will have // found out before us, and already errored, or is likely to error out // very shortly. All we do here is return the error. The state worker // returns an error that will cause the agent to be terminated. return false, errors.Trace(err) } isMaster, err := mongo.IsMaster(st.MongoSession(), machine) if err != nil { return false, errors.Trace(err) } return isMaster, nil }
// addServiceUnits adds a given number of units to a service. func addServiceUnits(state *state.State, args params.AddServiceUnits) ([]*state.Unit, error) { service, err := state.Service(args.ServiceName) if err != nil { return nil, err } if args.NumUnits < 1 { return nil, fmt.Errorf("must add at least one unit") } if args.NumUnits > 1 && args.ToMachineSpec != "" { return nil, fmt.Errorf("cannot use NumUnits with ToMachineSpec") } if args.ToMachineSpec != "" && names.IsValidMachine(args.ToMachineSpec) { _, err = state.Machine(args.ToMachineSpec) if err != nil { return nil, errors.Annotatef(err, `cannot add units for service "%v" to machine %v`, args.ServiceName, args.ToMachineSpec) } } return jjj.AddUnits(state, service, args.NumUnits, args.ToMachineSpec) }
// newUniterBaseAPI creates a new instance of the uniter base API.
// The authorizer must represent a unit agent; three access checkers are
// built around it: accessUnit (the unit itself), accessService (the
// unit's service) and accessMachine (the unit's assigned machine).
func newUniterBaseAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*uniterBaseAPI, error) {
	if !authorizer.AuthUnitAgent() {
		return nil, common.ErrPerm
	}
	var unit *state.Unit
	var err error
	switch tag := authorizer.GetAuthTag().(type) {
	case names.UnitTag:
		unit, err = st.Unit(tag.Id())
		if err != nil {
			return nil, errors.Trace(err)
		}
	default:
		return nil, errors.Errorf("expected names.UnitTag, got %T", tag)
	}
	accessUnit := func() (common.AuthFunc, error) {
		return authorizer.AuthOwner, nil
	}
	accessService := func() (common.AuthFunc, error) {
		// Re-read the auth tag each call so the service name always
		// reflects the authenticated unit.
		switch tag := authorizer.GetAuthTag().(type) {
		case names.UnitTag:
			entity, err := st.Unit(tag.Id())
			if err != nil {
				return nil, errors.Trace(err)
			}
			serviceName := entity.ServiceName()
			serviceTag := names.NewServiceTag(serviceName)
			return func(tag names.Tag) bool {
				return tag == serviceTag
			}, nil
		default:
			return nil, errors.Errorf("expected names.UnitTag, got %T", tag)
		}
	}
	accessMachine := func() (common.AuthFunc, error) {
		// Resolved lazily: the unit may not be assigned to a machine
		// at construction time.
		machineId, err := unit.AssignedMachineId()
		if err != nil {
			return nil, errors.Trace(err)
		}
		machine, err := st.Machine(machineId)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return func(tag names.Tag) bool {
			return tag == machine.Tag()
		}, nil
	}
	accessUnitOrService := common.AuthEither(accessUnit, accessService)
	return &uniterBaseAPI{
		LifeGetter:                 common.NewLifeGetter(st, accessUnitOrService),
		StatusAPI:                  NewStatusAPI(st, accessUnitOrService),
		DeadEnsurer:                common.NewDeadEnsurer(st, accessUnit),
		AgentEntityWatcher:         common.NewAgentEntityWatcher(st, resources, accessUnitOrService),
		APIAddresser:               common.NewAPIAddresser(st, resources),
		EnvironWatcher:             common.NewEnvironWatcher(st, resources, authorizer),
		RebootRequester:            common.NewRebootRequester(st, accessMachine),
		LeadershipSettingsAccessor: leadershipSettingsAccessorFactory(st, resources, authorizer),

		st:            st,
		auth:          authorizer,
		resources:     resources,
		accessUnit:    accessUnit,
		accessService: accessService,
		unit:          unit,
	}, nil
}
// InstanceConfig returns information from the environment config that
// is needed for machine cloud-init (for non-controllers only). It
// is exposed for testing purposes.
// TODO(rog) fix environs/manual tests so they do not need to call this, or move this elsewhere.
//
// machineId identifies the machine to configure, nonce is the
// provisioning nonce, and a non-empty dataDir overrides the default
// agent data directory.
func InstanceConfig(st *state.State, machineId, nonce, dataDir string) (*instancecfg.InstanceConfig, error) {
	environConfig, err := st.ModelConfig()
	if err != nil {
		return nil, errors.Annotate(err, "getting model config")
	}
	// Get the machine so we can get its series and arch.
	// If the Arch is not set in hardware-characteristics,
	// an error is returned.
	machine, err := st.Machine(machineId)
	if err != nil {
		return nil, errors.Annotate(err, "getting machine")
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		return nil, errors.Annotate(err, "getting machine hardware characteristics")
	}
	if hc.Arch == nil {
		return nil, fmt.Errorf("arch is not set for %q", machine.Tag())
	}
	// Find the appropriate tools information.
	agentVersion, ok := environConfig.AgentVersion()
	if !ok {
		return nil, errors.New("no agent version set in model configuration")
	}
	environment, err := st.Model()
	if err != nil {
		return nil, errors.Annotate(err, "getting state model")
	}
	urlGetter := common.NewToolsURLGetter(environment.UUID(), st)
	toolsFinder := common.NewToolsFinder(st, st, urlGetter)
	findToolsResult, err := toolsFinder.FindTools(params.FindToolsParams{
		Number:       agentVersion,
		MajorVersion: -1,
		MinorVersion: -1,
		Series:       machine.Series(),
		Arch:         *hc.Arch,
	})
	if err != nil {
		return nil, errors.Annotate(err, "finding tools")
	}
	if findToolsResult.Error != nil {
		return nil, errors.Annotate(findToolsResult.Error, "finding tools")
	}
	// NOTE(review): only the first tools entry is used — presumably the
	// finder orders results best-first; confirm.
	tools := findToolsResult.List[0]
	// Get the API connection info; attempt all API addresses.
	apiHostPorts, err := st.APIHostPorts()
	if err != nil {
		return nil, errors.Annotate(err, "getting API addresses")
	}
	// Deduplicate addresses via a set, then sort for determinism.
	apiAddrs := make(set.Strings)
	for _, hostPorts := range apiHostPorts {
		for _, hp := range hostPorts {
			apiAddrs.Add(hp.NetAddr())
		}
	}
	apiInfo := &api.Info{
		Addrs:    apiAddrs.SortedValues(),
		CACert:   st.CACert(),
		ModelTag: st.ModelTag(),
	}
	// Set up mongo and API credentials for this machine.
	auth := authentication.NewAuthenticator(st.MongoConnectionInfo(), apiInfo)
	mongoInfo, apiInfo, err := auth.SetupAuthentication(machine)
	if err != nil {
		return nil, errors.Annotate(err, "setting up machine authentication")
	}
	// Find requested networks.
	networks, err := machine.RequestedNetworks()
	if err != nil {
		return nil, errors.Annotate(err, "getting requested networks for machine")
	}
	// Figure out if secure connections are supported.
	info, err := st.StateServingInfo()
	if err != nil {
		return nil, errors.Annotate(err, "getting state serving info")
	}
	secureServerConnection := info.CAPrivateKey != ""
	icfg, err := instancecfg.NewInstanceConfig(machineId, nonce, environConfig.ImageStream(), machine.Series(), "",
		secureServerConnection, networks, mongoInfo, apiInfo,
	)
	if err != nil {
		return nil, errors.Annotate(err, "initializing instance config")
	}
	if dataDir != "" {
		icfg.DataDir = dataDir
	}
	icfg.Tools = tools
	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
	if err != nil {
		return nil, errors.Annotate(err, "finishing instance config")
	}
	return icfg, nil
}
// AddUnits starts n units of the given service and allocates machines // to them as necessary. func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) { units := make([]*state.Unit, n) // Hard code for now till we implement a different approach. policy := state.AssignCleanEmpty // All units should have the same networks as the service. networks, err := svc.Networks() if err != nil { return nil, fmt.Errorf("cannot get service %q networks: %v", svc.Name(), err) } // TODO what do we do if we fail half-way through this process? for i := 0; i < n; i++ { unit, err := svc.AddUnit() if err != nil { return nil, fmt.Errorf("cannot add unit %d/%d to service %q: %v", i+1, n, svc.Name(), err) } if machineIdSpec != "" { if n != 1 { return nil, fmt.Errorf("cannot add multiple units of service %q to a single machine", svc.Name()) } // machineIdSpec may be an existing machine or container, eg 3/lxc/2 // or a new container on a machine, eg lxc:3 mid := machineIdSpec var containerType instance.ContainerType specParts := strings.SplitN(machineIdSpec, ":", 2) if len(specParts) > 1 { firstPart := specParts[0] var err error if containerType, err = instance.ParseContainerType(firstPart); err == nil { mid = specParts[1] } else { mid = machineIdSpec } } if !names.IsMachine(mid) { return nil, fmt.Errorf("invalid force machine id %q", mid) } var unitCons *constraints.Value unitCons, err = unit.Constraints() if err != nil { return nil, err } var err error var m *state.Machine // If a container is to be used, create it. if containerType != "" { // Create the new machine marked as dirty so that // nothing else will grab it before we assign the unit to it. 
template := state.MachineTemplate{ Series: unit.Series(), Jobs: []state.MachineJob{state.JobHostUnits}, Dirty: true, Constraints: *unitCons, RequestedNetworks: networks, } m, err = st.AddMachineInsideMachine(template, mid, containerType) } else { m, err = st.Machine(mid) } if err != nil { return nil, fmt.Errorf("cannot assign unit %q to machine: %v", unit.Name(), err) } err = unit.AssignToMachine(m) if err != nil { return nil, err } } else if err := st.AssignUnit(unit, policy); err != nil { return nil, err } units[i] = unit } return units, nil }
// InstanceConfig returns information from the environment config that
// is needed for machine cloud-init (for non-controllers only). It
// is exposed for testing purposes.
// TODO(rog) fix environs/manual tests so they do not need to call this, or move this elsewhere.
//
// machineId identifies the machine to configure, nonce is the
// provisioning nonce, and a non-empty dataDir overrides the default
// agent data directory.
func InstanceConfig(st *state.State, machineId, nonce, dataDir string) (*instancecfg.InstanceConfig, error) {
	environConfig, err := st.ModelConfig()
	if err != nil {
		return nil, err
	}
	// Get the machine so we can get its series and arch.
	// If the Arch is not set in hardware-characteristics,
	// an error is returned.
	machine, err := st.Machine(machineId)
	if err != nil {
		return nil, err
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		return nil, err
	}
	if hc.Arch == nil {
		return nil, fmt.Errorf("arch is not set for %q", machine.Tag())
	}
	// Find the appropriate tools information.
	agentVersion, ok := environConfig.AgentVersion()
	if !ok {
		return nil, errors.New("no agent version set in model configuration")
	}
	environment, err := st.Model()
	if err != nil {
		return nil, err
	}
	urlGetter := common.NewToolsURLGetter(environment.UUID(), st)
	toolsFinder := common.NewToolsFinder(st, st, urlGetter)
	findToolsResult, err := toolsFinder.FindTools(params.FindToolsParams{
		Number:       agentVersion,
		MajorVersion: -1,
		MinorVersion: -1,
		Series:       machine.Series(),
		Arch:         *hc.Arch,
	})
	if err != nil {
		return nil, err
	}
	if findToolsResult.Error != nil {
		return nil, findToolsResult.Error
	}
	// NOTE(review): only the first tools entry is used — presumably the
	// finder orders results best-first; confirm.
	tools := findToolsResult.List[0]
	// Find the API endpoints.
	env, err := environs.New(environConfig)
	if err != nil {
		return nil, err
	}
	apiInfo, err := environs.APIInfo(env)
	if err != nil {
		return nil, err
	}
	// Set up mongo and API credentials for this machine.
	auth := authentication.NewAuthenticator(st.MongoConnectionInfo(), apiInfo)
	mongoInfo, apiInfo, err := auth.SetupAuthentication(machine)
	if err != nil {
		return nil, err
	}
	// Find requested networks.
	networks, err := machine.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	// Figure out if secure connections are supported.
	info, err := st.StateServingInfo()
	if err != nil {
		return nil, err
	}
	secureServerConnection := info.CAPrivateKey != ""
	icfg, err := instancecfg.NewInstanceConfig(machineId, nonce, env.Config().ImageStream(), machine.Series(), "",
		secureServerConnection, networks, mongoInfo, apiInfo,
	)
	if err != nil {
		return nil, err
	}
	if dataDir != "" {
		icfg.DataDir = dataDir
	}
	icfg.Tools = tools
	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
	if err != nil {
		return nil, err
	}
	return icfg, nil
}
// DeployService fetches the charm from the charm store and deploys it.
// The logic has been factored out into a common function which is called by
// both the legacy API on the client facade, as well as the new service facade.
//
// owner is the name of the service owner; args carries the charm URL,
// service name, unit count, placement, config and constraints.
func deployService(st *state.State, owner string, args params.ServiceDeploy) error {
	curl, err := charm.ParseURL(args.CharmUrl)
	if err != nil {
		return errors.Trace(err)
	}
	if curl.Revision < 0 {
		return errors.Errorf("charm url must include revision")
	}
	// Do a quick but not complete validation check before going any further.
	for _, p := range args.Placement {
		if p.Scope != instance.MachineScope {
			continue
		}
		_, err = st.Machine(p.Directive)
		if err != nil {
			return errors.Annotatef(err, `cannot deploy "%v" to machine %v`, args.ServiceName, p.Directive)
		}
	}
	// Try to find the charm URL in state first.
	ch, err := st.Charm(curl)
	if errors.IsNotFound(err) {
		// Clients written to expect 1.16 compatibility require this next block.
		if curl.Schema != "cs" {
			return errors.Errorf(`charm url has unsupported schema %q`, curl.Schema)
		}
		// Charm not yet in state: fetch and add it, then re-read.
		if err = AddCharmWithAuthorization(st, params.AddCharmWithAuthorization{
			URL: args.CharmUrl,
		}); err == nil {
			ch, err = st.Charm(curl)
		}
	}
	if err != nil {
		return errors.Trace(err)
	}
	// YAML config takes precedence over the key/value config map.
	var settings charm.Settings
	if len(args.ConfigYAML) > 0 {
		settings, err = ch.Config().ParseSettingsYAML([]byte(args.ConfigYAML), args.ServiceName)
	} else if len(args.Config) > 0 {
		// Parse config in a compatible way (see function comment).
		settings, err = parseSettingsCompatible(ch, args.Config)
	}
	if err != nil {
		return errors.Trace(err)
	}
	// Convert network tags to names for any given networks.
	requestedNetworks, err := networkTagsToNames(args.Networks)
	if err != nil {
		return errors.Trace(err)
	}
	_, err = jjj.DeployService(st, jjj.DeployServiceParams{
		ServiceName: args.ServiceName,
		Series:      args.Series,
		// TODO(dfc) ServiceOwner should be a tag
		ServiceOwner:   owner,
		Charm:          ch,
		NumUnits:       args.NumUnits,
		ConfigSettings: settings,
		Constraints:    args.Constraints,
		Placement:      args.Placement,
		Networks:       requestedNetworks,
		Storage:        args.Storage,
	})
	return errors.Trace(err)
}
// EnableHASingle applies a single ControllersServersSpec specification to the current environment. // Exported so it can be called by the legacy client API in the client package. func EnableHASingle(st *state.State, spec params.ControllersSpec) (params.ControllersChanges, error) { if !st.IsController() { return params.ControllersChanges{}, errors.New("unsupported with hosted models") } // Check if changes are allowed and the command may proceed. blockChecker := common.NewBlockChecker(st) if err := blockChecker.ChangeAllowed(); err != nil { return params.ControllersChanges{}, errors.Trace(err) } // Validate the environment tag if present. if spec.ModelTag != "" { tag, err := names.ParseModelTag(spec.ModelTag) if err != nil { return params.ControllersChanges{}, errors.Errorf("invalid model tag: %v", err) } if _, err := st.FindEntity(tag); err != nil { return params.ControllersChanges{}, err } } series := spec.Series if series == "" { ssi, err := st.ControllerInfo() if err != nil { return params.ControllersChanges{}, err } // We should always have at least one voting machine // If we *really* wanted we could just pick whatever series is // in the majority, but really, if we always copy the value of // the first one, then they'll stay in sync. if len(ssi.VotingMachineIds) == 0 { // Better than a panic()? return params.ControllersChanges{}, errors.Errorf("internal error, failed to find any voting machines") } templateMachine, err := st.Machine(ssi.VotingMachineIds[0]) if err != nil { return params.ControllersChanges{}, err } series = templateMachine.Series() } if constraints.IsEmpty(&spec.Constraints) { // No constraints specified, so we'll use the constraints off // a running controller. controllerInfo, err := st.ControllerInfo() if err != nil { return params.ControllersChanges{}, err } // We'll sort the controller ids to find the smallest. // This will typically give the initial bootstrap machine. 
var controllerIds []int for _, id := range controllerInfo.MachineIds { idNum, err := strconv.Atoi(id) if err != nil { logger.Warningf("ignoring non numeric controller id %v", id) continue } controllerIds = append(controllerIds, idNum) } if len(controllerIds) == 0 { errors.Errorf("internal error, failed to find any controllers") } sort.Ints(controllerIds) // Load the controller machine and get its constraints. controllerId := controllerIds[0] controller, err := st.Machine(strconv.Itoa(controllerId)) if err != nil { return params.ControllersChanges{}, errors.Annotatef(err, "reading controller id %v", controllerId) } spec.Constraints, err = controller.Constraints() if err != nil { return params.ControllersChanges{}, errors.Annotatef(err, "reading constraints for controller id %v", controllerId) } } changes, err := st.EnableHA(spec.NumControllers, spec.Constraints, series, spec.Placement) if err != nil { return params.ControllersChanges{}, err } return controllersChanges(changes), nil }
// newUniterBaseAPI creates a new instance of the uniter base API.
// The authorizer must represent a unit agent; three access checkers are
// built around it: accessUnit (the unit itself), accessService (the
// unit's service) and accessMachine (the unit's assigned machine). A
// meter status API handler is also constructed for the unit.
func newUniterBaseAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*uniterBaseAPI, error) {
	if !authorizer.AuthUnitAgent() {
		return nil, common.ErrPerm
	}
	var unit *state.Unit
	var err error
	switch tag := authorizer.GetAuthTag().(type) {
	case names.UnitTag:
		unit, err = st.Unit(tag.Id())
		if err != nil {
			return nil, errors.Trace(err)
		}
	default:
		return nil, errors.Errorf("expected names.UnitTag, got %T", tag)
	}
	accessUnit := func() (common.AuthFunc, error) {
		return authorizer.AuthOwner, nil
	}
	accessService := func() (common.AuthFunc, error) {
		// Re-read the auth tag each call so the service name always
		// reflects the authenticated unit.
		switch tag := authorizer.GetAuthTag().(type) {
		case names.UnitTag:
			entity, err := st.Unit(tag.Id())
			if err != nil {
				return nil, errors.Trace(err)
			}
			serviceName := entity.ServiceName()
			serviceTag := names.NewServiceTag(serviceName)
			return func(tag names.Tag) bool {
				return tag == serviceTag
			}, nil
		default:
			return nil, errors.Errorf("expected names.UnitTag, got %T", tag)
		}
	}
	accessMachine := func() (common.AuthFunc, error) {
		// Resolved lazily: the unit may not be assigned to a machine
		// at construction time.
		machineId, err := unit.AssignedMachineId()
		if err != nil {
			return nil, errors.Trace(err)
		}
		machine, err := st.Machine(machineId)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return func(tag names.Tag) bool {
			return tag == machine.Tag()
		}, nil
	}
	msAPI, err := meterstatus.NewMeterStatusAPI(st, resources, authorizer)
	if err != nil {
		return nil, errors.Annotate(err, "could not create meter status API handler")
	}
	accessUnitOrService := common.AuthEither(accessUnit, accessService)
	return &uniterBaseAPI{
		LifeGetter:                 common.NewLifeGetter(st, accessUnitOrService),
		DeadEnsurer:                common.NewDeadEnsurer(st, accessUnit),
		AgentEntityWatcher:         common.NewAgentEntityWatcher(st, resources, accessUnitOrService),
		APIAddresser:               common.NewAPIAddresser(st, resources),
		EnvironWatcher:             common.NewEnvironWatcher(st, resources, authorizer),
		RebootRequester:            common.NewRebootRequester(st, accessMachine),
		LeadershipSettingsAccessor: leadershipSettingsAccessorFactory(st, resources, authorizer),
		MeterStatus:                msAPI,
		// TODO(fwereade): so *every* unit should be allowed to get/set its
		// own status *and* its service's? This is not a pleasing arrangement.
		StatusAPI: NewStatusAPI(st, accessUnitOrService),

		st:            st,
		auth:          authorizer,
		resources:     resources,
		accessUnit:    accessUnit,
		accessService: accessService,
		unit:          unit,
	}, nil
}