func (s *serviceSuite) TestUnitService(c *gc.C) { for i, test := range unitNameTests { c.Logf("test %d: %q", i, test.pattern) if !test.valid { expect := fmt.Sprintf("%q is not a valid unit name", test.pattern) testFunc := func() { names.UnitService(test.pattern) } c.Assert(testFunc, gc.PanicMatches, expect) } else { c.Assert(names.UnitService(test.pattern), gc.Equals, test.service) } } }
func (s *serviceSuite) TestUnitService(c *gc.C) { for i, test := range unitNameTests { c.Logf("test %d: %q", i, test.pattern) if !test.valid { expect := fmt.Sprintf("%q is not a valid unit name", test.pattern) _, err := names.UnitService(test.pattern) c.Assert(err, gc.ErrorMatches, expect) } else { result, err := names.UnitService(test.pattern) c.Assert(err, gc.IsNil) c.Assert(result, gc.Equals, test.service) } } }
func (u *UniterAPIV3) checkRemoteUnit(relUnit *state.RelationUnit, remoteUnitTag string) (string, error) { // Make sure the unit is indeed remote. if remoteUnitTag == u.auth.GetAuthTag().String() { return "", common.ErrPerm } // Check remoteUnit is indeed related. Note that we don't want to actually get // the *Unit, because it might have been removed; but its relation settings will // persist until the relation itself has been removed (and must remain accessible // because the local unit's view of reality may be time-shifted). tag, err := names.ParseUnitTag(remoteUnitTag) if err != nil { return "", common.ErrPerm } remoteUnitName := tag.Id() remoteServiceName, err := names.UnitService(remoteUnitName) if err != nil { return "", common.ErrPerm } rel := relUnit.Relation() _, err = rel.RelatedEndpoints(remoteServiceName) if err != nil { return "", common.ErrPerm } return remoteUnitName, nil }
func (c *DebugHooksCommand) validateHooks() error { if len(c.hooks) == 0 { return nil } service := names.UnitService(c.Target) relations, err := c.apiClient.ServiceCharmRelations(service) if err != nil { return err } validHooks := make(map[string]bool) for _, hook := range hooks.UnitHooks() { validHooks[string(hook)] = true } for _, relation := range relations { for _, hook := range hooks.RelationHooks() { hook := fmt.Sprintf("%s-%s", relation, hook) validHooks[hook] = true } } for _, hook := range c.hooks { if !validHooks[hook] { names := make([]string, 0, len(validHooks)) for hookName, _ := range validHooks { names = append(names, hookName) } sort.Strings(names) logger.Infof("unknown hook %s, valid hook names: %v", hook, names) return fmt.Errorf("unit %q does not contain hook %q", c.Target, hook) } } return nil }
// Merge merges in the provided leadership settings. Only leaders for // the given service may perform this operation. func (lsa *LeadershipSettingsAccessor) Merge(bulkArgs params.MergeLeadershipSettingsBulkParams) (params.ErrorResults, error) { callerUnitId := lsa.authorizer.GetAuthTag().Id() requireServiceId, err := names.UnitService(callerUnitId) if err != nil { return params.ErrorResults{}, err } results := make([]params.ErrorResult, len(bulkArgs.Params)) for i, arg := range bulkArgs.Params { result := &results[i] // TODO(fwereade): we shoudn't assume a ServiceTag: we should // use an actual auth func to determine permissions. serviceTag, err := names.ParseServiceTag(arg.ServiceTag) if err != nil { result.Error = common.ServerError(err) continue } serviceId := serviceTag.Id() if serviceId != requireServiceId { result.Error = common.ServerError(common.ErrPerm) continue } token := lsa.leaderCheckFn(serviceId, callerUnitId) err = lsa.mergeSettingsChunkFn(token, serviceId, arg.Settings) if err != nil { result.Error = common.ServerError(err) } } return params.ErrorResults{Results: results}, nil }
// Run implements cmd.Command.Run. func (c *ShowServiceCommand) Run(ctx *cmd.Context) error { apiclient, err := c.deps.NewClient(c) if err != nil { return errors.Annotatef(err, "can't connect to %s", c.ConnectionName()) } defer apiclient.Close() var unit string var service string if names.IsValidService(c.target) { service = c.target } else { service, err = names.UnitService(c.target) if err != nil { return errors.Errorf("%q is neither a service nor a unit", c.target) } unit = c.target } vals, err := apiclient.ListResources([]string{service}) if err != nil { return errors.Trace(err) } if len(vals) != 1 { return errors.Errorf("bad data returned from server") } v := vals[0] if unit == "" { return c.formatServiceResources(ctx, v) } return c.formatUnitResources(ctx, unit, service, v) }
// ServiceName returns the service name. func (u *Unit) ServiceName() string { service, err := names.UnitService(u.Name()) if err != nil { panic(err) } return service }
// numUnitsForService return the number of units belonging to the given service // currently in the environment. func (h *bundleHandler) numUnitsForService(service string) (num int) { for unit := range h.unitStatus { svc, err := names.UnitService(unit) if err != nil { // Should never happen. panic(err) } if svc == service { num++ } } return num }
func (m *leadershipService) authMember(serviceTag names.ServiceTag) bool { ownerTag := m.authorizer.GetAuthTag() unitTag, ok := ownerTag.(names.UnitTag) if !ok { return false } unitId := unitTag.Id() requireServiceId, err := names.UnitService(unitId) if err != nil { return false } return serviceTag.Id() == requireServiceId }
// Service returns the service. func (u *Unit) Service() (*Service, error) { serviceTag := names.NewServiceTag(names.UnitService(u.Name())) service := &Service{ st: u.st, tag: serviceTag.String(), } // Call Refresh() immediately to get the up-to-date // life and other needed locally cached fields. err := service.Refresh() if err != nil { return nil, err } return service, nil }
// NewTrackerWorker returns a TrackerWorker that attempts to claim and retain
// service leadership for the supplied unit. It will claim leadership for twice
// the supplied duration, and once it's leader it will renew leadership every
// time the duration elapses.
// Thus, successful leadership claims on the resulting Tracker will guarantee
// leadership for the duration supplied here without generating additional calls
// to the supplied manager (which may very well be on the other side of a
// network connection).
func NewTrackerWorker(tag names.UnitTag, leadership leadership.LeadershipManager, duration time.Duration) TrackerWorker {
	unitName := tag.Id()
	// NOTE(review): the error from UnitService is deliberately discarded;
	// presumably a valid UnitTag always yields a parseable unit name, so it
	// cannot fail here — confirm against the names package.
	serviceName, _ := names.UnitService(unitName)
	t := &tracker{
		unitName:          unitName,
		serviceName:       serviceName,
		leadership:        leadership,
		duration:          duration,
		claimTickets:      make(chan chan bool),
		waitLeaderTickets: make(chan chan bool),
		waitMinionTickets: make(chan chan bool),
	}
	go func() {
		defer t.tomb.Done()
		// On shutdown, close every outstanding wait ticket so that
		// callers blocked on leader/minion waits are released.
		defer func() {
			for _, ticketCh := range t.waitingLeader {
				close(ticketCh)
			}
			for _, ticketCh := range t.waitingMinion {
				close(ticketCh)
			}
		}()
		err := t.loop()
		// TODO: jam 2015-04-02 is this the most elegant way to make
		// sure we shutdown cleanly? Essentially the lowest level sees
		// that we are dying, and propagates an ErrDying up to us so
		// that we shut down, which we then are passing back into
		// Tomb.Kill().
		// Tomb.Kill() special cases the exact object ErrDying, and has
		// no idea about errors.Cause and the general errors.Trace
		// mechanisms that we use.
		// So we explicitly unwrap before calling tomb.Kill() else
		// tomb.Stop() thinks that we have a genuine error.
		switch cause := errors.Cause(err); cause {
		case tomb.ErrDying:
			err = cause
		}
		t.tomb.Kill(err)
	}()
	return t
}
// chooseMachine returns the id of a machine that will be used to host a unit // of all the given services. If one of the services still requires units to be // added, an empty string is returned, meaning that a new machine must be // created for holding the unit. If instead all units are already placed, // return the id of the machine which already holds units of the given services // and which hosts the least number of units. func (h *bundleHandler) chooseMachine(services ...string) string { candidateMachines := make(map[string]bool, len(h.unitStatus)) numUnitsPerMachine := make(map[string]int, len(h.unitStatus)) numUnitsPerService := make(map[string]int, len(h.data.Services)) // Collect the number of units and the corresponding machines for all // involved services. for unit, machine := range h.unitStatus { // Retrieve the top level machine. machine = strings.Split(machine, "/")[0] numUnitsPerMachine[machine]++ svc, err := names.UnitService(unit) if err != nil { // Should never happen because the bundle logic has already checked // that unit names are well formed. panic(err) } for _, service := range services { if service != svc { continue } numUnitsPerService[service]++ candidateMachines[machine] = true } } // If at least one service still requires units to be added, return an // empty machine in order to force new machine creation. for _, service := range services { if numUnitsPerService[service] < h.data.Services[service].NumUnits { return "" } } // Return the least used machine. var result string var min int for machine, num := range numUnitsPerMachine { if candidateMachines[machine] && (result == "" || num < min) { result, min = machine, num } } return result }
// Read reads leadership settings for the provided service ID. Any // unit of the service may perform this operation. func (lsa *LeadershipSettingsAccessor) Read(bulkArgs params.Entities) (params.GetLeadershipSettingsBulkResults, error) { callerUnitId := lsa.authorizer.GetAuthTag().Id() requireServiceId, err := names.UnitService(callerUnitId) if err != nil { return params.GetLeadershipSettingsBulkResults{}, err } results := make([]params.GetLeadershipSettingsResult, len(bulkArgs.Entities)) for i, arg := range bulkArgs.Entities { result := &results[i] // TODO(fwereade): we shoudn't assume a ServiceTag: we should // use an actual auth func to determine permissions. serviceTag, err := names.ParseServiceTag(arg.Tag) if err != nil { result.Error = common.ServerError(err) continue } serviceId := serviceTag.Id() if serviceId != requireServiceId { result.Error = common.ServerError(common.ErrPerm) continue } settings, err := lsa.getSettingsFn(serviceId) if err != nil { result.Error = common.ServerError(err) continue } result.Settings = settings } return params.GetLeadershipSettingsBulkResults{results}, nil }
// SetStatus sets the status on the service given by the unit in args if the unit is the leader.
// Each entity is processed independently; failures are reported per-entry
// in the returned ErrorResults rather than aborting the whole call.
func (s *ServiceStatusSetter) SetStatus(args params.SetStatus) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	canModify, err := s.getCanModify()
	if err != nil {
		return params.ErrorResults{}, err
	}
	for i, arg := range args.Entities {
		// TODO(fwereade): the auth is basically nonsense, and basically only
		// works by coincidence. Read carefully.
		// We "know" that arg.Tag is either the calling unit or its service
		// (because getCanModify is authUnitOrService, and we'll fail out if
		// it isn't); and, in practice, it's always going to be the calling
		// unit (because, /sigh, we don't actually use service tags to refer
		// to services in this method).
		tag, err := names.ParseTag(arg.Tag)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		if !canModify(tag) {
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		unitTag, ok := tag.(names.UnitTag)
		if !ok {
			// No matter what the canModify says, if this entity is not
			// a unit, we say "NO".
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		unitId := unitTag.Id()
		// Now we have the unit, we can get the service that should have been
		// specified in the first place...
		serviceId, err := names.UnitService(unitId)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		service, err := s.st.Service(serviceId)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		// ...and set the status, conditional on the unit being (and remaining)
		// service leader.
		checker := s.st.LeadershipChecker()
		token := checker.LeadershipCheck(serviceId, unitId)
		// TODO(fwereade) pass token into SetStatus instead of checking here.
		if err := token.Check(nil); err != nil {
			// TODO(fwereade) this should probably be ErrPerm is certain cases,
			// but I don't think I implemented an exported ErrNotLeader. I
			// should have done, though.
			result.Results[i].Error = ServerError(err)
			continue
		}
		if err := service.SetStatus(status.Status(arg.Status), arg.Info, arg.Data); err != nil {
			result.Results[i].Error = ServerError(err)
		}
	}
	return result, nil
}
// createAvailabilitySet creates the availability set for a machine to use
// if it doesn't already exist, and returns the availability set's ID. The
// algorithm used for choosing the availability set is:
//  - if there is a distribution group, use the same availability set as
//    the instances in that group. Instances in the group may be in
//    different availability sets (when multiple services colocated on a
//    machine), so we pick one arbitrarily
//  - if there is no distribution group, create an availability name with
//    a name based on the value of the tags.JujuUnitsDeployed tag in vmTags,
//    if it exists
//  - if there are no units assigned to the machine, then use the "juju"
//    availability set
func createAvailabilitySet(
	client compute.AvailabilitySetsClient,
	vmName, resourceGroup, location string,
	vmTags, envTags map[string]string,
	distributionGroupFunc func() ([]instance.Id, error),
	instancesFunc func([]instance.Id) ([]instance.Instance, error),
) (string, error) {
	logger.Debugf("selecting availability set for %q", vmName)

	// First we check if there's a distribution group, and if so,
	// use the availability set of the first instance we find in it.
	var instanceIds []instance.Id
	if distributionGroupFunc != nil {
		var err error
		instanceIds, err = distributionGroupFunc()
		if err != nil {
			return "", errors.Annotate(
				err, "querying distribution group",
			)
		}
	}
	instances, err := instancesFunc(instanceIds)
	// Partial or missing instances are tolerated here: we only need to
	// find one instance that already has an availability set assigned.
	switch err {
	case nil, environs.ErrPartialInstances, environs.ErrNoInstances:
	default:
		return "", errors.Annotate(
			err, "querying distribution group instances",
		)
	}
	for _, instance := range instances {
		if instance == nil {
			continue
		}
		// NOTE(review): assumes every non-nil instance is an
		// *azureInstance; a different concrete type would panic —
		// confirm instancesFunc's contract.
		instance := instance.(*azureInstance)
		availabilitySetSubResource := instance.Properties.AvailabilitySet
		if availabilitySetSubResource == nil || availabilitySetSubResource.ID == nil {
			continue
		}
		logger.Debugf("- selecting availability set of %q", instance.Name)
		return to.String(availabilitySetSubResource.ID), nil
	}

	// We'll have to create an availability set. Use the name of one of the
	// services assigned to the machine.
	availabilitySetName := "juju"
	if unitNames, ok := vmTags[tags.JujuUnitsDeployed]; ok {
		for _, unitName := range strings.Fields(unitNames) {
			if !names.IsValidUnit(unitName) {
				continue
			}
			serviceName, err := names.UnitService(unitName)
			if err != nil {
				return "", errors.Annotate(
					err, "getting service name",
				)
			}
			availabilitySetName = serviceName
			break
		}
	}

	logger.Debugf("- creating availability set %q", availabilitySetName)
	availabilitySet, err := client.CreateOrUpdate(
		resourceGroup, availabilitySetName, compute.AvailabilitySet{
			Location: to.StringPtr(location),
			// NOTE(axw) we do *not* want to use vmTags here,
			// because an availability set is shared by machines.
			Tags: toTagsPtr(envTags),
		},
	)
	if err != nil {
		return "", errors.Annotatef(
			err, "creating availability set %q", availabilitySetName,
		)
	}
	return to.String(availabilitySet.ID), nil
}
// Status returns the status of the Service for each given Unit tag.
// Each entity is handled independently; failures are recorded per-entry
// in the returned results rather than aborting the whole call.
func (s *ServiceStatusGetter) Status(args params.Entities) (params.ServiceStatusResults, error) {
	result := params.ServiceStatusResults{
		Results: make([]params.ServiceStatusResult, len(args.Entities)),
	}
	canAccess, err := s.getCanAccess()
	if err != nil {
		return params.ServiceStatusResults{}, err
	}
	for i, arg := range args.Entities {
		// TODO(fwereade): the auth is basically nonsense, and basically only
		// works by coincidence (and is happening at the wrong layer anyway).
		// Read carefully.
		// We "know" that arg.Tag is either the calling unit or its service
		// (because getCanAccess is authUnitOrService, and we'll fail out if
		// it isn't); and, in practice, it's always going to be the calling
		// unit (because, /sigh, we don't actually use service tags to refer
		// to services in this method).
		tag, err := names.ParseTag(arg.Tag)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		if !canAccess(tag) {
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		unitTag, ok := tag.(names.UnitTag)
		if !ok {
			// No matter what the canAccess says, if this entity is not
			// a unit, we say "NO".
			result.Results[i].Error = ServerError(ErrPerm)
			continue
		}
		unitId := unitTag.Id()
		// Now we have the unit, we can get the service that should have been
		// specified in the first place...
		serviceId, err := names.UnitService(unitId)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		service, err := s.st.Service(serviceId)
		if err != nil {
			result.Results[i].Error = ServerError(err)
			continue
		}
		// ...so we can check the unit's service leadership...
		checker := s.st.LeadershipChecker()
		token := checker.LeadershipCheck(serviceId, unitId)
		if err := token.Check(nil); err != nil {
			// TODO(fwereade) this should probably be ErrPerm is certain cases,
			// but I don't think I implemented an exported ErrNotLeader. I
			// should have done, though.
			result.Results[i].Error = ServerError(err)
			continue
		}
		// ...and collect the results.
		serviceStatus, unitStatuses, err := service.ServiceAndUnitsStatus()
		if err != nil {
			result.Results[i].Service.Error = ServerError(err)
			result.Results[i].Error = ServerError(err)
			continue
		}
		result.Results[i].Service.Status = serviceStatus.Status
		result.Results[i].Service.Info = serviceStatus.Message
		result.Results[i].Service.Data = serviceStatus.Data
		result.Results[i].Service.Since = serviceStatus.Since
		// Copy each unit's status into the wire-format result map.
		result.Results[i].Units = make(map[string]params.StatusResult, len(unitStatuses))
		for uTag, r := range unitStatuses {
			ur := params.StatusResult{
				Status: r.Status,
				Info:   r.Message,
				Data:   r.Data,
				Since:  r.Since,
			}
			result.Results[i].Units[uTag] = ur
		}
	}
	return result, nil
}
// ServiceName returns the service name. func (u *Unit) ServiceName() string { return names.UnitService(u.Name()) }