// serviceSetSettingsYAML updates the settings for the given service,
// taking the configuration from a YAML string.
func serviceSetSettingsYAML(service *state.Service, settings string) error {
	b := []byte(settings)
	var all map[string]interface{}
	if err := goyaml.Unmarshal(b, &all); err != nil {
		return errors.Annotate(err, "parsing settings data")
	}
	// If the YAML is not keyed on the service name, assume it is the
	// flat format produced by "get" and convert it before applying.
	if _, ok := all[service.Name()]; !ok {
		changes, err := settingsFromGetYaml(all)
		if err != nil {
			return errors.Annotate(err, "processing YAML generated by get")
		}
		return errors.Annotate(service.UpdateConfigSettings(changes), "updating settings with service YAML")
	}

	// The file is already keyed on the service name, so the charm's
	// config schema can parse it directly.
	ch, _, err := service.Charm()
	if err != nil {
		return errors.Annotate(err, "obtaining charm for this service")
	}
	changes, err := ch.Config().ParseSettingsYAML(b, service.Name())
	if err != nil {
		return errors.Annotate(err, "creating config from YAML")
	}
	return errors.Annotate(service.UpdateConfigSettings(changes), "updating settings")
}
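// The sketch below is illustrative and not part of the original source: it
// assumes an in-package *state.Service fixture for a service named "mysql"
// whose charm declares a string option "flavor". It exercises the branch
// taken for YAML keyed on the service name, which serviceSetSettingsYAML
// hands to the charm's config schema.
func exampleServiceSetSettingsYAML(svc *state.Service) error {
	// Keyed on the service name: parsed via ParseSettingsYAML.
	keyed := "mysql:\n  flavor: percona\n"
	if err := serviceSetSettingsYAML(svc, keyed); err != nil {
		return err
	}
	// Any document not keyed on the service name is instead treated as
	// "get" output and converted by settingsFromGetYaml before applying.
	return nil
}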
// AddUnits starts n units of the given service using the specified placement
// directives to allocate the machines.
func AddUnits(st *state.State, svc *state.Service, n int, placement []*instance.Placement) ([]*state.Unit, error) {
	units := make([]*state.Unit, n)
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty
	// All units should have the same networks as the service.
	networks, err := svc.Networks()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get service %q networks", svc.Name())
	}
	// TODO what do we do if we fail half-way through this process?
	for i := 0; i < n; i++ {
		unit, err := svc.AddUnit()
		if err != nil {
			return nil, errors.Annotatef(err, "cannot add unit %d/%d to service %q", i+1, n, svc.Name())
		}
		// No placement directive left for this unit; fall back to the
		// assignment policy.
		if i >= len(placement) {
			if err := st.AssignUnit(unit, policy); err != nil {
				return nil, errors.Trace(err)
			}
			units[i] = unit
			continue
		}
		if err := st.AssignUnitWithPlacement(unit, placement[i], networks); err != nil {
			return nil, errors.Annotatef(err, "adding new machine to host unit %q", unit.Name())
		}
		units[i] = unit
	}
	return units, nil
}
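// Hedged usage sketch of the placement-directive variant above, not from the
// original source: deploy three units, pinning the first two with placement
// directives; the loop falls back to st.AssignUnit once the directives run
// out. st and svc are assumed fixtures.
func exampleAddUnits(st *state.State, svc *state.Service) ([]*state.Unit, error) {
	placement := []*instance.Placement{
		{Scope: instance.MachineScope, Directive: "3"}, // existing machine 3
		{Scope: "lxc", Directive: "4"},                 // new lxc container on machine 4
	}
	// The third unit has no directive, so it is assigned via AssignCleanEmpty.
	return AddUnits(st, svc, 3, placement)
}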
func buildServiceMatcherShims(s *state.Service, patterns ...string) (shims []closurePredicate, _ error) {
	// Match on name.
	shims = append(shims, func() (bool, bool, error) {
		for _, p := range patterns {
			if strings.ToLower(s.Name()) == strings.ToLower(p) {
				return true, true, nil
			}
		}
		return false, false, nil
	})

	// Match on exposure.
	shims = append(shims, func() (bool, bool, error) { return matchExposure(patterns, s) })

	// If the service has a unit instance that matches any of the
	// given criteria, consider the service a match as well.
	unitShims, err := buildShimsForUnit(s.AllUnits, patterns...)
	if err != nil {
		return nil, err
	}
	shims = append(shims, unitShims...)

	// Units may be able to match the pattern. Ultimately defer to
	// that logic, and guard against breaking the predicate chain.
	if len(unitShims) == 0 {
		shims = append(shims, func() (bool, bool, error) { return false, true, nil })
	}

	return shims, nil
}
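// Hedged sketch of how a shim chain might be consumed; the original
// evaluator is not shown in this excerpt, and the (match, ok, err)
// interpretation is an assumption: the first boolean reports a match, the
// second whether the predicate could reach a decision at all.
func matchesAnyShim(shims []closurePredicate) (bool, error) {
	for _, shim := range shims {
		match, ok, err := shim()
		if err != nil {
			return false, err
		}
		if ok && match {
			return true, nil
		}
	}
	return false, nil
}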
func (context *statusContext) processService(service *state.Service) (status params.ServiceStatus) {
	serviceCharmURL, _ := service.CharmURL()
	status.Charm = serviceCharmURL.String()
	status.Exposed = service.IsExposed()
	status.Life = processLife(service)

	latestCharm, ok := context.latestCharms[*serviceCharmURL.WithRevision(-1)]
	if ok && latestCharm != serviceCharmURL.String() {
		status.CanUpgradeTo = latestCharm
	}

	var err error
	status.Relations, status.SubordinateTo, err = context.processServiceRelations(service)
	if err != nil {
		status.Err = err
		return
	}
	networks, err := service.Networks()
	if err != nil {
		status.Err = err
		return
	}
	var cons constraints.Value
	if service.IsPrincipal() {
		// Only principals can have constraints.
		cons, err = service.Constraints()
		if err != nil {
			status.Err = err
			return
		}
	}
	// TODO(dimitern): Drop support for this in a follow-up.
	if len(networks) > 0 || cons.HaveNetworks() {
		// Only the explicitly requested networks (using "juju deploy
		// <svc> --networks=...") will be enabled, and although networks
		// constraints are used for instance selection when specified,
		// they won't actually be enabled.
		status.Networks = params.NetworksSpecification{
			Enabled:  networks,
			Disabled: append(cons.IncludeNetworks(), cons.ExcludeNetworks()...),
		}
	}
	if service.IsPrincipal() {
		status.Units = context.processUnits(context.units[service.Name()], serviceCharmURL.String())
		serviceStatus, err := service.Status()
		if err != nil {
			status.Err = err
			return
		}
		status.Status.Status = params.Status(serviceStatus.Status)
		status.Status.Info = serviceStatus.Message
		status.Status.Data = serviceStatus.Data
		status.Status.Since = serviceStatus.Since

		status.MeterStatuses = context.processUnitMeterStatuses(context.units[service.Name()])
	}
	return status
}
// serviceSetSettingsYAML updates the settings for the given service,
// taking the configuration from a YAML string.
func serviceSetSettingsYAML(service *state.Service, settings string) error {
	ch, _, err := service.Charm()
	if err != nil {
		return err
	}
	changes, err := ch.Config().ParseSettingsYAML([]byte(settings), service.Name())
	if err != nil {
		return err
	}
	return service.UpdateConfigSettings(changes)
}
func (context *statusContext) processServiceRelations(service *state.Service) (related map[string][]string, subord []string, err error) {
	subordSet := make(set.Strings)
	related = make(map[string][]string)
	relations := context.relations[service.Name()]
	for _, relation := range relations {
		ep, err := relation.Endpoint(service.Name())
		if err != nil {
			return nil, nil, err
		}
		relationName := ep.Relation.Name
		eps, err := relation.RelatedEndpoints(service.Name())
		if err != nil {
			return nil, nil, err
		}
		for _, ep := range eps {
			if isSubordinate(&ep, service) {
				subordSet.Add(ep.ServiceName)
			}
			related[relationName] = append(related[relationName], ep.ServiceName)
		}
	}
	for relationName, serviceNames := range related {
		sn := set.NewStrings(serviceNames...)
		related[relationName] = sn.SortedValues()
	}
	return related, subordSet.SortedValues(), nil
}
func assertOneRelation(c *gc.C, srv *state.Service, relId int, endpoints ...state.Endpoint) *state.Relation {
	rels, err := srv.Relations()
	c.Assert(err, gc.IsNil)
	c.Assert(rels, gc.HasLen, 1)

	rel := rels[0]
	c.Assert(rel.Id(), gc.Equals, relId)
	c.Assert(rel.Endpoints(), jc.SameContents, endpoints)

	name := srv.Name()
	expectEp := endpoints[0]
	ep, err := rel.Endpoint(name)
	c.Assert(err, gc.IsNil)
	c.Assert(ep, gc.DeepEquals, expectEp)
	if len(endpoints) == 2 {
		expectEp = endpoints[1]
	}
	eps, err := rel.RelatedEndpoints(name)
	c.Assert(err, gc.IsNil)
	c.Assert(eps, gc.DeepEquals, []state.Endpoint{expectEp})
	return rel
}
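// Hedged usage sketch, not from the original source: after relating a
// wordpress and a mysql service in a test, assert the single relation as
// seen from mysql's side. The endpoint values and relation id are
// assumptions for illustration.
func exampleAssertOneRelation(c *gc.C, mysql *state.Service) {
	mysqlEP := state.Endpoint{
		ServiceName: "mysql",
		Relation: charm.Relation{
			Interface: "mysql",
			Name:      "server",
			Role:      charm.RoleProvider,
			Scope:     charm.ScopeGlobal,
		},
	}
	wordpressEP := state.Endpoint{
		ServiceName: "wordpress",
		Relation: charm.Relation{
			Interface: "mysql",
			Name:      "db",
			Role:      charm.RoleRequirer,
			Scope:     charm.ScopeGlobal,
		},
	}
	assertOneRelation(c, mysql, 0, mysqlEP, wordpressEP)
}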
func NewProReqRelation(c *gc.C, s *ConnSuite, scope charm.RelationScope) *ProReqRelation {
	psvc := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))
	var rsvc *state.Service
	if scope == charm.ScopeGlobal {
		rsvc = s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	} else {
		rsvc = s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging"))
	}
	eps, err := s.State.InferEndpoints("mysql", rsvc.Name())
	c.Assert(err, jc.ErrorIsNil)
	rel, err := s.State.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	prr := &ProReqRelation{rel: rel, psvc: psvc, rsvc: rsvc}
	prr.pu0, prr.pru0 = addRU(c, psvc, rel, nil)
	prr.pu1, prr.pru1 = addRU(c, psvc, rel, nil)
	if scope == charm.ScopeGlobal {
		prr.ru0, prr.rru0 = addRU(c, rsvc, rel, nil)
		prr.ru1, prr.rru1 = addRU(c, rsvc, rel, nil)
	} else {
		prr.ru0, prr.rru0 = addRU(c, rsvc, rel, prr.pu0)
		prr.ru1, prr.rru1 = addRU(c, rsvc, rel, prr.pu1)
	}
	return prr
}
func (context *statusContext) processService(service *state.Service) params.ServiceStatus {
	serviceCharmURL, _ := service.CharmURL()
	var processedStatus = params.ServiceStatus{
		Charm:   serviceCharmURL.String(),
		Exposed: service.IsExposed(),
		Life:    processLife(service),
	}

	if latestCharm, ok := context.latestCharms[*serviceCharmURL.WithRevision(-1)]; ok && latestCharm != nil {
		if latestCharm.Revision() > serviceCharmURL.Revision {
			processedStatus.CanUpgradeTo = latestCharm.String()
		}
	}

	var err error
	processedStatus.Relations, processedStatus.SubordinateTo, err = context.processServiceRelations(service)
	if err != nil {
		processedStatus.Err = err
		return processedStatus
	}
	if service.IsPrincipal() {
		processedStatus.Units = context.processUnits(context.units[service.Name()], serviceCharmURL.String())
		serviceStatus, err := service.Status()
		if err != nil {
			processedStatus.Err = err
			return processedStatus
		}
		processedStatus.Status.Status = serviceStatus.Status
		processedStatus.Status.Info = serviceStatus.Message
		processedStatus.Status.Data = serviceStatus.Data
		processedStatus.Status.Since = serviceStatus.Since

		processedStatus.MeterStatuses = context.processUnitMeterStatuses(context.units[service.Name()])
	}
	return processedStatus
}
func (s *serviceSuite) claimLeadership(c *gc.C, unit *state.Unit, service *state.Service) {
	manager := leadership.NewLeadershipManager(lease.Manager())
	err := manager.ClaimLeadership(service.Name(), unit.Name(), time.Minute)
	c.Assert(err, jc.ErrorIsNil)
}
// AddUnits starts n units of the given service and allocates machines
// to them as necessary.
func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) {
	if machineIdSpec != "" && n != 1 {
		return nil, errors.Errorf("cannot add multiple units of service %q to a single machine", svc.Name())
	}
	placement, err := makePlacement(machineIdSpec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return AddUnitsWithPlacement(st, svc, n, placement)
}
// AddUnits starts n units of the given service and allocates machines
// to them as necessary.
func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) {
	units := make([]*state.Unit, n)
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty
	// All units should have the same networks as the service.
	networks, err := svc.Networks()
	if err != nil {
		return nil, fmt.Errorf("cannot get service %q networks: %v", svc.Name(), err)
	}
	// TODO what do we do if we fail half-way through this process?
	for i := 0; i < n; i++ {
		unit, err := svc.AddUnit()
		if err != nil {
			return nil, fmt.Errorf("cannot add unit %d/%d to service %q: %v", i+1, n, svc.Name(), err)
		}
		if machineIdSpec != "" {
			if n != 1 {
				return nil, fmt.Errorf("cannot add multiple units of service %q to a single machine", svc.Name())
			}
			// machineIdSpec may be an existing machine or container, eg 3/lxc/2,
			// or a new container on a machine, eg lxc:3.
			mid := machineIdSpec
			var containerType instance.ContainerType
			specParts := strings.SplitN(machineIdSpec, ":", 2)
			if len(specParts) > 1 {
				firstPart := specParts[0]
				var err error
				if containerType, err = instance.ParseContainerType(firstPart); err == nil {
					mid = specParts[1]
				} else {
					mid = machineIdSpec
				}
			}
			if !names.IsMachine(mid) {
				return nil, fmt.Errorf("invalid force machine id %q", mid)
			}
			var unitCons *constraints.Value
			unitCons, err = unit.Constraints()
			if err != nil {
				return nil, err
			}

			var m *state.Machine
			// If a container is to be used, create it.
			if containerType != "" {
				// Create the new machine marked as dirty so that
				// nothing else will grab it before we assign the unit to it.
				template := state.MachineTemplate{
					Series:            unit.Series(),
					Jobs:              []state.MachineJob{state.JobHostUnits},
					Dirty:             true,
					Constraints:       *unitCons,
					RequestedNetworks: networks,
				}
				m, err = st.AddMachineInsideMachine(template, mid, containerType)
			} else {
				m, err = st.Machine(mid)
			}
			if err != nil {
				return nil, fmt.Errorf("cannot assign unit %q to machine: %v", unit.Name(), err)
			}
			err = unit.AssignToMachine(m)
			if err != nil {
				return nil, err
			}
		} else if err := st.AssignUnit(unit, policy); err != nil {
			return nil, err
		}
		units[i] = unit
	}
	return units, nil
}
// AddUnits starts n units of the given service and allocates machines
// to them as necessary.
func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) {
	if machineIdSpec != "" && n != 1 {
		return nil, errors.Errorf("cannot add multiple units of service %q to a single machine", svc.Name())
	}
	var placement []*instance.Placement
	if machineIdSpec != "" {
		// machineIdSpec may be an existing machine or container, eg 3/lxc/2,
		// or a new container on a machine, eg lxc:3.
		mid := machineIdSpec
		scope := instance.MachineScope
		var containerType instance.ContainerType
		specParts := strings.SplitN(machineIdSpec, ":", 2)
		if len(specParts) > 1 {
			firstPart := specParts[0]
			var err error
			if containerType, err = instance.ParseContainerType(firstPart); err == nil {
				mid = specParts[1]
				scope = string(containerType)
			}
		}
		if !names.IsValidMachine(mid) {
			return nil, fmt.Errorf("invalid force machine id %q", mid)
		}
		placement = []*instance.Placement{
			{
				Scope:     scope,
				Directive: mid,
			},
		}
	}
	return AddUnitsWithPlacement(st, svc, n, placement)
}
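// Hedged illustration, not from the original source, of how the three
// machineIdSpec forms accepted by the variant above translate into
// placement; st and svc are assumed fixtures.
func exampleMachineIdSpecs(st *state.State, svc *state.Service) error {
	// "3": an existing machine; machine-scoped placement.
	if _, err := AddUnits(st, svc, 1, "3"); err != nil {
		return err
	}
	// "3/lxc/2": an existing container; still machine-scoped.
	if _, err := AddUnits(st, svc, 1, "3/lxc/2"); err != nil {
		return err
	}
	// "lxc:3": a new lxc container on machine 3; scope becomes "lxc".
	_, err := AddUnits(st, svc, 1, "lxc:3")
	return err
}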
func (s *serviceSuite) claimLeadership(c *gc.C, unit *state.Unit, service *state.Service) {
	claimer := s.State.LeadershipClaimer()
	err := claimer.ClaimLeadership(service.Name(), unit.Name(), time.Minute)
	c.Assert(err, jc.ErrorIsNil)
}