// fetchMachines returns a map from top level machine id to machines, where
// machines[0] is the host machine and machines[1..n] are any containers
// (including nested ones).
//
// If machineIds is non-nil, only machines whose IDs are in the set are returned.
func fetchMachines(st stateInterface, machineIds set.Strings) (map[string][]*state.Machine, error) {
	v := make(map[string][]*state.Machine)
	machines, err := st.AllMachines()
	if err != nil {
		return nil, err
	}
	// AllMachines gives us machines sorted by id.
	for _, m := range machines {
		if machineIds != nil && !machineIds.Contains(m.Id()) {
			continue
		}
		parentId, ok := m.ParentId()
		if !ok {
			// Only top level host machines go directly into the machine map.
			v[m.Id()] = []*state.Machine{m}
		} else {
			topParentId := state.TopParentId(m.Id())
			machines, ok := v[topParentId]
			if !ok {
				panic(fmt.Errorf("unexpected machine id %q", parentId))
			}
			machines = append(machines, m)
			v[topParentId] = machines
		}
	}
	return v, nil
}
// collect calls f on all values in src and returns an alphabetically
// ordered list of the returned results without duplicates.
func (src List) collect(f func(*Tools) string) []string {
	var seen set.Strings
	for _, tools := range src {
		seen.Add(f(tools))
	}
	return seen.SortedValues()
}
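// The following is a hypothetical usage sketch, not part of the original
// source: it shows how collect is typically wrapped to report the distinct
// series present in a List. The AllSeries name is an assumed helper for
// illustration only.
func (src List) AllSeries() []string {
	return src.collect(func(tools *Tools) string {
		return tools.Version.Series
	})
}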
func (stringSetSuite) TestUninitializedPanics(c *gc.C) {
	f := func() {
		var s set.Strings
		s.Add("foo")
	}
	c.Assert(f, gc.PanicMatches, "uninitalised set")
}
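// A hypothetical companion test, not in the original source: it assumes the
// panic above is avoided by constructing the set explicitly with
// set.NewStrings before calling Add.
func (stringSetSuite) TestInitializedAddWorks(c *gc.C) {
	s := set.NewStrings()
	s.Add("foo")
	c.Assert(s.Contains("foo"), gc.Equals, true)
}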
// ConvertSpaceName returns a string derived from name that does not
// already exist in existing. It does not modify existing.
func ConvertSpaceName(name string, existing set.Strings) string {
	// First lower case and replace spaces with dashes.
	name = strings.Replace(name, " ", "-", -1)
	name = strings.ToLower(name)
	// Replace any character that isn't in the set "-", "a-z", "0-9".
	name = network.SpaceInvalidChars.ReplaceAllString(name, "")
	// Get rid of any dashes at the start as that isn't valid.
	name = dashPrefix.ReplaceAllString(name, "")
	// And any at the end.
	name = dashSuffix.ReplaceAllString(name, "")
	// Replace multiple dashes with a single dash.
	name = multipleDashes.ReplaceAllString(name, "-")
	// Special case for when the space name was only dashes or invalid
	// characters.
	if name == "" {
		name = "empty"
	}
	// If this name is in use, add a numerical suffix.
	if existing.Contains(name) {
		counter := 2
		for existing.Contains(name + fmt.Sprintf("-%d", counter)) {
			counter += 1
		}
		name = name + fmt.Sprintf("-%d", counter)
	}
	return name
}
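// Hypothetical usage sketch, not in the original source: it illustrates the
// expected transformations, assuming the surrounding regexps strip invalid
// characters and leading/trailing dashes as the comments above describe.
func ExampleConvertSpaceName() {
	fmt.Println(ConvertSpaceName("My Space!", set.NewStrings()))
	fmt.Println(ConvertSpaceName("My Space!", set.NewStrings("my-space")))
	// Output:
	// my-space
	// my-space-2
}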
func (s *ModelConfigSourceSuite) assertModelConfigValues(c *gc.C, modelCfg *config.Config, modelAttributes, controllerAttributes set.Strings) {
	expectedValues := make(config.ConfigValues)
	defaultAttributes := set.NewStrings()
	for defaultAttr := range config.ConfigDefaults() {
		defaultAttributes.Add(defaultAttr)
	}
	for attr, val := range modelCfg.AllAttrs() {
		source := "model"
		if defaultAttributes.Contains(attr) {
			source = "default"
		}
		if modelAttributes.Contains(attr) {
			source = "model"
		}
		if controllerAttributes.Contains(attr) {
			source = "controller"
		}
		expectedValues[attr] = config.ConfigValue{
			Value:  val,
			Source: source,
		}
	}
	sources, err := s.State.ModelConfigValues()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(sources, jc.DeepEquals, expectedValues)
}
func (s *macOSXVersionSuite) TestOSVersion(c *gc.C) {
	knownSeries := set.Strings{}
	for _, series := range macOSXSeries {
		knownSeries.Add(series)
	}
	c.Check(osVersion(), jc.Satisfies, knownSeries.Contains)
}
func (context *statusContext) processServiceRelations(service *state.Service) (
	related map[string][]string, subord []string, err error) {
	var subordSet set.Strings
	related = make(map[string][]string)
	relations := context.relations[service.Name()]
	for _, relation := range relations {
		ep, err := relation.Endpoint(service.Name())
		if err != nil {
			return nil, nil, err
		}
		relationName := ep.Relation.Name
		eps, err := relation.RelatedEndpoints(service.Name())
		if err != nil {
			return nil, nil, err
		}
		for _, ep := range eps {
			if isSubordinate(&ep, service) {
				subordSet.Add(ep.ServiceName)
			}
			related[relationName] = append(related[relationName], ep.ServiceName)
		}
	}
	for relationName, serviceNames := range related {
		sn := set.NewStrings(serviceNames...)
		related[relationName] = sn.SortedValues()
	}
	return related, subordSet.SortedValues(), nil
}
// storeManifest stores, into dataPath, the supplied manifest for the supplied charm.
func (d *manifestDeployer) storeManifest(url *charm.URL, manifest set.Strings) error {
	if err := os.MkdirAll(d.DataPath(manifestsDataPath), 0755); err != nil {
		return err
	}
	name := charm.Quote(url.String())
	path := filepath.Join(d.DataPath(manifestsDataPath), name)
	return utils.WriteYaml(path, manifest.SortedValues())
}
func (s *macOSXVersionSuite) TestOSVersion(c *gc.C) {
	knownSeries := set.Strings{}
	for _, series := range macOSXSeries {
		knownSeries.Add(series)
	}
	version, err := osVersion()
	c.Assert(err, gc.IsNil)
	c.Check(version, jc.Satisfies, knownSeries.Contains)
}
func (s *MigrationSuite) AssertExportedFields(c *gc.C, doc interface{}, fields set.Strings) {
	expected := getExportedFields(doc)
	unknown := expected.Difference(fields)
	removed := fields.Difference(expected)
	// If this test fails, it means that extra fields have been added to the
	// doc without thinking about the migration implications.
	c.Check(unknown, gc.HasLen, 0)
	c.Assert(removed, gc.HasLen, 0)
}
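// Hypothetical usage sketch, not in the original source: a migration test
// would pass the set of fields it knows how to export, so that any new,
// unhandled field on the doc fails the check. The doc type and field names
// here are made up for illustration.
func (s *MigrationSuite) TestExampleDocFields(c *gc.C) {
	fields := set.NewStrings(
		"Id",
		"Life",
		"Series",
	)
	s.AssertExportedFields(c, exampleDoc{}, fields)
}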
// ParseMetadataFromStorage loads ImageMetadata from the specified storage reader.
func ParseMetadataFromStorage(c *gc.C, stor storage.StorageReader) []*imagemetadata.ImageMetadata {
	source := storage.NewStorageSimpleStreamsDataSource("test storage reader", stor, "images")

	// Find the simplestreams index file.
	params := simplestreams.ValueParams{
		DataType:      "image-ids",
		ValueTemplate: imagemetadata.ImageMetadata{},
	}
	const requireSigned = false
	indexPath := simplestreams.UnsignedIndex
	indexRef, err := simplestreams.GetIndexWithFormat(
		source, indexPath, "index:1.0", requireSigned, simplestreams.CloudSpec{}, params)
	c.Assert(err, gc.IsNil)
	c.Assert(indexRef.Indexes, gc.HasLen, 1)

	imageIndexMetadata := indexRef.Indexes["com.ubuntu.cloud:custom"]
	c.Assert(imageIndexMetadata, gc.NotNil)

	// Read the products file contents.
	r, err := stor.Get(path.Join("images", imageIndexMetadata.ProductsFilePath))
	c.Assert(err, gc.IsNil)
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	c.Assert(err, gc.IsNil)

	// Parse the products file metadata.
	url, err := source.URL(imageIndexMetadata.ProductsFilePath)
	c.Assert(err, gc.IsNil)
	cloudMetadata, err := simplestreams.ParseCloudMetadata(data, "products:1.0", url, imagemetadata.ImageMetadata{})
	c.Assert(err, gc.IsNil)

	// Collate the metadata.
	imageMetadataMap := make(map[string]*imagemetadata.ImageMetadata)
	var expectedProductIds set.Strings
	var imageVersions set.Strings
	for _, mc := range cloudMetadata.Products {
		for _, items := range mc.Items {
			for key, item := range items.Items {
				imageMetadata := item.(*imagemetadata.ImageMetadata)
				imageMetadataMap[key] = imageMetadata
				imageVersions.Add(key)
				productId := fmt.Sprintf("com.ubuntu.cloud:server:%s:%s", mc.Version, imageMetadata.Arch)
				expectedProductIds.Add(productId)
			}
		}
	}

	// Make sure the index's product IDs are all represented in the products metadata.
	sort.Strings(imageIndexMetadata.ProductIds)
	c.Assert(imageIndexMetadata.ProductIds, gc.DeepEquals, expectedProductIds.SortedValues())

	imageMetadata := make([]*imagemetadata.ImageMetadata, len(imageMetadataMap))
	for i, key := range imageVersions.SortedValues() {
		imageMetadata[i] = imageMetadataMap[key]
	}
	return imageMetadata
}
// removeDiff removes every path in oldManifest that is not present in newManifest.
func (d *manifestDeployer) removeDiff(oldManifest, newManifest set.Strings) error {
	diff := oldManifest.Difference(newManifest)
	for _, path := range diff.SortedValues() {
		fullPath := filepath.Join(d.charmPath, filepath.FromSlash(path))
		if err := os.RemoveAll(fullPath); err != nil {
			return err
		}
	}
	return nil
}
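// Hypothetical helper sketch, not in the original source: Difference keeps
// only the paths recorded in the old manifest but missing from the new one,
// which is exactly the set of files removeDiff deletes.
func staleManifestPaths(oldManifest, newManifest set.Strings) []string {
	return oldManifest.Difference(newManifest).SortedValues()
}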
func (w *minUnitsWatcher) initial() (*set.Strings, error) {
	serviceNames := new(set.Strings)
	doc := &minUnitsDoc{}
	iter := w.st.minUnits.Find(nil).Iter()
	for iter.Next(doc) {
		w.known[doc.ServiceName] = doc.Revno
		serviceNames.Add(doc.ServiceName)
	}
	return serviceNames, iter.Err()
}
func (w *minUnitsWatcher) initial() (*set.Strings, error) {
	var serviceNames set.Strings
	var doc minUnitsDoc
	iter := w.st.minUnits.Find(nil).Iter()
	for iter.Next(&doc) {
		w.known[doc.ServiceName] = doc.Revno
		serviceNames.Add(doc.ServiceName)
	}
	return &serviceNames, iter.Close()
}
// initial pre-loads the action documents that are already queued for
// the units this watcher was started for.
func (w *actionWatcher) initial() (*set.Strings, error) {
	var actions set.Strings
	var doc actionDoc
	iter := w.st.actions.Find(nil).Iter()
	for iter.Next(&doc) {
		if w.filterFn(doc.Id) {
			actions.Add(doc.Id)
		}
	}
	return &actions, iter.Close()
}
func (w *lifecycleWatcher) initial() (*set.Strings, error) {
	var ids set.Strings
	var doc lifeDoc
	iter := w.coll.Find(w.members).Select(lifeFields).Iter()
	for iter.Next(&doc) {
		ids.Add(doc.Id)
		if doc.Life != Dead {
			w.life[doc.Id] = doc.Life
		}
	}
	return &ids, iter.Close()
}
// findAvailableTools returns a list of available tools,
// including tools that may be locally built and then
// uploaded. Tools that need to be built will have an
// empty URL.
func findAvailableTools(env environs.Environ, arch *string, upload bool) (coretools.List, error) {
	if upload {
		// We're forcing an upload: ensure we can do so.
		if err := validateUploadAllowed(env, arch); err != nil {
			return nil, err
		}
		return locallyBuildableTools(), nil
	}

	// We're not forcing an upload, so look for tools
	// in the environment's simplestreams search paths
	// for existing tools.
	var vers *version.Number
	if agentVersion, ok := env.Config().AgentVersion(); ok {
		vers = &agentVersion
	}
	dev := version.Current.IsDev() || env.Config().Development()
	logger.Debugf("looking for bootstrap tools: version=%v", vers)
	toolsList, findToolsErr := findBootstrapTools(env, vers, arch, dev)
	if findToolsErr != nil && !errors.IsNotFound(findToolsErr) {
		return nil, findToolsErr
	}
	if !dev || vers != nil {
		// We are not running a development build, or agent-version
		// was specified; the only tools available are the ones we've
		// just found.
		return toolsList, findToolsErr
	}
	// The tools located may not include the ones that the
	// provider requires. We are running a development build,
	// so augment the list of tools with those that we can build
	// locally.

	// Collate the set of arch+series that are externally available
	// so we can see if we need to build any locally. If we need
	// to, only then do we validate that we can upload (which
	// involves a potentially expensive SupportedArchitectures call).
	var archSeries set.Strings
	for _, tools := range toolsList {
		archSeries.Add(tools.Version.Arch + tools.Version.Series)
	}
	var localToolsList coretools.List
	for _, tools := range locallyBuildableTools() {
		if !archSeries.Contains(tools.Version.Arch + tools.Version.Series) {
			localToolsList = append(localToolsList, tools)
		}
	}
	if len(localToolsList) == 0 || validateUploadAllowed(env, arch) != nil {
		return toolsList, findToolsErr
	}
	return append(toolsList, localToolsList...), nil
}
func (m *Machine) prepareOneSetDevicesAddresses(args *LinkLayerDeviceAddress, allProviderIDs set.Strings) (_ *ipAddressDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid address %q", args.CIDRAddress)

	if err := m.validateSetDevicesAddressesArgs(args); err != nil {
		return nil, errors.Trace(err)
	}
	if allProviderIDs.Contains(string(args.ProviderID)) {
		return nil, NewProviderIDNotUniqueError(args.ProviderID)
	}

	return m.newIPAddressDocFromArgs(args)
}
func (m *Machine) prepareOneSetLinkLayerDeviceArgs(args *LinkLayerDeviceArgs, pendingNames set.Strings) (_ *linkLayerDeviceDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid device %q", args.Name)

	if err := m.validateSetLinkLayerDeviceArgs(args); err != nil {
		return nil, errors.Trace(err)
	}
	if pendingNames.Contains(args.Name) {
		return nil, errors.NewNotValid(nil, "Name specified more than once")
	}

	return m.newLinkLayerDeviceDocFromArgs(args), nil
}
// stripIgnored removes the ignored DBs from the mongo dump files.
// This involves deleting DB-specific directories.
//
// NOTE(fwereade): the only directories we actually delete are "admin"
// and "backups"; and those only if they're in the `ignored` set. I have
// no idea why the code was structured this way; but I am, as requested
// as usual by management, *not* fixing anything about backup beyond the
// bug du jour.
//
// Basically, the ignored set is a filthy lie, and all the work we do to
// generate it is pure obfuscation.
func stripIgnored(ignored set.Strings, dumpDir string) error {
	for _, dbName := range ignored.Values() {
		switch dbName {
		case storageDBName, "admin":
			dirname := filepath.Join(dumpDir, dbName)
			if err := os.RemoveAll(dirname); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// AssertAllSpacesResult makes it easier to verify AllSpaces results.
func (s *SubnetsSuite) AssertAllSpacesResult(c *gc.C, got params.SpaceResults, expected []common.BackingSpace) {
	seen := set.Strings{}
	results := []params.SpaceResult{}
	for _, space := range expected {
		if seen.Contains(space.Name()) {
			continue
		}
		seen.Add(space.Name())

		result := params.SpaceResult{}
		result.Tag = names.NewSpaceTag(space.Name()).String()
		results = append(results, result)
	}
	c.Assert(got, jc.DeepEquals, params.SpaceResults{Results: results})
}
func (w *actionWatcher) merge(changes *set.Strings, updates map[interface{}]bool) error {
	for id, exists := range updates {
		if id, ok := id.(string); ok {
			if exists {
				changes.Add(id)
			} else {
				changes.Remove(id)
			}
		} else {
			return fmt.Errorf("id is not of type string")
		}
	}
	return nil
}
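// Hypothetical usage sketch, not in the original source: updates map an
// action id to whether the document still exists, so merge adds newly
// queued ids and drops removed ones from the pending change set. The ids
// shown are made up for illustration.
func exampleMerge(w *actionWatcher) (*set.Strings, error) {
	changes := set.NewStrings("action-1")
	updates := map[interface{}]bool{
		"action-2": true,  // newly queued
		"action-1": false, // removed since the last event
	}
	if err := w.merge(&changes, updates); err != nil {
		return nil, err
	}
	// changes now contains only "action-2".
	return &changes, nil
}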
func mergedAddresses(machineAddresses, providerAddresses []address) []network.Address {
	merged := make([]network.Address, len(providerAddresses), len(providerAddresses)+len(machineAddresses))
	var providerValues set.Strings
	for i, address := range providerAddresses {
		providerValues.Add(address.Value)
		merged[i] = address.InstanceAddress()
	}
	for _, address := range machineAddresses {
		if !providerValues.Contains(address.Value) {
			merged = append(merged, address.InstanceAddress())
		}
	}
	return merged
}
func (w *lifecycleWatcher) merge(ids set.Strings, updates map[interface{}]bool) error {
	// Separate ids into those thought to exist and those known to be removed.
	var changed []string
	latest := make(map[string]Life)
	for id, exists := range updates {
		switch id := id.(type) {
		case string:
			if exists {
				changed = append(changed, id)
			} else {
				latest[id] = Dead
			}
		default:
			return errors.Errorf("id is not of type string, got %T", id)
		}
	}

	// Collect life states from ids thought to exist. Any that don't actually
	// exist are ignored (we'll hear about them in the next set of updates --
	// all that's actually happened in that situation is that the watcher
	// events have lagged a little behind reality).
	iter := w.coll.Find(bson.D{{"_id", bson.D{{"$in", changed}}}}).Select(lifeFields).Iter()
	var doc lifeDoc
	for iter.Next(&doc) {
		latest[doc.Id] = doc.Life
	}
	if err := iter.Close(); err != nil {
		return err
	}

	// Add to ids any whose life state is known to have changed.
	for id, newLife := range latest {
		gone := newLife == Dead
		oldLife, known := w.life[id]
		switch {
		case known && gone:
			delete(w.life, id)
		case !known && !gone:
			w.life[id] = newLife
		case known && newLife != oldLife:
			w.life[id] = newLife
		default:
			continue
		}
		ids.Add(id)
	}
	return nil
}
func (s *BootstrapSuite) testToolsMetadata(c *gc.C, exploded bool) {
	provider, err := environs.Provider(s.envcfg.Type())
	c.Assert(err, gc.IsNil)
	env, err := provider.Open(s.envcfg)
	c.Assert(err, gc.IsNil)
	envtesting.RemoveFakeToolsMetadata(c, env.Storage())

	_, cmd, err := s.initBootstrapCommand(c, nil, "--env-config", s.b64yamlEnvcfg, "--instance-id", string(s.instanceId))
	c.Assert(err, gc.IsNil)
	err = cmd.Run(nil)
	c.Assert(err, gc.IsNil)

	// We don't write metadata at bootstrap anymore.
	simplestreamsMetadata, err := envtools.ReadMetadata(env.Storage())
	c.Assert(err, gc.IsNil)
	c.Assert(simplestreamsMetadata, gc.HasLen, 0)

	// The tools should have been added to state, and
	// exploded into each of the supported series of
	// the same operating system if the tools were uploaded.
	st, err := state.Open(&mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  []string{gitjujutesting.MgoServer.Addr()},
			CACert: testing.CACert,
		},
		Password: testPasswordHash(),
	}, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st.Close()

	var expectedSeries set.Strings
	if exploded {
		for _, series := range version.SupportedSeries() {
			os, err := version.GetOSFromSeries(series)
			c.Assert(err, gc.IsNil)
			if os == version.Current.OS {
				expectedSeries.Add(series)
			}
		}
	} else {
		expectedSeries.Add(version.Current.Series)
	}

	storage, err := st.ToolsStorage()
	c.Assert(err, gc.IsNil)
	defer storage.Close()
	metadata, err := storage.AllMetadata()
	c.Assert(err, gc.IsNil)
	c.Assert(metadata, gc.HasLen, expectedSeries.Size())
	for _, m := range metadata {
		c.Assert(expectedSeries.Contains(m.Version.Series), jc.IsTrue)
	}
}
func (m *Machine) prepareToSetDevicesAddresses(devicesAddresses []LinkLayerDeviceAddress, existingProviderIDs set.Strings) ([]ipAddressDoc, error) {
	var pendingDocs []ipAddressDoc
	allProviderIDs := set.NewStrings(existingProviderIDs.Values()...)
	for _, args := range devicesAddresses {
		newDoc, err := m.prepareOneSetDevicesAddresses(&args, allProviderIDs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		pendingDocs = append(pendingDocs, *newDoc)
		if args.ProviderID != "" {
			allProviderIDs.Add(string(args.ProviderID))
		}
	}
	return pendingDocs, nil
}
// stripIgnored removes the ignored DBs from the mongo dump files.
// This involves deleting DB-specific directories.
func stripIgnored(ignored set.Strings, dumpDir string) error {
	for _, dbName := range ignored.Values() {
		if dbName != "backups" {
			// We allow all ignored databases except "backups" to be
			// included in the archive file. Restore will be
			// responsible for deleting those databases after
			// restoring them.
			continue
		}
		dirname := filepath.Join(dumpDir, dbName)
		if err := os.RemoveAll(dirname); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
func (m *model) validateStorage(allMachineIDs, allApplications, allUnits set.Strings) error {
	appsAndUnits := allApplications.Union(allUnits)
	allStorage := set.NewStrings()
	for i, storage := range m.Storages_.Storages_ {
		if err := storage.Validate(); err != nil {
			return errors.Annotatef(err, "storage[%d]", i)
		}
		allStorage.Add(storage.Tag().Id())
		owner, err := storage.Owner()
		if err != nil {
			return errors.Wrap(err, errors.NotValidf("storage[%d] owner (%s)", i, owner))
		}
		ownerID := owner.Id()
		if !appsAndUnits.Contains(ownerID) {
			return errors.NotValidf("storage[%d] owner (%s)", i, ownerID)
		}
		for _, unit := range storage.Attachments() {
			if !allUnits.Contains(unit.Id()) {
				return errors.NotValidf("storage[%d] attachment referencing unknown unit %q", i, unit)
			}
		}
	}
	allVolumes := set.NewStrings()
	for i, volume := range m.Volumes_.Volumes_ {
		if err := volume.Validate(); err != nil {
			return errors.Annotatef(err, "volume[%d]", i)
		}
		allVolumes.Add(volume.Tag().Id())
		if storeID := volume.Storage().Id(); storeID != "" && !allStorage.Contains(storeID) {
			return errors.NotValidf("volume[%d] referencing unknown storage %q", i, storeID)
		}
		for j, attachment := range volume.Attachments() {
			if machineID := attachment.Machine().Id(); !allMachineIDs.Contains(machineID) {
				return errors.NotValidf("volume[%d].attachment[%d] referencing unknown machine %q", i, j, machineID)
			}
		}
	}
	for i, filesystem := range m.Filesystems_.Filesystems_ {
		if err := filesystem.Validate(); err != nil {
			return errors.Annotatef(err, "filesystem[%d]", i)
		}
		if storeID := filesystem.Storage().Id(); storeID != "" && !allStorage.Contains(storeID) {
			return errors.NotValidf("filesystem[%d] referencing unknown storage %q", i, storeID)
		}
		if volID := filesystem.Volume().Id(); volID != "" && !allVolumes.Contains(volID) {
			return errors.NotValidf("filesystem[%d] referencing unknown volume %q", i, volID)
		}
		for j, attachment := range filesystem.Attachments() {
			if machineID := attachment.Machine().Id(); !allMachineIDs.Contains(machineID) {
				return errors.NotValidf("filesystem[%d].attachment[%d] referencing unknown machine %q", i, j, machineID)
			}
		}
	}
	return nil
}
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	var instanceIdSet set.Strings
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		instanceIds, err := state.ServiceInstances(st, unit.ServiceName())
		if err != nil {
			return nil, err
		}
		for _, instanceId := range instanceIds {
			instanceIdSet.Add(string(instanceId))
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
// upgradeCertificateDNSNames ensures that the controller certificate
// recorded in the agent config and also mongo server.pem contains the
// DNSNames entries required by Juju.
func upgradeCertificateDNSNames(config agent.ConfigSetter) error {
	si, ok := config.StateServingInfo()
	if !ok || si.CAPrivateKey == "" {
		// No certificate information exists yet, nothing to do.
		return nil
	}

	// Validate the current certificate and private key pair, and then
	// extract the current DNS names from the certificate. If the
	// certificate validation fails, or it does not contain the DNS
	// names we require, we will generate a new one.
	var dnsNames set.Strings
	serverCert, _, err := cert.ParseCertAndKey(si.Cert, si.PrivateKey)
	if err != nil {
		// The certificate is invalid, so create a new one.
		logger.Infof("parsing certificate/key failed, will generate a new one: %v", err)
		dnsNames = set.NewStrings()
	} else {
		dnsNames = set.NewStrings(serverCert.DNSNames...)
	}

	update := false
	requiredDNSNames := []string{"local", "juju-apiserver", "juju-mongodb"}
	for _, dnsName := range requiredDNSNames {
		if dnsNames.Contains(dnsName) {
			continue
		}
		dnsNames.Add(dnsName)
		update = true
	}
	if !update {
		return nil
	}

	// Write a new certificate to the mongo pem and agent config files.
	si.Cert, si.PrivateKey, err = cert.NewDefaultServer(config.CACert(), si.CAPrivateKey, dnsNames.Values())
	if err != nil {
		return err
	}
	if err := mongo.UpdateSSLKey(config.DataDir(), si.Cert, si.PrivateKey); err != nil {
		return err
	}
	config.SetStateServingInfo(si)
	return nil
}
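// Hypothetical simplification sketch, not in the original source: the
// add-if-missing loop above amounts to a set union, with the second result
// true whenever the union grows beyond the names already on the certificate.
func withRequiredDNSNames(existing set.Strings) (set.Strings, bool) {
	required := set.NewStrings("local", "juju-apiserver", "juju-mongodb")
	merged := existing.Union(required)
	return merged, merged.Size() != existing.Size()
}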