func (stringSetSuite) TestUninitialized(c *gc.C) {
	var uninitialized set.Strings
	c.Assert(uninitialized.Size(), gc.Equals, 0)
	c.Assert(uninitialized.IsEmpty(), gc.Equals, true)
	// You can get values and sorted values from an uninitialized set.
	AssertValues(c, uninitialized)
	// All contains checks are false.
	c.Assert(uninitialized.Contains("foo"), gc.Equals, false)
	// Remove works on an uninitialized Strings.
	uninitialized.Remove("foo")

	var other set.Strings
	// Union returns a new set that is empty but initialized.
	c.Assert(uninitialized.Union(other), gc.DeepEquals, set.NewStrings())
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewStrings())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewStrings())

	other = set.NewStrings("foo", "bar")
	c.Assert(uninitialized.Union(other), gc.DeepEquals, other)
	c.Assert(uninitialized.Intersection(other), gc.DeepEquals, set.NewStrings())
	c.Assert(uninitialized.Difference(other), gc.DeepEquals, set.NewStrings())
	c.Assert(other.Union(uninitialized), gc.DeepEquals, other)
	c.Assert(other.Intersection(uninitialized), gc.DeepEquals, set.NewStrings())
	c.Assert(other.Difference(uninitialized), gc.DeepEquals, other)

	// Once something is added, the set becomes initialized.
	uninitialized.Add("foo")
	AssertValues(c, uninitialized, "foo")
}
func (s *BootstrapSuite) testToolsMetadata(c *gc.C, exploded bool) {
	provider, err := environs.Provider(s.envcfg.Type())
	c.Assert(err, gc.IsNil)
	env, err := provider.Open(s.envcfg)
	c.Assert(err, gc.IsNil)
	oldMetadata, err := envtools.ReadMetadata(env.Storage())
	c.Assert(err, gc.IsNil)

	_, cmd, err := s.initBootstrapCommand(c, nil, "--env-config", s.b64yamlEnvcfg, "--instance-id", string(s.instanceId))
	c.Assert(err, gc.IsNil)
	err = cmd.Run(nil)
	c.Assert(err, gc.IsNil)

	newMetadata, err := envtools.ReadMetadata(env.Storage())
	c.Assert(err, gc.IsNil)
	if !exploded {
		c.Assert(newMetadata, gc.HasLen, len(oldMetadata))
	} else {
		// new metadata should have more tools.
		c.Assert(len(newMetadata), jc.GreaterThan, len(oldMetadata))
		var expectedSeries set.Strings
		for _, series := range version.SupportedSeries() {
			os, err := version.GetOSFromSeries(series)
			c.Assert(err, gc.IsNil)
			if os == version.Ubuntu {
				expectedSeries.Add(series)
			}
		}
		c.Assert(newMetadata, gc.HasLen, expectedSeries.Size())
		for _, m := range newMetadata {
			c.Assert(expectedSeries.Contains(m.Release), jc.IsTrue)
		}
	}
}
func (s *macOSXVersionSuite) TestOSVersion(c *gc.C) {
	knownSeries := set.Strings{}
	for _, series := range macOSXSeries {
		knownSeries.Add(series)
	}
	c.Check(osVersion(), jc.Satisfies, knownSeries.Contains)
}
func (stringSetSuite) TestUninitializedPanics(c *gc.C) {
	f := func() {
		var s set.Strings
		s.Add("foo")
	}
	c.Assert(f, gc.PanicMatches, "uninitalised set")
}
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	var instanceIdSet set.Strings
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		instanceIds, err := state.ServiceInstances(st, unit.ServiceName())
		if err != nil {
			return nil, err
		}
		for _, instanceId := range instanceIds {
			instanceIdSet.Add(string(instanceId))
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
// collect calls f on all values in src and returns an alphabetically
// ordered list of the returned results without duplicates.
func (src List) collect(f func(*Tools) string) []string {
	var seen set.Strings
	for _, tools := range src {
		seen.Add(f(tools))
	}
	return seen.SortedValues()
}
func (context *statusContext) processServiceRelations(service *state.Service) (
	related map[string][]string, subord []string, err error) {
	var subordSet set.Strings
	related = make(map[string][]string)
	relations := context.relations[service.Name()]
	for _, relation := range relations {
		ep, err := relation.Endpoint(service.Name())
		if err != nil {
			return nil, nil, err
		}
		relationName := ep.Relation.Name
		eps, err := relation.RelatedEndpoints(service.Name())
		if err != nil {
			return nil, nil, err
		}
		for _, ep := range eps {
			if isSubordinate(&ep, service) {
				subordSet.Add(ep.ServiceName)
			}
			related[relationName] = append(related[relationName], ep.ServiceName)
		}
	}
	for relationName, serviceNames := range related {
		sn := set.NewStrings(serviceNames...)
		related[relationName] = sn.SortedValues()
	}
	return related, subordSet.SortedValues(), nil
}
func (s *macOSXVersionSuite) TestOSVersion(c *gc.C) {
	knownSeries := set.Strings{}
	for _, series := range macOSXSeries {
		knownSeries.Add(series)
	}
	version, err := osVersion()
	c.Assert(err, gc.IsNil)
	c.Check(version, jc.Satisfies, knownSeries.Contains)
}
// ParseMetadataFromStorage loads ImageMetadata from the specified storage reader.
func ParseMetadataFromStorage(c *gc.C, stor storage.StorageReader) []*imagemetadata.ImageMetadata {
	source := storage.NewStorageSimpleStreamsDataSource("test storage reader", stor, "images")

	// Find the simplestreams index file.
	params := simplestreams.ValueParams{
		DataType:      "image-ids",
		ValueTemplate: imagemetadata.ImageMetadata{},
	}
	const requireSigned = false
	indexPath := simplestreams.UnsignedIndex
	indexRef, err := simplestreams.GetIndexWithFormat(
		source, indexPath, "index:1.0", requireSigned, simplestreams.CloudSpec{}, params)
	c.Assert(err, gc.IsNil)
	c.Assert(indexRef.Indexes, gc.HasLen, 1)

	imageIndexMetadata := indexRef.Indexes["com.ubuntu.cloud:custom"]
	c.Assert(imageIndexMetadata, gc.NotNil)

	// Read the products file contents.
	r, err := stor.Get(path.Join("images", imageIndexMetadata.ProductsFilePath))
	c.Assert(err, gc.IsNil)
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	c.Assert(err, gc.IsNil)

	// Parse the products file metadata.
	url, err := source.URL(imageIndexMetadata.ProductsFilePath)
	c.Assert(err, gc.IsNil)
	cloudMetadata, err := simplestreams.ParseCloudMetadata(data, "products:1.0", url, imagemetadata.ImageMetadata{})
	c.Assert(err, gc.IsNil)

	// Collate the metadata.
	imageMetadataMap := make(map[string]*imagemetadata.ImageMetadata)
	var expectedProductIds set.Strings
	var imageVersions set.Strings
	for _, mc := range cloudMetadata.Products {
		for _, items := range mc.Items {
			for key, item := range items.Items {
				imageMetadata := item.(*imagemetadata.ImageMetadata)
				imageMetadataMap[key] = imageMetadata
				imageVersions.Add(key)
				productId := fmt.Sprintf("com.ubuntu.cloud:server:%s:%s", mc.Version, imageMetadata.Arch)
				expectedProductIds.Add(productId)
			}
		}
	}

	// Make sure index's product IDs are all represented in the products metadata.
	sort.Strings(imageIndexMetadata.ProductIds)
	c.Assert(imageIndexMetadata.ProductIds, gc.DeepEquals, expectedProductIds.SortedValues())

	imageMetadata := make([]*imagemetadata.ImageMetadata, len(imageMetadataMap))
	for i, key := range imageVersions.SortedValues() {
		imageMetadata[i] = imageMetadataMap[key]
	}
	return imageMetadata
}
func (w *minUnitsWatcher) initial() (*set.Strings, error) {
	serviceNames := new(set.Strings)
	doc := &minUnitsDoc{}
	iter := w.st.minUnits.Find(nil).Iter()
	for iter.Next(doc) {
		w.known[doc.ServiceName] = doc.Revno
		serviceNames.Add(doc.ServiceName)
	}
	return serviceNames, iter.Err()
}
func (w *minUnitsWatcher) initial() (*set.Strings, error) {
	var serviceNames set.Strings
	var doc minUnitsDoc
	iter := w.st.minUnits.Find(nil).Iter()
	for iter.Next(&doc) {
		w.known[doc.ServiceName] = doc.Revno
		serviceNames.Add(doc.ServiceName)
	}
	return &serviceNames, iter.Close()
}
// initial pre-loads the action documents that are already queued for
// the units this watcher was started for.
func (w *actionWatcher) initial() (*set.Strings, error) {
	var actions set.Strings
	var doc actionDoc
	iter := w.st.actions.Find(nil).Iter()
	for iter.Next(&doc) {
		if w.filterFn(doc.Id) {
			actions.Add(doc.Id)
		}
	}
	return &actions, iter.Close()
}
func (w *lifecycleWatcher) initial() (*set.Strings, error) {
	var ids set.Strings
	var doc lifeDoc
	iter := w.coll.Find(w.members).Select(lifeFields).Iter()
	for iter.Next(&doc) {
		ids.Add(doc.Id)
		if doc.Life != Dead {
			w.life[doc.Id] = doc.Life
		}
	}
	return &ids, iter.Close()
}
func (s *BootstrapSuite) testToolsMetadata(c *gc.C, exploded bool) {
	provider, err := environs.Provider(s.envcfg.Type())
	c.Assert(err, gc.IsNil)
	env, err := provider.Open(s.envcfg)
	c.Assert(err, gc.IsNil)
	envtesting.RemoveFakeToolsMetadata(c, env.Storage())

	_, cmd, err := s.initBootstrapCommand(c, nil, "--env-config", s.b64yamlEnvcfg, "--instance-id", string(s.instanceId))
	c.Assert(err, gc.IsNil)
	err = cmd.Run(nil)
	c.Assert(err, gc.IsNil)

	// We don't write metadata at bootstrap anymore.
	simplestreamsMetadata, err := envtools.ReadMetadata(env.Storage())
	c.Assert(err, gc.IsNil)
	c.Assert(simplestreamsMetadata, gc.HasLen, 0)

	// The tools should have been added to state, and
	// exploded into each of the supported series of
	// the same operating system if the tools were uploaded.
	st, err := state.Open(&mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  []string{gitjujutesting.MgoServer.Addr()},
			CACert: testing.CACert,
		},
		Password: testPasswordHash(),
	}, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st.Close()

	var expectedSeries set.Strings
	if exploded {
		for _, series := range version.SupportedSeries() {
			os, err := version.GetOSFromSeries(series)
			c.Assert(err, gc.IsNil)
			if os == version.Current.OS {
				expectedSeries.Add(series)
			}
		}
	} else {
		expectedSeries.Add(version.Current.Series)
	}

	storage, err := st.ToolsStorage()
	c.Assert(err, gc.IsNil)
	defer storage.Close()
	metadata, err := storage.AllMetadata()
	c.Assert(err, gc.IsNil)
	c.Assert(metadata, gc.HasLen, expectedSeries.Size())
	for _, m := range metadata {
		c.Assert(expectedSeries.Contains(m.Version.Series), jc.IsTrue)
	}
}
// findAvailableTools returns a list of available tools,
// including tools that may be locally built and then
// uploaded. Tools that need to be built will have an
// empty URL.
func findAvailableTools(env environs.Environ, arch *string, upload bool) (coretools.List, error) {
	if upload {
		// We're forcing an upload: ensure we can do so.
		if err := validateUploadAllowed(env, arch); err != nil {
			return nil, err
		}
		return locallyBuildableTools(), nil
	}

	// We're not forcing an upload, so look for tools
	// in the environment's simplestreams search paths
	// for existing tools.
	var vers *version.Number
	if agentVersion, ok := env.Config().AgentVersion(); ok {
		vers = &agentVersion
	}
	dev := version.Current.IsDev() || env.Config().Development()
	logger.Debugf("looking for bootstrap tools: version=%v", vers)
	toolsList, findToolsErr := findBootstrapTools(env, vers, arch, dev)
	if findToolsErr != nil && !errors.IsNotFound(findToolsErr) {
		return nil, findToolsErr
	}
	if !dev || vers != nil {
		// We are not running a development build, or agent-version
		// was specified; the only tools available are the ones we've
		// just found.
		return toolsList, findToolsErr
	}

	// The tools located may not include the ones that the
	// provider requires. We are running a development build,
	// so augment the list of tools with those that we can build
	// locally.

	// Collate the set of arch+series that are externally available
	// so we can see if we need to build any locally. If we need
	// to, only then do we validate that we can upload (which
	// involves a potentially expensive SupportedArchitectures call).
	var archSeries set.Strings
	for _, tools := range toolsList {
		archSeries.Add(tools.Version.Arch + tools.Version.Series)
	}
	var localToolsList coretools.List
	for _, tools := range locallyBuildableTools() {
		if !archSeries.Contains(tools.Version.Arch + tools.Version.Series) {
			localToolsList = append(localToolsList, tools)
		}
	}
	if len(localToolsList) == 0 || validateUploadAllowed(env, arch) != nil {
		return toolsList, findToolsErr
	}
	return append(toolsList, localToolsList...), nil
}
// AssertAllSpacesResult makes it easier to verify AllSpaces results.
func (s *SubnetsSuite) AssertAllSpacesResult(c *gc.C, got params.SpaceResults, expected []common.BackingSpace) {
	seen := set.Strings{}
	results := []params.SpaceResult{}
	for _, space := range expected {
		if seen.Contains(space.Name()) {
			continue
		}
		seen.Add(space.Name())
		result := params.SpaceResult{}
		result.Tag = names.NewSpaceTag(space.Name()).String()
		results = append(results, result)
	}
	c.Assert(got, jc.DeepEquals, params.SpaceResults{Results: results})
}
func mergedAddresses(machineAddresses, providerAddresses []address) []network.Address {
	merged := make([]network.Address, len(providerAddresses), len(providerAddresses)+len(machineAddresses))
	var providerValues set.Strings
	for i, address := range providerAddresses {
		providerValues.Add(address.Value)
		merged[i] = address.InstanceAddress()
	}
	for _, address := range machineAddresses {
		if !providerValues.Contains(address.Value) {
			merged = append(merged, address.InstanceAddress())
		}
	}
	return merged
}
func (w *actionWatcher) merge(changes *set.Strings, updates map[interface{}]bool) error {
	for id, exists := range updates {
		if id, ok := id.(string); ok {
			if exists {
				changes.Add(id)
			} else {
				changes.Remove(id)
			}
		} else {
			return fmt.Errorf("id is not of type string")
		}
	}
	return nil
}
func (w *lifecycleWatcher) merge(ids set.Strings, updates map[interface{}]bool) error {
	// Separate ids into those thought to exist and those known to be removed.
	var changed []string
	latest := make(map[string]Life)
	for id, exists := range updates {
		switch id := id.(type) {
		case string:
			if exists {
				changed = append(changed, id)
			} else {
				latest[id] = Dead
			}
		default:
			return errors.Errorf("id is not of type string, got %T", id)
		}
	}

	// Collect life states from ids thought to exist. Any that don't actually
	// exist are ignored (we'll hear about them in the next set of updates --
	// all that's actually happened in that situation is that the watcher
	// events have lagged a little behind reality).
	iter := w.coll.Find(bson.D{{"_id", bson.D{{"$in", changed}}}}).Select(lifeFields).Iter()
	var doc lifeDoc
	for iter.Next(&doc) {
		latest[doc.Id] = doc.Life
	}
	if err := iter.Close(); err != nil {
		return err
	}

	// Add to ids any whose life state is known to have changed.
	for id, newLife := range latest {
		gone := newLife == Dead
		oldLife, known := w.life[id]
		switch {
		case known && gone:
			delete(w.life, id)
		case !known && !gone:
			w.life[id] = newLife
		case known && newLife != oldLife:
			w.life[id] = newLife
		default:
			continue
		}
		ids.Add(id)
	}
	return nil
}
// merge cleans up the pending changes to account for actionIds being
// removed before this watcher consumes them, and to account for the slight
// potential overlap between the initial actionIds pending before the watcher
// starts and the actionIds the watcher detects.
func (w *actionWatcher) merge(changes, initial set.Strings, updates map[interface{}]bool) error {
	for id, exists := range updates {
		switch id := id.(type) {
		case string:
			if exists {
				if !initial.Contains(id) {
					changes.Add(id)
				}
			} else {
				changes.Remove(id)
			}
		default:
			return errors.Errorf("id is not of type string, got %T", id)
		}
	}
	return nil
}
// upgradeCertificateDNSNames ensures that the controller certificate
// recorded in the agent config, and also the mongo server.pem, contains the
// DNSNames entries required by Juju.
func upgradeCertificateDNSNames(config agent.ConfigSetter) error {
	si, ok := config.StateServingInfo()
	if !ok || si.CAPrivateKey == "" {
		// No certificate information exists yet, nothing to do.
		return nil
	}

	// Validate the current certificate and private key pair, and then
	// extract the current DNS names from the certificate. If the
	// certificate validation fails, or it does not contain the DNS
	// names we require, we will generate a new one.
	var dnsNames set.Strings
	serverCert, _, err := cert.ParseCertAndKey(si.Cert, si.PrivateKey)
	if err != nil {
		// The certificate is invalid, so create a new one.
		logger.Infof("parsing certificate/key failed, will generate a new one: %v", err)
		dnsNames = set.NewStrings()
	} else {
		dnsNames = set.NewStrings(serverCert.DNSNames...)
	}

	update := false
	requiredDNSNames := []string{"local", "juju-apiserver", "juju-mongodb"}
	for _, dnsName := range requiredDNSNames {
		if dnsNames.Contains(dnsName) {
			continue
		}
		dnsNames.Add(dnsName)
		update = true
	}
	if !update {
		return nil
	}

	// Write a new certificate to the mongo pem and agent config files.
	si.Cert, si.PrivateKey, err = cert.NewDefaultServer(config.CACert(), si.CAPrivateKey, dnsNames.Values())
	if err != nil {
		return err
	}
	if err := mongo.UpdateSSLKey(config.DataDir(), si.Cert, si.PrivateKey); err != nil {
		return err
	}
	config.SetStateServingInfo(si)
	return nil
}
func (sb *StubBacking) AllSpaces() ([]common.BackingSpace, error) {
	sb.MethodCall(sb, "AllSpaces")
	if err := sb.NextErr(); err != nil {
		return nil, err
	}

	// Filter duplicates.
	seen := set.Strings{}
	output := []common.BackingSpace{}
	for _, space := range sb.Spaces {
		if seen.Contains(space.Name()) {
			continue
		}
		seen.Add(space.Name())
		output = append(output, space)
	}
	return output, nil
}
func mergedAddresses(machineAddresses, providerAddresses []address) []network.Address {
	merged := make([]network.Address, 0, len(providerAddresses)+len(machineAddresses))
	var providerValues set.Strings
	for _, address := range providerAddresses {
		// Older versions of Juju may have stored an empty address, so ignore it here.
		if address.Value == "" {
			continue
		}
		providerValues.Add(address.Value)
		merged = append(merged, address.InstanceAddress())
	}
	for _, address := range machineAddresses {
		if !providerValues.Contains(address.Value) {
			merged = append(merged, address.InstanceAddress())
		}
	}
	return merged
}
func (m *model) validateMachine(machine Machine, allMachineIDs, unitsWithOpenPorts set.Strings) error {
	if err := machine.Validate(); err != nil {
		return errors.Trace(err)
	}
	allMachineIDs.Add(machine.Id())
	for _, op := range machine.OpenedPorts() {
		for _, pr := range op.OpenPorts() {
			unitsWithOpenPorts.Add(pr.UnitName())
		}
	}
	for _, container := range machine.Containers() {
		err := m.validateMachine(container, allMachineIDs, unitsWithOpenPorts)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
func (w *minUnitsWatcher) merge(serviceNames *set.Strings, change watcher.Change) error {
	serviceName := change.Id.(string)
	if change.Revno == -1 {
		delete(w.known, serviceName)
		serviceNames.Remove(serviceName)
		return nil
	}
	doc := minUnitsDoc{}
	if err := w.st.minUnits.FindId(serviceName).One(&doc); err != nil {
		return err
	}
	revno, known := w.known[serviceName]
	w.known[serviceName] = doc.Revno
	if !known || doc.Revno > revno {
		serviceNames.Add(serviceName)
	}
	return nil
}
func (sb *StubBacking) AllSubnets() ([]networkingcommon.BackingSubnet, error) {
	sb.MethodCall(sb, "AllSubnets")
	if err := sb.NextErr(); err != nil {
		return nil, err
	}

	// Filter duplicates.
	seen := set.Strings{}
	output := []networkingcommon.BackingSubnet{}
	for _, subnet := range sb.Subnets {
		if seen.Contains(subnet.CIDR()) {
			continue
		}
		seen.Add(subnet.CIDR())
		output = append(output, subnet)
	}
	return output, nil
}
// fetchUnitMachineIds returns a set of IDs for machines that
// the specified units reside on, and those machines' ancestors.
func fetchUnitMachineIds(units map[string]map[string]*state.Unit) (*set.Strings, error) {
	machineIds := new(set.Strings)
	for _, svcUnitMap := range units {
		for _, unit := range svcUnitMap {
			if !unit.IsPrincipal() {
				continue
			}
			mid, err := unit.AssignedMachineId()
			if err != nil {
				return nil, err
			}
			for mid != "" {
				machineIds.Add(mid)
				mid = state.ParentId(mid)
			}
		}
	}
	return machineIds, nil
}
// ParseMetadataFromStorage loads ToolsMetadata from the specified storage reader.
func ParseMetadataFromStorage(c *gc.C, stor storage.StorageReader, expectMirrors bool) []*tools.ToolsMetadata {
	source := storage.NewStorageSimpleStreamsDataSource("test storage reader", stor, "tools")
	params := simplestreams.ValueParams{
		DataType:      tools.ContentDownload,
		ValueTemplate: tools.ToolsMetadata{},
	}
	const requireSigned = false
	indexPath := simplestreams.UnsignedIndex
	indexRef, err := simplestreams.GetIndexWithFormat(
		source, indexPath, "index:1.0", requireSigned, simplestreams.CloudSpec{}, params)
	c.Assert(err, gc.IsNil)
	c.Assert(indexRef.Indexes, gc.HasLen, 1)

	toolsIndexMetadata := indexRef.Indexes["com.ubuntu.juju:released:tools"]
	c.Assert(toolsIndexMetadata, gc.NotNil)

	// Read the products file contents.
	r, err := stor.Get(path.Join("tools", toolsIndexMetadata.ProductsFilePath))
	c.Assert(err, gc.IsNil)
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	c.Assert(err, gc.IsNil)

	url, err := source.URL(toolsIndexMetadata.ProductsFilePath)
	c.Assert(err, gc.IsNil)
	cloudMetadata, err := simplestreams.ParseCloudMetadata(data, "products:1.0", url, tools.ToolsMetadata{})
	c.Assert(err, gc.IsNil)

	toolsMetadataMap := make(map[string]*tools.ToolsMetadata)
	var expectedProductIds set.Strings
	var toolsVersions set.Strings
	for _, mc := range cloudMetadata.Products {
		for _, items := range mc.Items {
			for key, item := range items.Items {
				toolsMetadata := item.(*tools.ToolsMetadata)
				toolsMetadataMap[key] = toolsMetadata
				toolsVersions.Add(key)
				seriesVersion, err := ubuntu.SeriesVersion(toolsMetadata.Release)
				c.Assert(err, gc.IsNil)
				productId := fmt.Sprintf("com.ubuntu.juju:%s:%s", seriesVersion, toolsMetadata.Arch)
				expectedProductIds.Add(productId)
			}
		}
	}

	// Make sure index's product IDs are all represented in the products metadata.
	sort.Strings(toolsIndexMetadata.ProductIds)
	c.Assert(toolsIndexMetadata.ProductIds, gc.DeepEquals, expectedProductIds.SortedValues())

	toolsMetadata := make([]*tools.ToolsMetadata, len(toolsMetadataMap))
	for i, key := range toolsVersions.SortedValues() {
		toolsMetadata[i] = toolsMetadataMap[key]
	}

	if expectMirrors {
		r, err = stor.Get(path.Join("tools", simplestreams.UnsignedMirror))
		c.Assert(err, gc.IsNil)
		defer r.Close()
		data, err = ioutil.ReadAll(r)
		c.Assert(err, gc.IsNil)
		c.Assert(string(data), jc.Contains, `"mirrors":`)
	}
	return toolsMetadata
}