// CharmArchiveName returns a string that is suitable as a file name
// in a storage URL. It is constructed from the charm name, revision
// and a random UUID string.
func CharmArchiveName(name string, revision int) (string, error) {
	uuid, err := utils.NewUUID()
	if err != nil {
		return "", err
	}
	return charm.Quote(fmt.Sprintf("%s-%d-%s", name, revision, uuid)), nil
}
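// Usage sketch (not from the source tree): how a caller might turn the
// archive name into a storage path. The "charms/" prefix and the charm
// name/revision below are illustrative assumptions only.
func exampleCharmArchivePath() (string, error) {
	name, err := CharmArchiveName("wordpress", 3)
	if err != nil {
		return "", err
	}
	// charm.Quote has already escaped anything unsafe for a path element.
	return "charms/" + name, nil
}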
func (p manualProvider) Prepare(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) {
	if _, ok := cfg.UnknownAttrs()["storage-auth-key"]; !ok {
		uuid, err := utils.NewUUID()
		if err != nil {
			return nil, err
		}
		cfg, err = cfg.Apply(map[string]interface{}{
			"storage-auth-key": uuid.String(),
		})
		if err != nil {
			return nil, err
		}
	}
	if use, ok := cfg.UnknownAttrs()["use-sshstorage"].(bool); ok && !use {
		return nil, fmt.Errorf("use-sshstorage must not be specified")
	}
	envConfig, err := p.validate(cfg, nil)
	if err != nil {
		return nil, err
	}
	if err := ensureBootstrapUbuntuUser(ctx, envConfig); err != nil {
		return nil, err
	}
	return p.open(envConfig)
}
func (uuidSuite) TestUUID(c *gc.C) {
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	uuidCopy := uuid.Copy()
	uuidRaw := uuid.Raw()
	uuidStr := uuid.String()
	c.Assert(uuidRaw, gc.HasLen, 16)
	c.Assert(uuidStr, jc.Satisfies, utils.IsValidUUIDString)
	uuid[0] = 0x00
	uuidCopy[0] = 0xFF
	c.Assert(uuid, gc.Not(gc.DeepEquals), uuidCopy)
	uuidRaw[0] = 0xFF
	c.Assert(uuid, gc.Not(gc.DeepEquals), uuidRaw)
	nextUUID, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	c.Assert(uuid, gc.Not(gc.DeepEquals), nextUUID)
}
func prepareConfig(cfg *config.Config) (*config.Config, error) {
	// Turn an incomplete config into a valid one, if possible.
	attrs := cfg.UnknownAttrs()
	if _, ok := attrs["control-dir"]; !ok {
		uuid, err := utils.NewUUID()
		if err != nil {
			return nil, err
		}
		attrs["control-dir"] = fmt.Sprintf("%x", uuid.Raw())
	}
	return cfg.Apply(attrs)
}
// gatherMachineParams collects all the information we know about the machine
// we are about to provision. It will SSH into that machine as the ubuntu user.
// The hostname supplied should not include a username.
// If we can, we will reverse lookup the hostname by its IP address, and use
// the DNS resolved name, rather than the name that was supplied.
func gatherMachineParams(hostname string) (*params.AddMachineParams, error) {
	// Generate a unique nonce for the machine.
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	var addrs []instance.Address
	if addr, err := HostAddress(hostname); err != nil {
		logger.Warningf("failed to compute public address for %q: %v", hostname, err)
	} else {
		addrs = append(addrs, addr)
	}
	provisioned, err := checkProvisioned(hostname)
	if err != nil {
		err = fmt.Errorf("error checking if provisioned: %v", err)
		return nil, err
	}
	if provisioned {
		return nil, ErrProvisioned
	}
	hc, series, err := DetectSeriesAndHardwareCharacteristics(hostname)
	if err != nil {
		err = fmt.Errorf("error detecting hardware characteristics: %v", err)
		return nil, err
	}
	// There will never be a corresponding "instance" that any provider
	// knows about. This is fine, and works well with the provisioner
	// task. The provisioner task will happily remove any and all dead
	// machines from state, but will ignore the associated instance ID
	// if it isn't one that the environment provider knows about.
	instanceId := instance.Id(manualInstancePrefix + hostname)
	nonce := fmt.Sprintf("%s:%s", instanceId, uuid.String())
	machineParams := &params.AddMachineParams{
		Series:                  series,
		HardwareCharacteristics: hc,
		InstanceId:              instanceId,
		Nonce:                   nonce,
		Addrs:                   addrs,
		Jobs:                    []params.MachineJob{params.JobHostUnits},
	}
	return machineParams, nil
}
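// Usage sketch (hypothetical caller, not in the source): the returned
// parameters feed the add-machine client API when a host is enlisted
// manually. The hostname below is made up.
func exampleEnlistHost() error {
	machineParams, err := gatherMachineParams("10.0.0.5")
	if err != nil {
		return err
	}
	// The nonce ties the machine document to this provisioning attempt;
	// as built above, its form is "<instance-id>:<uuid>".
	logger.Infof("enlisting %q with nonce %q", machineParams.InstanceId, machineParams.Nonce)
	return nil
}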
// Initialize sets up an initial empty state and returns it.
// This needs to be performed only once for a given environment.
// It returns unauthorizedError if access is unauthorized.
func Initialize(info *Info, cfg *config.Config, opts DialOpts, policy Policy) (rst *State, err error) {
	st, err := Open(info, opts, policy)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	// A valid environment is used as a signal that the
	// state has already been initialized. If this is the case,
	// do nothing.
	if _, err := st.Environment(); err == nil {
		return st, nil
	} else if !errors.IsNotFound(err) {
		return nil, err
	}
	logger.Infof("initializing environment")
	if err := checkEnvironConfig(cfg); err != nil {
		return nil, err
	}
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, fmt.Errorf("environment UUID cannot be created: %v", err)
	}
	ops := []txn.Op{
		createConstraintsOp(st, environGlobalKey, constraints.Value{}),
		createSettingsOp(st, environGlobalKey, cfg.AllAttrs()),
		createEnvironmentOp(st, cfg.Name(), uuid.String()),
		{
			C:      st.stateServers.Name,
			Id:     environGlobalKey,
			Insert: &stateServersDoc{},
		}, {
			C:      st.stateServers.Name,
			Id:     apiHostPortsKey,
			Insert: &apiHostPortsDoc{},
		},
	}
	if err := st.runTransaction(ops); err == txn.ErrAborted {
		// The config was created in the meantime.
		return st, nil
	} else if err != nil {
		return nil, err
	}
	return st, nil
}
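// Sketch of a hypothetical caller (not in the source): Initialize can be
// raced safely, because a transaction aborted by a concurrent
// initialization is treated as success. The zero-value DialOpts{} is used
// here purely for illustration.
func exampleInitialize(info *Info, cfg *config.Config, policy Policy) (*State, error) {
	return Initialize(info, cfg, DialOpts{}, policy)
}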
func (p maasEnvironProvider) Prepare(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) {
	attrs := cfg.UnknownAttrs()
	oldName, found := attrs["maas-agent-name"]
	if found && oldName != "" {
		return nil, errAgentNameAlreadySet
	}
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	attrs["maas-agent-name"] = uuid.String()
	cfg, err = cfg.Apply(attrs)
	if err != nil {
		return nil, err
	}
	return p.Open(cfg)
}
func (*configSuite) TestParsesMAASSettings(c *gc.C) {
	server := "http://maas.testing.invalid/maas/"
	oauth := "consumer-key:resource-token:resource-secret"
	future := "futurama"
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	ecfg, err := newConfig(map[string]interface{}{
		"maas-server":     server,
		"maas-oauth":      oauth,
		"maas-agent-name": uuid.String(),
		"future-key":      future,
	})
	c.Assert(err, gc.IsNil)
	c.Check(ecfg.maasServer(), gc.Equals, server)
	c.Check(ecfg.maasOAuth(), gc.DeepEquals, oauth)
	c.Check(ecfg.maasAgentName(), gc.Equals, uuid.String())
	c.Check(ecfg.UnknownAttrs()["future-key"], gc.DeepEquals, future)
}
// NewLock returns a new lock with the given name within the given lock
// directory, without acquiring it. The lock name must match the regular
// expression defined by NameRegexp.
func NewLock(lockDir, name string) (*Lock, error) {
	if !validName.MatchString(name) {
		return nil, fmt.Errorf("Invalid lock name %q. Names must match %q", name, NameRegexp)
	}
	nonce, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	lock := &Lock{
		name:   name,
		parent: lockDir,
		nonce:  nonce[:],
	}
	// Ensure the parent exists.
	if err := os.MkdirAll(lock.parent, 0755); err != nil {
		return nil, err
	}
	return lock, nil
}
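// Usage sketch (hypothetical, not in the source): create a named lock and
// hold it around a critical section. Lock(message) and Unlock() are assumed
// here to be the acquire/release methods defined elsewhere in the same
// package; the directory and lock name below are made up.
func exampleWithLock() error {
	lock, err := NewLock("/var/lib/juju/locks", "uniter-hook-execution")
	if err != nil {
		return err
	}
	if err := lock.Lock("running hooks"); err != nil {
		return err
	}
	defer lock.Unlock()
	// ... critical section ...
	return nil
}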
func (task *provisionerTask) provisioningInfo(machine *apiprovisioner.Machine) (*provisioningInfo, error) {
	stateInfo, apiInfo, err := task.auth.SetupAuthentication(machine)
	if err != nil {
		logger.Errorf("failed to setup authentication: %v", err)
		return nil, err
	}
	// Generate a nonce for the new instance, with the format: "machine-#:UUID".
	// The first part is a badge, specifying the tag of the machine the provisioner
	// is running on, while the second part is a random UUID.
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	// ProvisioningInfo is new in 1.20; wait for the API server to be upgraded
	// so we don't spew errors on upgrade.
	var pInfo *params.ProvisioningInfo
	for {
		if pInfo, err = machine.ProvisioningInfo(); err == nil {
			break
		}
		if params.IsCodeNotImplemented(err) {
			logger.Infof("waiting for state server to be upgraded")
			select {
			case <-task.tomb.Dying():
				return nil, tomb.ErrDying
			case <-time.After(15 * time.Second):
				continue
			}
		}
		return nil, err
	}
	includeNetworks := pInfo.IncludeNetworks
	excludeNetworks := pInfo.ExcludeNetworks
	nonce := fmt.Sprintf("%s:%s", task.machineTag, uuid.String())
	machineConfig := environs.NewMachineConfig(machine.Id(), nonce, includeNetworks, excludeNetworks, stateInfo, apiInfo)
	return &provisioningInfo{
		Constraints:   pInfo.Constraints,
		Series:        pInfo.Series,
		Placement:     pInfo.Placement,
		MachineConfig: machineConfig,
	}, nil
}
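// Sketch (hypothetical helper, not in the source): the nonce built above
// has the form "machine-#:UUID"; a consumer could split it back into its
// badge and UUID parts like this. Requires the standard "strings" package.
func splitNonce(nonce string) (badge, uuidPart string, ok bool) {
	i := strings.LastIndex(nonce, ":")
	if i < 0 {
		return "", "", false
	}
	return nonce[:i], nonce[i+1:], true
}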
func (s *RunHookSuite) TestRunHook(c *gc.C) {
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	for i, t := range runHookTests {
		c.Logf("\ntest %d: %s; perm %v", i, t.summary, t.spec.perm)
		ctx := s.getHookContext(c, uuid.String(), t.relid, t.remote, t.proxySettings)
		var charmDir, outPath string
		var hookExists bool
		if t.spec.perm == 0 {
			charmDir = c.MkDir()
		} else {
			spec := t.spec
			spec.name = "something-happened"
			c.Logf("makeCharm %#v", spec)
			charmDir, outPath = makeCharm(c, spec)
			hookExists = true
		}
		toolsDir := c.MkDir()
		t0 := time.Now()
		err := ctx.RunHook("something-happened", charmDir, toolsDir, "/path/to/socket")
		if t.err == "" && hookExists {
			c.Assert(err, gc.IsNil)
		} else if !hookExists {
			c.Assert(uniter.IsMissingHookError(err), jc.IsTrue)
		} else {
			c.Assert(err, gc.ErrorMatches, t.err)
		}
		if t.env != nil {
			env := map[string]string{"PATH": toolsDir + ":" + os.Getenv("PATH")}
			for k, v := range t.env {
				env[k] = v
			}
			AssertEnv(c, outPath, charmDir, env, uuid.String())
		}
		if t.spec.background != "" && time.Now().Sub(t0) > 5*time.Second {
			c.Errorf("background process holding up hook execution")
		}
	}
}
func (s *RunCommandSuite) getHookContext(c *gc.C) *uniter.HookContext {
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	return s.HookContextSuite.getHookContext(c, uuid.String(), -1, "", noProxies)
}
func (s *InterfaceSuite) GetContext(c *gc.C, relId int, remoteName string) jujuc.Context {
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	return s.HookContextSuite.getHookContext(c, uuid.String(), relId, remoteName, noProxies)
}
func (s *RunHookSuite) TestRunHookRelationFlushing(c *gc.C) {
	// Create a charm with a breaking hook.
	uuid, err := utils.NewUUID()
	c.Assert(err, gc.IsNil)
	ctx := s.getHookContext(c, uuid.String(), -1, "", noProxies)
	charmDir, _ := makeCharm(c, hookSpec{
		name: "something-happened",
		perm: 0700,
		code: 123,
	})

	// Mess with multiple relation settings.
	node0, err := s.relctxs[0].Settings()
	c.Assert(err, gc.IsNil)
	node0.Set("foo", "1")
	node1, err := s.relctxs[1].Settings()
	c.Assert(err, gc.IsNil)
	node1.Set("bar", "2")

	// Run the failing hook.
	err = ctx.RunHook("something-happened", charmDir, c.MkDir(), "/path/to/socket")
	c.Assert(err, gc.ErrorMatches, "exit status 123")

	// Check that the changes to the local settings nodes have been discarded.
	node0, err = s.relctxs[0].Settings()
	c.Assert(err, gc.IsNil)
	c.Assert(node0.Map(), gc.DeepEquals, params.RelationSettings{"relation-name": "db0"})
	node1, err = s.relctxs[1].Settings()
	c.Assert(err, gc.IsNil)
	c.Assert(node1.Map(), gc.DeepEquals, params.RelationSettings{"relation-name": "db1"})

	// Check that the changes have not been written to state.
	settings0, err := s.relunits[0].ReadSettings("u/0")
	c.Assert(err, gc.IsNil)
	c.Assert(settings0, gc.DeepEquals, map[string]interface{}{"relation-name": "db0"})
	settings1, err := s.relunits[1].ReadSettings("u/0")
	c.Assert(err, gc.IsNil)
	c.Assert(settings1, gc.DeepEquals, map[string]interface{}{"relation-name": "db1"})

	// Create a charm with a working hook, and mess with settings again.
	charmDir, _ = makeCharm(c, hookSpec{
		name: "something-happened",
		perm: 0700,
	})
	node0.Set("baz", "3")
	node1.Set("qux", "4")

	// Run the hook.
	err = ctx.RunHook("something-happened", charmDir, c.MkDir(), "/path/to/socket")
	c.Assert(err, gc.IsNil)

	// Check that the changes to the local settings nodes are still there.
	node0, err = s.relctxs[0].Settings()
	c.Assert(err, gc.IsNil)
	c.Assert(node0.Map(), gc.DeepEquals, params.RelationSettings{
		"relation-name": "db0",
		"baz":           "3",
	})
	node1, err = s.relctxs[1].Settings()
	c.Assert(err, gc.IsNil)
	c.Assert(node1.Map(), gc.DeepEquals, params.RelationSettings{
		"relation-name": "db1",
		"qux":           "4",
	})

	// Check that the changes have been written to state.
	settings0, err = s.relunits[0].ReadSettings("u/0")
	c.Assert(err, gc.IsNil)
	c.Assert(settings0, gc.DeepEquals, map[string]interface{}{
		"relation-name": "db0",
		"baz":           "3",
	})
	settings1, err = s.relunits[1].ReadSettings("u/0")
	c.Assert(err, gc.IsNil)
	c.Assert(settings1, gc.DeepEquals, map[string]interface{}{
		"relation-name": "db1",
		"qux":           "4",
	})
}