func (st *State) checkCanUpgrade(currentVersion, newVersion string) error {
	matchCurrent := "^" + regexp.QuoteMeta(currentVersion) + "-"
	matchNew := "^" + regexp.QuoteMeta(newVersion) + "-"
	// Get all machines and units with a different or empty version.
	sel := bson.D{{"$or", []bson.D{
		{{"tools", bson.D{{"$exists", false}}}},
		{{"$and", []bson.D{
			{{"tools.version", bson.D{{"$not", bson.RegEx{matchCurrent, ""}}}}},
			{{"tools.version", bson.D{{"$not", bson.RegEx{matchNew, ""}}}}},
		}}},
	}}}
	var agentTags []string
	for _, collection := range []*mgo.Collection{st.machines, st.units} {
		var doc struct {
			Id string `bson:"_id"`
		}
		iter := collection.Find(sel).Select(bson.D{{"_id", 1}}).Iter()
		for iter.Next(&doc) {
			switch collection.Name {
			case "machines":
				agentTags = append(agentTags, names.MachineTag(doc.Id))
			case "units":
				agentTags = append(agentTags, names.UnitTag(doc.Id))
			}
		}
		if err := iter.Err(); err != nil {
			return err
		}
	}
	if len(agentTags) > 0 {
		return newVersionInconsistentError(version.MustParse(currentVersion), agentTags)
	}
	return nil
}
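The Mongo selector above is just the server-side form of a simple prefix test. A minimal in-process sketch of that test follows; it is illustrative only, and the helper name and its use of the regexp package are assumptions, not part of the function above.

// versionConsistent is a hypothetical helper mirroring the selector in
// checkCanUpgrade: an agent's tools.version counts as consistent when it
// starts with either the current or the new version followed by "-",
// e.g. "1.16.0-precise-amd64" for version "1.16.0".
func versionConsistent(toolsVersion, currentVersion, newVersion string) bool {
	matchCurrent := regexp.MustCompile("^" + regexp.QuoteMeta(currentVersion) + "-")
	matchNew := regexp.MustCompile("^" + regexp.QuoteMeta(newVersion) + "-")
	return matchCurrent.MatchString(toolsVersion) || matchNew.MatchString(toolsVersion)
}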
// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory. It returns the new machine, the
// agent's configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, config agent.ConfigSetterWriter, tools *tools.Tools) {

	// Add a machine and ensure it is provisioned.
	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, gc.IsNil)
	inst, md := jujutesting.AssertStartInstance(c, s.Conn.Environ, m.Id())
	c.Assert(m.SetProvisioned(inst.Id(), state.BootstrapNonce, md), gc.IsNil)

	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)

	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, gc.IsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, gc.IsNil)
	tag := names.MachineTag(m.Id())
	if m.IsManager() {
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, gc.IsNil)
		config, tools = s.agentSuite.primeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := config.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		err = s.State.SetStateServingInfo(info)
		c.Assert(err, gc.IsNil)
	} else {
		config, tools = s.agentSuite.primeAgent(c, tag, initialMachinePassword, vers)
	}
	err = config.Write()
	c.Assert(err, gc.IsNil)
	return m, config, tools
}
func (s *deployerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)

	// The two known machines now contain the following units:
	// machine 0 (not authorized): mysql/1 (principal1)
	// machine 1 (authorized): mysql/0 (principal0), logging/0 (subordinate0)
	var err error
	s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits)
	c.Assert(err, gc.IsNil)

	s.machine1, err = s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)

	s.service0 = s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))
	s.service1 = s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging"))

	eps, err := s.State.InferEndpoints([]string{"mysql", "logging"})
	c.Assert(err, gc.IsNil)
	rel, err := s.State.AddRelation(eps...)
	c.Assert(err, gc.IsNil)

	s.principal0, err = s.service0.AddUnit()
	c.Assert(err, gc.IsNil)
	err = s.principal0.AssignToMachine(s.machine1)
	c.Assert(err, gc.IsNil)

	s.principal1, err = s.service0.AddUnit()
	c.Assert(err, gc.IsNil)
	err = s.principal1.AssignToMachine(s.machine0)
	c.Assert(err, gc.IsNil)

	relUnit0, err := rel.Unit(s.principal0)
	c.Assert(err, gc.IsNil)
	err = relUnit0.EnterScope(nil)
	c.Assert(err, gc.IsNil)
	s.subordinate0, err = s.service1.Unit("logging/0")
	c.Assert(err, gc.IsNil)

	// Create a FakeAuthorizer so we can check permissions,
	// set up assuming machine 1 has logged in.
	s.authorizer = apiservertesting.FakeAuthorizer{
		Tag:          names.MachineTag(s.machine1.Id()),
		LoggedIn:     true,
		MachineAgent: true,
	}

	// Create the resource registry separately to track invocations to
	// Register.
	s.resources = common.NewResources()

	// Create a deployer API for machine 1.
	deployer, err := deployer.NewDeployerAPI(
		s.State,
		s.resources,
		s.authorizer,
	)
	c.Assert(err, gc.IsNil)
	s.deployer = deployer
}
// FakeAPIInfo returns API connection information that points at no real
// state - connecting with it will always give an error. machineId is the
// id of the machine to be started.
func FakeAPIInfo(machineId string) *api.Info {
	return &api.Info{
		Addrs:    []string{"0.1.2.3:1234"},
		Tag:      names.MachineTag(machineId),
		Password: "******",
		CACert:   testing.CACert,
	}
}
// DeployerTag returns the tag of the agent responsible for deploying
// the unit. If no such entity can be determined, false is returned.
func (u *Unit) DeployerTag() (string, bool) {
	if u.doc.Principal != "" {
		return names.UnitTag(u.doc.Principal), true
	} else if u.doc.MachineId != "" {
		return names.MachineTag(u.doc.MachineId), true
	}
	return "", false
}
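A hedged sketch of how a caller might use DeployerTag; the helper and its argument names are hypothetical, not taken from the source above.

// responsibleFor is a hypothetical example: a deployer-style worker that
// knows its own agent tag can use DeployerTag to decide whether it should
// deploy a given unit.
func responsibleFor(u *Unit, deployerAgentTag string) bool {
	tag, ok := u.DeployerTag()
	return ok && tag == deployerAgentTag
}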
func (s *lxcProvisionerSuite) newLxcProvisioner(c *gc.C) provisioner.Provisioner {
	parentMachineTag := names.MachineTag(s.parentMachineId)
	agentConfig := s.AgentConfigForTag(c, parentMachineTag)
	tools, err := s.provisioner.Tools(agentConfig.Tag())
	c.Assert(err, gc.IsNil)
	managerConfig := container.ManagerConfig{container.ConfigName: "juju", "use-clone": "false"}
	broker, err := provisioner.NewLxcBroker(s.provisioner, tools, agentConfig, managerConfig)
	c.Assert(err, gc.IsNil)
	return provisioner.NewContainerProvisioner(instance.LXC, s.provisioner, agentConfig, broker)
}
func (s *CommonProvisionerSuite) checkStartInstanceCustom(
	c *gc.C, m *state.Machine, secret string, cons constraints.Value,
	includeNetworks, excludeNetworks []string, networkInfo []network.Info,
	waitInstanceId bool) (inst instance.Instance) {

	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.MachineTag("0"))
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.IncludeNetworks, jc.DeepEquals, includeNetworks)
				c.Assert(o.ExcludeNetworks, jc.DeepEquals, excludeNetworks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)

				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, gc.IsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
	return
}
// makeMachineConfig produces a valid cloudinit machine config.
func makeMachineConfig(c *gc.C) *cloudinit.MachineConfig {
	machineID := "0"
	return &cloudinit.MachineConfig{
		MachineId:          machineID,
		MachineNonce:       "gxshasqlnng",
		DataDir:            environs.DataDir,
		LogDir:             agent.DefaultLogDir,
		Jobs:               []params.MachineJob{params.JobManageEnviron, params.JobHostUnits},
		CloudInitOutputLog: environs.CloudInitOutputLog,
		Tools:              &tools.Tools{URL: "file://" + c.MkDir()},
		StateInfo: &state.Info{
			CACert:   testing.CACert,
			Addrs:    []string{"127.0.0.1:123"},
			Tag:      names.MachineTag(machineID),
			Password: "******",
		},
		APIInfo: &api.Info{
			CACert: testing.CACert,
			Addrs:  []string{"127.0.0.1:123"},
			Tag:    names.MachineTag(machineID),
		},
		MachineAgentServiceName: "jujud-machine-0",
	}
}
func InitializeState(c ConfigSetter, envCfg *config.Config, machineCfg BootstrapMachineConfig, timeout state.DialOpts, policy state.Policy) (_ *state.State, _ *state.Machine, resultErr error) {
	if c.Tag() != names.MachineTag(BootstrapMachineId) {
		return nil, nil, fmt.Errorf("InitializeState not called with bootstrap machine's configuration")
	}
	servingInfo, ok := c.StateServingInfo()
	if !ok {
		return nil, nil, fmt.Errorf("state serving information not available")
	}
	// N.B. no users are set up when we're initializing the state,
	// so don't use any tag or password when opening it.
	info, ok := c.StateInfo()
	if !ok {
		return nil, nil, fmt.Errorf("stateinfo not available")
	}
	info.Tag = ""
	info.Password = ""

	logger.Debugf("initializing address %v", info.Addrs)
	st, err := state.Initialize(info, envCfg, timeout, policy)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to initialize state: %v", err)
	}
	logger.Debugf("connected to initial state")
	defer func() {
		if resultErr != nil {
			st.Close()
		}
	}()
	servingInfo.SharedSecret = machineCfg.SharedSecret
	c.SetStateServingInfo(servingInfo)
	if err = initAPIHostPorts(c, st, machineCfg.Addresses, servingInfo.APIPort); err != nil {
		return nil, nil, err
	}
	if err := st.SetStateServingInfo(servingInfo); err != nil {
		return nil, nil, fmt.Errorf("cannot set state serving info: %v", err)
	}
	m, err := initUsersAndBootstrapMachine(c, st, machineCfg)
	if err != nil {
		return nil, nil, err
	}
	return st, m, nil
}
// StartInstance is specified in the InstanceBroker interface.
func (e *environ) StartInstance(args environs.StartInstanceParams) (instance.Instance, *instance.HardwareCharacteristics, []network.Info, error) {

	defer delay()
	machineId := args.MachineConfig.MachineId
	logger.Infof("dummy startinstance, machine %s", machineId)
	if err := e.checkBroken("StartInstance"); err != nil {
		return nil, nil, nil, err
	}
	estate, err := e.state()
	if err != nil {
		return nil, nil, nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if args.MachineConfig.MachineNonce == "" {
		return nil, nil, nil, fmt.Errorf("cannot start instance: missing machine nonce")
	}
	if _, ok := e.Config().CACert(); !ok {
		return nil, nil, nil, fmt.Errorf("no CA certificate in environment configuration")
	}
	if args.MachineConfig.StateInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	if args.MachineConfig.APIInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	logger.Infof("would pick tools from %s", args.Tools)
	series := args.Tools.OneSeries()
	idString := fmt.Sprintf("%s-%d", e.name, estate.maxId)
	i := &dummyInstance{
		id:           instance.Id(idString),
		addresses:    instance.NewAddresses(idString + ".dns"),
		ports:        make(map[instance.Port]bool),
		machineId:    machineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
	}
	var hc *instance.HardwareCharacteristics
	// To match current system capability, only provide hardware characteristics for
	// environ machines, not containers.
	if state.ParentId(machineId) == "" {
		// We will just assume the instance hardware characteristics exactly matches
		// the supplied constraints (if specified).
		hc = &instance.HardwareCharacteristics{
			Arch:     args.Constraints.Arch,
			Mem:      args.Constraints.Mem,
			RootDisk: args.Constraints.RootDisk,
			CpuCores: args.Constraints.CpuCores,
			CpuPower: args.Constraints.CpuPower,
			Tags:     args.Constraints.Tags,
		}
		// Fill in some expected instance hardware characteristics if constraints not specified.
		if hc.Arch == nil {
			arch := "amd64"
			hc.Arch = &arch
		}
		if hc.Mem == nil {
			mem := uint64(1024)
			hc.Mem = &mem
		}
		if hc.RootDisk == nil {
			disk := uint64(8192)
			hc.RootDisk = &disk
		}
		if hc.CpuCores == nil {
			cores := uint64(1)
			hc.CpuCores = &cores
		}
	}
	// Simulate networks added when requested.
	networkInfo := make([]network.Info, len(args.MachineConfig.IncludeNetworks))
	for i, netName := range args.MachineConfig.IncludeNetworks {
		if strings.HasPrefix(netName, "bad-") {
			// Simulate we didn't get correct information for the network.
			networkInfo[i] = network.Info{
				ProviderId:  network.Id(netName),
				NetworkName: netName,
				CIDR:        "invalid",
			}
		} else {
			networkInfo[i] = network.Info{
				ProviderId:    network.Id(netName),
				NetworkName:   netName,
				CIDR:          fmt.Sprintf("0.%d.2.0/24", i+1),
				InterfaceName: fmt.Sprintf("eth%d", i),
				VLANTag:       i,
				MACAddress:    fmt.Sprintf("aa:bb:cc:dd:ee:f%d", i),
				IsVirtual:     i > 0,
			}
		}
	}
	estate.insts[i.id] = i
	estate.maxId++
	estate.ops <- OpStartInstance{
		Env:             e.name,
		MachineId:       machineId,
		MachineNonce:    args.MachineConfig.MachineNonce,
		Constraints:     args.Constraints,
		IncludeNetworks: args.MachineConfig.IncludeNetworks,
		ExcludeNetworks: args.MachineConfig.ExcludeNetworks,
		NetworkInfo:     networkInfo,
		Instance:        i,
		Info:            args.MachineConfig.StateInfo,
		APIInfo:         args.MachineConfig.APIInfo,
		Secret:          e.ecfg().secret(),
	}
	return i, hc, networkInfo, nil
}
// MachineTag returns the tag of the machine this network interface
// belongs to.
func (ni *NetworkInterface) MachineTag() string {
	return names.MachineTag(ni.doc.MachineId)
}
func (fw *Firewaller) loop() error {
	defer fw.stopWatchers()

	var err error
	var reconciled bool

	fw.environ, err = worker.WaitForEnviron(fw.environWatcher, fw.st, fw.tomb.Dying())
	if err != nil {
		return err
	}
	if fw.environ.Config().FirewallMode() == config.FwGlobal {
		fw.globalMode = true
		fw.globalPortRef = make(map[instance.Port]int)
	}
	for {
		select {
		case <-fw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-fw.environWatcher.Changes():
			if !ok {
				return watcher.MustErr(fw.environWatcher)
			}
			config, err := fw.st.EnvironConfig()
			if err != nil {
				return err
			}
			if err := fw.environ.SetConfig(config); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
		case change, ok := <-fw.machinesWatcher.Changes():
			if !ok {
				return watcher.MustErr(fw.machinesWatcher)
			}
			for _, machineId := range change {
				fw.machineLifeChanged(names.MachineTag(machineId))
			}
			if !reconciled {
				reconciled = true
				var err error
				if fw.globalMode {
					err = fw.reconcileGlobal()
				} else {
					err = fw.reconcileInstances()
				}
				if err != nil {
					return err
				}
			}
		case change := <-fw.unitsChange:
			if err := fw.unitsChanged(change); err != nil {
				return err
			}
		case change := <-fw.portsChange:
			change.unitd.ports = change.ports
			if err := fw.flushUnits([]*unitData{change.unitd}); err != nil {
				return errors.Annotate(err, "cannot change firewall ports")
			}
		case change := <-fw.exposedChange:
			change.serviced.exposed = change.exposed
			unitds := []*unitData{}
			for _, unitd := range change.serviced.unitds {
				unitds = append(unitds, unitd)
			}
			if err := fw.flushUnits(unitds); err != nil {
				return errors.Annotate(err, "cannot change firewall ports")
			}
		}
	}
}
// Tag returns the machine agent's tag, derived from its machine id.
func (a *MachineAgent) Tag() string {
	return names.MachineTag(a.MachineId)
}
// Tag returns a name identifying the machine that is safe to use
// as a file name. The returned name will be different from other
// Tag values returned by any other entities from the same state.
func (m *Machine) Tag() string {
	return names.MachineTag(m.Id())
}
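For reference, a small illustrative sketch of the tag strings these helpers are assumed to produce. The exact values are assumptions based on the names package convention of this era ("kind-id", with "/" flattened to "-" so the result is usable as a file name); they are not output taken from the source above.

// tagExamples is illustrative only and not part of the source above; the
// expected strings in the comments are assumptions about the names package
// convention, not verified output.
func tagExamples() []string {
	return []string{
		names.MachineTag("0"),       // assumed: "machine-0"
		names.MachineTag("0/lxc/1"), // assumed: "machine-0-lxc-1" for a container
		names.UnitTag("mysql/0"),    // assumed: "unit-mysql-0"
	}
}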