// checkStartInstance checks that an instance has been started
// with a machine id the same as m's, and that the machine's
// instance id has been set appropriately.
func (s *ProvisionerSuite) checkStartInstance(c *C, m *state.Machine, secret string) {
	s.State.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				info := s.StateInfo(c)
				info.EntityName = m.EntityName()
				c.Assert(o.Info.Password, Not(HasLen), 0)
				info.Password = o.Info.Password
				c.Assert(o.Info, DeepEquals, info)
				// Check we can connect to the state with
				// the machine's entity name and password.
				st, err := state.Open(o.Info)
				c.Assert(err, IsNil)
				st.Close()
				c.Assert(o.MachineId, Equals, m.Id())
				c.Assert(o.Instance, NotNil)
				s.checkInstanceId(c, m, o.Instance)
				c.Assert(o.Secret, Equals, secret)
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Errorf("provisioner did not start an instance")
			return
		}
	}
}
// Watch starts a NotifyWatcher for each given machine.
func (m *MachinerAPI) Watch(args params.Entities) (params.NotifyWatchResults, error) {
	result := params.NotifyWatchResults{
		Results: make([]params.NotifyWatchResult, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	for i, entity := range args.Entities {
		err := common.ErrPerm
		if m.auth.AuthOwner(entity.Tag) {
			var machine *state.Machine
			machine, err = m.st.Machine(state.MachineIdFromTag(entity.Tag))
			if err == nil {
				watch := machine.Watch()
				// Consume the initial event. Technically, API
				// calls to Watch 'transmit' the initial event
				// in the Watch response. But NotifyWatchers
				// have no state to transmit.
				if _, ok := <-watch.Changes(); ok {
					result.Results[i].NotifyWatcherId = m.resources.Register(watch)
				} else {
					err = watcher.MustErr(watch)
				}
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
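// Hedged usage sketch (not from the source): driving the watcher id that
// Watch returns. The apiCaller interface and the "NotifyWatcher"/"Next"
// facade names are assumptions for illustration only. The key point
// mirrors the comment above: a successful Watch reply already carries the
// initial event, so the client acts once immediately, then blocks on a
// Next request for each subsequent notification.
type apiCaller interface {
	Call(facade, id, method string, args, result interface{}) error
}

func watchMachine(caller apiCaller, tag string, onChange func() error) error {
	args := params.Entities{Entities: []params.Entity{{Tag: tag}}}
	var results params.NotifyWatchResults
	if err := caller.Call("Machiner", "", "Watch", args, &results); err != nil {
		return err
	}
	r := results.Results[0]
	if r.Error != nil {
		return r.Error
	}
	for {
		// Act on the event we already hold (the initial one first).
		if err := onChange(); err != nil {
			return err
		}
		// Wait for the next change notification from the server.
		if err := caller.Call("NotifyWatcher", r.NotifyWatcherId, "Next", nil, nil); err != nil {
			return err
		}
	}
}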
func (s *assignCleanSuite) assertAssignUnit(c *C, expectedMachine *state.Machine) {
	unit, err := s.wordpress.AddUnit()
	c.Assert(err, IsNil)
	reusedMachine, err := s.assignUnit(unit)
	c.Assert(err, IsNil)
	c.Assert(reusedMachine.Id(), Equals, expectedMachine.Id())
	c.Assert(reusedMachine.Clean(), jc.IsFalse)
}
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	event := <-s.events
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, gc.IsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
// waitRemoved waits for the supplied machine to be removed from state.
func (s *ProvisionerSuite) waitRemoved(c *C, m *state.Machine) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		if state.IsNotFound(err) {
			return true
		}
		c.Assert(err, IsNil)
		c.Logf("machine %v is still %s", m, m.Life())
		return false
	})
}
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
func (s *ProvisionerSuite) waitInstanceId(c *C, m *state.Machine, expect state.InstanceId) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		c.Assert(err, IsNil)
		if actual, ok := m.InstanceId(); ok {
			c.Assert(actual, Equals, expect)
			return true
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
func (s *CommonProvisionerSuite) waitInstanceId(c *C, m *state.Machine, expect instance.Id) {
	s.waitHardwareCharacteristics(c, m, func() bool {
		if actual, err := m.InstanceId(); err == nil {
			c.Assert(actual, Equals, expect)
			return true
		} else if !state.IsNotProvisionedError(err) {
			// Any error other than not-provisioned is unexpected.
			panic(err)
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
func (c *NatCommand) ExecSsh(m *state.Machine, script string) error {
	host := instance.SelectPublicAddress(m.Addresses())
	if host == "" {
		return fmt.Errorf("could not resolve machine's public address")
	}
	log.Println("Configuring NAT routing on machine", m.Id())
	var options ssh.Options
	cmd := ssh.Command("ubuntu@"+host, []string{"sh -c 'NATCMD=$(mktemp); cat >${NATCMD}; sudo sh -x ${NATCMD}'"}, &options)
	cmd.Stdin = strings.NewReader(script)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
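// Hedged usage sketch (the script body is illustrative only, not from the
// source): ExecSsh pipes the script into a remote temp file and runs it
// under sudo, so a caller only supplies the shell commands themselves.
func configureNAT(cmd *NatCommand, m *state.Machine) error {
	script := "iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE\n"
	return cmd.ExecSsh(m, script)
}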
func (task *provisionerTask) startMachine(machine *state.Machine) error {
	stateInfo, apiInfo, err := task.auth.SetupAuthentication(machine)
	if err != nil {
		logger.Errorf("failed to setup authentication: %v", err)
		return err
	}
	cons, err := machine.Constraints()
	if err != nil {
		return err
	}
	// Generate a unique nonce for the new instance.
	uuid, err := utils.NewUUID()
	if err != nil {
		return err
	}
	// The generated nonce has the format "machine-#:UUID". The first
	// part is a badge, specifying the tag of the machine the provisioner
	// is running on, while the second part is a random UUID.
	nonce := fmt.Sprintf("%s:%s", names.MachineTag(task.machineId), uuid.String())
	inst, metadata, err := task.broker.StartInstance(machine.Id(), nonce, machine.Series(), cons, stateInfo, apiInfo)
	if err != nil {
		// Set the state to error, so the machine will be skipped next
		// time until the error is resolved, but don't return an
		// error; just keep going with the other machines.
		logger.Errorf("cannot start instance for machine %q: %v", machine, err)
		if err1 := machine.SetStatus(params.StatusError, err.Error()); err1 != nil {
			// Something is wrong with this machine, better report it back.
			logger.Errorf("cannot set error status for machine %q: %v", machine, err1)
			return err1
		}
		return nil
	}
	if err := machine.SetProvisioned(inst.Id(), nonce, metadata); err != nil {
		logger.Errorf("cannot register instance for machine %v: %v", machine, err)
		// The machine is started, but we can't record the mapping in
		// state. It'll keep running while we fail out and restart,
		// but will then be detected by findUnknownInstances and
		// killed again.
		//
		// TODO(dimitern) Stop the instance right away here.
		//
		// Multiple instantiations of a given machine (with the same
		// machine ID) cannot coexist, because findUnknownInstances is
		// called before startMachines. However, if the first machine
		// had started to do work before being replaced, we may
		// encounter surprising problems.
		return err
	}
	logger.Infof("started machine %s as instance %s with hardware %q", machine, inst.Id(), metadata)
	return nil
}
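// For illustration, the nonce built above for a provisioner running on
// machine 0 looks like "machine-0:<uuid>". A hedged helper (not in the
// source) that splits a nonce back into its badge and UUID parts,
// mirroring the strings.SplitN checks in checkStartInstanceCustom below:
func splitNonce(nonce string) (badge, uuid string, err error) {
	parts := strings.SplitN(nonce, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("malformed nonce %q", nonce)
	}
	return parts[0], parts[1], nil
}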
func (s *SSHCommonSuite) addUnit(srv *state.Service, m *state.Machine, c *C) {
	u, err := srv.AddUnit()
	c.Assert(err, IsNil)
	err = u.AssignToMachine(m)
	c.Assert(err, IsNil)
	// fudge unit.SetPublicAddress
	id, err := m.InstanceId()
	c.Assert(err, IsNil)
	insts, err := s.Conn.Environ.Instances([]state.InstanceId{id})
	c.Assert(err, IsNil)
	addr, err := insts[0].WaitDNSName()
	c.Assert(err, IsNil)
	err = u.SetPublicAddress(addr)
	c.Assert(err, IsNil)
}
func newMachineToolWaiter(m *state.Machine) *toolsWaiter {
	w := m.Watch()
	waiter := &toolsWaiter{
		changes: make(chan struct{}, 1),
		watcher: w,
		tooler:  m,
	}
	go func() {
		for range w.Changes() {
			waiter.changes <- struct{}{}
		}
		close(waiter.changes)
	}()
	return waiter
}
func (t *LiveTests) assertStartInstance(c *C, m *state.Machine) {
	// Wait for machine to get an instance id.
	for a := waitAgent.Start(); a.Next(); {
		err := m.Refresh()
		c.Assert(err, IsNil)
		instId, err := m.InstanceId()
		if err != nil {
			c.Assert(state.IsNotProvisionedError(err), IsTrue)
			continue
		}
		_, err = t.Env.Instances([]instance.Id{instId})
		c.Assert(err, IsNil)
		return
	}
	c.Fatalf("provisioner failed to start machine after %v", waitAgent.Total)
}
// instanceForMachine returns the environs.Instance that represents this machine's instance.
func (p *Provisioner) instanceForMachine(m *state.Machine) (environs.Instance, error) {
	inst, ok := p.instances[m.Id()]
	if ok {
		return inst, nil
	}
	instId, ok := m.InstanceId()
	if !ok {
		return nil, errNotProvisioned
	}
	// TODO(dfc): Ask for all instances at once.
	insts, err := p.environ.Instances([]state.InstanceId{instId})
	if err != nil {
		return nil, err
	}
	inst = insts[0]
	return inst, nil
}
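// The TODO above asks for a single batched lookup. A hedged sketch of what
// that might look like (hypothetical helper, not in the source); it assumes
// p.environ.Instances returns instances positionally matching the requested
// ids, and it ignores partial-result handling for brevity.
func (p *Provisioner) cacheInstances(machines []*state.Machine) error {
	var ids []state.InstanceId
	var missing []*state.Machine
	for _, m := range machines {
		if _, ok := p.instances[m.Id()]; ok {
			continue // already cached
		}
		if instId, ok := m.InstanceId(); ok {
			ids = append(ids, instId)
			missing = append(missing, m)
		}
	}
	if len(ids) == 0 {
		return nil
	}
	insts, err := p.environ.Instances(ids)
	if err != nil {
		return err
	}
	for i, m := range missing {
		p.instances[m.Id()] = insts[i]
	}
	return nil
}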
func (s *MachinerSuite) waitMachineStatus(c *C, m *state.Machine, expectStatus params.Status) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for machine status to change")
		case <-time.After(10 * time.Millisecond):
			status, _, err := m.Status()
			c.Assert(err, IsNil)
			if status != expectStatus {
				c.Logf("machine %q status is %s, still waiting", m, status)
				continue
			}
			return
		}
	}
}
func (s *CommonProvisionerSuite) waitHardwareCharacteristics(c *C, m *state.Machine, check func() bool) {
	w := m.WatchHardwareCharacteristics()
	defer stop(c, w)
	timeout := time.After(coretesting.LongWait)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(coretesting.ShortWait)
			s.State.StartSync()
		case <-timeout:
			c.Fatalf("hardware characteristics for machine %v wait timed out", m)
		}
	}
}
func (s *ProvisionerSuite) waitMachine(c *C, m *state.Machine, check func() bool) {
	w := m.Watch()
	defer stop(c, w)
	timeout := time.After(500 * time.Millisecond)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(50 * time.Millisecond)
			s.State.StartSync()
		case <-timeout:
			c.Fatalf("machine %v wait timed out", m)
		}
	}
}
// EnsureDead changes the lifecycle of each given machine to Dead if
// it's Alive or Dying. It does nothing otherwise.
func (m *MachinerAPI) EnsureDead(args params.Entities) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Errors: make([]*params.Error, len(args.Entities)),
	}
	if len(args.Entities) == 0 {
		return result, nil
	}
	for i, entity := range args.Entities {
		err := common.ErrPerm
		if m.auth.AuthOwner(entity.Tag) {
			var machine *state.Machine
			machine, err = m.st.Machine(state.MachineIdFromTag(entity.Tag))
			if err == nil {
				err = machine.EnsureDead()
			}
		}
		result.Errors[i] = common.ServerError(err)
	}
	return result, nil
}
// SetStatus sets the status of each given machine.
func (m *MachinerAPI) SetStatus(args params.MachinesSetStatus) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Errors: make([]*params.Error, len(args.Machines)),
	}
	if len(args.Machines) == 0 {
		return result, nil
	}
	for i, arg := range args.Machines {
		err := common.ErrPerm
		if m.auth.AuthOwner(arg.Tag) {
			var machine *state.Machine
			machine, err = m.st.Machine(state.MachineIdFromTag(arg.Tag))
			if err == nil {
				err = machine.SetStatus(arg.Status, arg.Info)
			}
		}
		result.Errors[i] = common.ServerError(err)
	}
	return result, nil
}
func (s *ProvisionerSuite) checkStartInstanceCustom(c *C, m *state.Machine, secret string, cons constraints.Value) {
	s.State.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				s.waitInstanceId(c, m, o.Instance.Id())

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, HasLen, 2)
				c.Assert(nonceParts[0], Equals, state.MachineTag("0"))
				c.Assert(utils.IsValidUUIDString(nonceParts[1]), Equals, true)
				c.Assert(o.Secret, Equals, secret)
				c.Assert(o.Constraints, DeepEquals, cons)

				// Check the machine was started with state connection
				// info holding its tag and a non-empty password.
				info := s.StateInfo(c)
				info.Tag = m.Tag()
				c.Assert(o.Info.Password, Not(HasLen), 0)
				info.Password = o.Info.Password
				c.Assert(o.Info, DeepEquals, info)

				// Check we can connect to the state with
				// the machine's entity name and password.
				st, err := state.Open(o.Info, state.DefaultDialOpts())
				c.Assert(err, IsNil)
				st.Close()
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
// startInstance starts a new instance for the given machine.
func (s *FirewallerSuite) startInstance(c *C, m *state.Machine) environs.Instance {
	inst, err := s.Conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil)
	c.Assert(err, IsNil)
	err = m.SetInstanceId(inst.Id())
	c.Assert(err, IsNil)
	return inst
}
// assertInstanceId asserts that the machine has an instance id
// that matches that of the given instance. If the instance is nil,
// it asserts that the instance id is unset.
func assertInstanceId(c *C, m *state.Machine, inst instance.Instance) {
	var wantId, gotId instance.Id
	var err error
	if inst != nil {
		wantId = inst.Id()
	}
	for a := waitAgent.Start(); a.Next(); {
		// Assign rather than redeclare err, so the assertion after
		// the loop checks the real value instead of a shadowed copy.
		err = m.Refresh()
		c.Assert(err, IsNil)
		gotId, err = m.InstanceId()
		if err != nil {
			c.Assert(state.IsNotProvisionedError(err), IsTrue)
			if inst == nil {
				return
			}
			continue
		}
		break
	}
	c.Assert(err, IsNil)
	c.Assert(gotId, Equals, wantId)
}
// assertInstanceId asserts that the machine has an instance id
// that matches that of the given instance. If the instance is nil,
// it asserts that the instance id is unset.
func assertInstanceId(c *C, m *state.Machine, inst environs.Instance) {
	var wantId, gotId state.InstanceId
	var err error
	if inst != nil {
		wantId = inst.Id()
	}
	for a := waitAgent.Start(); a.Next(); {
		// Assign rather than redeclare err, so the assertion after
		// the loop checks the real value instead of a shadowed copy.
		err = m.Refresh()
		c.Assert(err, IsNil)
		gotId, err = m.InstanceId()
		if state.IsNotFound(err) {
			if inst == nil {
				return
			}
			continue
		}
		c.Assert(err, IsNil)
		break
	}
	c.Assert(err, IsNil)
	c.Assert(gotId, Equals, wantId)
}
func (s *CommonProvisionerSuite) waitMachine(c *C, m *state.Machine, check func() bool) {
	// TODO(jam): We need to grow a new method on NotifyWatcherC
	// that calls StartSync while waiting for changes, then
	// waitMachine and waitHardwareCharacteristics can use that
	// instead.
	w := m.Watch()
	defer stop(c, w)
	timeout := time.After(coretesting.LongWait)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(coretesting.ShortWait)
			s.State.StartSync()
		case <-timeout:
			c.Fatalf("machine %v wait timed out", m)
		}
	}
}
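// A hedged sketch of the shared helper the TODO above asks for (the name,
// receiver, and the state.NotifyWatcher parameter type are assumptions):
// one wait loop that keeps calling StartSync while watching, which both
// waitMachine and waitHardwareCharacteristics could then delegate to.
func (s *CommonProvisionerSuite) waitForWatcher(c *C, w state.NotifyWatcher, what string, check func() bool) {
	defer stop(c, w)
	timeout := time.After(coretesting.LongWait)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(coretesting.ShortWait)
			s.State.StartSync()
		case <-timeout:
			c.Fatalf("%s wait timed out", what)
		}
	}
}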
// WatchUnits starts a StringsWatcher to watch all units deployed to
// any machine passed in args, in order to track which ones should be
// deployed or recalled.
func (d *DeployerAPI) WatchUnits(args params.Entities) (params.StringsWatchResults, error) {
	result := params.StringsWatchResults{
		Results: make([]params.StringsWatchResult, len(args.Entities)),
	}
	for i, entity := range args.Entities {
		err := common.ErrPerm
		if d.authorizer.AuthOwner(entity.Tag) {
			var machine *state.Machine
			machine, err = d.st.Machine(state.MachineIdFromTag(entity.Tag))
			if err == nil {
				watch := machine.WatchUnits()
				// Consume the initial event and forward it to the result.
				if changes, ok := <-watch.Changes(); ok {
					result.Results[i].StringsWatcherId = d.resources.Register(watch)
					result.Results[i].Changes = changes
				} else {
					err = watcher.MustErr(watch)
				}
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
func (context *statusContext) makeMachineStatus(machine *state.Machine) (status machineStatus) {
	status.Id = machine.Id()
	status.Life, status.AgentVersion, status.AgentState, status.AgentStateInfo, status.Err = processAgent(machine)
	status.Series = machine.Series()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		inst, ok := context.instances[instid]
		if ok {
			status.DNSName, _ = inst.DNSName()
		} else {
			// Double plus ungood. There is an instance id recorded
			// for this machine in the state, yet the environ cannot
			// find that id.
			status.InstanceState = "missing"
		}
	} else {
		if state.IsNotProvisionedError(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state
		// if the machine hasn't been provisioned. This
		// also makes unprovisioned machines visually distinct
		// in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFoundError(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]machineStatus)
	return
}
func (auth *simpleAuth) SetupAuthentication(machine *state.Machine) (*state.Info, *api.Info, error) {
	password, err := utils.RandomPassword()
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make password for machine %v: %v", machine, err)
	}
	if err := machine.SetPassword(password); err != nil {
		return nil, nil, fmt.Errorf("cannot set API password for machine %v: %v", machine, err)
	}
	if err := machine.SetMongoPassword(password); err != nil {
		return nil, nil, fmt.Errorf("cannot set mongo password for machine %v: %v", machine, err)
	}
	stateInfo := *auth.stateInfo
	stateInfo.Tag = machine.Tag()
	stateInfo.Password = password
	apiInfo := *auth.apiInfo
	apiInfo.Tag = machine.Tag()
	apiInfo.Password = password
	return &stateInfo, &apiInfo, nil
}
func (s *CommonProvisionerSuite) checkStartInstanceCustom(c *C, m *state.Machine, secret string, cons constraints.Value) (inst instance.Instance) {
	s.State.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				s.waitInstanceId(c, m, inst.Id())

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, HasLen, 2)
				c.Assert(nonceParts[0], Equals, state.MachineTag("0"))
				c.Assert(nonceParts[1], checkers.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, Equals, secret)
				c.Assert(o.Constraints, DeepEquals, cons)

				// Check the machine was started with state connection
				// info holding its tag and a non-empty password.
				info := s.StateInfo(c)
				info.Tag = m.Tag()
				c.Assert(o.Info.Password, Not(HasLen), 0)
				info.Password = o.Info.Password
				c.Assert(o.Info, DeepEquals, info)

				// Check we can connect to the state with
				// the machine's entity name and password.
				st, err := state.Open(o.Info, state.DefaultDialOpts())
				c.Assert(err, IsNil)

				// All provisioned machines in this test suite have their
				// hardware characteristics attributes set to the same
				// values as the constraints, because the dummy
				// environment is being used.
				hc, err := m.HardwareCharacteristics()
				c.Assert(err, IsNil)
				c.Assert(*hc, DeepEquals, instance.HardwareCharacteristics{
					Arch:     cons.Arch,
					Mem:      cons.Mem,
					CpuCores: cons.CpuCores,
					CpuPower: cons.CpuPower,
				})
				st.Close()
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
func (p *Provisioner) setupAuthentication(m *state.Machine) (*state.Info, *api.Info, error) {
	password, err := utils.RandomPassword()
	if err != nil {
		return nil, nil, fmt.Errorf("cannot make password for machine %v: %v", m, err)
	}
	if err := m.SetMongoPassword(password); err != nil {
		return nil, nil, fmt.Errorf("cannot set password for machine %v: %v", m, err)
	}
	stateInfo := *p.stateInfo
	stateInfo.Tag = m.Tag()
	stateInfo.Password = password
	apiInfo := *p.apiInfo
	apiInfo.Tag = m.Tag()
	apiInfo.Password = password
	return &stateInfo, &apiInfo, nil
}
// checkInstanceId checks that the machine has an instance id
// that matches that of the given instance. If the instance is nil,
// it checks that the instance id is unset.
func (s *ProvisionerSuite) checkInstanceId(c *C, m *state.Machine, inst environs.Instance) {
	// TODO(dfc) add machine.Watch() to avoid having to poll.
	s.State.StartSync()
	var instId state.InstanceId
	if inst != nil {
		instId = inst.Id()
	}
	for a := veryShortAttempt.Start(); a.Next(); {
		err := m.Refresh()
		c.Assert(err, IsNil)
		_, err = m.InstanceId()
		if state.IsNotFound(err) {
			if inst == nil {
				return
			}
			continue
		}
		c.Assert(err, IsNil)
		break
	}
	id, err := m.InstanceId()
	c.Assert(err, IsNil)
	c.Assert(id, Equals, instId)
}
func MatchNetworks(host, gateway *state.Machine) (string, string, error) {
	var bestPrefix, bestHost, bestGw string
	for _, hostAddr := range host.Addresses() {
		if hostAddr.Type != instance.Ipv4Address || isLoopback(hostAddr.Value) {
			continue
		}
		for _, gwAddr := range gateway.Addresses() {
			// Filter the gateway address, not the (already filtered)
			// host address, here.
			if gwAddr.Type != instance.Ipv4Address || isLoopback(gwAddr.Value) {
				continue
			}
			prefix := greatestCommonPrefix(hostAddr.Value, gwAddr.Value)
			if len(prefix) > len(bestPrefix) {
				bestPrefix = prefix
				bestHost = hostAddr.Value
				bestGw = gwAddr.Value
			}
		}
	}
	if bestHost == "" || bestGw == "" {
		return "", "", fmt.Errorf("failed to find common network for %s and %s", host.Id(), gateway.Id())
	}
	return bestHost, bestGw, nil
}
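// greatestCommonPrefix and isLoopback are used above but not shown. A
// plausible sketch of the prefix helper, under the assumption that it
// compares the dotted-quad strings character by character (the real
// implementation might instead compare octets or use CIDR masks):
func greatestCommonPrefix(a, b string) string {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return a[:i]
		}
	}
	return a[:n]
}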