// ServerError returns an error suitable for returning to an API
// client, with an error code suitable for various kinds of errors
// generated in packages outside the API.
func ServerError(err error) *params.Error {
	if err == nil {
		return nil
	}
	code, ok := singletonCode(err)
	switch {
	case ok:
	case errors.IsUnauthorized(err):
		code = params.CodeUnauthorized
	case errors.IsNotFound(err):
		code = params.CodeNotFound
	case errors.IsAlreadyExists(err):
		code = params.CodeAlreadyExists
	case state.IsNotAssigned(err):
		code = params.CodeNotAssigned
	case state.IsHasAssignedUnitsError(err):
		code = params.CodeHasAssignedUnits
	case IsNoAddressSetError(err):
		code = params.CodeNoAddressSet
	case state.IsNotProvisionedError(err):
		code = params.CodeNotProvisioned
	case IsUnknownEnviromentError(err):
		code = params.CodeNotFound
	default:
		code = params.ErrCode(err)
	}
	return &params.Error{
		Message: err.Error(),
		Code:    code,
	}
}
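// The sketch below shows the intended use of ServerError. The exampleFacade
// type, its st field, and the RemoveMachine call are hypothetical, for
// illustration only; it assumes params.ErrorResult wraps a *params.Error as
// the usual result types do.
func (f *exampleFacade) Remove(id string) params.ErrorResult {
	// Hypothetical state-layer call that may fail with a recognizable error.
	err := f.st.RemoveMachine(id)
	// Translate the internal error into a coded params.Error that the API
	// client can decode, instead of returning the raw error.
	return params.ErrorResult{Error: ServerError(err)}
}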
func machineLoop(context machineContext, m machine, changed <-chan struct{}) error {
	// Use a short poll interval when initially waiting for a machine's
	// address and machine agent to start, and a long one when it already
	// has an address and the machine agent is started.
	pollInterval := ShortPoll
	pollInstance := true
	for {
		if pollInstance {
			instInfo, err := pollInstanceInfo(context, m)
			if err != nil && !state.IsNotProvisionedError(err) {
				// If the provider doesn't implement Addresses/Status now,
				// it never will until we're upgraded, so don't bother
				// asking any more. We could use fewer resources by taking
				// down the entire worker, but this is easier for now (and
				// hopefully the local provider will implement
				// Addresses/Status in the not-too-distant future), so we
				// won't need to worry about this case at all.
				if errors.IsNotImplemented(err) {
					pollInterval = 365 * 24 * time.Hour
				} else {
					return err
				}
			}
			machineStatus := params.StatusPending
			if err == nil {
				if machineStatus, _, _, err = m.Status(); err != nil {
					logger.Warningf("cannot get current machine status for machine %v: %v", m.Id(), err)
				}
			}
			if len(instInfo.addresses) > 0 && instInfo.status != "" && machineStatus == params.StatusStarted {
				// We've got at least one address and a status, and the
				// instance is started, so poll infrequently.
				pollInterval = LongPoll
			} else if pollInterval < LongPoll {
				// We have no addresses yet, or the machine is not started,
				// so back off the poll interval until it reaches LongPoll.
				pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff)
			}
			pollInstance = false
		}
		select {
		case <-time.After(pollInterval):
			pollInstance = true
		case <-context.dying():
			return nil
		case <-changed:
			if err := m.Refresh(); err != nil {
				return err
			}
			if m.Life() == state.Dead {
				return nil
			}
		}
	}
}
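// machineLoop and pollInstanceInfo (below) depend only on two small
// interfaces. The following sketch is inferred from their call sites; it is
// an approximation for reference, not the source's exact definitions.
type machineContext interface {
	// instanceInfo fetches the provider's addresses and status for an instance.
	instanceInfo(id instance.Id) (instanceInfo, error)
	// dying is closed when the enclosing worker is shutting down.
	dying() <-chan struct{}
}

type machine interface {
	Id() string
	InstanceId() (instance.Id, error)
	Addresses() []network.Address
	SetAddresses(...network.Address) error
	InstanceStatus() (string, error)
	SetInstanceStatus(status string) error
	Status() (status params.Status, info string, data params.StatusData, err error)
	Refresh() error
	Life() state.Life
}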
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
func (s *CommonProvisionerSuite) waitInstanceId(c *gc.C, m *state.Machine, expect instance.Id) {
	s.waitHardwareCharacteristics(c, m, func() bool {
		if actual, err := m.InstanceId(); err == nil {
			c.Assert(actual, gc.Equals, expect)
			return true
		} else if !state.IsNotProvisionedError(err) {
			// We don't expect any errors.
			panic(err)
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
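// A sketch of how a test might use waitInstanceId; the machine creation and
// the expected instance id are assumptions for illustration, not from the
// source:
//
//	m, err := s.State.AddMachine("quantal", state.JobHostUnits)
//	c.Assert(err, gc.IsNil)
//	// ... start the provisioner and let it provision m ...
//	s.waitInstanceId(c, m, instance.Id("i-example"))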
// environManagerInstances returns all environ manager instances.
func environManagerInstances(st *state.State) ([]instance.Id, error) {
	info, err := st.StateServerInfo()
	if err != nil {
		return nil, err
	}
	instances := make([]instance.Id, 0, len(info.MachineIds))
	for _, id := range info.MachineIds {
		machine, err := st.Machine(id)
		if err != nil {
			return nil, err
		}
		instanceId, err := machine.InstanceId()
		if err == nil {
			instances = append(instances, instanceId)
		} else if !state.IsNotProvisionedError(err) {
			return nil, err
		}
	}
	return instances, nil
}
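// A hypothetical caller, for illustration only: resolve the instances
// backing the state servers, silently skipping machines that are not yet
// provisioned (which environManagerInstances tolerates by design):
//
//	ids, err := environManagerInstances(st)
//	if err != nil {
//		return err
//	}
//	logger.Infof("state servers are running on instances %v", ids)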
func (context *statusContext) makeMachineStatus(machine *state.Machine) (status api.MachineStatus) {
	status.Id = machine.Id()
	status.Agent, status.AgentState, status.AgentStateInfo = processAgent(machine)
	status.AgentVersion = status.Agent.Version
	status.Life = status.Agent.Life
	status.Err = status.Agent.Err
	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		status.DNSName = network.SelectPublicAddress(machine.Addresses())
	} else {
		if state.IsNotProvisionedError(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state if the
		// machine hasn't been provisioned. This also makes unprovisioned
		// machines visually distinct in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]api.MachineStatus)
	return
}
// pollInstanceInfo checks the current provider addresses and status for the
// given machine's instance, and sets them on the machine if they've changed.
func pollInstanceInfo(context machineContext, m machine) (instInfo instanceInfo, err error) {
	instInfo = instanceInfo{}
	instId, err := m.InstanceId()
	// We can't ask the machine for its addresses if it isn't provisioned yet.
	if state.IsNotProvisionedError(err) {
		return instInfo, err
	}
	if err != nil {
		return instInfo, fmt.Errorf("cannot get machine's instance id: %v", err)
	}
	instInfo, err = context.instanceInfo(instId)
	if err != nil {
		if errors.IsNotImplemented(err) {
			return instInfo, err
		}
		logger.Warningf("cannot get instance info for instance %q: %v", instId, err)
		return instInfo, nil
	}
	currentInstStatus, err := m.InstanceStatus()
	if err != nil {
		// This should never occur since the machine is provisioned. But
		// just in case, we reset the polled status so we try again next time.
		logger.Warningf("cannot get current instance status for machine %v: %v", m.Id(), err)
		instInfo.status = ""
	} else if instInfo.status != currentInstStatus {
		logger.Infof("machine %q has new instance status: %v", m.Id(), instInfo.status)
		if err = m.SetInstanceStatus(instInfo.status); err != nil {
			logger.Errorf("cannot set instance status on %q: %v", m, err)
		}
	}
	if !addressesEqual(m.Addresses(), instInfo.addresses) {
		logger.Infof("machine %q has new addresses: %v", m.Id(), instInfo.addresses)
		if err = m.SetAddresses(instInfo.addresses...); err != nil {
			logger.Errorf("cannot set addresses on %q: %v", m, err)
		}
	}
	return instInfo, err
}
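// addressesEqual is used above but not shown in this excerpt. A plausible
// implementation (an assumption, not necessarily the source's) is an
// order-sensitive element-wise comparison of the two slices:
func addressesEqual(a, b []network.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		// network.Address is a plain struct of comparable fields, so == works.
		if a[i] != b[i] {
			return false
		}
	}
	return true
}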
func (s *workerSuite) TestWorker(c *gc.C) {
	// Most functionality is already tested in detail - we just need to
	// test that things are wired together correctly.
	s.PatchValue(&ShortPoll, 10*time.Millisecond)
	s.PatchValue(&LongPoll, 10*time.Millisecond)
	s.PatchValue(&gatherTime, 10*time.Millisecond)
	machines, insts := s.setupScenario(c)
	s.State.StartSync()
	w := NewWorker(s.State)
	defer func() {
		c.Assert(worker.Stop(w), gc.IsNil)
	}()

	checkInstanceInfo := func(index int, m machine, expectedStatus string) bool {
		isProvisioned := true
		status, err := m.InstanceStatus()
		if state.IsNotProvisionedError(err) {
			isProvisioned = false
		} else {
			c.Assert(err, gc.IsNil)
		}
		return reflect.DeepEqual(m.Addresses(), s.addressesForIndex(index)) && (!isProvisioned || status == expectedStatus)
	}

	// Wait for the odd-numbered machines in the first half of the machine
	// slice to be given their addresses and status.
	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Fatalf("timed out waiting for instance info")
		}
		if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
			if i < len(machines)/2 && i%2 == 1 {
				return checkInstanceInfo(i, m, "running")
			}
			status, err := m.InstanceStatus()
			if i%2 == 0 {
				// Even machines are not provisioned yet.
				c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
			} else {
				c.Assert(status, gc.Equals, "")
			}
			return len(m.Addresses()) == 0
		}) {
			break
		}
	}

	// Now provision the even machines in the first half and watch them
	// get addresses.
	for i := 0; i < len(insts)/2; i += 2 {
		m := machines[i]
		err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
		c.Assert(err, gc.IsNil)
		dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
		dummy.SetInstanceStatus(insts[i], "running")
	}
	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Fatalf("timed out waiting for machine instance info")
		}
		if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
			if i < len(machines)/2 {
				return checkInstanceInfo(i, m, "running")
			}
			// Machines in the second half still have no addresses and no status.
			status, err := m.InstanceStatus()
			if i%2 == 0 {
				// Even machines are not provisioned yet.
				c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
			} else {
				c.Assert(status, gc.Equals, "")
			}
			return len(m.Addresses()) == 0
		}) {
			break
		}
	}

	// Provision the remaining machines and check their addresses and status.
	for i := len(insts) / 2; i < len(insts); i++ {
		if i%2 == 0 {
			m := machines[i]
			err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
			c.Assert(err, gc.IsNil)
		}
		dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
		dummy.SetInstanceStatus(insts[i], "running")
	}
	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if !a.HasNext() {
			c.Fatalf("timed out waiting for machine instance info")
		}
		if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
			return checkInstanceInfo(i, m, "running")
		}) {
			break
		}
	}
}