// updated synchronises the multiwatcher store's MachineInfo entry with the
// current machine document. On first sight of a machine the status is read
// from state; on subsequent updates the previously stored status and
// instance data are preserved. Instance data is fetched lazily once the
// machine has been provisioned (non-empty Nonce).
// NOTE(review): the id parameter is unused here; the entity id comes from
// info.EntityId() — presumably kept for interface conformance, confirm.
func (m *backingMachine) updated(st *State, store *multiwatcherStore, id string) error {
	info := &multiwatcher.MachineInfo{
		EnvUUID:                  st.EnvironUUID(),
		Id:                       m.Id,
		Life:                     multiwatcher.Life(m.Life.String()),
		Series:                   m.Series,
		Jobs:                     paramsJobsFromJobs(m.Jobs),
		Addresses:                mergedAddresses(m.MachineAddresses, m.Addresses),
		SupportedContainers:      m.SupportedContainers,
		SupportedContainersKnown: m.SupportedContainersKnown,
		HasVote:                  m.HasVote,
		WantsVote:                wantsVote(m.Jobs, m.NoVote),
	}
	oldInfo := store.Get(info.EntityId())
	if oldInfo == nil {
		// We're adding the entry for the first time,
		// so fetch the associated machine status.
		statusInfo, err := getStatus(st, machineGlobalKey(m.Id), "machine")
		if err != nil {
			return err
		}
		info.Status = multiwatcher.Status(statusInfo.Status)
		info.StatusInfo = statusInfo.Message
	} else {
		// The entry already exists, so preserve the current status and
		// instance data rather than re-reading them from state.
		oldInfo := oldInfo.(*multiwatcher.MachineInfo)
		info.Status = oldInfo.Status
		info.StatusInfo = oldInfo.StatusInfo
		info.InstanceId = oldInfo.InstanceId
		info.HardwareCharacteristics = oldInfo.HardwareCharacteristics
	}
	// If the machine has been provisioned, fetch the instance id as required,
	// and set instance id and hardware characteristics. A NotFound error is
	// tolerated: the instance data document may not have been written yet.
	if m.Nonce != "" && info.InstanceId == "" {
		instanceData, err := getInstanceData(st, m.Id)
		if err == nil {
			info.InstanceId = string(instanceData.InstanceId)
			info.HardwareCharacteristics = hardwareCharacteristics(instanceData)
		} else if !errors.IsNotFound(err) {
			return err
		}
	}
	store.Update(info)
	return nil
}
func (s *clientSuite) TestClientWatchAll(c *gc.C) { loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) // A very simple end-to-end test, because // all the logic is tested elsewhere. m, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) err = m.SetProvisioned("i-0", agent.BootstrapNonce, nil) c.Assert(err, jc.ErrorIsNil) watcher, err := s.APIState.Client().WatchAll() c.Assert(err, jc.ErrorIsNil) defer func() { err := watcher.Stop() c.Assert(err, jc.ErrorIsNil) }() deltas, err := watcher.Next() c.Assert(err, jc.ErrorIsNil) c.Assert(len(deltas), gc.Equals, 1) d0, ok := deltas[0].Entity.(*multiwatcher.MachineInfo) c.Assert(ok, jc.IsTrue) d0.AgentStatus.Since = nil d0.InstanceStatus.Since = nil if !c.Check(deltas, jc.DeepEquals, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ ModelUUID: s.State.ModelUUID(), Id: m.Id(), InstanceId: "i-0", AgentStatus: multiwatcher.StatusInfo{ Current: status.Pending, }, InstanceStatus: multiwatcher.StatusInfo{ Current: status.Pending, }, Life: multiwatcher.Life("alive"), Series: "quantal", Jobs: []multiwatcher.MachineJob{state.JobManageModel.ToParams()}, Addresses: []multiwatcher.Address{}, HardwareCharacteristics: &instance.HardwareCharacteristics{}, HasVote: false, WantsVote: true, }, }}) { c.Logf("got:") for _, d := range deltas { c.Logf("%#v\n", d.Entity) } } }
func (s *legacySuite) TestWatchAllModels(c *gc.C) { // The WatchAllModels infrastructure is comprehensively tested // else. This test just ensure that the API calls work end-to-end. sysManager := s.OpenAPI(c) defer sysManager.Close() w, err := sysManager.WatchAllModels() c.Assert(err, jc.ErrorIsNil) defer func() { err := w.Stop() c.Assert(err, jc.ErrorIsNil) }() deltasC := make(chan []multiwatcher.Delta) go func() { deltas, err := w.Next() c.Assert(err, jc.ErrorIsNil) deltasC <- deltas }() select { case deltas := <-deltasC: c.Assert(deltas, gc.HasLen, 1) modelInfo := deltas[0].Entity.(*multiwatcher.ModelInfo) env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(modelInfo.ModelUUID, gc.Equals, env.UUID()) c.Assert(modelInfo.Name, gc.Equals, env.Name()) c.Assert(modelInfo.Life, gc.Equals, multiwatcher.Life("alive")) c.Assert(modelInfo.Owner, gc.Equals, env.Owner().Id()) c.Assert(modelInfo.ControllerUUID, gc.Equals, env.ControllerUUID()) case <-time.After(testing.LongWait): c.Fatal("timed out") } }
var marshalTestCases = []struct { about string // Value holds a real Go struct. value multiwatcher.Delta // JSON document. json string }{{ about: "MachineInfo Delta", value: multiwatcher.Delta{ Entity: &multiwatcher.MachineInfo{ Id: "Benji", InstanceId: "Shazam", Status: "error", StatusInfo: "foo", Life: multiwatcher.Life("alive"), Series: "trusty", SupportedContainers: []instance.ContainerType{instance.LXC}, Jobs: []multiwatcher.MachineJob{state.JobManageEnviron.ToParams()}, Addresses: []network.Address{}, HardwareCharacteristics: &instance.HardwareCharacteristics{}, }, }, json: `["machine","change",{"Id":"Benji","InstanceId":"Shazam","HasVote":false,"WantsVote":false,"Status":"error","StatusInfo":"foo","StatusData":null,"Life":"alive","Series":"trusty","SupportedContainers":["lxc"],"SupportedContainersKnown":false,"Jobs":["JobManageEnviron"],"Addresses":[],"HardwareCharacteristics":{}}]`, }, { about: "ServiceInfo Delta", value: multiwatcher.Delta{ Entity: &multiwatcher.ServiceInfo{ Name: "Benji", Exposed: true, CharmURL: "cs:quantal/name",
func (svc *backingService) updated(st *State, store *multiwatcherStore, id string) error { if svc.CharmURL == nil { return errors.Errorf("charm url is nil") } env, err := st.Environment() if err != nil { return errors.Trace(err) } info := &multiwatcher.ServiceInfo{ EnvUUID: st.EnvironUUID(), Name: svc.Name, Exposed: svc.Exposed, CharmURL: svc.CharmURL.String(), OwnerTag: svc.fixOwnerTag(env), Life: multiwatcher.Life(svc.Life.String()), MinUnits: svc.MinUnits, Subordinate: svc.Subordinate, } oldInfo := store.Get(info.EntityId()) needConfig := false if oldInfo == nil { logger.Debugf("new service %q added to backing state", svc.Name) key := serviceGlobalKey(svc.Name) // We're adding the entry for the first time, // so fetch the associated child documents. c, err := readConstraints(st, key) if err != nil { return errors.Trace(err) } info.Constraints = c needConfig = true // Fetch the status. service, err := st.Service(svc.Name) if err != nil { return errors.Trace(err) } serviceStatus, err := service.Status() if err != nil { logger.Warningf("reading service status for key %s: %v", key, err) } if err != nil && !errors.IsNotFound(err) { return errors.Annotatef(err, "reading service status for key %s", key) } if err == nil { info.Status = multiwatcher.StatusInfo{ Current: multiwatcher.Status(serviceStatus.Status), Message: serviceStatus.Message, Data: serviceStatus.Data, Since: serviceStatus.Since, } } else { // TODO(wallyworld) - bug http://pad.lv/1451283 // return an error here once we figure out what's happening // Not sure how status can even return NotFound as it is created // with the service initially. For now, we'll log the error as per // the above and return Unknown. now := time.Now() info.Status = multiwatcher.StatusInfo{ Current: multiwatcher.Status(StatusUnknown), Since: &now, } } } else { // The entry already exists, so preserve the current status. 
oldInfo := oldInfo.(*multiwatcher.ServiceInfo) info.Constraints = oldInfo.Constraints if info.CharmURL == oldInfo.CharmURL { // The charm URL remains the same - we can continue to // use the same config settings. info.Config = oldInfo.Config } else { // The charm URL has changed - we need to fetch the // settings from the new charm's settings doc. needConfig = true } } if needConfig { var err error info.Config, _, err = readSettingsDoc(st, serviceSettingsKey(svc.Name, svc.CharmURL)) if err != nil { return errors.Trace(err) } } store.Update(info) return nil }