func (c *Codec) ReadHeader(hdr *rpc.Header) error {
	c.msg = inMsg{} // avoid any potential cross-message contamination.
	var err error
	if c.isLogging() {
		var m json.RawMessage
		err = c.conn.Receive(&m)
		if err == nil {
			log.Debugf("rpc/jsoncodec: <- %s", m)
			err = json.Unmarshal(m, &c.msg)
		} else {
			log.Debugf("rpc/jsoncodec: <- error: %v (closing %v)", err, c.isClosing())
		}
	} else {
		err = c.conn.Receive(&c.msg)
	}
	if err != nil {
		// If we've closed the connection, we may get a spurious error,
		// so ignore it.
		if c.isClosing() || err == io.EOF {
			return io.EOF
		}
		return fmt.Errorf("error receiving message: %v", err)
	}
	hdr.RequestId = c.msg.RequestId
	hdr.Type = c.msg.Type
	hdr.Id = c.msg.Id
	hdr.Request = c.msg.Request
	hdr.Error = c.msg.Error
	hdr.ErrorCode = c.msg.ErrorCode
	return nil
}
// StateInfo is a reusable implementation of Environ.StateInfo, available to
// providers that also use the other functionality from this file.
func StateInfo(env Environ) (*state.Info, *api.Info, error) {
	st, err := LoadState(env.Storage())
	if err != nil {
		return nil, nil, err
	}
	config := env.Config()
	if _, hasCert := config.CACert(); !hasCert {
		return nil, nil, fmt.Errorf("no CA certificate in environment configuration")
	}
	// Wait for the DNS names of any of the instances
	// to become available.
	log.Debugf("waiting for DNS name(s) of state server instances %v", st.StateInstances)
	var hostnames []string
	for a := LongAttempt.Start(); len(hostnames) == 0 && a.Next(); {
		insts, err := env.Instances(st.StateInstances)
		if err != nil && err != ErrPartialInstances {
			log.Debugf("error getting state instances: %v", err.Error())
			return nil, nil, err
		}
		hostnames = getDNSNames(insts)
	}
	if len(hostnames) == 0 {
		return nil, nil, fmt.Errorf("timed out waiting for mgo address from %v", st.StateInstances)
	}
	stateInfo, apiInfo := getStateInfo(config, hostnames)
	return stateInfo, apiInfo, nil
}
func (c *Codec) WriteMessage(hdr *rpc.Header, body interface{}) error {
	r := &outMsg{
		RequestId: hdr.RequestId,
		Type:      hdr.Type,
		Id:        hdr.Id,
		Request:   hdr.Request,
		Error:     hdr.Error,
		ErrorCode: hdr.ErrorCode,
	}
	if hdr.IsRequest() {
		r.Params = body
	} else {
		r.Response = body
	}
	if c.isLogging() {
		data, err := json.Marshal(r)
		if err != nil {
			log.Debugf("rpc/jsoncodec: -> marshal error: %v", err)
			return err
		}
		log.Debugf("rpc/jsoncodec: -> %s", data)
	}
	return c.conn.Send(r)
}
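// The following is an illustrative, self-contained sketch (not the codec's
// actual types) of the wire asymmetry WriteMessage relies on: a request
// carries Params, a response carries Response, and omitempty keeps the
// unused fields off the wire. Field names here mirror outMsg but are
// assumptions for the example only.

package main

import (
	"encoding/json"
	"fmt"
)

type wireMsg struct {
	RequestId uint64      `json:",omitempty"`
	Type      string      `json:",omitempty"`
	Id        string      `json:",omitempty"`
	Request   string      `json:",omitempty"`
	Params    interface{} `json:",omitempty"`
	Response  interface{} `json:",omitempty"`
	Error     string      `json:",omitempty"`
	ErrorCode string      `json:",omitempty"`
}

func main() {
	req := wireMsg{RequestId: 1, Type: "Machiner", Id: "0", Request: "Life",
		Params: map[string]string{"tag": "machine-0"}}
	resp := wireMsg{RequestId: 1, Response: map[string]string{"life": "alive"}}
	for _, m := range []wireMsg{req, resp} {
		b, _ := json.Marshal(m)
		fmt.Println(string(b)) // one JSON object per message
	}
}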
// tryLock tries locking l.keys, one at a time, and succeeds only if it
// can lock all of them in order. The keys should be pre-sorted so that
// two-way conflicts can't happen. If any of the keys fail to be locked,
// and expiring the old lock doesn't work, tryLock undoes all previous
// locks and aborts with an error.
func (l *UpdateLock) tryLock() error {
	for i, key := range l.keys {
		log.Debugf("store: Trying to lock charm %s for updates...", key)
		doc := bson.D{{"_id", key}, {"time", l.time}}
		err := l.locks.Insert(doc)
		if err == nil {
			log.Debugf("store: Charm %s is now locked for updates.", key)
			continue
		}
		if lerr, ok := err.(*mgo.LastError); ok && lerr.Code == 11000 {
			log.Debugf("store: Charm %s is locked. Trying to expire lock.", key)
			l.tryExpire(key)
			err = l.locks.Insert(doc)
			if err == nil {
				log.Debugf("store: Charm %s is now locked for updates.", key)
				continue
			}
		}
		// Couldn't lock everyone. Undo previous locks.
		for j := i - 1; j >= 0; j-- {
			// Using time below should be unnecessary, but it's an extra check.
			// Can't do anything about errors here. Lock will expire anyway.
			l.locks.Remove(bson.D{{"_id", l.keys[j]}, {"time", l.time}})
		}
		err = maybeConflict(err)
		log.Errorf("store: Can't lock charms %v for updating: %v", l.keys, err)
		return err
	}
	return nil
}
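// A minimal, self-contained sketch of the pre-sorting step the comment above
// assumes: if every locker acquires its keys in the same global order, two
// lockers can never each hold a key the other is waiting for. The function
// name and key values are illustrative, not part of the store package.

package main

import (
	"fmt"
	"sort"
)

func sortedLockKeys(urls []string) []string {
	keys := append([]string(nil), urls...)
	sort.Strings(keys)
	return keys
}

func main() {
	a := sortedLockKeys([]string{"cs:precise/wordpress", "cs:precise/mysql"})
	b := sortedLockKeys([]string{"cs:precise/mysql", "cs:precise/wordpress"})
	// Both lockers contend on "cs:precise/mysql" first, so one simply
	// fails (or waits) instead of deadlocking with the other.
	fmt.Println(a, b)
}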
// upgradeChanged responds to changes in the service or in the
// upgrade requests that define which charm changes should be
// delivered as upgrades.
func (f *filter) upgradeChanged() (err error) {
	if f.life != state.Alive {
		log.Debugf("worker/uniter/filter: charm check skipped, unit is dying")
		f.outUpgrade = nil
		return nil
	}
	if f.upgradeFrom.url == nil {
		log.Debugf("worker/uniter/filter: charm check skipped, not yet installed.")
		f.outUpgrade = nil
		return nil
	}
	if *f.upgradeAvailable.url != *f.upgradeFrom.url {
		if f.upgradeAvailable.force || !f.upgradeFrom.force {
			log.Debugf("worker/uniter/filter: preparing new upgrade event")
			if f.upgrade == nil || *f.upgrade != *f.upgradeAvailable.url {
				f.upgrade = f.upgradeAvailable.url
			}
			f.outUpgrade = f.outUpgradeOn
			return nil
		}
	}
	log.Debugf("worker/uniter/filter: no new charm event")
	f.outUpgrade = nil
	return nil
}
// Run updates the configuration of a service.
func (c *SetCommand) Run(ctx *cmd.Context) error {
	var unvalidated = make(map[string]string)
	var remove []string
	contents, err := c.Config.Read(ctx)
	if err != nil && err != cmd.ErrNoPath {
		return err
	}
	if len(contents) > 0 {
		if err := goyaml.Unmarshal(contents, &unvalidated); err != nil {
			return err
		}
	} else {
		unvalidated, remove, err = parse(c.Options)
		if err != nil {
			return err
		}
	}
	conn, err := juju.NewConnFromName(c.EnvName)
	if err != nil {
		return err
	}
	defer conn.Close()
	srv, err := conn.State.Service(c.ServiceName)
	if err != nil {
		return err
	}
	charm, _, err := srv.Charm()
	if err != nil {
		return err
	}
	// 1. Validate will convert this partial configuration
	// into a full configuration by inserting charm defaults
	// for missing values.
	validated, err := charm.Config().Validate(unvalidated)
	if err != nil {
		return err
	}
	// 2. Strip out the additional default keys added in the previous step.
	validated = strip(validated, unvalidated)
	cfg, err := srv.Config()
	if err != nil {
		return err
	}
	// 3. Update any keys that remain after validation and filtering.
	if len(validated) > 0 {
		log.Debugf("cmd/juju: updating configuration items: %v", validated)
		cfg.Update(validated)
	}
	// 4. Delete any removed keys.
	if len(remove) > 0 {
		log.Debugf("cmd/juju: removing configuration items: %v", remove)
		for _, k := range remove {
			cfg.Delete(k)
		}
	}
	_, err = cfg.Write()
	return err
}
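// The strip helper used in step 2 is not shown in this section. Below is a
// minimal sketch of the filtering behaviour the step-2 comment describes,
// under the assumption that strip keeps only keys the user actually supplied
// and drops the charm defaults that Validate filled in. The signature and
// example values are illustrative.

package main

import "fmt"

func strip(validated map[string]interface{}, unvalidated map[string]string) map[string]interface{} {
	out := make(map[string]interface{})
	for k, v := range validated {
		if _, supplied := unvalidated[k]; supplied {
			out[k] = v
		}
	}
	return out
}

func main() {
	validated := map[string]interface{}{"name": "blog", "port": 80} // "port" came from a charm default
	unvalidated := map[string]string{"name": "blog"}
	fmt.Println(strip(validated, unvalidated)) // map[name:blog]
}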
// Kill stops p's periodical ping and immediately reports that it is dead.
func (p *Pinger) Kill() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.started {
		log.Debugf("state/presence: killing pinger for %q (was started)", p.beingKey)
		return p.killStarted()
	}
	log.Debugf("state/presence: killing pinger for %q (was stopped)", p.beingKey)
	return p.killStopped()
}
// CharmInfo retrieves the CharmInfo value for the charm at url.
func (s *Store) CharmInfo(url *charm.URL) (info *CharmInfo, err error) {
	session := s.session.Copy()
	defer session.Close()
	log.Debugf("store: Retrieving charm info for %s", url)
	rev := url.Revision
	url = url.WithRevision(-1)
	charms := session.Charms()
	var cdoc charmDoc
	var qdoc interface{}
	if rev == -1 {
		qdoc = bson.D{{"urls", url}}
	} else {
		qdoc = bson.D{{"urls", url}, {"revision", rev}}
	}
	err = charms.Find(qdoc).Sort("-revision").One(&cdoc)
	if err != nil {
		log.Errorf("store: Failed to find charm %s: %v", url, err)
		return nil, ErrNotFound
	}
	info = &CharmInfo{
		cdoc.Revision,
		cdoc.Digest,
		cdoc.Sha256,
		cdoc.Size,
		cdoc.FileId,
		cdoc.Meta,
		cdoc.Config,
	}
	return info, nil
}
// Main runs the Command specified by req, and fills in resp. A single command
// is run at a time.
func (j *Jujuc) Main(req Request, resp *Response) error {
	if req.CommandName == "" {
		return badReqErrorf("command not specified")
	}
	if !filepath.IsAbs(req.Dir) {
		return badReqErrorf("Dir is not absolute")
	}
	c, err := j.getCmd(req.ContextId, req.CommandName)
	if err != nil {
		return badReqErrorf("%s", err)
	}
	var stdin, stdout, stderr bytes.Buffer
	ctx := &cmd.Context{
		Dir:    req.Dir,
		Stdin:  &stdin,
		Stdout: &stdout,
		Stderr: &stderr,
	}
	j.mu.Lock()
	defer j.mu.Unlock()
	log.Infof("worker/uniter/jujuc: running hook tool %q %q", req.CommandName, req.Args)
	log.Debugf("worker/uniter/jujuc: hook context id %q; dir %q", req.ContextId, req.Dir)
	resp.Code = cmd.Main(c, ctx, req.Args)
	resp.Stdout = stdout.Bytes()
	resp.Stderr = stderr.Bytes()
	return nil
}
func (e *environ) userData(scfg *startInstanceParams) ([]byte, error) {
	cfg := &cloudinit.MachineConfig{
		StateServer:        scfg.stateServer,
		StateInfo:          scfg.info,
		StateServerCert:    scfg.stateServerCert,
		StateServerKey:     scfg.stateServerKey,
		InstanceIdAccessor: "$(curl http://169.254.169.254/1.0/meta-data/instance-id)",
		ProviderType:       "ec2",
		DataDir:            "/var/lib/juju",
		Tools:              scfg.tools,
		MachineId:          scfg.machineId,
		AuthorizedKeys:     e.ecfg().AuthorizedKeys(),
		Config:             scfg.config,
	}
	cloudcfg, err := cloudinit.New(cfg)
	if err != nil {
		return nil, err
	}
	data, err := cloudcfg.Render()
	if err != nil {
		return nil, err
	}
	cdata := trivial.Gzip(data)
	log.Debugf("environs/ec2: ec2 user data; %d bytes: %q", len(cdata), data)
	return cdata, nil
}
func killWorker(id string, info *workerInfo) {
	if info.worker != nil {
		log.Debugf("worker: killing %q", id)
		info.worker.Kill()
		info.worker = nil
	}
	info.stopping = true
	info.start = nil
}
// Unlock removes the previously acquired server-side lock that prevents
// other processes from attempting to update a set of charm URLs.
func (l *UpdateLock) Unlock() {
	log.Debugf("store: Unlocking charms for future updates: %v", l.keys)
	defer l.locks.Database.Session.Close()
	for i := len(l.keys) - 1; i >= 0; i-- {
		// Using time below ensures only the proper lock is removed.
		// Can't do much about errors here. Locks will expire anyway.
		l.locks.Remove(bson.D{{"_id", l.keys[i]}, {"time", l.time}})
	}
}
// modeContext returns a function that implements logging and common error
// manipulation for Mode funcs.
func modeContext(name string, err *error) func() {
	log.Printf("worker/uniter: %s starting", name)
	return func() {
		log.Debugf("worker/uniter: %s exiting", name)
		switch *err {
		case nil, tomb.ErrDying, worker.ErrDead:
		default:
			*err = errors.New(name + ": " + (*err).Error())
		}
	}
}
// modeContext returns a function that implements logging and common error
// manipulation for Mode funcs.
func modeContext(name string, err *error) func() {
	log.Infof("worker/uniter: %s starting", name)
	return func() {
		log.Debugf("worker/uniter: %s exiting", name)
		switch *err {
		case nil, tomb.ErrDying, worker.ErrTerminateAgent:
		default:
			*err = stderrors.New(name + ": " + (*err).Error())
		}
	}
}
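// A self-contained sketch of the calling pattern modeContext is built for:
// the deferred closure observes the caller's named error return and
// annotates it on exit. This mimics modeContext rather than importing the
// uniter package; the mode name and error text are illustrative.

package main

import (
	"errors"
	"fmt"
	"log"
)

func modeContext(name string, err *error) func() {
	log.Printf("%s starting", name)
	return func() {
		log.Printf("%s exiting", name)
		if *err != nil {
			*err = errors.New(name + ": " + (*err).Error())
		}
	}
}

func runMode() (err error) {
	defer modeContext("ModeAbide", &err)() // trailing () - the closure runs at return time
	return errors.New("charm deployment failed")
}

func main() {
	fmt.Println(runMode()) // ModeAbide: charm deployment failed
}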
// upgradeChanged responds to changes in the service or in the
// upgrade requests that define which charm changes should be
// delivered as upgrades.
func (f *filter) upgradeChanged() (err error) {
	if f.life != state.Alive {
		log.Debugf("worker/uniter/filter: charm check skipped, unit is dying")
		f.outUpgrade = nil
		return nil
	}
	if *f.upgradeAvailable.url != *f.upgradeRequested.url {
		if f.upgradeAvailable.force || !f.upgradeRequested.force {
			log.Debugf("worker/uniter/filter: preparing new upgrade event")
			if f.upgrade == nil || *f.upgrade.URL() != *f.upgradeAvailable.url {
				if f.upgrade, err = f.st.Charm(f.upgradeAvailable.url); err != nil {
					return err
				}
			}
			f.outUpgrade = f.outUpgradeOn
			return nil
		}
	}
	log.Debugf("worker/uniter/filter: no new charm event")
	return nil
}
func VerifyStorage(storage Storage) error {
	reader := strings.NewReader(verificationContent)
	err := storage.Put(verificationFilename, reader, int64(len(verificationContent)))
	if err != nil {
		log.Debugf("environs: failed to write bootstrap-verify file: %v", err)
		return VerifyStorageError
	}
	return nil
}
// initLastId reads the most recent changelog document and initializes
// lastId with it. This causes all history that precedes the creation
// of the watcher to be ignored.
func (w *Watcher) initLastId() error {
	log.Debugf("state/watcher: reading most recent document to ignore past history...")
	var entry struct {
		Id interface{} "_id"
	}
	err := w.log.Find(nil).Sort("-$natural").One(&entry)
	if err != nil && err != mgo.ErrNotFound {
		return err
	}
	w.lastId = entry.Id
	return nil
}
func (e *environ) StopInstances(insts []instance.Instance) error {
	ids := make([]instance.Id, len(insts))
	for i, inst := range insts {
		instanceValue, ok := inst.(*openstackInstance)
		if !ok {
			return errors.New("Incompatible instance.Instance supplied")
		}
		ids[i] = instanceValue.Id()
	}
	log.Debugf("environs/openstack: terminating instances %v", ids)
	return e.terminateInstances(ids)
}
// Stop stops p's periodical ping.
// Watchers will not notice p has stopped pinging until the
// previous ping times out.
func (p *Pinger) Stop() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.started {
		log.Debugf("state/presence: stopping pinger for %q with seq=%d", p.beingKey, p.beingSeq)
	}
	p.tomb.Kill(nil)
	err := p.tomb.Wait()
	// TODO ping one more time to guarantee a late timeout.
	p.started = false
	return err
}
func (c *JujuLogCommand) Run(_ *cmd.Context) error {
	badge := c.ctx.UnitName()
	if r, found := c.ctx.HookRelation(); found {
		badge = badge + " " + r.FakeId()
	}
	msg := badge + ": " + c.Message
	if c.Debug {
		log.Debugf("%s", msg)
	} else {
		log.Printf("%s", msg)
	}
	return nil
}
// unitsChanged responds to changes to the assigned units.
func (fw *Firewaller) unitsChanged(change *unitsChange) error {
	changed := []*unitData{}
	for _, name := range change.units {
		unit, err := fw.st.Unit(name)
		if err != nil && !state.IsNotFound(err) {
			return err
		}
		var machineId string
		if unit != nil {
			machineId, err = unit.AssignedMachineId()
			if state.IsNotFound(err) {
				continue
			} else if err != nil {
				if _, ok := err.(*state.NotAssignedError); !ok {
					return err
				}
			}
		}
		if unitd, known := fw.unitds[name]; known {
			knownMachineId := fw.unitds[name].machined.id
			if unit == nil || unit.Life() == state.Dead || machineId != knownMachineId {
				fw.forgetUnit(unitd)
				changed = append(changed, unitd)
				log.Debugf("worker/firewaller: stopped watching unit %s", name)
			}
		} else if unit != nil && unit.Life() != state.Dead && fw.machineds[machineId] != nil {
			err = fw.startUnit(unit, machineId)
			if err != nil {
				return err
			}
			changed = append(changed, fw.unitds[name])
			log.Debugf("worker/firewaller: started watching unit %s", name)
		}
	}
	if err := fw.flushUnits(changed); err != nil {
		return fmt.Errorf("cannot change firewall ports: %v", err)
	}
	return nil
}
// startInstance is the internal version of StartInstance, used by Bootstrap
// as well as via StartInstance itself.
func (e *environ) startInstance(scfg *startInstanceParams) (environs.Instance, error) {
	if scfg.tools == nil {
		var err error
		flags := environs.HighestVersion | environs.CompatVersion
		scfg.tools, err = environs.FindTools(e, version.Current, flags)
		if err != nil {
			return nil, err
		}
	}
	log.Printf("environs/openstack: starting machine %s in %q running tools version %q from %q",
		scfg.machineId, e.name, scfg.tools.Binary, scfg.tools.URL)
	// TODO(wallyworld) - implement spec lookup
	if strings.Contains(scfg.tools.Series, "unknown") || strings.Contains(scfg.tools.Arch, "unknown") {
		return nil, fmt.Errorf("cannot find image for unknown series or architecture")
	}
	userData, err := e.userData(scfg)
	if err != nil {
		return nil, fmt.Errorf("cannot make user data: %v", err)
	}
	log.Debugf("environs/openstack: openstack user data: %q", userData)
	groups, err := e.setUpGroups(scfg.machineId)
	if err != nil {
		return nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	var groupNames = make([]nova.SecurityGroupName, len(groups))
	for i, g := range groups {
		groupNames[i] = nova.SecurityGroupName{g.Name}
	}
	var server *nova.Entity
	for a := shortAttempt.Start(); a.Next(); {
		server, err = e.nova().RunServer(nova.RunServerOpts{
			Name: state.MachineEntityName(scfg.machineId),
			// TODO(wallyworld) - do not use hard coded image
			FlavorId:           defaultFlavorId,
			ImageId:            defaultImageId,
			UserData:           userData,
			SecurityGroupNames: groupNames,
		})
		if err == nil || !gooseerrors.IsNotFound(err) {
			break
		}
	}
	if err != nil {
		return nil, fmt.Errorf("cannot run instance: %v", err)
	}
	inst := &instance{e, server}
	log.Printf("environs/openstack: started instance %q", inst.Id())
	return inst, nil
}
// forgetMachine cleans the machine data after the machine is removed.
func (fw *Firewaller) forgetMachine(machined *machineData) error {
	for _, unitd := range machined.unitds {
		fw.forgetUnit(unitd)
	}
	if err := fw.flushMachine(machined); err != nil {
		return err
	}
	delete(fw.machineds, machined.id)
	if err := machined.Stop(); err != nil {
		return err
	}
	log.Debugf("worker/firewaller: stopped watching machine %s", machined.id)
	return nil
}
// handle deals with requests delivered by the public API
// onto the background watcher goroutine.
func (w *Watcher) handle(req interface{}) {
	log.Debugf("state/watcher: got request: %#v", req)
	switch r := req.(type) {
	case reqSync:
		w.next = time.After(0)
		if r.done != nil {
			w.syncDone = append(w.syncDone, r.done)
		}
	case reqWatch:
		for _, info := range w.watches[r.key] {
			if info.ch == r.info.ch {
				panic(fmt.Errorf("tried to re-add channel %v for %s", info.ch, r.key))
			}
		}
		if revno, ok := w.current[r.key]; ok && (revno > r.info.revno || revno == -1 && r.info.revno >= 0) {
			r.info.revno = revno
			w.requestEvents = append(w.requestEvents, event{r.info.ch, r.key, revno})
		}
		w.watches[r.key] = append(w.watches[r.key], r.info)
	case reqUnwatch:
		watches := w.watches[r.key]
		removed := false
		for i, info := range watches {
			if info.ch == r.ch {
				watches[i] = watches[len(watches)-1]
				w.watches[r.key] = watches[:len(watches)-1]
				removed = true
				break
			}
		}
		if !removed {
			panic(fmt.Errorf("tried to remove missing channel %v for %s", r.ch, r.key))
		}
		for i := range w.requestEvents {
			e := &w.requestEvents[i]
			if e.key == r.key && e.ch == r.ch {
				e.ch = nil
			}
		}
		for i := range w.syncEvents {
			e := &w.syncEvents[i]
			if e.key == r.key && e.ch == r.ch {
				e.ch = nil
			}
		}
	default:
		panic(fmt.Errorf("unknown request: %T", req))
	}
}
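// A self-contained sketch of the unordered "swap with last element" deletion
// used in the reqUnwatch case above: O(1) removal at the cost of element
// order, which is acceptable because watch registration order carries no
// meaning. The channel-slice types here are illustrative stand-ins, not the
// watcher's own.

package main

import "fmt"

func removeWatch(watches []chan int, target chan int) []chan int {
	for i, ch := range watches {
		if ch == target {
			watches[i] = watches[len(watches)-1]
			return watches[:len(watches)-1]
		}
	}
	return watches
}

func main() {
	a, b, c := make(chan int), make(chan int), make(chan int)
	watches := []chan int{a, b, c}
	fmt.Println(len(removeWatch(watches, b))) // 2
}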
// ClearResolved notifies the filter that a resolved event has been handled
// and should not be reported again.
func (f *filter) ClearResolved() error {
	select {
	case <-f.tomb.Dying():
		return tomb.ErrDying
	case f.clearResolved <- nothing:
	}
	select {
	case <-f.tomb.Dying():
		return tomb.ErrDying
	case <-f.didClearResolved:
		log.Debugf("resolved clear completed")
		return nil
	}
	panic("unreachable")
}
func (c *JujuLogCommand) Run(ctx *cmd.Context) error {
	if c.formatFlag != "" {
		fmt.Fprintf(ctx.Stderr, "--format flag deprecated for command %q", c.Info().Name)
	}
	badge := c.ctx.UnitName()
	if r, found := c.ctx.HookRelation(); found {
		badge = badge + " " + r.FakeId()
	}
	msg := badge + ": " + c.Message
	if c.Debug {
		log.Debugf("%s", msg)
	} else {
		log.Infof("%s", msg)
	}
	return nil
}
// OpenCharm opens for reading via rc the charm currently available at url.
// rc must be closed after dealing with it or resources will leak.
func (s *Store) OpenCharm(url *charm.URL) (info *CharmInfo, rc io.ReadCloser, err error) {
	log.Debugf("store: Opening charm %s", url)
	info, err = s.CharmInfo(url)
	if err != nil {
		return nil, nil, err
	}
	session := s.session.Copy()
	file, err := session.CharmFS().OpenId(info.fileId)
	if err != nil {
		log.Errorf("store: Failed to open GridFS file for charm %s: %v", url, err)
		session.Close()
		return nil, nil, err
	}
	rc = &reader{session, file}
	return
}
func (suite) TestLogger(c *C) {
	buf := &bytes.Buffer{}
	log.Target = stdlog.New(buf, "", 0)
	for _, t := range logTests {
		log.Debug = t.debug
		log.Printf(t.input)
		c.Assert(buf.String(), Equals, "JUJU "+t.input+"\n")
		buf.Reset()
		log.Debugf(t.input)
		if t.debug {
			c.Assert(buf.String(), Equals, "JUJU:DEBUG "+t.input+"\n")
		} else {
			c.Assert(buf.String(), Equals, "")
		}
		buf.Reset()
	}
}
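// The logTests table the test ranges over is not shown in this section. A
// plausible shape, given how the test uses it, pairs an input string with
// whether debug logging is enabled; the entries below are illustrative only.

var logTests = []struct {
	input string
	debug bool
}{
	{"hello", false},
	{"hello again", true},
}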
func (e *environ) terminateInstances(ids []instance.Id) error {
	if len(ids) == 0 {
		return nil
	}
	var firstErr error
	novaClient := e.nova()
	for _, id := range ids {
		err := novaClient.DeleteServer(string(id))
		if gooseerrors.IsNotFound(err) {
			err = nil
		}
		if err != nil && firstErr == nil {
			log.Debugf("environs/openstack: error terminating instance %q: %v", id, err)
			firstErr = err
		}
	}
	return firstErr
}
// machineLifeChanged starts watching new machines when the firewaller
// is starting, or when new machines come to life, and stops watching
// machines that are dying.
func (fw *Firewaller) machineLifeChanged(id string) error {
	m, err := fw.st.Machine(id)
	found := !errors.IsNotFoundError(err)
	if found && err != nil {
		return err
	}
	dead := !found || m.Life() == state.Dead
	machined, known := fw.machineds[id]
	if known && dead {
		return fw.forgetMachine(machined)
	}
	if !known && !dead {
		err = fw.startMachine(id)
		if err != nil {
			return err
		}
		log.Debugf("worker/firewaller: started watching machine %s", id)
	}
	return nil
}