// listTools is like ListTools, but only returns the tools from // a particular storage. func listTools(store StorageReader, majorVersion int) ([]*state.Tools, error) { dir := fmt.Sprintf("%s%d.", toolPrefix, majorVersion) names, err := store.List(dir) if err != nil { return nil, err } var toolsList []*state.Tools for _, name := range names { if !strings.HasPrefix(name, toolPrefix) || !strings.HasSuffix(name, ".tgz") { log.Printf("environs: unexpected tools file found %q", name) continue } vers := name[len(toolPrefix) : len(name)-len(".tgz")] var t state.Tools t.Binary, err = version.ParseBinary(vers) if err != nil { log.Printf("environs: failed to parse %q: %v", vers, err) continue } if t.Major != majorVersion { log.Printf("environs: tool %q found in wrong directory %q", name, dir) continue } t.URL, err = store.URL(name) if err != nil { log.Printf("environs: cannot get URL for %q: %v", name, err) continue } toolsList = append(toolsList, &t) } return toolsList, nil }
// ModeInit is the initial Uniter mode. func ModeInit(u *Uniter) (next Mode, err error) { defer modeContext("ModeInit", &err)() log.Printf("worker/uniter: updating unit addresses") cfg, err := u.st.EnvironConfig() if err != nil { return nil, err } provider, err := environs.Provider(cfg.Type()) if err != nil { return nil, err } if private, err := provider.PrivateAddress(); err != nil { return nil, err } else if err = u.unit.SetPrivateAddress(private); err != nil { return nil, err } if public, err := provider.PublicAddress(); err != nil { return nil, err } else if err = u.unit.SetPublicAddress(public); err != nil { return nil, err } log.Printf("reconciling relation state") if err := u.restoreRelations(); err != nil { return nil, err } return ModeContinue, nil }
// unitChanged responds to changes in the unit. func (f *filter) unitChanged() error { if err := f.unit.Refresh(); err != nil { if state.IsNotFound(err) { return worker.ErrDead } return err } if f.life != f.unit.Life() { switch f.life = f.unit.Life(); f.life { case state.Dying: log.Printf("worker/uniter: unit is dying") close(f.outUnitDying) f.outUpgrade = nil case state.Dead: log.Printf("worker/uniter: unit is dead") return worker.ErrDead } } if resolved := f.unit.Resolved(); resolved != f.resolved { f.resolved = resolved if f.resolved != state.ResolvedNone { f.outResolved = f.outResolvedOn } } return nil }
// flushGlobalPorts opens and closes global ports in the environment. // It keeps a reference count for ports so that only 0-to-1 and 1-to-0 events // modify the environment. func (fw *Firewaller) flushGlobalPorts(rawOpen, rawClose []state.Port) error { // Filter which ports are really to open or close. var toOpen, toClose []state.Port for _, port := range rawOpen { if fw.globalPortRef[port] == 0 { toOpen = append(toOpen, port) } fw.globalPortRef[port]++ } for _, port := range rawClose { fw.globalPortRef[port]-- if fw.globalPortRef[port] == 0 { toClose = append(toClose, port) delete(fw.globalPortRef, port) } } // Open and close the ports. if len(toOpen) > 0 { if err := fw.environ.OpenPorts(toOpen); err != nil { // TODO(mue) Add local retry logic. return err } state.SortPorts(toOpen) log.Printf("worker/firewaller: opened ports %v in environment", toOpen) } if len(toClose) > 0 { if err := fw.environ.ClosePorts(toClose); err != nil { // TODO(mue) Add local retry logic. return err } state.SortPorts(toClose) log.Printf("worker/firewaller: closed ports %v in environment", toClose) } return nil }
// upgrade pulls from current into target. If target has local changes, but
// no conflicts, it will be snapshotted before any changes are made.
func (d *Deployer) upgrade(target *GitDir) error {
	log.Printf("worker/uniter/charm: preparing charm upgrade")
	// Read the URL of the charm we are upgrading to before touching target.
	url, err := ReadCharmURL(d.current)
	if err != nil {
		return err
	}
	if err := target.Init(); err != nil {
		return err
	}
	if dirty, err := target.Dirty(); err != nil {
		return err
	} else if dirty {
		// A conflicted tree is left as-is; only a dirty but conflict-free
		// tree is snapshotted so local changes can be recovered later.
		if conflicted, err := target.Conflicted(); err != nil {
			return err
		} else if !conflicted {
			log.Printf("worker/uniter/charm: snapshotting dirty charm before upgrade")
			if err = target.Snapshotf("Pre-upgrade snapshot."); err != nil {
				return err
			}
		}
	}
	log.Printf("worker/uniter/charm: deploying charm")
	if err := target.Pull(d.current); err != nil {
		return err
	}
	// Commit the upgraded tree, labelled with the new charm URL.
	return target.Snapshotf("Upgraded charm to %q.", url)
}
// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *state.Relation, dir *relation.StateDir) error {
	log.Printf("worker/uniter: joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	// Watch the unit so that Join can be retried on unit changes:
	// ErrCannotEnterScopeYet indicates a subordinate still occupies the
	// scope, and a later unit change may clear that condition.
	w := u.unit.Watch()
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			if err := r.Join(); err == state.ErrCannotEnterScopeYet {
				log.Printf("worker/uniter: cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			log.Printf("worker/uniter: joined relation %q", rel)
			// Register the relationer so hooks for this relation are routed.
			u.relationers[rel.Id()] = r
			return nil
		}
	}
	// Unreachable: every path in the loop above returns; kept to satisfy
	// older compilers' missing-return analysis.
	panic("unreachable")
}
func (s *Server) serveCharm(w http.ResponseWriter, r *http.Request) { if !strings.HasPrefix(r.URL.Path, "/charm/") { panic("serveCharm: bad url") } curl, err := charm.ParseURL("cs:" + r.URL.Path[len("/charm/"):]) if err != nil { w.WriteHeader(http.StatusNotFound) return } info, rc, err := s.store.OpenCharm(curl) if err == ErrNotFound { w.WriteHeader(http.StatusNotFound) return } if err != nil { w.WriteHeader(http.StatusInternalServerError) log.Printf("store: cannot open charm %q: %v", curl, err) return } if statsEnabled(r) { go s.store.IncCounter(charmStatsKey(curl, "charm-bundle")) } defer rc.Close() w.Header().Set("Connection", "close") // No keep-alive for now. w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Length", strconv.FormatInt(info.BundleSize(), 10)) _, err = io.Copy(w, rc) if err != nil { log.Printf("store: failed to stream charm %q: %v", curl, err) } }
// logStatus reports the unit's status, appending extra info when present.
func logStatus(status state.UnitStatus, info string) {
	switch info {
	case "":
		log.Printf("builddb: Unit status is %q", status)
	default:
		log.Printf("builddb: Unit status is %q: %s", status, info)
	}
}
// build bootstraps an environment from the default configuration, deploys
// the local "builddb" charm as an exposed service, and blocks until its
// single unit reports the started status, logging progress throughout.
func build() error {
	environ, err := environs.NewFromName("")
	if err != nil {
		return err
	}
	// NOTE(review): the second argument (true) is presumably upload-tools —
	// confirm against environs.Bootstrap.
	err = environs.Bootstrap(environ, true, nil)
	if err != nil {
		return err
	}
	conn, err := juju.NewConn(environ)
	if err != nil {
		return err
	}
	// The local charm repository is expected to live next to the executable.
	repo := &charm.LocalRepository{filepath.Dir(os.Args[0])}
	curl := charm.MustParseURL("local:precise/builddb")
	ch, err := conn.PutCharm(curl, repo, false)
	if err != nil {
		return err
	}
	service, err := conn.AddService("builddb", ch)
	if err != nil {
		return err
	}
	if err := service.SetExposed(); err != nil {
		return err
	}
	units, err := conn.AddUnits(service, 1)
	if err != nil {
		return err
	}
	log.Printf("builddb: Waiting for unit to reach %q status...", state.UnitStarted)
	unit := units[0]
	last, info, err := unit.Status()
	if err != nil {
		return err
	}
	logStatus(last, info)
	// Poll every two seconds, logging only on status transitions.
	for last != state.UnitStarted {
		time.Sleep(2 * time.Second)
		if err := unit.Refresh(); err != nil {
			return err
		}
		status, info, err := unit.Status()
		if err != nil {
			return err
		}
		if status != last {
			logStatus(status, info)
			last = status
		}
	}
	addr, ok := unit.PublicAddress()
	if !ok {
		return fmt.Errorf("cannot retrieve files: build unit lacks a public-address")
	}
	log.Printf("builddb: Built files published at http://%s", addr)
	log.Printf("builddb: Remember to destroy the environment when you're done...")
	return nil
}
// finish completes the charm writing process and inserts the final metadata. // After it completes the charm will be available for consumption. func (w *charmWriter) finish() error { if w.file == nil { return nil } defer w.session.Close() id := w.file.Id() size := w.file.Size() err := w.file.Close() if err != nil { log.Printf("store: Failed to close GridFS file: %v", err) return err } charms := w.session.Charms() sha256 := hex.EncodeToString(w.sha256.Sum(nil)) charm := charmDoc{ w.urls, w.revision, w.digest, sha256, size, id.(bson.ObjectId), w.charm.Meta(), w.charm.Config(), } if err = charms.Insert(&charm); err != nil { err = maybeConflict(err) log.Printf("store: Failed to insert new revision of charm %v: %v", w.urls, err) return err } return nil }
// runHook executes the supplied hook.Info in an appropriate hook context. If
// the hook itself fails to execute, it returns errHookFailed.
func (u *Uniter) runHook(hi hook.Info) (err error) {
	// Prepare context.
	if err = hi.Validate(); err != nil {
		return err
	}
	hookName := string(hi.Kind)
	relationId := -1
	if hi.Kind.IsRelation() {
		// Relation hooks get their full name from the relevant relationer.
		relationId = hi.RelationId
		if hookName, err = u.relationers[relationId].PrepareHook(hi); err != nil {
			return err
		}
	}
	// The random suffix makes the context id unique across executions.
	hctxId := fmt.Sprintf("%s:%s:%d", u.unit.Name(), hookName, u.rand.Int63())
	hctx := &HookContext{
		service:        u.service,
		unit:           u.unit,
		id:             hctxId,
		relationId:     relationId,
		remoteUnitName: hi.RemoteUnit,
		relations:      map[int]*ContextRelation{},
	}
	for id, r := range u.relationers {
		hctx.relations[id] = r.Context()
	}
	// Prepare server.
	getCmd := func(ctxId, cmdName string) (cmd.Command, error) {
		// TODO: switch to long-running server with single context;
		// use nonce in place of context id.
		if ctxId != hctxId {
			return nil, fmt.Errorf("expected context id %q, got %q", hctxId, ctxId)
		}
		return jujuc.NewCommand(hctx, cmdName)
	}
	socketPath := filepath.Join(u.baseDir, "agent.socket")
	srv, err := jujuc.NewServer(getCmd, socketPath)
	if err != nil {
		return err
	}
	go srv.Run()
	defer srv.Close()
	// Run the hook. State is persisted before and after execution so that
	// an interrupted hook can be detected and resolved after a restart.
	if err := u.writeState(RunHook, Pending, &hi, nil); err != nil {
		return err
	}
	log.Printf("worker/uniter: running %q hook", hookName)
	if err := hctx.RunHook(hookName, u.charm.Path(), u.toolsDir, socketPath); err != nil {
		log.Printf("worker/uniter: hook failed: %s", err)
		return errHookFailed
	}
	if err := u.writeState(RunHook, Done, &hi, nil); err != nil {
		return err
	}
	log.Printf("worker/uniter: ran %q hook", hookName)
	return u.commitHook(hi)
}
// PublishCharmsDistro publishes all branch tips found in // the /charms distribution in Launchpad onto store under // the "cs:" scheme. // apiBase specifies the Launchpad base API URL, such // as lpad.Production or lpad.Staging. // Errors found while processing one or more branches are // all returned as a PublishBranchErrors value. func PublishCharmsDistro(store *Store, apiBase lpad.APIBase) error { oauth := &lpad.OAuth{Anonymous: true, Consumer: "juju"} root, err := lpad.Login(apiBase, oauth) if err != nil { return err } distro, err := root.Distro("charms") if err != nil { return err } tips, err := distro.BranchTips(time.Time{}) if err != nil { return err } var errs PublishBranchErrors for _, tip := range tips { if !strings.HasSuffix(tip.UniqueName, "/trunk") { continue } burl, curl, err := uniqueNameURLs(tip.UniqueName) if err != nil { errs = append(errs, PublishBranchError{tip.UniqueName, err}) log.Printf("error: %v\n", err) continue } log.Printf("----- %s\n", burl) if tip.Revision == "" { errs = append(errs, PublishBranchError{burl, fmt.Errorf("branch has no revisions")}) log.Printf("error: branch has no revisions\n") continue } // Charm is published in the personal URL and in any explicitly // assigned official series. urls := []*charm.URL{curl} schema, name := curl.Schema, curl.Name for _, series := range tip.OfficialSeries { curl = &charm.URL{Schema: schema, Name: name, Series: series, Revision: -1} curl.Series = series curl.User = "" urls = append(urls, curl) } err = PublishBazaarBranch(store, urls, burl, tip.Revision) if err == ErrRedundantUpdate { continue } if err != nil { errs = append(errs, PublishBranchError{burl, err}) log.Printf("error: %v\n", err) } } if errs != nil { return errs } return nil }
// addCharm uploads the given charm (directory or bundle) to environment
// storage and records it in state, returning the resulting state charm.
func (conn *Conn) addCharm(curl *charm.URL, ch charm.Charm) (*state.Charm, error) {
	var f *os.File
	name := charm.Quote(curl.String())
	switch ch := ch.(type) {
	case *charm.Dir:
		var err error
		// Bundle the directory into a temporary file. Remove is deferred
		// before Close, so (LIFO) the file is closed first, then removed.
		if f, err = ioutil.TempFile("", name); err != nil {
			return nil, err
		}
		defer os.Remove(f.Name())
		defer f.Close()
		err = ch.BundleTo(f)
		if err != nil {
			return nil, fmt.Errorf("cannot bundle charm: %v", err)
		}
		// Rewind so the freshly written bundle can be read back below.
		if _, err := f.Seek(0, 0); err != nil {
			return nil, err
		}
	case *charm.Bundle:
		var err error
		if f, err = os.Open(ch.Path); err != nil {
			return nil, fmt.Errorf("cannot read charm bundle: %v", err)
		}
		defer f.Close()
	default:
		return nil, fmt.Errorf("unknown charm type %T", ch)
	}
	// Hash the bundle stream, then rewind again for the upload.
	h := sha256.New()
	size, err := io.Copy(h, f)
	if err != nil {
		return nil, err
	}
	digest := hex.EncodeToString(h.Sum(nil))
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	storage := conn.Environ.Storage()
	log.Printf("writing charm to storage [%d bytes]", size)
	if err := storage.Put(name, f, size); err != nil {
		return nil, fmt.Errorf("cannot put charm: %v", err)
	}
	ustr, err := storage.URL(name)
	if err != nil {
		return nil, fmt.Errorf("cannot get storage URL for charm: %v", err)
	}
	u, err := url.Parse(ustr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse storage URL: %v", err)
	}
	log.Printf("adding charm to state")
	sch, err := conn.State.AddCharm(ch, curl, u, digest)
	if err != nil {
		return nil, fmt.Errorf("cannot add charm: %v", err)
	}
	return sch, nil
}
// deploy deploys the supplied charm, and sets follow-up hook operation state
// as indicated by reason.
func (u *Uniter) deploy(sch *state.Charm, reason Op) error {
	if reason != Install && reason != Upgrade {
		panic(fmt.Errorf("%q is not a deploy operation", reason))
	}
	var hi *hook.Info
	if u.s != nil && (u.s.Op == RunHook || u.s.Op == Upgrade) {
		// If this upgrade interrupts a RunHook, we need to preserve the hook
		// info so that we can return to the appropriate error state. However,
		// if we're resuming (or have force-interrupted) an Upgrade, we also
		// need to preserve whatever hook info was preserved when we initially
		// started upgrading, to ensure we still return to the correct state.
		hi = u.s.Hook
	}
	url := sch.URL()
	// OpStep Done means the charm bits were already fetched and deployed on
	// a previous attempt; only the post-deploy bookkeeping below remains.
	if u.s == nil || u.s.OpStep != Done {
		log.Printf("worker/uniter: fetching charm %q", url)
		bun, err := u.bundles.Read(sch, u.tomb.Dying())
		if err != nil {
			return err
		}
		if err = u.deployer.Stage(bun, url); err != nil {
			return err
		}
		log.Printf("worker/uniter: deploying charm %q", url)
		// Persist Pending before mutating the charm directory so an
		// interrupted deploy is detectable on restart.
		if err = u.writeState(reason, Pending, hi, url); err != nil {
			return err
		}
		if err = u.deployer.Deploy(u.charm); err != nil {
			return err
		}
		if err = u.writeState(reason, Done, hi, url); err != nil {
			return err
		}
	}
	log.Printf("worker/uniter: charm %q is deployed", url)
	if err := u.unit.SetCharm(sch); err != nil {
		return err
	}
	status := Queued
	if hi != nil {
		// If a hook operation was interrupted, restore it.
		status = Pending
	} else {
		// Otherwise, queue the relevant post-deploy hook.
		hi = &hook.Info{}
		switch reason {
		case Install:
			hi.Kind = hook.Install
		case Upgrade:
			hi.Kind = hook.UpgradeCharm
		}
	}
	return u.writeState(RunHook, status, hi, nil)
}
// startInstance is the internal version of StartInstance, used by Bootstrap
// as well as via StartInstance itself.
func (e *environ) startInstance(scfg *startInstanceParams) (environs.Instance, error) {
	if scfg.tools == nil {
		// No tools supplied: pick the best compatible tools available.
		var err error
		flags := environs.HighestVersion | environs.CompatVersion
		scfg.tools, err = environs.FindTools(e, version.Current, flags)
		if err != nil {
			return nil, err
		}
	}
	log.Printf("environs/ec2: starting machine %s in %q running tools version %q from %q", scfg.machineId, e.name, scfg.tools.Binary, scfg.tools.URL)
	spec, err := findInstanceSpec(&instanceConstraint{
		series: scfg.tools.Series,
		arch:   scfg.tools.Arch,
		region: e.ecfg().region(),
	})
	if err != nil {
		return nil, fmt.Errorf("cannot find image satisfying constraints: %v", err)
	}
	// TODO quick sanity check that we can access the tools URL?
	userData, err := e.userData(scfg)
	if err != nil {
		return nil, fmt.Errorf("cannot make user data: %v", err)
	}
	groups, err := e.setUpGroups(scfg.machineId)
	if err != nil {
		return nil, fmt.Errorf("cannot set up groups: %v", err)
	}
	// Retry briefly: the loop re-attempts only while EC2 reports the freshly
	// created security group as not yet found.
	var instances *ec2.RunInstancesResp
	for a := shortAttempt.Start(); a.Next(); {
		instances, err = e.ec2().RunInstances(&ec2.RunInstances{
			ImageId:  spec.imageId,
			MinCount: 1,
			MaxCount: 1,
			UserData: userData,
			// NOTE(review): instance type is hardcoded even though an image
			// spec was looked up above — confirm this is intentional.
			InstanceType:   "m1.small",
			SecurityGroups: groups,
		})
		if err == nil || ec2ErrCode(err) != "InvalidGroup.NotFound" {
			break
		}
	}
	if err != nil {
		return nil, fmt.Errorf("cannot run instances: %v", err)
	}
	if len(instances.Instances) != 1 {
		return nil, fmt.Errorf("expected 1 started instance, got %d", len(instances.Instances))
	}
	inst := &instance{e, &instances.Instances[0]}
	log.Printf("environs/ec2: started instance %q", inst.Id())
	return inst, nil
}
// startInstance is the internal version of StartInstance, used by Bootstrap // as well as via StartInstance itself. func (e *environ) startInstance(scfg *startInstanceParams) (environs.Instance, error) { if scfg.tools == nil { var err error flags := environs.HighestVersion | environs.CompatVersion scfg.tools, err = environs.FindTools(e, version.Current, flags) if err != nil { return nil, err } } log.Printf("environs/openstack: starting machine %s in %q running tools version %q from %q", scfg.machineId, e.name, scfg.tools.Binary, scfg.tools.URL) // TODO(wallyworld) - implement spec lookup if strings.Contains(scfg.tools.Series, "unknown") || strings.Contains(scfg.tools.Series, "unknown") { return nil, fmt.Errorf("cannot find image for unknown series or architecture") } userData, err := e.userData(scfg) if err != nil { return nil, fmt.Errorf("cannot make user data: %v", err) } log.Debugf("environs/openstack: openstack user data: %q", userData) groups, err := e.setUpGroups(scfg.machineId) if err != nil { return nil, fmt.Errorf("cannot set up groups: %v", err) } var groupNames = make([]nova.SecurityGroupName, len(groups)) for i, g := range groups { groupNames[i] = nova.SecurityGroupName{g.Name} } var server *nova.Entity for a := shortAttempt.Start(); a.Next(); { server, err = e.nova().RunServer(nova.RunServerOpts{ Name: state.MachineEntityName(scfg.machineId), // TODO(wallyworld) - do not use hard coded image FlavorId: defaultFlavorId, ImageId: defaultImageId, UserData: userData, SecurityGroupNames: groupNames, }) if err == nil || !gooseerrors.IsNotFound(err) { break } } if err != nil { return nil, fmt.Errorf("cannot run instance: %v", err) } inst := &instance{e, server} log.Printf("environs/openstack: started instance %q", inst.Id()) return inst, nil }
// reconcileInstances compares the initially started watcher for machines,
// units and services with the opened and closed ports of the instances and
// opens and closes the appropriate ports for each instance.
func (fw *Firewaller) reconcileInstances() error {
	for _, machined := range fw.machineds {
		m, err := machined.machine()
		if state.IsNotFound(err) {
			// The machine is gone; drop our bookkeeping for it.
			if err := fw.forgetMachine(machined); err != nil {
				return err
			}
			continue
		} else if err != nil {
			return err
		}
		instanceId, err := m.InstanceId()
		if err != nil {
			return err
		}
		instances, err := fw.environ.Instances([]state.InstanceId{instanceId})
		if err == environs.ErrNoInstances {
			// NOTE(review): returning nil here stops reconciling ALL
			// remaining machines, not just this one — confirm whether a
			// `continue` was intended.
			return nil
		} else if err != nil {
			return err
		}
		initialPorts, err := instances[0].Ports(machined.id)
		if err != nil {
			return err
		}
		// Check which ports to open or to close.
		toOpen := diff(machined.ports, initialPorts)
		toClose := diff(initialPorts, machined.ports)
		if len(toOpen) > 0 {
			log.Printf("worker/firewaller: opening instance ports %v for machine %s", toOpen, machined.id)
			if err := instances[0].OpenPorts(machined.id, toOpen); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			state.SortPorts(toOpen)
		}
		if len(toClose) > 0 {
			log.Printf("worker/firewaller: closing instance ports %v for machine %s", toClose, machined.id)
			if err := instances[0].ClosePorts(machined.id, toClose); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			state.SortPorts(toClose)
		}
	}
	return nil
}
// StartInstance starts a new dummy instance for machineId, records an
// OpStartInstance on the environment's op channel, and returns the instance.
func (e *environ) StartInstance(machineId string, info *state.Info, tools *state.Tools) (environs.Instance, error) {
	defer delay()
	log.Printf("environs/dummy: dummy startinstance, machine %s", machineId)
	if err := e.checkBroken("StartInstance"); err != nil {
		return nil, err
	}
	e.state.mu.Lock()
	defer e.state.mu.Unlock()
	if _, ok := e.Config().CACert(); !ok {
		return nil, fmt.Errorf("no CA certificate in environment configuration")
	}
	if info.EntityName != state.MachineEntityName(machineId) {
		return nil, fmt.Errorf("entity name must match started machine")
	}
	// Mimic real providers, which cannot find images for unknown
	// series/architectures.
	if tools != nil && (strings.HasPrefix(tools.Series, "unknown") || strings.HasPrefix(tools.Arch, "unknown")) {
		return nil, fmt.Errorf("cannot find image for %s-%s", tools.Series, tools.Arch)
	}
	// Allocate a fresh instance id from the monotonically increasing maxId.
	i := &instance{
		state:     e.state,
		id:        state.InstanceId(fmt.Sprintf("%s-%d", e.state.name, e.state.maxId)),
		ports:     make(map[state.Port]bool),
		machineId: machineId,
	}
	e.state.insts[i.id] = i
	e.state.maxId++
	// Publish the operation so tests can observe it.
	e.state.ops <- OpStartInstance{
		Env:       e.state.name,
		MachineId: machineId,
		Instance:  i,
		Info:      info,
		Secret:    e.ecfg().secret(),
	}
	return i, nil
}
func (e *environ) Destroy(ensureInsts []environs.Instance) error { log.Printf("environs/openstack: destroying environment %q", e.name) insts, err := e.AllInstances() if err != nil { return fmt.Errorf("cannot get instances: %v", err) } found := make(map[state.InstanceId]bool) var ids []state.InstanceId for _, inst := range insts { ids = append(ids, inst.Id()) found[inst.Id()] = true } // Add any instances we've been told about but haven't yet shown // up in the instance list. for _, inst := range ensureInsts { id := state.InstanceId(inst.(*instance).Id()) if !found[id] { ids = append(ids, id) found[id] = true } } err = e.terminateInstances(ids) if err != nil { return err } // To properly observe e.storageUnlocked we need to get its value while // holding e.ecfgMutex. e.Storage() does this for us, then we convert // back to the (*storage) to access the private deleteAll() method. st := e.Storage().(*storage) return st.deleteAll() }
// info returns the revision and SHA256 digest of the charm referenced by curl. func (s *store) info(curl *URL) (rev int, digest string, err error) { key := curl.String() resp, err := http.Get(s.baseURL + "/charm-info?charms=" + url.QueryEscape(key)) if err != nil { return } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return } infos := make(map[string]*InfoResponse) if err = json.Unmarshal(body, &infos); err != nil { return } info, found := infos[key] if !found { err = fmt.Errorf("charm: charm store returned response without charm %q", key) return } for _, w := range info.Warnings { log.Printf("charm: WARNING: charm store reports for %q: %s", key, w) } if info.Errors != nil { err = fmt.Errorf( "charm info errors for %q: %s", key, strings.Join(info.Errors, "; "), ) return } return info.Revision, info.Sha256, nil }
func removeOnErr(err *error, path string) { if *err != nil { if e := os.Remove(path); e != nil { log.Printf("installer: cannot remove %q: %v", path, e) } } }
// MgoReset deletes all content from the shared MongoDB server. func MgoReset() { session := MgoDial() defer session.Close() dbnames, err := session.DatabaseNames() if isUnauthorized(err) { // If we've got an unauthorized access error, we're // locked out of the database. We restart it to regain // access. This should only happen when tests fail. destroyMgoServer() log.Printf("testing: restarting MongoDB server after unauthorized access") if err := startMgoServer(); err != nil { panic(err) } return } if err != nil { panic(err) } for _, name := range dbnames { switch name { case "admin", "local", "config": default: err = session.DB(name).DropDatabase() if err != nil { panic(fmt.Errorf("Cannot drop MongoDB database %v: %v", name, err)) } } } }
// AddUnitSubordinateTo adds a new subordinate unit to the service, subordinate
// to principal. It does not verify relation state sanity or pre-existence of
// other subordinates of the same service; is deprecated; and only continues
// to exist for the convenience of certain tests, which are themselves due for
// overhaul.
func (s *Service) AddUnitSubordinateTo(principal *Unit) (unit *Unit, err error) {
	log.Printf("state: Service.AddUnitSubordinateTo is DEPRECATED; subordinate units should be created only as a side-effect of a principal entering relation scope")
	defer trivial.ErrorContextf(&err, "cannot add unit to service %q as a subordinate of %q", s, principal)
	ch, _, err := s.Charm()
	if err != nil {
		return nil, err
	}
	if !ch.Meta().Subordinate {
		return nil, fmt.Errorf("service is not a subordinate")
	}
	if !principal.IsPrincipal() {
		return nil, fmt.Errorf("unit is not a principal")
	}
	name, ops, err := s.addUnitOps(principal.doc.Name, false)
	if err != nil {
		return nil, err
	}
	// Attempt the transaction; on success simply look the new unit up.
	if err = s.st.runner.Run(ops, "", nil); err == nil {
		return s.Unit(name)
	} else if err != txn.ErrAborted {
		return nil, err
	}
	// The transaction aborted: diagnose which precondition failed so the
	// caller gets a meaningful error instead of a bare abort.
	if alive, err := isAlive(s.st.services, s.doc.Name); err != nil {
		return nil, err
	} else if !alive {
		return nil, fmt.Errorf("service is not alive")
	}
	if alive, err := isAlive(s.st.units, principal.doc.Name); err != nil {
		return nil, err
	} else if !alive {
		return nil, fmt.Errorf("principal unit is not alive")
	}
	// Both preconditions held yet the transaction still aborted.
	return nil, fmt.Errorf("inconsistent state")
}
func removeAll(dir string) { err := os.RemoveAll(dir) if err == nil || os.IsNotExist(err) { return } log.Printf("environs: cannot remove %q: %v", dir, err) }
// realTimeSlot disables the hardcoding introduced by fakeTimeSlot.
func realTimeSlot() {
	fakeMutex.Lock()
	// Zero values restore real-clock behavior for presence time slots.
	fakeNow = time.Time{}
	fakeOffset = 0
	fakeMutex.Unlock()
	log.Printf("state/presence: Not faking presence time. Real time slot in use.")
}
func (s *Store) ensureIndexes() error { session := s.session indexes := []struct { c *mgo.Collection i mgo.Index }{{ session.StatCounters(), mgo.Index{Key: []string{"k", "t"}, Unique: true}, }, { session.StatTokens(), mgo.Index{Key: []string{"t"}, Unique: true}, }, { session.Charms(), mgo.Index{Key: []string{"urls", "revision"}, Unique: true}, }, { session.Events(), mgo.Index{Key: []string{"urls", "digest"}}, }} for _, idx := range indexes { err := idx.c.EnsureIndex(idx.i) if err != nil { log.Printf("store: Error ensuring stat.counters index: %v", err) return err } } return nil }
// tryLock tries locking l.keys, one at a time, and succeeds only if it
// can lock all of them in order. The keys should be pre-sorted so that
// two-way conflicts can't happen. If any of the keys fail to be locked,
// and expiring the old lock doesn't work, tryLock undoes all previous
// locks and aborts with an error.
func (l *UpdateLock) tryLock() error {
	for i, key := range l.keys {
		log.Debugf("store: Trying to lock charm %s for updates...", key)
		// A lock is an insert of the key's document; _id uniqueness makes
		// the insert fail when someone else already holds the lock.
		doc := bson.D{{"_id", key}, {"time", l.time}}
		err := l.locks.Insert(doc)
		if err == nil {
			log.Debugf("store: Charm %s is now locked for updates.", key)
			continue
		}
		// 11000 is MongoDB's duplicate-key error code: the lock is held.
		// Try to expire a stale lock, then attempt the insert once more.
		if lerr, ok := err.(*mgo.LastError); ok && lerr.Code == 11000 {
			log.Debugf("store: Charm %s is locked. Trying to expire lock.", key)
			l.tryExpire(key)
			err = l.locks.Insert(doc)
			if err == nil {
				log.Debugf("store: Charm %s is now locked for updates.", key)
				continue
			}
		}
		// Couldn't lock everyone. Undo previous locks.
		for j := i - 1; j >= 0; j-- {
			// Using time below should be unnecessary, but it's an extra check.
			// Can't do anything about errors here. Lock will expire anyway.
			l.locks.Remove(bson.D{{"_id", l.keys[j]}, {"time", l.time}})
		}
		err = maybeConflict(err)
		log.Printf("store: Can't lock charms %v for updating: %v", l.keys, err)
		return err
	}
	return nil
}
// CharmInfo retrieves the CharmInfo value for the charm at url. func (s *Store) CharmInfo(url *charm.URL) (info *CharmInfo, err error) { session := s.session.Copy() defer session.Close() log.Debugf("store: Retrieving charm info for %s", url) rev := url.Revision url = url.WithRevision(-1) charms := session.Charms() var cdoc charmDoc var qdoc interface{} if rev == -1 { qdoc = bson.D{{"urls", url}} } else { qdoc = bson.D{{"urls", url}, {"revision", rev}} } err = charms.Find(qdoc).Sort("-revision").One(&cdoc) if err != nil { log.Printf("store: Failed to find charm %s: %v", url, err) return nil, ErrNotFound } info = &CharmInfo{ cdoc.Revision, cdoc.Digest, cdoc.Sha256, cdoc.Size, cdoc.FileId, cdoc.Meta, cdoc.Config, } return info, nil }
func cleanTempFile(f *os.File) { if f != nil { f.Close() if err := os.Remove(f.Name()); err != nil { log.Printf("downloader: cannot remove temp file %q: %v", f.Name(), err) } } }
// Write creates an entry in the charms GridFS when first called, // and streams all written data into it. func (w *charmWriter) Write(data []byte) (n int, err error) { if w.file == nil { w.session = w.store.session.Copy() w.file, err = w.session.CharmFS().Create("") if err != nil { log.Printf("store: Failed to create GridFS file: %v", err) return 0, err } w.sha256 = sha256.New() log.Printf("store: Creating GridFS file with id %q...", w.file.Id().(bson.ObjectId).Hex()) } _, err = w.sha256.Write(data) if err != nil { panic("hash.Hash should never error") } return w.file.Write(data) }