Example no. 1
func (s *Server) serveCharm(w http.ResponseWriter, r *http.Request) {
	if !strings.HasPrefix(r.URL.Path, "/charm/") {
		panic("serveCharm: bad url")
	}
	curl, err := charm.ParseURL("cs:" + r.URL.Path[len("/charm/"):])
	if err != nil {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	info, rc, err := s.store.OpenCharm(curl)
	if err == ErrNotFound {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Errorf("store: cannot open charm %q: %v", curl, err)
		return
	}
	if statsEnabled(r) {
		go s.store.IncCounter(charmStatsKey(curl, "charm-bundle"))
	}
	defer rc.Close()
	w.Header().Set("Connection", "close") // No keep-alive for now.
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", strconv.FormatInt(info.BundleSize(), 10))
	_, err = io.Copy(w, rc)
	if err != nil {
		log.Errorf("store: failed to stream charm %q: %v", curl, err)
	}
}
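
The handler above assumes it is registered under the /charm/ prefix, so a request for /charm/precise/wordpress-15 is parsed as the charm URL "cs:precise/wordpress-15". A minimal sketch of that wiring (the constructor name and mux registration are illustrative assumptions, not taken from the source):

func newHandler(s *Server) http.Handler {
	mux := http.NewServeMux()
	// The trailing slash makes the mux forward every /charm/... path to serveCharm.
	mux.HandleFunc("/charm/", s.serveCharm)
	return mux
}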
Example no. 2
// finish completes the charm writing process and inserts the final metadata.
// After it completes the charm will be available for consumption.
func (w *charmWriter) finish() error {
	if w.file == nil {
		return nil
	}
	defer w.session.Close()
	id := w.file.Id()
	size := w.file.Size()
	err := w.file.Close()
	if err != nil {
		log.Errorf("store: Failed to close GridFS file: %v", err)
		return err
	}
	charms := w.session.Charms()
	sha256 := hex.EncodeToString(w.sha256.Sum(nil))
	charm := charmDoc{
		w.urls,
		w.revision,
		w.digest,
		sha256,
		size,
		id.(bson.ObjectId),
		w.charm.Meta(),
		w.charm.Config(),
	}
	if err = charms.Insert(&charm); err != nil {
		err = maybeConflict(err)
		log.Errorf("store: Failed to insert new revision of charm %v: %v", w.urls, err)
		return err
	}
	return nil
}
Example no. 3
// PublishCharmsDistro publishes all branch tips found in
// the /charms distribution in Launchpad onto the store under
// the "cs:" scheme.
// apiBase specifies the Launchpad base API URL, such
// as lpad.Production or lpad.Staging.
// Errors found while processing one or more branches are
// all returned as a PublishBranchErrors value.
func PublishCharmsDistro(store *Store, apiBase lpad.APIBase) error {
	oauth := &lpad.OAuth{Anonymous: true, Consumer: "juju"}
	root, err := lpad.Login(apiBase, oauth)
	if err != nil {
		return err
	}
	distro, err := root.Distro("charms")
	if err != nil {
		return err
	}
	tips, err := distro.BranchTips(time.Time{})
	if err != nil {
		return err
	}

	var errs PublishBranchErrors
	for _, tip := range tips {
		if !strings.HasSuffix(tip.UniqueName, "/trunk") {
			continue
		}
		burl, curl, err := uniqueNameURLs(tip.UniqueName)
		if err != nil {
			errs = append(errs, PublishBranchError{tip.UniqueName, err})
			log.Errorf("%v\n", err)
			continue
		}
		log.Infof("----- %s\n", burl)
		if tip.Revision == "" {
			errs = append(errs, PublishBranchError{burl, fmt.Errorf("branch has no revisions")})
			log.Errorf("branch has no revisions\n")
			continue
		}
		// Charm is published in the personal URL and in any explicitly
		// assigned official series.
		urls := []*charm.URL{curl}
		schema, name := curl.Schema, curl.Name
		for _, series := range tip.OfficialSeries {
			curl = &charm.URL{Schema: schema, Name: name, Series: series, Revision: -1}
			urls = append(urls, curl)
		}

		err = PublishBazaarBranch(store, urls, burl, tip.Revision)
		if err == ErrRedundantUpdate {
			continue
		}
		if err != nil {
			errs = append(errs, PublishBranchError{burl, err})
			log.Errorf("%v\n", err)
		}
	}
	if errs != nil {
		return errs
	}
	return nil
}
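
The failures collected above are returned as a single PublishBranchErrors value, which must itself satisfy the error interface. A sketch of how that type could be defined (inferred from the usage above, not confirmed by the source):

type PublishBranchError struct {
	URL string
	Err error
}

type PublishBranchErrors []PublishBranchError

func (errs PublishBranchErrors) Error() string {
	return fmt.Sprintf("%d branch(es) failed to be published", len(errs))
}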
Example no. 4
func (p *Provisioner) startMachine(m *state.Machine) error {
	// TODO(dfc) the state.Info passed to environ.StartInstance remains contentious;
	// however, as the PA only knows one state.Info, and that info is used by MAs and
	// UAs to locate the state for this environment, it is logical to use the same
	// state.Info as the PA.
	stateInfo, apiInfo, err := p.setupAuthentication(m)
	if err != nil {
		return err
	}
	cons, err := m.Constraints()
	if err != nil {
		return err
	}
	// Generate a unique nonce for the new instance.
	uuid, err := utils.NewUUID()
	if err != nil {
		return err
	}
	// Generated nonce has the format: "machine-#:UUID". The first
	// part is a badge, specifying the tag of the machine the provisioner
	// is running on, while the second part is a random UUID.
	nonce := fmt.Sprintf("%s:%s", state.MachineTag(p.machineId), uuid.String())
	inst, err := p.environ.StartInstance(m.Id(), nonce, m.Series(), cons, stateInfo, apiInfo)
	if err != nil {
		// Set the state to error, so the machine will be skipped next
		// time until the error is resolved, but don't return an
		// error; just keep going with the other machines.
		log.Errorf("worker/provisioner: cannot start instance for machine %q: %v", m, err)
		if err1 := m.SetStatus(params.StatusError, err.Error()); err1 != nil {
			// Something is wrong with this machine, better report it back.
			log.Errorf("worker/provisioner: cannot set error status for machine %q: %v", m, err1)
			return err1
		}
		return nil
	}
	if err := m.SetProvisioned(inst.Id(), nonce); err != nil {
		// The machine is started, but we can't record the mapping in
		// state. It'll keep running while we fail out and restart,
		// but will then be detected by findUnknownInstances and
		// killed again.
		//
		// TODO(dimitern) Stop the instance right away here.
		//
		// Multiple instantiations of a given machine (with the same
		// machine ID) cannot coexist, because findUnknownInstances is
		// called before startMachines. However, if the first machine
		// had started to do work before being replaced, we may
		// encounter surprising problems.
		return err
	}
	// populate the local cache
	p.instances[m.Id()] = inst
	p.machines[inst.Id()] = m.Id()
	log.Noticef("worker/provisioner: started machine %s as instance %s", m, inst.Id())
	return nil
}
Example no. 5
// tryLock tries locking l.keys, one at a time, and succeeds only if it
// can lock all of them in order. The keys should be pre-sorted so that
// two-way conflicts can't happen. If any of the keys fail to be locked,
// and expiring the old lock doesn't work, tryLock undoes all previous
// locks and aborts with an error.
func (l *UpdateLock) tryLock() error {
	for i, key := range l.keys {
		log.Debugf("store: Trying to lock charm %s for updates...", key)
		doc := bson.D{{"_id", key}, {"time", l.time}}
		err := l.locks.Insert(doc)
		if err == nil {
			log.Debugf("store: Charm %s is now locked for updates.", key)
			continue
		}
		if lerr, ok := err.(*mgo.LastError); ok && lerr.Code == 11000 {
			log.Debugf("store: Charm %s is locked. Trying to expire lock.", key)
			l.tryExpire(key)
			err = l.locks.Insert(doc)
			if err == nil {
				log.Debugf("store: Charm %s is now locked for updates.", key)
				continue
			}
		}
		// Couldn't lock everyone. Undo previous locks.
		for j := i - 1; j >= 0; j-- {
			// Using time below should be unnecessary, but it's an extra check.
			// Can't do anything about errors here. Lock will expire anyway.
			l.locks.Remove(bson.D{{"_id", l.keys[j]}, {"time", l.time}})
		}
		err = maybeConflict(err)
		log.Errorf("store: Can't lock charms %v for updating: %v", l.keys, err)
		return err
	}
	return nil
}
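
The doc comment requires l.keys to be pre-sorted: when every caller acquires locks in the same order, two callers can never end up waiting on each other. A sketch of how the lock might be prepared (the constructor name and the bson.Now() timestamp are assumptions, not from the source):

func newUpdateLock(keys []string, locks *mgo.Collection) *UpdateLock {
	sort.Strings(keys) // identical acquisition order for every caller
	return &UpdateLock{keys: keys, locks: locks, time: bson.Now()}
}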
Example no. 6
func (s *Store) ensureIndexes() error {
	session := s.session
	indexes := []struct {
		c *mgo.Collection
		i mgo.Index
	}{{
		session.StatCounters(),
		mgo.Index{Key: []string{"k", "t"}, Unique: true},
	}, {
		session.StatTokens(),
		mgo.Index{Key: []string{"t"}, Unique: true},
	}, {
		session.Charms(),
		mgo.Index{Key: []string{"urls", "revision"}, Unique: true},
	}, {
		session.Events(),
		mgo.Index{Key: []string{"urls", "digest"}},
	}}
	for _, idx := range indexes {
		err := idx.c.EnsureIndex(idx.i)
		if err != nil {
			log.Errorf("store: Error ensuring stat.counters index: %v", err)
			return err
		}
	}
	return nil
}
Example no. 7
func (c *closeWorker) Wait() error {
	err := c.worker.Wait()
	if err := c.closer.Close(); err != nil {
		log.Errorf("closeWorker: close error: %v", err)
	}
	return err
}
Example no. 8
// newFilter returns a filter that handles state changes pertaining to the
// supplied unit.
func newFilter(st *state.State, unitName string) (*filter, error) {
	f := &filter{
		st:                st,
		outUnitDying:      make(chan struct{}),
		outConfig:         make(chan struct{}),
		outConfigOn:       make(chan struct{}),
		outUpgrade:        make(chan *charm.URL),
		outUpgradeOn:      make(chan *charm.URL),
		outResolved:       make(chan state.ResolvedMode),
		outResolvedOn:     make(chan state.ResolvedMode),
		outRelations:      make(chan []int),
		outRelationsOn:    make(chan []int),
		wantForcedUpgrade: make(chan bool),
		wantResolved:      make(chan struct{}),
		discardConfig:     make(chan struct{}),
		setCharm:          make(chan *charm.URL),
		didSetCharm:       make(chan struct{}),
		clearResolved:     make(chan struct{}),
		didClearResolved:  make(chan struct{}),
	}
	go func() {
		defer f.tomb.Done()
		err := f.loop(unitName)
		log.Errorf("worker/uniter/filter: %v", err)
		f.tomb.Kill(err)
	}()
	return f, nil
}
Example no. 9
// CharmInfo retrieves the CharmInfo value for the charm at url.
func (s *Store) CharmInfo(url *charm.URL) (info *CharmInfo, err error) {
	session := s.session.Copy()
	defer session.Close()

	log.Debugf("store: Retrieving charm info for %s", url)
	rev := url.Revision
	url = url.WithRevision(-1)

	charms := session.Charms()
	var cdoc charmDoc
	var qdoc interface{}
	if rev == -1 {
		qdoc = bson.D{{"urls", url}}
	} else {
		qdoc = bson.D{{"urls", url}, {"revision", rev}}
	}
	err = charms.Find(qdoc).Sort("-revision").One(&cdoc)
	if err != nil {
		log.Errorf("store: Failed to find charm %s: %v", url, err)
		return nil, ErrNotFound
	}
	info = &CharmInfo{
		cdoc.Revision,
		cdoc.Digest,
		cdoc.Sha256,
		cdoc.Size,
		cdoc.FileId,
		cdoc.Meta,
		cdoc.Config,
	}
	return info, nil
}
Example no. 10
func (srv *Server) run(lis net.Listener) {
	defer srv.tomb.Done()
	defer srv.wg.Wait() // wait for any outstanding requests to complete.
	srv.wg.Add(1)
	go func() {
		<-srv.tomb.Dying()
		lis.Close()
		srv.wg.Done()
	}()
	handler := websocket.Handler(func(conn *websocket.Conn) {
		srv.wg.Add(1)
		defer srv.wg.Done()
		// If we've got to this stage and the tomb is still
		// alive, we know that any tomb.Kill must occur after we
		// have called wg.Add, so we avoid the possibility of a
		// handler goroutine running after Stop has returned.
		if srv.tomb.Err() != tomb.ErrStillAlive {
			return
		}
		if err := srv.serveConn(conn); err != nil {
			log.Errorf("state/api: error serving RPCs: %v", err)
		}
	})
	// The error from http.Serve is not interesting.
	http.Serve(lis, handler)
}
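
The ordering of wg.Add and the tomb check above only matters relative to a Stop method that kills the tomb and waits for run to return. A sketch of that counterpart (an assumption about its shape, not the source's implementation):

func (srv *Server) Stop() error {
	srv.tomb.Kill(nil)     // closes the listener via the goroutine above
	return srv.tomb.Wait() // run's deferred wg.Wait finishes in-flight handlers first
}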
Example no. 11
// Run executes the subcommand that was selected in Init.
func (c *SuperCommand) Run(ctx *Context) error {
	if c.showDescription {
		if c.Purpose != "" {
			fmt.Fprintf(ctx.Stdout, "%s\n", c.Purpose)
		} else {
			fmt.Fprintf(ctx.Stdout, "%s: no description available\n", c.Info().Name)
		}
		return nil
	}
	if c.subcmd == nil {
		panic("Run: missing subcommand; Init failed or not called")
	}
	if c.Log != nil {
		if err := c.Log.Start(ctx); err != nil {
			return err
		}
	}
	err := c.subcmd.Run(ctx)
	if err != nil && err != ErrSilent {
		log.Errorf("command failed: %v", err)
	} else {
		log.Infof("command finished")
	}
	return err
}
Example no. 12
// Open connects to the server described by the given
// info, waits for it to be initialized, and returns a new State
// representing the environment connected to.
// It returns unauthorizedError if access is unauthorized.
func Open(info *Info, opts DialOpts) (*State, error) {
	log.Infof("state: opening state; mongo addresses: %q; entity %q", info.Addrs, info.Tag)
	if len(info.Addrs) == 0 {
		return nil, stderrors.New("no mongo addresses")
	}
	if len(info.CACert) == 0 {
		return nil, stderrors.New("missing CA certificate")
	}
	xcert, err := cert.ParseCert(info.CACert)
	if err != nil {
		return nil, fmt.Errorf("cannot parse CA certificate: %v", err)
	}
	pool := x509.NewCertPool()
	pool.AddCert(xcert)
	tlsConfig := &tls.Config{
		RootCAs:    pool,
		ServerName: "anything",
	}
	dial := func(addr net.Addr) (net.Conn, error) {
		c, err := net.Dial("tcp", addr.String())
		if err != nil {
			log.Errorf("state: connection failed, will retry: %v", err)
			return nil, err
		}
		cc := tls.Client(c, tlsConfig)
		if err := cc.Handshake(); err != nil {
			log.Errorf("state: TLS handshake failed: %v", err)
			return nil, err
		}
		return cc, nil
	}
	session, err := mgo.DialWithInfo(&mgo.DialInfo{
		Addrs:   info.Addrs,
		Timeout: opts.Timeout,
		Dial:    dial,
	})
	if err != nil {
		return nil, err
	}
	log.Infof("state: connection established")
	st, err := newState(session, info)
	if err != nil {
		session.Close()
		return nil, err
	}
	return st, nil
}
Example no. 13
func (c *Cleaner) Handle() error {
	if err := c.st.Cleanup(); err != nil {
		log.Errorf("worker/cleaner: cannot cleanup state: %v", err)
	}
	// We do not return the error from Cleanup, because we don't want
	// a failed cleanup to stop the loop.
	return nil
}
Example no. 14
// forgetUnit cleans the unit data after the unit is removed.
func (fw *Firewaller) forgetUnit(unitd *unitData) {
	name := unitd.unit.Name()
	serviced := unitd.serviced
	machined := unitd.machined
	if err := unitd.Stop(); err != nil {
		log.Errorf("worker/firewaller: unit watcher %q returned error when stopping: %v", name, err)
	}
	// Clean up after stopping.
	delete(fw.unitds, name)
	delete(machined.unitds, name)
	delete(serviced.unitds, name)
	if len(serviced.unitds) == 0 {
		// Stop service data after all units are removed.
		if err := serviced.Stop(); err != nil {
			log.Errorf("worker/firewaller: service watcher %q returned error when stopping: %v", serviced.service, err)
		}
		delete(fw.serviceds, serviced.service.Name())
	}
}
Example no. 15
// mustLackRevision returns an error if any of the urls has a revision.
func mustLackRevision(context string, urls ...*charm.URL) error {
	for _, url := range urls {
		if url.Revision != -1 {
			err := fmt.Errorf("%s: got charm URL with revision: %s", context, url)
			log.Errorf("store: %v", err)
			return err
		}
	}
	return nil
}
Example no. 16
func (call *Call) done() {
	select {
	case call.Done <- call:
		// ok
	default:
		// We don't want to block here.  It is the caller's responsibility to make
		// sure the channel has enough buffer space. See comment in Go().
		log.Errorf("rpc: discarding Call reply due to insufficient Done chan capacity")
	}
}
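
The buffer-capacity requirement mentioned in the comment is enforced where the call is created. A minimal sketch of that Go()-style entry point, modelled on net/rpc rather than copied from this codebase (the Client type, the send method, and every Call field except Done are assumptions):

func (c *Client) Go(method string, args, reply interface{}, done chan *Call) *Call {
	if done == nil {
		done = make(chan *Call, 1) // buffered, so done() above never drops the reply
	} else if cap(done) == 0 {
		panic("rpc: done channel is unbuffered")
	}
	call := &Call{ServiceMethod: method, Args: args, Reply: reply, Done: done}
	c.send(call)
	return call
}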
Example no. 17
// cmd runs the specified command inside the directory. Errors will be logged
// in detail.
func (d *GitDir) cmd(args ...string) error {
	cmd := exec.Command("git", args...)
	cmd.Dir = d.path
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Errorf("worker/uniter/charm: git command failed: %s\npath: %s\nargs: %#v\n%s",
			err, d.path, args, string(out))
		return fmt.Errorf("git %s failed: %s", args[0], err)
	}
	return nil
}
Example no. 18
// StopAll stops all the resources.
func (rs *Resources) StopAll() {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	for _, r := range rs.resources {
		if err := r.Stop(); err != nil {
			log.Errorf("state/api: error stopping %T resource: %v", r, err)
		}
	}
	rs.resources = make(map[string]Resource)
}
Example no. 19
func (rr *Resumer) loop() error {
	for {
		select {
		case <-rr.tomb.Dying():
			return tomb.ErrDying
		case <-time.After(interval):
			if err := rr.tr.ResumeTransactions(); err != nil {
				log.Errorf("worker/resumer: cannot resume transactions: %v", err)
			}
		}
	}
}
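
A loop like this is normally tied to its tomb by the constructor. A sketch of that wiring, following the tomb pattern used elsewhere in these examples (the constructor and the TransactionResumer interface name are assumptions, not confirmed by the source):

func NewResumer(tr TransactionResumer) *Resumer {
	rr := &Resumer{tr: tr}
	go func() {
		defer rr.tomb.Done()
		rr.tomb.Kill(rr.loop()) // loop returns tomb.ErrDying on a clean stop
	}()
	return rr
}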
Example no. 20
func Open(info *Info, opts DialOpts) (*State, error) {
	// TODO Select a random address from info.Addrs
	// and only fail when we've tried all the addresses.
	// TODO what does "origin" really mean, and is localhost always ok?
	cfg, err := websocket.NewConfig("wss://"+info.Addrs[0]+"/", "http://localhost/")
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	xcert, err := cert.ParseCert(info.CACert)
	if err != nil {
		return nil, err
	}
	pool.AddCert(xcert)
	cfg.TlsConfig = &tls.Config{
		RootCAs:    pool,
		ServerName: "anything",
	}
	var conn *websocket.Conn
	openAttempt := utils.AttemptStrategy{
		Total: opts.Timeout,
		Delay: opts.RetryDelay,
	}
	for a := openAttempt.Start(); a.Next(); {
		log.Infof("state/api: dialing %q", cfg.Location)
		conn, err = websocket.DialConfig(cfg)
		if err == nil {
			break
		}
		log.Errorf("state/api: %v", err)
	}
	if err != nil {
		return nil, err
	}
	log.Infof("state/api: connection established")

	client := rpc.NewConn(jsoncodec.NewWebsocket(conn))
	client.Start()
	st := &State{
		client: client,
		conn:   conn,
	}
	if info.Tag != "" || info.Password != "" {
		if err := st.Login(info.Tag, info.Password, info.Nonce); err != nil {
			conn.Close()
			return nil, err
		}
	}
	st.broken = make(chan struct{})
	go st.heartbeatMonitor()
	return st, nil
}
Example no. 21
// Match returns a List, derived from src, containing only those tools that
// match the supplied Filter. If no tools match, it returns ErrNoMatches.
func (src List) Match(f Filter) (List, error) {
	var result List
	for _, tools := range src {
		if f.match(tools) {
			result = append(result, tools)
		}
	}
	if len(result) == 0 {
		log.Errorf("environs/tools: cannot match %#v", f)
		return nil, ErrNoMatches
	}
	return result, nil
}
Example no. 22
// commonLoop implements the loop structure common to the client
// watchers. It should be started in a separate goroutine by any
// watcher that embeds commonWatcher. It kills the commonWatcher's
// tomb when an error occurs.
func (w *commonWatcher) commonLoop() {
	defer close(w.in)
	w.wg.Add(1)
	go func() {
		// When the watcher has been stopped, we send a Stop request
		// to the server, which will remove the watcher and return a
		// CodeStopped error to any currently outstanding call to
		// Next. If a call to Next happens just after the watcher has
		// been stopped, we'll get a CodeNotFound error. Either way
		// we'll return, wait for the stop request to complete, and
		// the watcher will die with all resources cleaned up.
		defer w.wg.Done()
		<-w.tomb.Dying()
		if err := w.call("Stop", nil); err != nil {
			log.Errorf("state/api: error trying to stop watcher %v", err)
		}
	}()
	w.wg.Add(1)
	go func() {
		// Because Next blocks until there are changes, we need to
		// call it in a separate goroutine, so the watcher can be
		// stopped normally.
		defer w.wg.Done()
		for {
			result := w.newResult()
			err := w.call("Next", &result)
			if err != nil {
				if code := params.ErrCode(err); code == params.CodeStopped || code == params.CodeNotFound {
					if w.tomb.Err() != tomb.ErrStillAlive {
						// The watcher has been stopped at the client end, so we're
						// expecting one of the above two kinds of error.
						// We might see the same errors if the server itself
						// has been shut down, in which case we leave them
						// untouched.
						err = tomb.ErrDying
					}
				}
				// Something went wrong, just report the error and bail out.
				w.tomb.Kill(err)
				return
			}
			select {
			case <-w.tomb.Dying():
				return
			case w.in <- result:
				// Report back the result we just got.
			}
		}
	}()
	w.wg.Wait()
}
Example no. 23
func (p *Provisioner) loop() error {
	environWatcher := p.st.WatchEnvironConfig()
	defer watcher.Stop(environWatcher, &p.tomb)

	var err error
	p.environ, err = worker.WaitForEnviron(environWatcher, p.tomb.Dying())
	if err != nil {
		return err
	}

	// Get a new StateInfo from the environment: the one used to
	// launch the agent may refer to localhost, which will be
	// unhelpful when attempting to run an agent on a new machine.
	if p.stateInfo, p.apiInfo, err = p.environ.StateInfo(); err != nil {
		return err
	}

	// Call processMachines to stop any unknown instances before watching machines.
	if err := p.processMachines(nil); err != nil {
		return err
	}

	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machinesWatcher := p.st.WatchMachines()
	defer watcher.Stop(machinesWatcher, &p.tomb)
	// launchpad.net/juju-core/worker/provisioner/provisioner.go
	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case cfg, ok := <-environWatcher.Changes():
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			if err := p.setConfig(cfg); err != nil {
				log.Errorf("worker/provisioner: loaded invalid environment configuration: %v", err)
			}
		case ids, ok := <-machinesWatcher.Changes():
			if !ok {
				return watcher.MustErr(machinesWatcher)
			}
			if err := p.processMachines(ids); err != nil {
				return err
			}
		}
	}
	panic("not reached")
}
Example no. 24
func (a *MachineAgent) Entity(st *state.State) (AgentState, error) {
	m, err := st.Machine(a.MachineId)
	if err != nil {
		return nil, err
	}
	// Check the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(a.Conf.MachineNonce) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		log.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, worker.ErrTerminateAgent
	}
	return m, nil
}
Example no. 25
// AptGetInstall runs 'apt-get install' for the given packages.
func AptGetInstall(packages ...string) error {
	cmdArgs := append([]string(nil), aptGetCommand...)
	cmdArgs = append(cmdArgs, "install")
	cmdArgs = append(cmdArgs, packages...)
	aptLogger.Infof("Running: %s", cmdArgs)
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	cmd.Env = append(os.Environ(), aptGetEnvOptions...)
	out, err := commandOutput(cmd)
	if err != nil {
		log.Errorf("utils/apt: apt-get command failed: %v\nargs: %#v\n%s",
			err, cmdArgs, string(out))
		return fmt.Errorf("apt-get failed: %v", err)
	}
	return nil
}
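
The function relies on package-level configuration that is not shown in this snippet. A sketch of what those declarations plausibly look like (the exact flags and the stubbed command runner are assumptions, not taken from the source):

var (
	aptGetCommand    = []string{"apt-get", "--assume-yes", "--quiet"}
	aptGetEnvOptions = []string{"DEBIAN_FRONTEND=noninteractive"} // never prompt during install
	// commandOutput is a variable so tests can substitute real command execution.
	commandOutput = (*exec.Cmd).CombinedOutput
)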
Example no. 26
// OpenCharm opens for reading via rc the charm currently available at url.
// rc must be closed after use or resources will leak.
func (s *Store) OpenCharm(url *charm.URL) (info *CharmInfo, rc io.ReadCloser, err error) {
	log.Debugf("store: Opening charm %s", url)
	info, err = s.CharmInfo(url)
	if err != nil {
		return nil, nil, err
	}
	session := s.session.Copy()
	file, err := session.CharmFS().OpenId(info.fileId)
	if err != nil {
		log.Errorf("store: Failed to open GridFS file for charm %s: %v", url, err)
		session.Close()
		return nil, nil, err
	}
	rc = &reader{session, file}
	return
}
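
The returned rc bundles the GridFS file with the copied session so that a single Close releases both, which is why the doc comment insists the caller must close it. A sketch of that wrapper (field types are inferred from the snippet, not confirmed by the source):

type reader struct {
	session *storeSession // closed together with the file
	file    *mgo.GridFile
}

func (r *reader) Read(buf []byte) (n int, err error) {
	return r.file.Read(buf)
}

func (r *reader) Close() error {
	err := r.file.Close()
	r.session.Close()
	return err
}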
Example no. 27
// CharmPublisher returns a new CharmPublisher for importing a charm that
// will be made available in the store at all of the provided URLs.
// The digest parameter must contain the unique identifier that
// represents the charm data being imported (e.g. the VCS revision sha1).
// ErrRedundantUpdate is returned if all of the provided urls are
// already associated with that digest.
func (s *Store) CharmPublisher(urls []*charm.URL, digest string) (p *CharmPublisher, err error) {
	log.Infof("store: Trying to add charms %v with key %q...", urls, digest)
	if err = mustLackRevision("CharmPublisher", urls...); err != nil {
		return
	}
	session := s.session.Copy()
	defer session.Close()

	maxRev := -1
	newKey := false
	charms := session.Charms()
	doc := charmDoc{}
	for i := range urls {
		urlStr := urls[i].String()
		err = charms.Find(bson.D{{"urls", urlStr}}).Sort("-revision").One(&doc)
		if err == mgo.ErrNotFound {
			log.Infof("store: Charm %s not yet in the store.", urls[i])
			newKey = true
			continue
		}
		if err != nil {
			log.Errorf("store: Unknown error looking for charm %s: %s", urlStr, err)
			return
		}
		if doc.Digest != digest {
			log.Infof("store: Charm %s is out of date with revision key %q.", urlStr, digest)
			newKey = true
		}
		if doc.Revision > maxRev {
			maxRev = doc.Revision
		}
	}
	if !newKey {
		log.Infof("store: All charms have revision key %q. Nothing to update.", digest)
		err = ErrRedundantUpdate
		return
	}
	revision := maxRev + 1
	log.Infof("store: Preparing writer to add charms with revision %d.", revision)
	w := &charmWriter{
		store:    s,
		urls:     urls,
		revision: revision,
		digest:   digest,
	}
	return &CharmPublisher{revision, w}, nil
}
Example no. 28
func (s *Server) serveEvent(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/charm-event" {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	r.ParseForm()
	response := map[string]*charm.EventResponse{}
	for _, url := range r.Form["charms"] {
		digest := ""
		if i := strings.Index(url, "@"); i >= 0 && i+1 < len(url) {
			digest = url[i+1:]
			url = url[:i]
		}
		c := &charm.EventResponse{}
		response[url] = c
		curl, err := charm.ParseURL(url)
		var event *CharmEvent
		if err == nil {
			event, err = s.store.CharmEvent(curl, digest)
		}
		var skey []string
		if err == nil {
			skey = charmStatsKey(curl, "charm-event")
			c.Kind = event.Kind.String()
			c.Revision = event.Revision
			c.Digest = event.Digest
			c.Errors = event.Errors
			c.Warnings = event.Warnings
			c.Time = event.Time.UTC().Format(time.RFC3339)
		} else {
			c.Errors = append(c.Errors, err.Error())
		}
		if skey != nil && statsEnabled(r) {
			go s.store.IncCounter(skey)
		}
	}
	data, err := json.Marshal(response)
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		_, err = w.Write(data)
	}
	if err != nil {
		log.Errorf("store: cannot write content: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
}
Example no. 29
// Run runs a machine agent.
func (a *MachineAgent) Run(_ *cmd.Context) error {
	defer a.tomb.Done()
	log.Infof("machine agent %v start", a.Tag())
	if err := a.Conf.read(a.Tag()); err != nil {
		return err
	}
	if err := EnsureWeHaveLXC(a.Conf.DataDir, a.Tag()); err != nil {
		log.Errorf("we were unable to install the lxc package, unable to continue: %v", err)
		return err
	}
	charm.CacheDir = filepath.Join(a.Conf.DataDir, "charmcache")

	// ensureStateWorker ensures that there is a worker that
	// connects to the state and runs within itself all the workers
	// that need a state connection. Unless we're bootstrapping, we
	// need to connect to the API server to find out if we need to
	// call this, so we make the APIWorker call it when necessary if
	// the machine requires it.  Note that ensureStateWorker can be
	// called many times - StartWorker does nothing if there is
	// already a worker started with the given name.
	ensureStateWorker := func() {
		a.runner.StartWorker("state", func() (worker.Worker, error) {
			// TODO(rog) go1.1: use method expression
			return a.StateWorker()
		})
	}
	if a.MachineId == bootstrapMachineId {
		// If we're bootstrapping, we don't have an API
		// server to connect to, so start the state worker regardless.

		// TODO(rog) When we have HA, we only want to do this
		// when we really are bootstrapping - once other
		// instances of the API server have been started, we
		// should follow the normal course of things and ignore
		// the fact that this was once the bootstrap machine.
		log.Infof("Starting StateWorker for machine-0")
		ensureStateWorker()
	}
	a.runner.StartWorker("api", func() (worker.Worker, error) {
		// TODO(rog) go1.1: use method expression
		return a.APIWorker(ensureStateWorker)
	})
	err := agentDone(a.runner.Wait())
	a.tomb.Kill(err)
	return err
}
Example no. 30
// Write creates an entry in the charms GridFS when first called,
// and streams all written data into it.
func (w *charmWriter) Write(data []byte) (n int, err error) {
	if w.file == nil {
		w.session = w.store.session.Copy()
		w.file, err = w.session.CharmFS().Create("")
		if err != nil {
			log.Errorf("store: Failed to create GridFS file: %v", err)
			return 0, err
		}
		w.sha256 = sha256.New()
		log.Infof("store: Creating GridFS file with id %q...", w.file.Id().(bson.ObjectId).Hex())
	}
	_, err = w.sha256.Write(data)
	if err != nil {
		panic("hash.Hash should never error")
	}
	return w.file.Write(data)
}