Example #1
// SetUp is defined on the worker.NotifyWatchHandler interface.
func (kw *keyupdaterWorker) SetUp() (watcher.NotifyWatcher, error) {
	// Record the keys Juju knows about.
	// TODO(dfc)
	jujuKeys, err := kw.st.AuthorisedKeys(kw.tag.String())
	if err != nil {
		return nil, errors.LoggedErrorf(logger, "reading Juju ssh keys for %q: %v", kw.tag, err)
	}
	kw.jujuKeys = set.NewStrings(jujuKeys...)

	// Read the keys currently in ~/.ssh/authorized_keys.
	sshKeys, err := ssh.ListKeys(SSHUser, ssh.FullKeys)
	if err != nil {
		return nil, errors.LoggedErrorf(logger, "reading ssh authorized keys for %q: %v", kw.tag, err)
	}
	// Record any keys not added by Juju.
	for _, key := range sshKeys {
		_, comment, err := ssh.KeyFingerprint(key)
		// Also record keys which we cannot parse.
		if err != nil || !strings.HasPrefix(comment, ssh.JujuCommentPrefix) {
			kw.nonJujuKeys = append(kw.nonJujuKeys, key)
		}
	}
	// Write out the ssh authorised keys file to match the current state of the world.
	if err := kw.writeSSHKeys(jujuKeys); err != nil {
		return nil, errors.LoggedErrorf(logger, "adding current Juju keys to ssh authorised keys: %v", err)
	}

	w, err := kw.st.WatchAuthorisedKeys(kw.tag.String())
	if err != nil {
		return nil, errors.LoggedErrorf(logger, "starting key updater worker: %v", err)
	}
	logger.Infof("%q key updater worker started", kw.tag)
	return w, nil
}
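The writeSSHKeys helper called above is not part of this example. A minimal sketch of what it might do, assuming helpers such as ssh.EnsureJujuComment and ssh.ReplaceKeys exist in the same ssh package (an assumption, not confirmed by this listing):

// writeSSHKeys is sketched here for illustration: it rewrites the
// authorized_keys file with the non-Juju keys preserved at SetUp time plus
// the current set of Juju-managed keys.
func (kw *keyupdaterWorker) writeSSHKeys(jujuKeys []string) error {
	allKeys := kw.nonJujuKeys
	for _, key := range jujuKeys {
		// Assumed helper: tags the key comment with ssh.JujuCommentPrefix
		// so the key is recognised as Juju-managed on the next SetUp.
		allKeys = append(allKeys, ssh.EnsureJujuComment(key))
	}
	// Assumed helper: rewrites the authorized_keys file for the given user.
	return ssh.ReplaceKeys(SSHUser, allKeys...)
}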
Example #2
File: kvm.go Project: kapilt/juju
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {

	name := names.NewMachineTag(machineConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Note that KvmObjectFactory only returns a valid container object;
	// it doesn't actually construct the underlying kvm container on
	// disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create container directory: %v", err)
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err)
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(machineConfig.Constraints)
	startParams.Arch = version.Current.Arch
	startParams.Series = series
	startParams.Network = network
	startParams.UserDataFile = userDataFilename

	// If the Simplestream requested is anything but released, update
	// our StartParams to request it.
	if machineConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + machineConfig.ImageStream
	}

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", machineConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err)
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
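ParseConstraintsToStartParams is not shown in this listing. A rough sketch of the mapping it is assumed to perform, with illustrative defaults (the constraints.Value field names and the MB-to-GB conversion are assumptions):

// ParseConstraintsToStartParams (sketch) maps machine constraints onto the
// container StartParams, falling back to placeholder defaults when a
// constraint is unset.
func ParseConstraintsToStartParams(cons constraints.Value) StartParams {
	params := StartParams{
		Memory:   512, // MB, placeholder default
		CpuCores: 1,
		RootDisk: 8, // GB, placeholder default
	}
	if cons.Mem != nil {
		params.Memory = *cons.Mem
	}
	if cons.CpuCores != nil {
		params.CpuCores = *cons.CpuCores
	}
	if cons.RootDisk != nil {
		// Constraints express root-disk in MB; the hardware string above
		// treats RootDisk as GB, so convert (an assumption).
		params.RootDisk = *cons.RootDisk / 1024
	}
	return params
}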
Example #3
// retrieveLatestCharmInfo looks up the charm store to return the charm URLs for the
// latest revision of the deployed charms.
func retrieveLatestCharmInfo(deployedCharms map[string]*charm.URL, uuid string) ([]*charm.URL, error) {
	var curls []*charm.URL
	for _, curl := range deployedCharms {
		if curl.Schema == "local" {
			// Version checking for charms from local repositories is not
			// currently supported, since we don't yet support passing in
			// a path to the local repo. This may change if the need arises.
			continue
		}
		curls = append(curls, curl)
	}

	// Do a bulk call to get the revision info for all charms.
	logger.Infof("retrieving revision information for %d charms", len(curls))
	store := charm.Store.WithJujuAttrs("environment_uuid=" + uuid)
	revInfo, err := store.Latest(curls...)
	if err != nil {
		return nil, errors.LoggedErrorf(logger, "finding charm revision info: %v", err)
	}
	var latestCurls []*charm.URL
	for i, info := range revInfo {
		curl := curls[i]
		if info.Err == nil {
			latestCurls = append(latestCurls, curl.WithRevision(info.Revision))
		} else {
			logger.Errorf("retrieving charm info for %s: %v", curl, info.Err)
		}
	}
	return latestCurls, nil
}
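A hedged usage sketch follows; the logOutdatedCharms name and the map key format (charm URL with the revision stripped) are assumptions made for illustration:

// logOutdatedCharms reports deployed charms whose store revision is newer
// than the deployed one. Illustrative only, not from the Juju source.
func logOutdatedCharms(deployedCharms map[string]*charm.URL, uuid string) error {
	latestCurls, err := retrieveLatestCharmInfo(deployedCharms, uuid)
	if err != nil {
		return errors.Trace(err)
	}
	for _, latest := range latestCurls {
		deployed, ok := deployedCharms[latest.WithRevision(-1).String()]
		if ok && deployed.Revision < latest.Revision {
			logger.Infof("charm %s has newer revision %d available", deployed, latest.Revision)
		}
	}
	return nil
}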
Example #4
// Start starts periodically reporting that p's key is alive.
func (p *Pinger) Start() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.started {
		return errors.Errorf("pinger already started")
	}
	p.tomb = tomb.Tomb{}
	if err := p.prepare(); err != nil {
		return errors.Trace(err)
	}
	logger.Tracef("starting pinger for %q with seq=%d", p.beingKey, p.beingSeq)
	if err := p.ping(); err != nil {
		return errors.Trace(err)
	}
	p.started = true
	go func() {
		err := p.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			errors.LoggedErrorf(logger, "pinger loop failed: %v", err)
		}
		p.tomb.Kill(cause)
		p.tomb.Done()
	}()
	return nil
}
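The log-unwrap-kill sequence in this goroutine reappears verbatim in Examples #8, #9 and #10 below. A small helper that could factor it out (purely a refactoring sketch, not part of the Juju source) might look like:

// runWithTomb runs loop, logs any failure other than a clean tomb shutdown,
// and kills the tomb with the unwrapped cause so that tomb sees ErrDying as
// an exact value.
func runWithTomb(t *tomb.Tomb, what string, loop func() error) {
	defer t.Done()
	err := loop()
	cause := errors.Cause(err)
	if err != nil && cause != tomb.ErrDying {
		errors.LoggedErrorf(logger, "%s failed: %v", what, err)
	}
	t.Kill(cause)
}

With such a helper, the goroutine above reduces to go runWithTomb(&p.tomb, "pinger loop", p.loop).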
Example #5
func (c *kvmContainer) Start(params StartParams) error {
	logger.Debugf("Synchronise images for %s %s", params.Series, params.Arch)
	if err := SyncImages(params.Series, params.Arch); err != nil {
		return err
	}
	var bridge string
	if params.Network != nil {
		if params.Network.NetworkType == container.BridgeNetwork {
			bridge = params.Network.Device
		} else {
			return errors.LoggedErrorf(logger, "Non-bridge network devices not yet supported")
		}
	}
	logger.Debugf("Create the machine %s", c.name)
	if err := CreateMachine(CreateMachineParams{
		Hostname:      c.name,
		Series:        params.Series,
		Arch:          params.Arch,
		UserDataFile:  params.UserDataFile,
		NetworkBridge: bridge,
		Memory:        params.Memory,
		CpuCores:      params.CpuCores,
		RootDisk:      params.RootDisk,
	}); err != nil {
		return err
	}

	logger.Debugf("Set machine %s to autostart", c.name)
	return AutostartMachine(c.name)
}
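For context, here is a sketch of the kind of StartParams a caller such as CreateContainer in Example #2 ends up passing to Start; every literal value is a placeholder, not taken from the Juju source:

// exampleStart shows one plausible call into Start with a bridged network.
func exampleStart(c *kvmContainer) error {
	return c.Start(StartParams{
		Series:       "trusty",         // placeholder
		Arch:         "amd64",          // placeholder
		UserDataFile: "/tmp/user-data", // placeholder path
		Network: &container.NetworkConfig{
			NetworkType: container.BridgeNetwork,
			Device:      "virbr0", // placeholder bridge device
		},
		Memory:   512, // MB
		CpuCores: 1,
		RootDisk: 8, // GB
	})
}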
Example #6
// Handle is defined on the worker.NotifyWatchHandler interface.
func (kw *keyupdaterWorker) Handle() error {
	// Read the keys that Juju has.
	newKeys, err := kw.st.AuthorisedKeys(kw.tag.String())
	if err != nil {
		return errors.LoggedErrorf(logger, "reading Juju ssh keys for %q: %v", kw.tag, err)
	}
	// Figure out if any keys have been added or deleted.
	newJujuKeys := set.NewStrings(newKeys...)
	deleted := kw.jujuKeys.Difference(newJujuKeys)
	added := newJujuKeys.Difference(kw.jujuKeys)
	if added.Size() > 0 || deleted.Size() > 0 {
		logger.Debugf("adding ssh keys to authorised keys: %v", added)
		logger.Debugf("deleting ssh keys from authorised keys: %v", deleted)
		if err = kw.writeSSHKeys(newKeys); err != nil {
			return errors.LoggedErrorf(logger, "updating ssh keys: %v", err)
		}
	}
	kw.jujuKeys = newJujuKeys
	return nil
}
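The added/deleted computation above relies on the Difference semantics of the set package used throughout this worker; a tiny illustration with placeholder key names:

// diffExample illustrates the Difference calls used in Handle above.
func diffExample() {
	previous := set.NewStrings("key-a", "key-b")
	current := set.NewStrings("key-b", "key-c")
	deleted := previous.Difference(current) // key-a: no longer authorised
	added := current.Difference(previous)   // key-c: needs to be written out
	logger.Debugf("added %v, deleted %v", added, deleted)
}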
Example #7
File: agent.go Project: kapilt/juju
// agentDone processes the error returned by
// an exiting agent.
func agentDone(err error) error {
	if err == worker.ErrTerminateAgent {
		err = nil
	}
	if ug, ok := err.(*upgrader.UpgradeReadyError); ok {
		if err := ug.ChangeAgentTools(); err != nil {
			// Return and let upstart deal with the restart.
			return errors.LoggedErrorf(logger, "cannot change agent tools: %v", err)
		}
	}
	return err
}
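A hedged sketch of how an agent command might feed its exit error through agentDone; the MachineAgent type and runAgentLoop method are assumptions made for illustration:

// Run (sketch) passes the agent's exit error through agentDone so that
// ErrTerminateAgent exits cleanly and an UpgradeReadyError swaps the agent
// tools before upstart restarts the process.
func (a *MachineAgent) Run(ctx *cmd.Context) error {
	err := a.runAgentLoop(ctx) // hypothetical stand-in for the agent's real work
	return agentDone(err)
}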
Example #8
// New returns a new Watcher observing the changelog collection,
// which must be a capped collection maintained by mgo/txn.
func New(changelog *mgo.Collection) *Watcher {
	w := &Watcher{
		log:     changelog,
		watches: make(map[watchKey][]watchInfo),
		current: make(map[watchKey]int64),
		request: make(chan interface{}),
	}
	go func() {
		err := w.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			errors.LoggedErrorf(logger, "watcher loop failed: %v", err)
		}
		w.tomb.Kill(cause)
		w.tomb.Done()
	}()
	return w
}
Example #9
// NewStoreManager returns a new StoreManager that retrieves information
// using the given backing.
func NewStoreManager(backing Backing) *StoreManager {
	sm := newStoreManagerNoRun(backing)
	go func() {
		defer sm.tomb.Done()
		// TODO(rog) distinguish between temporary and permanent errors:
		// if we get an error in loop, this logic kills the state's StoreManager
		// forever. This currently fits the way we go about things,
		// because we reconnect to the state on any error, but
		// perhaps there are errors we could recover from.

		err := sm.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			errors.LoggedErrorf(logger, "store manager loop failed: %v", err)
		}
		sm.tomb.Kill(cause)
	}()
	return sm
}
Example #10
// NewWatcher returns a new Watcher.
func NewWatcher(base *mgo.Collection) *Watcher {
	w := &Watcher{
		base:     base,
		pings:    pingsC(base),
		beings:   beingsC(base),
		beingKey: make(map[int64]string),
		beingSeq: make(map[string]int64),
		watches:  make(map[string][]chan<- Change),
		request:  make(chan interface{}),
	}
	go func() {
		err := w.loop()
		cause := errors.Cause(err)
		// tomb expects ErrDying or ErrStillAlive as
		// exact values, so we need to log and unwrap
		// the error first.
		if err != nil && cause != tomb.ErrDying {
			errors.LoggedErrorf(logger, "watcher loop failed: %v", err)
		}
		w.tomb.Kill(cause)
		w.tomb.Done()
	}()
	return w
}
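Every example on this page calls errors.LoggedErrorf. Its implementation is not shown here, but the call sites imply a shape roughly like the following reconstruction (an assumption based only on the usage above, with a loggo-style logger):

// LoggedErrorf (sketch) both logs the formatted message and returns it as an
// error, letting call sites log and return in a single statement.
func LoggedErrorf(logger loggo.Logger, format string, args ...interface{}) error {
	err := fmt.Errorf(format, args...)
	logger.Errorf("%v", err)
	return err
}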