Example no. 1
// createFilesystemAttachments creates filesystem attachments with the specified parameters.
func createFilesystemAttachments(
	ctx *context,
	params []storage.FilesystemAttachmentParams,
) ([]storage.FilesystemAttachment, error) {
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, params)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var allFilesystemAttachments []storage.FilesystemAttachment
	for sourceName, params := range paramsBySource {
		logger.Debugf("attaching filesystems: %v", params)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.AttachFilesystems(params)
		if err != nil {
			return nil, errors.Annotatef(err, "attaching filesystems from source %q", sourceName)
		}
		for i, result := range results {
			if result.Error != nil {
				return nil, errors.Annotatef(
					err, "attaching %s to %s",
					names.ReadableString(params[i].Filesystem),
					names.ReadableString(params[i].Machine),
				)
			}
			allFilesystemAttachments = append(allFilesystemAttachments, *result.FilesystemAttachment)
		}
	}
	return allFilesystemAttachments, nil
}
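A minimal, self-contained sketch of the github.com/juju/errors pattern the loop above relies on, fanning calls out by source and annotating the error that actually failed; attachOne and the source names are hypothetical, only the juju/errors calls are assumed:

package main

import (
	"fmt"

	"github.com/juju/errors"
)

// attachOne stands in for a single AttachFilesystems call against one source.
func attachOne(source string) error {
	if source == "missing" {
		return errors.NotFoundf("filesystem source %q", source)
	}
	return nil
}

func attachAll(sources []string) error {
	for _, source := range sources {
		if err := attachOne(source); err != nil {
			// Annotate the failing error itself so its cause is preserved.
			return errors.Annotatef(err, "attaching filesystems from source %q", source)
		}
	}
	return nil
}

func main() {
	err := attachAll([]string{"rootfs", "missing"})
	fmt.Println(err)
	// The original cause survives annotation and can still be classified.
	fmt.Println(errors.IsNotFound(errors.Cause(err))) // true
}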
Example no. 2
func (w *collect) do() error {
	logger.Tracef("recording metrics")

	config := w.agent.CurrentConfig()
	tag := config.Tag()
	unitTag, ok := tag.(names.UnitTag)
	if !ok {
		return errors.Errorf("expected a unit tag, got %v", tag)
	}
	paths := uniter.NewWorkerPaths(config.DataDir(), unitTag, "metrics-collect")

	recorder, err := newRecorder(unitTag, paths, w.unitCharmLookup, w.metricFactory)
	if errors.Cause(err) == errMetricsNotDefined {
		logger.Tracef("%v", err)
		return nil
	} else if err != nil {
		return errors.Annotate(err, "failed to instantiate metric recorder")
	}

	ctx := newHookContext(unitTag.String(), recorder)
	err = ctx.addJujuUnitsMetric()
	if err != nil {
		return errors.Annotatef(err, "error adding 'juju-units' metric")
	}

	r := runner.NewRunner(ctx, paths)
	err = r.RunHook(string(hooks.CollectMetrics))
	if err != nil {
		return errors.Annotatef(err, "error running 'collect-metrics' hook")
	}
	return nil
}
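The errMetricsNotDefined branch works because errors.Cause unwraps annotations back to the original sentinel. A small sketch of that idiom, assuming only github.com/juju/errors; the sentinel value and newRecorder below are stand-ins, not the real worker types:

package main

import (
	"fmt"

	"github.com/juju/errors"
)

// errMetricsNotDefined mirrors the sentinel checked above.
var errMetricsNotDefined = errors.New("metrics not defined for charm")

// newRecorder stands in for the real constructor; somewhere along the way the
// sentinel gets annotated, as callers typically do.
func newRecorder() error {
	return errors.Annotate(errMetricsNotDefined, "failed to instantiate metric recorder")
}

func main() {
	err := newRecorder()
	fmt.Println(err == errMetricsNotDefined)               // false: err is annotated
	fmt.Println(errors.Cause(err) == errMetricsNotDefined) // true: the cause is the sentinel
}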
Example no. 3
func (ctxt *Context) ModelDialer(controller, model string) (*Dialer, error) {
	if controller == "" {
		c, err := ctxt.store.origStore.CurrentController()
		if err != nil {
			return nil, errors.Annotatef(err, "cannot get current controller")
		}
		controller = c
	}
	if model == "" {
		m, err := ctxt.store.origStore.CurrentModel(controller)
		if err != nil {
			return nil, errors.Annotatef(err, "cannot get current model")
		}
		model = m
	}
	modelUUID := model
	if !utils.IsValidUUIDString(model) {
		md, err := ctxt.store.origStore.ModelByName(controller, model)
		if err != nil {
			return nil, errors.Annotatef(err, "cannot get model")
		}
		modelUUID = md.ModelUUID
	}
	return ctxt.dialer(controller, modelUUID)
}
Example no. 4
// PrivateAddress implements the server side of Client.PrivateAddress.
func (c *Client) PrivateAddress(p params.PrivateAddress) (results params.PrivateAddressResults, err error) {
	switch {
	case names.IsValidMachine(p.Target):
		machine, err := c.api.stateAccessor.Machine(p.Target)
		if err != nil {
			return results, err
		}
		addr, err := machine.PrivateAddress()
		if err != nil {
			return results, errors.Annotatef(err, "error fetching address for machine %q", machine)
		}
		return params.PrivateAddressResults{PrivateAddress: addr.Value}, nil

	case names.IsValidUnit(p.Target):
		unit, err := c.api.stateAccessor.Unit(p.Target)
		if err != nil {
			return results, err
		}
		addr, err := unit.PrivateAddress()
		if err != nil {
			return results, errors.Annotatef(err, "error fetching address for unit %q", unit)
		}
		return params.PrivateAddressResults{PrivateAddress: addr.Value}, nil
	}
	return results, fmt.Errorf("unknown unit or machine %q", p.Target)

}
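The switch above dispatches purely on the syntax of the target string. A minimal sketch of that classification; classifyTarget is a hypothetical helper, and the gopkg.in/juju/names.v2 import path is assumed (it differs between Juju releases):

package main

import (
	"fmt"

	"gopkg.in/juju/names.v2"
)

// classifyTarget mirrors the dispatch in PrivateAddress: machine ids and unit
// names are recognised by their shape alone, everything else is rejected.
func classifyTarget(target string) string {
	switch {
	case names.IsValidMachine(target):
		return "machine"
	case names.IsValidUnit(target):
		return "unit"
	}
	return "unknown"
}

func main() {
	fmt.Println(classifyTarget("0"))          // machine
	fmt.Println(classifyTarget("mysql/0"))    // unit
	fmt.Println(classifyTarget("not-a-tag!")) // unknown
}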
Example no. 5
File: user.go Project: bac/juju
func (m *ExternalMacaroonAuthenticator) newDischargeRequiredError(cause error) error {
	if m.Service == nil || m.Macaroon == nil {
		return errors.Trace(cause)
	}
	mac := m.Macaroon.Clone()
	// TODO(fwereade): 2016-03-17 lp:1558657
	expiryTime := time.Now().Add(externalLoginExpiryTime)
	if err := addMacaroonTimeBeforeCaveat(m.Service, mac, expiryTime); err != nil {
		return errors.Annotatef(err, "cannot create macaroon")
	}
	err := m.Service.AddCaveat(mac, checkers.NeedDeclaredCaveat(
		checkers.Caveat{
			Location:  m.IdentityLocation,
			Condition: "is-authenticated-user",
		},
		usernameKey,
	))
	if err != nil {
		return errors.Annotatef(err, "cannot create macaroon")
	}
	return &common.DischargeRequiredError{
		Cause:    cause,
		Macaroon: mac,
	}
}
Example no. 6
// AddDefaultBlockDevicesDocs creates block device documents
// for all existing machines in all environments.
func AddDefaultBlockDevicesDocs(st *State) error {
	environments, closer := st.getCollection(environmentsC)
	defer closer()

	var envDocs []bson.M
	err := environments.Find(nil).Select(bson.M{"_id": 1}).All(&envDocs)
	if err != nil {
		return errors.Annotate(err, "failed to read environments")
	}

	for _, envDoc := range envDocs {
		envUUID := envDoc["_id"].(string)
		envSt, err := st.ForEnviron(names.NewEnvironTag(envUUID))
		if err != nil {
			return errors.Annotatef(err, "failed to open environment %q", envUUID)
		}
		defer envSt.Close()

		machines, err := envSt.AllMachines()
		if err != nil {
			return errors.Annotatef(err, "failed to retrieve machines for environment %q", envUUID)
		}

		for _, machine := range machines {
			// If a txn fails because the doc already exists, that's ok.
			if err := envSt.runTransaction([]txn.Op{
				createMachineBlockDevicesOp(machine.Id()),
			}); err != nil && err != txn.ErrAborted {
				return err
			}
		}
	}
	return nil
}
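The err != txn.ErrAborted check is what makes this upgrade step idempotent: a transaction that aborts because the document already exists counts as success. A sketch of that idiom in isolation; runTxn is a hypothetical stand-in for envSt.runTransaction, and only txn.ErrAborted from gopkg.in/mgo.v2/txn plus the juju/errors helpers are assumed:

package state // illustrative placement only

import (
	"github.com/juju/errors"
	"gopkg.in/mgo.v2/txn"
)

// ensureDocExists runs ops that insert a document and tolerates the case where
// the document is already present, in which case the transaction aborts.
func ensureDocExists(runTxn func([]txn.Op) error, ops []txn.Op) error {
	if err := runTxn(ops); err != nil && err != txn.ErrAborted {
		return errors.Trace(err)
	}
	return nil
}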
Example no. 7
// ListServices lists all installed services on the running system
func ListServices() ([]string, error) {
	initName, err := VersionInitSystem(series.HostSeries())
	if err != nil {
		return nil, errors.Trace(err)
	}

	switch initName {
	case InitSystemWindows:
		services, err := windows.ListServices()
		if err != nil {
			return nil, errors.Annotatef(err, "failed to list %s services", initName)
		}
		return services, nil
	case InitSystemUpstart:
		services, err := upstart.ListServices()
		if err != nil {
			return nil, errors.Annotatef(err, "failed to list %s services", initName)
		}
		return services, nil
	case InitSystemSystemd:
		services, err := systemd.ListServices()
		if err != nil {
			return nil, errors.Annotatef(err, "failed to list %s services", initName)
		}
		return services, nil
	default:
		return nil, errors.NotFoundf("init system %q", initName)
	}
}
Example no. 8
File: charmref.go Project: bac/juju
// appCharmDecRefOps returns the operations necessary to delete a
// reference to a charm and its per-application settings and storage
// constraints document. If no references to a given (app, charm) pair
// remain, the operations returned will also remove the settings and
// storage constraints documents for that pair, and schedule a cleanup
// to see if the charm itself is now unreferenced and can be tidied
// away itself.
func appCharmDecRefOps(st modelBackend, appName string, curl *charm.URL) ([]txn.Op, error) {

	refcounts, closer := st.getCollection(refcountsC)
	defer closer()

	charmKey := charmGlobalKey(curl)
	charmOp, err := nsRefcounts.AliveDecRefOp(refcounts, charmKey)
	if err != nil {
		return nil, errors.Annotate(err, "charm reference")
	}

	settingsKey := applicationSettingsKey(appName, curl)
	settingsOp, isFinal, err := nsRefcounts.DyingDecRefOp(refcounts, settingsKey)
	if err != nil {
		return nil, errors.Annotatef(err, "settings reference %s", settingsKey)
	}

	storageConstraintsKey := applicationStorageConstraintsKey(appName, curl)
	storageConstraintsOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, storageConstraintsKey)
	if err != nil {
		return nil, errors.Annotatef(err, "storage constraints reference %s", storageConstraintsKey)
	}

	ops := []txn.Op{settingsOp, storageConstraintsOp, charmOp}
	if isFinal {
		// XXX(fwereade): this construction, in common with ~all
		// our refcount logic, is safe in parallel but not in
		// serial. If this logic is used twice while composing a
		// single transaction, the removal won't be triggered.
		// see `Application.removeOps` for the workaround.
		ops = append(ops, finalAppCharmRemoveOps(appName, curl)...)
	}
	return ops, nil
}
Example no. 9
// obliterateUnit removes a unit from state completely. It is not safe or
// sane to obliterate any unit in isolation; its only reasonable use is in
// the context of machine obliteration, in which we can be sure that unclean
// shutdown of units is not going to leave a machine in a difficult state.
func (st *State) obliterateUnit(unitName string) error {
	unit, err := st.Unit(unitName)
	if errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	// Unlike the machine, we *can* always destroy the unit, and (at least)
	// prevent further dependencies being added. If we're really lucky, the
	// unit will be removed immediately.
	if err := unit.Destroy(); err != nil {
		return errors.Annotatef(err, "cannot destroy unit %q", unitName)
	}
	if err := unit.Refresh(); errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	// Destroy and remove all storage attachments for the unit.
	if err := st.cleanupUnitStorageAttachments(unit.UnitTag(), true); err != nil {
		return errors.Annotatef(err, "cannot destroy storage for unit %q", unitName)
	}
	for _, subName := range unit.SubordinateNames() {
		if err := st.obliterateUnit(subName); err != nil {
			return err
		}
	}
	if err := unit.EnsureDead(); err != nil {
		return err
	}
	return unit.Remove()
}
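The errors.IsNotFound branches encode the rule that an entity which is already gone counts as successfully removed. A compact sketch of that tolerance, assuming only github.com/juju/errors; destroyIgnoringNotFound and its destroy argument are hypothetical stand-ins for calls such as st.Unit or unit.Refresh above:

package state // illustrative placement only

import "github.com/juju/errors"

// destroyIgnoringNotFound applies destroy and treats an entity that has already
// disappeared as successfully removed.
func destroyIgnoringNotFound(name string, destroy func(string) error) error {
	if err := destroy(name); errors.IsNotFound(err) {
		return nil // already gone: nothing left to do
	} else if err != nil {
		return errors.Annotatef(err, "cannot destroy %q", name)
	}
	return nil
}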
Example no. 10
File: worker.go Project: bac/juju
// Handle is part of the watcher.StringsHandler interface.
// It receives the ids of any actions currently enqueued for this machine
// and attempts to execute every one of them before returning.
func (h *handler) Handle(_ <-chan struct{}, actionsSlice []string) error {
	for _, actionId := range actionsSlice {
		ok := names.IsValidAction(actionId)
		if !ok {
			return errors.Errorf("got invalid action id %s", actionId)
		}

		actionTag := names.NewActionTag(actionId)
		action, err := h.config.Facade.Action(actionTag)
		if err != nil {
			return errors.Annotatef(err, "could not retrieve action %s", actionId)
		}

		err = h.config.Facade.ActionBegin(actionTag)
		if err != nil {
			return errors.Annotatef(err, "could not begin action %s", action.Name())
		}

		// We try to handle the action. The result returned from handling the action is
		// sent through using ActionFinish. We only stop the loop if ActionFinish fails.
		var finishErr error
		results, err := h.config.HandleAction(action.Name(), action.Params())
		if err != nil {
			finishErr = h.config.Facade.ActionFinish(actionTag, params.ActionFailed, nil, err.Error())
		} else {
			finishErr = h.config.Facade.ActionFinish(actionTag, params.ActionCompleted, results, "")
		}
		if finishErr != nil {
			return errors.Trace(finishErr)
		}
	}
	return nil
}
Example no. 11
// Manifold returns a dependency.Manifold that runs a charm revision worker
// according to the supplied configuration.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.APICallerName,
			config.ClockName,
		},
		Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
			var clock clock.Clock
			if err := getResource(config.ClockName, &clock); err != nil {
				return nil, errors.Trace(err)
			}
			var apiCaller base.APICaller
			if err := getResource(config.APICallerName, &apiCaller); err != nil {
				return nil, errors.Trace(err)
			}
			facade, err := config.NewFacade(apiCaller)
			if err != nil {
				return nil, errors.Annotatef(err, "cannot create facade")
			}

			worker, err := config.NewWorker(charmrevision.Config{
				RevisionUpdater: facade,
				Clock:           clock,
				Period:          config.Period,
			})
			if err != nil {
				return nil, errors.Annotatef(err, "cannot create worker")
			}
			return worker, nil
		},
	}
}
Example no. 12
File: txns.go Project: bac/juju
// updateOps modifies the Insert and Update fields in a slice of
// txn.Ops to ensure they are multi-model safe where
// possible. The returned []txn.Op is a new copy of the input (with
// changes).
func (r *multiModelRunner) updateOps(ops []txn.Op) ([]txn.Op, error) {
	var outOps []txn.Op
	for _, op := range ops {
		collInfo, found := r.schema[op.C]
		if !found {
			return nil, errors.Errorf("forbidden transaction: references unknown collection %q", op.C)
		}
		if collInfo.rawAccess {
			return nil, errors.Errorf("forbidden transaction: references raw-access collection %q", op.C)
		}
		outOp := op
		if !collInfo.global {
			outOp.Id = ensureModelUUIDIfString(r.modelUUID, op.Id)
			if op.Insert != nil {
				newInsert, err := mungeDocForMultiEnv(op.Insert, r.modelUUID, modelUUIDRequired)
				if err != nil {
					return nil, errors.Annotatef(err, "cannot insert into %q", op.C)
				}
				outOp.Insert = newInsert
			}
			if op.Update != nil {
				newUpdate, err := r.mungeUpdate(op.Update)
				if err != nil {
					return nil, errors.Annotatef(err, "cannot update %q", op.C)
				}
				outOp.Update = newUpdate
			}
		}
		outOps = append(outOps, outOp)
	}
	logger.Tracef("rewrote transaction: %#v", outOps)
	return outOps, nil
}
Example no. 13
func addDefaultBindingsToServices(st *State) error {
	services, err := st.AllServices()
	if err != nil {
		return errors.Trace(err)
	}

	upgradesLogger.Debugf("adding default endpoint bindings to services (where missing)")
	ops := make([]txn.Op, 0, len(services))
	for _, service := range services {
		ch, _, err := service.Charm()
		if err != nil {
			return errors.Annotatef(err, "cannot get charm for service %q", service.Name())
		}
		if _, err := service.EndpointBindings(); err == nil {
			upgradesLogger.Debugf("service %q already has bindings (skipping)", service.Name())
			continue
		} else if !errors.IsNotFound(err) {
			return errors.Annotatef(err, "checking service %q for existing bindings", service.Name())
		}
		// Passing nil for the bindings map will use the defaults.
		createOp, err := createEndpointBindingsOp(st, service.globalKey(), nil, ch.Meta())
		if err != nil {
			return errors.Annotatef(err, "setting default endpoint bindings for service %q", service.Name())
		}
		ops = append(ops, createOp)
	}
	return st.runTransaction(ops)
}
Example no. 14
func detachFilesystems(ctx *context, attachments []storage.FilesystemAttachmentParams) error {
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, attachments)
	if err != nil {
		return errors.Trace(err)
	}
	for sourceName, params := range paramsBySource {
		logger.Debugf("detaching filesystems: %v", params)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.DetachFilesystems(params)
		if err != nil {
			return errors.Annotatef(err, "detaching filesystems from source %q", sourceName)
		}
		for i, err := range results {
			if err == nil {
				continue
			}
			return errors.Annotatef(
				err, "detaching %s from %s",
				names.ReadableString(params[i].Filesystem),
				names.ReadableString(params[i].Machine),
			)
		}
	}
	return nil
}
Example no. 15
func createContainer(
	lxcContainer golxc.Container,
	directory string,
	networkConfig *container.NetworkConfig,
	extraCreateArgs, templateParams []string,
	caCert []byte,
) error {
	// Generate initial lxc.conf with networking settings.
	netConfig := generateNetworkConfig(networkConfig)
	configPath := filepath.Join(directory, "lxc.conf")
	if err := ioutil.WriteFile(configPath, []byte(netConfig), 0644); err != nil {
		return errors.Annotatef(err, "failed to write container config %q", configPath)
	}
	logger.Tracef("wrote initial config %q for container %q", configPath, lxcContainer.Name())

	var err error
	var execEnv []string
	var closer func()
	if caCert != nil {
		execEnv, closer, err = wgetEnvironment(caCert)
		if err != nil {
			return errors.Annotatef(err, "failed to get environment for wget execution")
		}
		defer closer()
	}

	// Create the container.
	logger.Debugf("creating lxc container %q", lxcContainer.Name())
	logger.Debugf("lxc-create template params: %v", templateParams)
	if err := lxcContainer.Create(configPath, defaultTemplate, extraCreateArgs, templateParams, execEnv); err != nil {
		return errors.Annotatef(err, "lxc container creation failed")
	}
	return nil
}
Example no. 16
// OpenPorts sends a request to the GCE API to open the provided port
// ranges on the named firewall. If the firewall does not exist yet it
// is created, with the provided port ranges opened. Otherwise the
// existing firewall is updated to add the provided port ranges to the
// ports it already has open. The call blocks until the ports are
// opened or the request fails.
func (gce Connection) OpenPorts(fwname string, ports ...network.PortRange) error {
	// TODO(ericsnow) Short-circuit if ports is empty.

	// Compose the full set of open ports.
	currentPorts, err := gce.Ports(fwname)
	if err != nil {
		return errors.Trace(err)
	}
	inputPortsSet := network.NewPortSet(ports...)
	if inputPortsSet.IsEmpty() {
		return nil
	}
	currentPortsSet := network.NewPortSet(currentPorts...)

	// Send the request, depending on the current ports.
	if currentPortsSet.IsEmpty() {
		// Create a new firewall.
		firewall := firewallSpec(fwname, inputPortsSet)
		if err := gce.raw.AddFirewall(gce.projectID, firewall); err != nil {
			return errors.Annotatef(err, "opening port(s) %+v", ports)
		}
		return nil
	}

	// Update an existing firewall.
	newPortsSet := currentPortsSet.Union(inputPortsSet)
	firewall := firewallSpec(fwname, newPortsSet)
	if err := gce.raw.UpdateFirewall(gce.projectID, fwname, firewall); err != nil {
		return errors.Annotatef(err, "opening port(s) %+v", ports)
	}
	return nil
}
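The create-versus-update decision above reduces to set algebra over port ranges. A sketch of just that decision, reusing the network.NewPortSet, IsEmpty and Union operations shown above; the github.com/juju/juju/network import path and the PortRange field names are assumptions, and the printed strings are illustrative only:

package main

import (
	"fmt"

	"github.com/juju/juju/network"
)

func main() {
	current := network.NewPortSet(network.PortRange{FromPort: 80, ToPort: 80, Protocol: "tcp"})
	requested := network.NewPortSet(network.PortRange{FromPort: 443, ToPort: 443, Protocol: "tcp"})

	switch {
	case requested.IsEmpty():
		fmt.Println("nothing to open")
	case current.IsEmpty():
		fmt.Println("no firewall yet: create one opening the requested ports")
	default:
		merged := current.Union(requested)
		_ = merged // the merged set becomes the new firewall spec
		fmt.Println("firewall exists: update it with the union of current and requested ports")
	}
}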
Example no. 17
// AddLeadershipSettingsDocs creates service leadership documents in
// the settings collection for all services in all environments.
func AddLeadershipSettingsDocs(st *State) error {
	environments, closer := st.getCollection(environmentsC)
	defer closer()

	var envDocs []bson.M
	err := environments.Find(nil).Select(bson.M{"_id": 1}).All(&envDocs)
	if err != nil {
		return errors.Annotate(err, "failed to read environments")
	}

	for _, envDoc := range envDocs {
		envUUID := envDoc["_id"].(string)
		envSt, err := st.ForEnviron(names.NewEnvironTag(envUUID))
		if err != nil {
			return errors.Annotatef(err, "failed to open environment %q", envUUID)
		}
		defer envSt.Close()

		services, err := envSt.AllServices()
		if err != nil {
			return errors.Annotatef(err, "failed to retrieve services for environment %q", envUUID)
		}

		for _, service := range services {
			// The error from this is intentionally ignored as the
			// transaction will fail if the service already has a
			// leadership settings doc.
			envSt.runTransaction([]txn.Op{
				addLeadershipSettingsOp(service.Name()),
			})
		}
	}
	return nil
}
Example no. 18
// ClosePorts sends a request to the GCE API to close the provided port
// ranges on the named firewall. If the firewall does not exist nothing
// happens. If the firewall is left with no ports then it is removed.
// Otherwise it will be left with just the open ports it has that do not
// match the provided port ranges. The call blocks until the ports are
// closed or the request fails.
func (gce Connection) ClosePorts(fwname string, ports ...network.PortRange) error {
	// Compose the full set of open ports.
	currentPorts, err := gce.Ports(fwname)
	if err != nil {
		return errors.Trace(err)
	}
	inputPortsSet := network.NewPortSet(ports...)
	if inputPortsSet.IsEmpty() {
		return nil
	}
	currentPortsSet := network.NewPortSet(currentPorts...)
	newPortsSet := currentPortsSet.Difference(inputPortsSet)

	// Send the request, depending on the current ports.
	if newPortsSet.IsEmpty() {
		// Delete a firewall.
		// TODO(ericsnow) Handle case where firewall does not exist.
		if err := gce.raw.RemoveFirewall(gce.projectID, fwname); err != nil {
			return errors.Annotatef(err, "closing port(s) %+v", ports)
		}
		return nil
	}

	// Update an existing firewall.
	firewall := firewallSpec(fwname, newPortsSet)
	if err := gce.raw.UpdateFirewall(gce.projectID, fwname, firewall); err != nil {
		return errors.Annotatef(err, "closing port(s) %+v", ports)
	}
	return nil
}
Example no. 19
func newService(name string, conf common.Conf, initSystem, series string) (Service, error) {
	switch initSystem {
	case InitSystemWindows:
		svc, err := windows.NewService(name, conf)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	case InitSystemUpstart:
		return upstart.NewService(name, conf), nil
	case InitSystemSystemd:
		dataDir, err := paths.DataDir(series)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to find juju data dir for service %q", name)
		}

		svc, err := systemd.NewService(name, conf, dataDir)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	default:
		return nil, errors.NotFoundf("init system %q", initSystem)
	}
}
Example no. 20
// machineVolumeParams retrieves VolumeParams for the volumes that should be
// provisioned with, and attached to, the machine. The client should ignore
// parameters that it does not know how to handle.
func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) {
	volumeAttachments, err := m.VolumeAttachments()
	if err != nil {
		return nil, err
	}
	if len(volumeAttachments) == 0 {
		return nil, nil
	}
	envConfig, err := p.st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	poolManager := poolmanager.New(state.NewStateSettings(p.st))
	allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments))
	for _, volumeAttachment := range volumeAttachments {
		volumeTag := volumeAttachment.Volume()
		volume, err := p.st.Volume(volumeTag)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id())
		}
		storageInstance, err := storagecommon.MaybeAssignedStorageInstance(
			volume.StorageInstance, p.st.StorageInstance,
		)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id())
		}
		volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id())
		}
		provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider))
		if err != nil {
			return nil, errors.Annotate(err, "getting storage provider")
		}
		if provider.Dynamic() {
			// Leave dynamic storage to the storage provisioner.
			continue
		}
		volumeAttachmentParams, ok := volumeAttachment.Params()
		if !ok {
			// Attachment is already provisioned; this is an insane
			// state, so we should not proceed with the volume.
			return nil, errors.Errorf(
				"volume %s already attached to machine %s",
				volumeTag.Id(), m.Id(),
			)
		}
		// Not provisioned yet, so ask the cloud provisioner to do it.
		volumeParams.Attachment = &params.VolumeAttachmentParams{
			volumeTag.String(),
			m.Tag().String(),
			"", // we're creating the volume, so it has no volume ID.
			"", // we're creating the machine, so it has no instance ID.
			volumeParams.Provider,
			volumeAttachmentParams.ReadOnly,
		}
		allVolumeParams = append(allVolumeParams, volumeParams)
	}
	return allVolumeParams, nil
}
Example no. 21
// AddUnits starts n units of the given service using the specified placement
// directives to allocate the machines.
func AddUnits(st *state.State, svc *state.Service, n int, placement []*instance.Placement) ([]*state.Unit, error) {
	units := make([]*state.Unit, n)
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty
	// All units should have the same networks as the service.
	networks, err := svc.Networks()
	if err != nil {
		return nil, errors.Errorf("cannot get service %q networks", svc.Name())
	}
	// TODO what do we do if we fail half-way through this process?
	for i := 0; i < n; i++ {
		unit, err := svc.AddUnit()
		if err != nil {
			return nil, errors.Annotatef(err, "cannot add unit %d/%d to service %q", i+1, n, svc.Name())
		}
		// Are there still placement directives to use?
		if i > len(placement)-1 {
			if err := st.AssignUnit(unit, policy); err != nil {
				return nil, errors.Trace(err)
			}
			units[i] = unit
			continue
		}
		if err := st.AssignUnitWithPlacement(unit, placement[i], networks); err != nil {
			return nil, errors.Annotatef(err, "adding new machine to host unit %q", unit.Name())
		}
		units[i] = unit
	}
	return units, nil
}
Example no. 22
func checkToolsAvailability(cfg *config.Config, finder toolsFinder) (version.Number, error) {
	currentVersion, ok := cfg.AgentVersion()
	if !ok || currentVersion == version.Zero {
		return version.Zero, nil
	}

	env, err := newEnvirons(cfg)
	if err != nil {
		return version.Zero, errors.Annotatef(err, "cannot make environ")
	}

	// finder receives major and minor as parameters because it uses them to filter versions
	// and return only patches for the given major.minor (from major.minor.patch).
	// We'll try the released stream first, then fall back to the current configured stream
	// if no released tools are found.
	vers, err := finder(env, currentVersion.Major, currentVersion.Minor, tools.ReleasedStream, coretools.Filter{})
	preferredStream := tools.PreferredStream(&currentVersion, cfg.Development(), cfg.AgentStream())
	if preferredStream != tools.ReleasedStream && errors.Cause(err) == coretools.ErrNoMatches {
		vers, err = finder(env, currentVersion.Major, currentVersion.Minor, preferredStream, coretools.Filter{})
	}
	if err != nil {
		return version.Zero, errors.Annotatef(err, "cannot find available tools")
	}
	// Newest also returns a list of the items in this list matching with the
	// newest version.
	newest, _ := vers.Newest()
	return newest, nil
}
Example no. 23
File: deploy.go Project: bac/juju
// AddUnits starts n units of the given application using the specified placement
// directives to allocate the machines.
func AddUnits(
	unitAssigner UnitAssigner,
	unitAdder UnitAdder,
	appName string,
	n int,
	placement []*instance.Placement,
) ([]*state.Unit, error) {
	units := make([]*state.Unit, n)
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty
	// TODO what do we do if we fail half-way through this process?
	for i := 0; i < n; i++ {
		unit, err := unitAdder.AddUnit()
		if err != nil {
			return nil, errors.Annotatef(err, "cannot add unit %d/%d to application %q", i+1, n, appName)
		}
		// Are there still placement directives to use?
		if i > len(placement)-1 {
			if err := unitAssigner.AssignUnit(unit, policy); err != nil {
				return nil, errors.Trace(err)
			}
			units[i] = unit
			continue
		}
		if err := unitAssigner.AssignUnitWithPlacement(unit, placement[i]); err != nil {
			return nil, errors.Annotatef(err, "adding new machine to host unit %q", unit.Name())
		}
		units[i] = unit
	}
	return units, nil
}
Example no. 24
// UpdateService will re-write the service scripts for mongo and restart it.
func (u *UpgradeMongoCommand) UpdateService(auth bool) error {
	var oplogSize int
	if oplogSizeString := u.agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return errors.Annotatef(err, "invalid oplog size: %q", oplogSizeString)
		}
	}

	var numaCtlPolicy bool
	if numaCtlString := u.agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString)
		}
	}
	ssi, _ := u.agentConfig.StateServingInfo()

	err := u.mongoEnsureServiceInstalled(u.agentConfig.DataDir(),
		ssi.StatePort,
		oplogSize,
		numaCtlPolicy,
		u.agentConfig.MongoVersion(),
		auth)
	return errors.Annotate(err, "cannot ensure mongodb service script is properly installed")
}
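The oplog-size and numactl values are optional agent config strings parsed on demand. A small sketch of that parsing, using only the standard strconv package and github.com/juju/errors; parseOplogSize is a hypothetical helper, not part of the command above:

package main

import (
	"fmt"
	"strconv"

	"github.com/juju/errors"
)

// parseOplogSize mirrors the optional-value parsing above: an unset value keeps
// the zero default, an invalid one is reported together with its raw string.
func parseOplogSize(raw string) (int, error) {
	if raw == "" {
		return 0, nil
	}
	size, err := strconv.Atoi(raw)
	if err != nil {
		return 0, errors.Annotatef(err, "invalid oplog size: %q", raw)
	}
	return size, nil
}

func main() {
	fmt.Println(parseOplogSize(""))     // 0 <nil>
	fmt.Println(parseOplogSize("1024")) // 1024 <nil>
	fmt.Println(parseOplogSize("big"))  // 0 invalid oplog size: "big": ...
}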
Example no. 25
File: machine.go Project: bac/juju
func importAgentToolsV1(source map[string]interface{}) (*agentTools, error) {
	fields := schema.Fields{
		"tools-version": schema.String(),
		"url":           schema.String(),
		"sha256":        schema.String(),
		"size":          schema.Int(),
	}
	checker := schema.FieldMap(fields, nil) // no defaults

	coerced, err := checker.Coerce(source, nil)
	if err != nil {
		return nil, errors.Annotatef(err, "agentTools v1 schema check failed")
	}
	valid := coerced.(map[string]interface{})
	// From here we know that the map returned from the schema coercion
	// contains fields of the right type.

	verString := valid["tools-version"].(string)
	toolsVersion, err := version.ParseBinary(verString)
	if err != nil {
		return nil, errors.Annotatef(err, "agentTools tools-version")
	}

	return &agentTools{
		Version_:      1,
		ToolsVersion_: toolsVersion,
		URL_:          valid["url"].(string),
		SHA256_:       valid["sha256"].(string),
		Size_:         valid["size"].(int64),
	}, nil
}
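The schema field-map coercion is the core of this importer. A self-contained sketch of the same github.com/juju/schema usage against a made-up input map; only the schema calls already shown above are assumed:

package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	fields := schema.Fields{
		"tools-version": schema.String(),
		"size":          schema.Int(),
	}
	checker := schema.FieldMap(fields, nil) // no defaults: both fields are mandatory

	// Coerce validates the field types and normalises integers to int64.
	coerced, err := checker.Coerce(map[string]interface{}{
		"tools-version": "2.0.1-xenial-amd64",
		"size":          1234,
	}, nil)
	if err != nil {
		fmt.Println("schema check failed:", err)
		return
	}
	valid := coerced.(map[string]interface{})
	fmt.Printf("%s (%d bytes)\n", valid["tools-version"].(string), valid["size"].(int64))
}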
Example no. 26
func ensureMongoService(agentConfig agent.Config) error {
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return errors.Annotatef(err, "invalid oplog size: %q", oplogSizeString)
		}
	}

	var numaCtlPolicy bool
	if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString)
		}
	}

	si, ok := agentConfig.StateServingInfo()
	if !ok {
		return errors.Errorf("agent config has no state serving info")
	}

	err := mongo.EnsureServiceInstalled(agentConfig.DataDir(),
		si.StatePort,
		oplogSize,
		numaCtlPolicy)
	return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place")
}
Example no. 27
// charmsBakeryGetError implements a getError function
// appropriate for passing to httpbakery.Client.DoWithBodyAndCustomError
// for the charms endpoint.
func charmsBakeryGetError(resp *http.Response) error {
	if resp.StatusCode != http.StatusUnauthorized {
		return nil
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return errors.Annotatef(err, "cannot read body")
	}
	var charmResp params.CharmsResponse
	if err := json.Unmarshal(data, &charmResp); err != nil {
		return errors.Annotatef(err, "cannot unmarshal body")
	}
	errResp := &params.Error{
		Message: charmResp.Error,
		Code:    charmResp.ErrorCode,
		Info:    charmResp.ErrorInfo,
	}
	if errResp.Code != params.CodeDischargeRequired {
		return errResp
	}
	if errResp.Info == nil {
		return errors.Annotatef(err, "no error info found in discharge-required response error")
	}
	// It's a discharge-required error, so make an appropriate httpbakery
	// error from it.
	return &httpbakery.Error{
		Message: errResp.Message,
		Code:    httpbakery.ErrDischargeRequired,
		Info: &httpbakery.ErrorInfo{
			Macaroon:     errResp.Info.Macaroon,
			MacaroonPath: errResp.Info.MacaroonPath,
		},
	}
}
Example no. 28
// addStorageForUnit adds storage instances to the given unit as specified.
func (st *State) addStorageForUnit(
	ch *Charm, u *Unit,
	name string, cons StorageConstraints,
) error {
	all, err := u.StorageConstraints()
	if err != nil {
		return errors.Annotatef(err, "getting existing storage directives for %s", u.Tag().Id())
	}

	// Check storage name was declared.
	_, exists := all[name]
	if !exists {
		return errors.NotFoundf("charm storage %q", name)
	}

	// Populate missing configuration parameters with default values.
	conf, err := st.EnvironConfig()
	if err != nil {
		return errors.Trace(err)
	}
	completeCons, err := storageConstraintsWithDefaults(
		conf,
		ch.Meta().Storage[name],
		name, cons,
	)
	if err != nil {
		return errors.Trace(err)
	}

	// This can happen for charm storage whose declared instance range starts at 0,
	// where no count was specified as a storage constraint at deploy time
	// and no count was specified to storage-add as a constraint either.
	if cons.Count == 0 {
		return errors.NotValidf("adding storage where instance count is 0")
	}

	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			if err := u.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
		}
		if u.Life() != Alive {
			return nil, unitNotAliveErr
		}
		err = st.validateUnitStorage(ch.Meta(), u, name, completeCons)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops, err := st.constructAddUnitStorageOps(ch, u, name, completeCons)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return ops, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotatef(err, "adding storage to unit %s", u)
	}
	return nil
}
Example no. 29
// storeImageMetadataInState writes image metadata into state store.
func storeImageMetadataInState(st *state.State, source string, priority int, existingMetadata []*imagemetadata.ImageMetadata) error {
	if len(existingMetadata) == 0 {
		return nil
	}
	metadataState := make([]cloudimagemetadata.Metadata, len(existingMetadata))
	for i, one := range existingMetadata {
		m := cloudimagemetadata.Metadata{
			cloudimagemetadata.MetadataAttributes{
				Stream:          one.Stream,
				Region:          one.RegionName,
				Arch:            one.Arch,
				VirtType:        one.VirtType,
				RootStorageType: one.Storage,
				Source:          source,
			},
			priority,
			one.Id,
		}
		s, err := seriesFromVersion(one.Version)
		if err != nil {
			return errors.Annotatef(err, "cannot determine series for version %v", one.Version)
		}
		m.Series = s
		metadataState[i] = m
	}
	if err := st.CloudImageMetadataStorage.SaveMetadata(metadataState); err != nil {
		return errors.Annotatef(err, "cannot cache image metadata")
	}
	return nil
}
Example no. 30
File: machiner.go Project: bac/juju
func (mr *Machiner) Handle(_ <-chan struct{}) error {
	if err := mr.machine.Refresh(); params.IsCodeNotFoundOrCodeUnauthorized(err) {
		// NOTE(axw) we can distinguish between NotFound and CodeUnauthorized,
		// so we could call NotifyMachineDead here in case the agent failed to
		// call NotifyMachineDead directly after setting the machine Dead in
		// the first place. We're not doing that to be cautious: the machine
		// could be missing from state due to invalid global state.
		return worker.ErrTerminateAgent
	} else if err != nil {
		return err
	}

	life := mr.machine.Life()
	if life == params.Alive {
		observedConfig, err := getObservedNetworkConfig(networkingcommon.DefaultNetworkConfigSource())
		if err != nil {
			return errors.Annotate(err, "cannot discover observed network config")
		} else if len(observedConfig) == 0 {
			logger.Warningf("not updating network config: no observed config found to update")
		}
		if len(observedConfig) > 0 {
			if err := mr.machine.SetObservedNetworkConfig(observedConfig); err != nil {
				return errors.Annotate(err, "cannot update observed network config")
			}
			logger.Debugf("observed network config updated")
		}

		return nil
	}
	logger.Debugf("%q is now %s", mr.config.Tag, life)
	if err := mr.machine.SetStatus(status.Stopped, "", nil); err != nil {
		return errors.Annotatef(err, "%s failed to set status stopped", mr.config.Tag)
	}

	// Attempt to mark the machine Dead. If the machine still has units
	// assigned, or storage attached, this will fail with
	// CodeHasAssignedUnits or CodeMachineHasAttachedStorage respectively.
	// Once units or storage are removed, the watcher will trigger again
	// and we'll reattempt.
	if err := mr.machine.EnsureDead(); err != nil {
		if params.IsCodeHasAssignedUnits(err) {
			return nil
		}
		if params.IsCodeMachineHasAttachedStorage(err) {
			logger.Tracef("machine still has storage attached")
			return nil
		}
		return errors.Annotatef(err, "%s failed to set machine to dead", mr.config.Tag)
	}
	// Report on the machine's death. It is important that we do this after
	// the machine is Dead, because this is the mechanism we use to clean up
	// the machine (uninstall). If we were to report before marking the machine
	// as Dead, then we would risk uninstalling prematurely.
	if mr.config.NotifyMachineDead != nil {
		if err := mr.config.NotifyMachineDead(); err != nil {
			return errors.Annotate(err, "reporting machine death")
		}
	}
	return worker.ErrTerminateAgent
}