Example No. 1
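// detachFilesystems detaches filesystems with the specified parameters,
// grouping the attachment parameters by their filesystem source.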
func detachFilesystems(ctx *context, attachments []storage.FilesystemAttachmentParams) error {
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, attachments)
	if err != nil {
		return errors.Trace(err)
	}
	for sourceName, params := range paramsBySource {
		logger.Debugf("detaching filesystems: %v", params)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.DetachFilesystems(params)
		if err != nil {
			return errors.Annotatef(err, "detaching filesystems from source %q", sourceName)
		}
		for i, err := range results {
			if err == nil {
				continue
			}
			return errors.Annotatef(
				err, "detaching %s from %s",
				names.ReadableString(params[i].Filesystem),
				names.ReadableString(params[i].Machine),
			)
		}
	}
	return nil
}
Example No. 2
// attachVolumes creates volume attachments with the specified parameters.
func attachVolumes(ctx *context, ops map[params.MachineStorageId]*attachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.environConfig, ctx.config.StorageDir, volumeAttachmentParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("attaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		results, err := volumeSource.AttachVolumes(volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "attaching volumes from source %q", sourceName)
		}
		for i, result := range results {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    p.Volume.String(),
				Status: params.StatusAttached,
			})
			status := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume attachment.
				id := params.MachineStorageId{
					MachineTag:    p.Machine.String(),
					AttachmentTag: p.Volume.String(),
				}
				reschedule = append(reschedule, ops[id])

				// Note: we keep the status as "attaching" to
				// indicate that we will retry. When we distinguish
				// between transient and permanent errors, we will
				// set the status to "error" for permanent errors.
				status.Status = params.StatusAttaching
				status.Info = result.Error.Error()
				logger.Debugf(
					"failed to attach %s to %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					result.Error,
				)
				continue
			}
			volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := setVolumeAttachmentInfo(ctx, volumeAttachments); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example No. 3
// createFilesystemAttachments creates filesystem attachments with the specified parameters.
func createFilesystemAttachments(
	ctx *context,
	params []storage.FilesystemAttachmentParams,
) ([]storage.FilesystemAttachment, error) {
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, params)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var allFilesystemAttachments []storage.FilesystemAttachment
	for sourceName, params := range paramsBySource {
		logger.Debugf("attaching filesystems: %v", params)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.AttachFilesystems(params)
		if err != nil {
			return nil, errors.Annotatef(err, "attaching filesystems from source %q", sourceName)
		}
		for i, result := range results {
			if result.Error != nil {
				return nil, errors.Annotatef(
					result.Error, "attaching %s to %s",
					names.ReadableString(params[i].Filesystem),
					names.ReadableString(params[i].Machine),
				)
			}
			allFilesystemAttachments = append(allFilesystemAttachments, *result.FilesystemAttachment)
		}
	}
	return allFilesystemAttachments, nil
}
Example No. 4
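// setVolumeAttachmentInfo publishes the given volume attachments to state
// and records them in the context.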
func setVolumeAttachmentInfo(ctx *context, volumeAttachments []storage.VolumeAttachment) error {
	if len(volumeAttachments) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volume attachments in the
	// provider, by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail to
	// record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo(
		volumeAttachmentsFromStorage(volumeAttachments),
	)
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "publishing attachment of %s to %s to state",
				names.ReadableString(volumeAttachments[i].Volume),
				names.ReadableString(volumeAttachments[i].Machine),
			)
		}
		// Record the volume attachment in the context.
		id := params.MachineStorageId{
			MachineTag:    volumeAttachments[i].Machine.String(),
			AttachmentTag: volumeAttachments[i].Volume.String(),
		}
		ctx.volumeAttachments[id] = volumeAttachments[i]
		removePendingVolumeAttachment(ctx, id)
	}
	return nil
}
Example No. 5
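// setFilesystemAttachmentInfo publishes the given filesystem attachments to
// state and records them in the context.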
func setFilesystemAttachmentInfo(ctx *context, filesystemAttachments []storage.FilesystemAttachment) error {
	if len(filesystemAttachments) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list filesystem attachments in the
	// provider, by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing filesystems that we fail to
	// record in state.
	errorResults, err := ctx.filesystemAccessor.SetFilesystemAttachmentInfo(
		filesystemAttachmentsFromStorage(filesystemAttachments),
	)
	if err != nil {
		return errors.Annotate(err, "publishing filesystems to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(
				result.Error, "publishing attachment of %s to %s to state",
				names.ReadableString(filesystemAttachments[i].Filesystem),
				names.ReadableString(filesystemAttachments[i].Machine),
			)
		}
		// Record the filesystem attachment in the context.
		ctx.filesystemAttachments[params.MachineStorageId{
			MachineTag:    filesystemAttachments[i].Machine.String(),
			AttachmentTag: filesystemAttachments[i].Filesystem.String(),
		}] = filesystemAttachments[i]
	}
	return nil
}
Example No. 6
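// updateEntityStatusData merges the supplied data into the entity's existing
// status data; the entity must be in an error state for new data to be set.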
func (s *StatusSetter) updateEntityStatusData(tag names.Tag, data map[string]interface{}) error {
	entity0, err := s.st.FindEntity(tag)
	if err != nil {
		return err
	}
	statusGetter, ok := entity0.(status.StatusGetter)
	if !ok {
		return NotSupportedError(tag, "getting status")
	}
	existingStatusInfo, err := statusGetter.Status()
	if err != nil {
		return err
	}
	newData := existingStatusInfo.Data
	if newData == nil {
		newData = data
	} else {
		for k, v := range data {
			newData[k] = v
		}
	}
	entity, ok := entity0.(status.StatusSetter)
	if !ok {
		return NotSupportedError(tag, "updating status")
	}
	if len(newData) > 0 && existingStatusInfo.Status != status.StatusError {
		return fmt.Errorf("%s is not in an error state", names.ReadableString(tag))
	}
	return entity.SetStatus(existingStatusInfo.Status, existingStatusInfo.Message, newData)
}
Example No. 7
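// RetryProvisioning marks each known machine whose info is "broken" with
// transient status data, and returns an error result for any other machine.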
func (f *fakeRetryProvisioningClient) RetryProvisioning(machines ...names.MachineTag) (
	[]params.ErrorResult, error) {

	if f.err != nil {
		return nil, f.err
	}

	results := make([]params.ErrorResult, len(machines))

	// For each of the machines passed in, verify that we have the
	// id and that the info string is "broken".
	for i, machine := range machines {
		m, ok := f.m[machine.Id()]
		if ok {
			if m.info == "broken" {
				// The real RetryProvisioning command sets the
				// status data "transient": true.
				m.data["transient"] = true
			} else {
				results[i].Error = common.ServerError(
					fmt.Errorf("%s is not in an error state",
						names.ReadableString(machine)))
			}
		} else {
			results[i].Error = common.ServerError(
				errors.NotFoundf("machine %s", machine.Id()))
		}
	}

	return results, nil
}
Example No. 8
// storageChanged responds to unit storage changes.
func (w *RemoteStateWatcher) storageChanged(keys []string) error {
	tags := make([]names.StorageTag, len(keys))
	for i, key := range keys {
		tags[i] = names.NewStorageTag(key)
	}
	ids := make([]params.StorageAttachmentId, len(keys))
	for i, tag := range tags {
		ids[i] = params.StorageAttachmentId{
			StorageTag: tag.String(),
			UnitTag:    w.unit.Tag().String(),
		}
	}
	results, err := w.st.StorageAttachmentLife(ids)
	if err != nil {
		return errors.Trace(err)
	}

	w.mu.Lock()
	defer w.mu.Unlock()

	for i, result := range results {
		tag := tags[i]
		if result.Error == nil {
			if storageSnapshot, ok := w.current.Storage[tag]; ok {
				// We've previously started a watcher for this storage
				// attachment, so all we need to do is update the
				// lifecycle state.
				storageSnapshot.Life = result.Life
				w.current.Storage[tag] = storageSnapshot
				continue
			}
			// We haven't seen this storage attachment before, so start
			// a watcher now; add it to our catacomb in case of mishap;
			// and wait for the initial event.
			saw, err := w.st.WatchStorageAttachment(tag, w.unit.Tag())
			if err != nil {
				return errors.Annotate(err, "watching storage attachment")
			}
			if err := w.catacomb.Add(saw); err != nil {
				return errors.Trace(err)
			}
			if err := w.watchStorageAttachment(tag, result.Life, saw); err != nil {
				return errors.Trace(err)
			}
		} else if params.IsCodeNotFound(result.Error) {
			if watcher, ok := w.storageAttachmentWatchers[tag]; ok {
				// Already under catacomb management; any error is already tracked.
				worker.Stop(watcher)
				delete(w.storageAttachmentWatchers, tag)
			}
			delete(w.current.Storage, tag)
		} else {
			return errors.Annotatef(
				result.Error, "getting life of %s attachment",
				names.ReadableString(tag),
			)
		}
	}
	return nil
}
Example No. 9
// createFilesystems creates filesystems with the specified parameters.
func createFilesystems(ctx *context, params []storage.FilesystemParams) ([]storage.Filesystem, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.

	// Create filesystem sources.
	filesystemSources := make(map[string]storage.FilesystemSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := filesystemSources[sourceName]; ok {
			continue
		}
		if params.Volume != (names.VolumeTag{}) {
			filesystemSources[sourceName] = ctx.managedFilesystemSource
			continue
		}
		filesystemSource, err := filesystemSource(
			ctx.environConfig, ctx.storageDir, sourceName, params.Provider,
		)
		if err != nil {
			return nil, errors.Annotate(err, "getting filesystem source")
		}
		filesystemSources[sourceName] = filesystemSource
	}

	// Validate and gather filesystem parameters.
	paramsBySource := make(map[string][]storage.FilesystemParams)
	for _, params := range params {
		sourceName := string(params.Provider)
		filesystemSource := filesystemSources[sourceName]
		err := filesystemSource.ValidateFilesystemParams(params)
		if err != nil {
			// TODO(axw) we should set an error status for params.Tag
			// here, and we should retry periodically.
			logger.Errorf("ignoring invalid filesystem: %v", err)
			continue
		}
		paramsBySource[sourceName] = append(paramsBySource[sourceName], params)
	}

	var allFilesystems []storage.Filesystem
	for sourceName, params := range paramsBySource {
		logger.Debugf("creating filesystems: %v", params)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.CreateFilesystems(params)
		if err != nil {
			return nil, errors.Annotatef(err, "creating filesystems from source %q", sourceName)
		}
		for i, result := range results {
			if result.Error != nil {
				return nil, errors.Annotatef(result.Error, "creating %s", names.ReadableString(params[i].Tag))
			}
			allFilesystems = append(allFilesystems, *result.Filesystem)
		}
	}
	return allFilesystems, nil
}
Example No. 10
// AttachFilesystems is defined on the FilesystemSource interface.
func (s *rootfsFilesystemSource) AttachFilesystems(args []storage.FilesystemAttachmentParams) ([]storage.FilesystemAttachment, error) {
	attachments := make([]storage.FilesystemAttachment, len(args))
	for i, arg := range args {
		attachment, err := s.attachFilesystem(arg)
		if err != nil {
			return nil, errors.Annotatef(err, "attaching %s", names.ReadableString(arg.Filesystem))
		}
		attachments[i] = attachment
	}
	return attachments, nil
}
Example No. 11
// removeEntities removes each specified Dead entity from state.
func removeEntities(ctx *context, tags []names.Tag) error {
	logger.Debugf("removing entities: %v", tags)
	errorResults, err := ctx.life.Remove(tags)
	if err != nil {
		return errors.Annotate(err, "removing storage entities")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			return errors.Annotatef(result.Error, "removing %s from state", names.ReadableString(tags[i]))
		}
	}
	return nil
}
Example No. 12
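// volumeStorageAttachmentInfo returns the attachment info for a volume-backed
// storage instance, failing with NotProvisioned until the volume's block
// device is visible on the machine.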
func volumeStorageAttachmentInfo(
	st StorageInterface,
	storageInstance state.StorageInstance,
	machineTag names.MachineTag,
) (*storage.StorageAttachmentInfo, error) {
	storageTag := storageInstance.StorageTag()
	volume, err := st.StorageInstanceVolume(storageTag)
	if err != nil {
		return nil, errors.Annotate(err, "getting volume")
	}
	volumeInfo, err := volume.Info()
	if err != nil {
		return nil, errors.Annotate(err, "getting volume info")
	}
	volumeAttachment, err := st.VolumeAttachment(machineTag, volume.VolumeTag())
	if err != nil {
		return nil, errors.Annotate(err, "getting volume attachment")
	}
	volumeAttachmentInfo, err := volumeAttachment.Info()
	if err != nil {
		return nil, errors.Annotate(err, "getting volume attachment info")
	}
	blockDevices, err := st.BlockDevices(machineTag)
	if err != nil {
		return nil, errors.Annotate(err, "getting block devices")
	}
	blockDevice, ok := MatchingBlockDevice(
		blockDevices,
		volumeInfo,
		volumeAttachmentInfo,
	)
	if !ok {
		// We must not say that a block-kind storage attachment is
		// provisioned until its block device has shown up on the
		// machine, otherwise the charm may attempt to use it and
		// fail.
		return nil, errors.NotProvisionedf("%v", names.ReadableString(storageTag))
	}
	devicePath, err := volumeAttachmentDevicePath(
		volumeInfo,
		volumeAttachmentInfo,
		*blockDevice,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &storage.StorageAttachmentInfo{
		storage.StorageKindBlock,
		devicePath,
	}, nil
}
Example No. 13
// processDeadFilesystems processes the FilesystemResults for Dead filesystems,
// deprovisioning filesystems and removing from state as necessary.
func processDeadFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error {
	for _, tag := range tags {
		delete(ctx.pendingFilesystems, tag)
	}
	var destroy []names.FilesystemTag
	var remove []names.Tag
	for i, result := range filesystemResults {
		tag := tags[i]
		if result.Error == nil {
			logger.Debugf("filesystem %s is provisioned, queuing for deprovisioning", tag.Id())
			filesystem, err := filesystemFromParams(result.Result)
			if err != nil {
				return errors.Annotate(err, "getting filesystem info")
			}
			ctx.filesystems[tag] = filesystem
			destroy = append(destroy, tag)
			continue
		}
		if params.IsCodeNotProvisioned(result.Error) {
			logger.Debugf("filesystem %s is not provisioned, queuing for removal", tag.Id())
			remove = append(remove, tag)
			continue
		}
		return errors.Annotatef(result.Error, "getting filesystem information for filesystem %s", tag.Id())
	}
	if len(destroy)+len(remove) == 0 {
		return nil
	}
	if len(destroy) > 0 {
		errorResults, err := destroyFilesystems(ctx, destroy)
		if err != nil {
			return errors.Annotate(err, "destroying filesystems")
		}
		for i, tag := range destroy {
			if err := errorResults[i]; err != nil {
				return errors.Annotatef(err, "destroying %s", names.ReadableString(tag))
			}
			remove = append(remove, tag)
		}
	}
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing filesystems from state")
	}
	return nil
}
Example No. 14
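// prepareForUpgrade runs the pre-upgrade checks and, on state servers, waits
// for the other state servers to be ready before the upgrade steps are run.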
func (w *upgradesteps) prepareForUpgrade() (*state.UpgradeInfo, error) {
	logger.Infof("checking that upgrade can proceed")
	if err := w.preUpgradeSteps(w.st, w.agent.CurrentConfig(), w.st != nil, w.isMaster); err != nil {
		return nil, errors.Annotatef(err, "%s cannot be upgraded", names.ReadableString(w.tag))
	}

	if !w.isStateServer {
		return nil, nil
	}

	logger.Infof("signalling that this state server is ready for upgrade")
	info, err := w.st.EnsureUpgradeInfo(w.tag.Id(), w.fromVersion, w.toVersion)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// State servers need to wait for other state servers to be ready
	// to run the upgrade steps.
	logger.Infof("waiting for other state servers to be ready for upgrade")
	if err := w.waitForOtherStateServers(info); err != nil {
		if err == tomb.ErrDying {
			logger.Warningf(`stopped waiting for other state servers: %v`, err)
			return nil, err
		}
		logger.Errorf(`aborted wait for other state servers: %v`, err)
		// If master, trigger a rollback to the previous agent version.
		if w.isMaster {
			logger.Errorf("downgrading environment agent version to %v due to aborted upgrade",
				w.fromVersion)
			if rollbackErr := w.st.SetEnvironAgentVersion(w.fromVersion); rollbackErr != nil {
				logger.Errorf("rollback failed: %v", rollbackErr)
				return nil, errors.Annotate(rollbackErr, "failed to roll back desired agent version")
			}
		}
		return nil, errors.Annotate(err, "aborted wait for other state servers")
	}
	if w.isMaster {
		logger.Infof("finished waiting - all state servers are ready to run upgrade steps")
	} else {
		logger.Infof("finished waiting - the master has completed its upgrade steps")
	}
	return info, nil
}
Example No. 15
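// TestReadableString checks the human-readable form produced for a selection
// of tags, including the nil tag; for example, names.NewMachineTag("0")
// renders as "machine 0".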
func (*tagSuite) TestReadableString(c *gc.C) {
	var readableStringTests = []struct {
		tag    names.Tag
		result string
	}{{
		tag:    nil,
		result: "",
	}, {
		tag:    names.NewMachineTag("0"),
		result: "machine 0",
	}, {
		tag:    names.NewUnitTag("wordpress/2"),
		result: "unit wordpress/2",
	}}

	for i, test := range readableStringTests {
		c.Logf("test %d: expected result %q", i, test.result)
		resultStr := names.ReadableString(test.tag)
		c.Assert(resultStr, gc.Equals, test.result)
	}
}
Example No. 16
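// createVolumeDetailsList returns the details for each of the given volumes
// and their attachments.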
func createVolumeDetailsList(
	st storageAccess,
	volumes []state.Volume,
	attachments map[names.VolumeTag][]state.VolumeAttachment,
) ([]params.VolumeDetails, error) {

	if len(volumes) == 0 {
		return nil, nil
	}
	results := make([]params.VolumeDetails, len(volumes))
	for i, v := range volumes {
		details, err := createVolumeDetails(st, v, attachments[v.VolumeTag()])
		if err != nil {
			return nil, errors.Annotatef(
				err, "getting details for %s",
				names.ReadableString(v.VolumeTag()),
			)
		}
		results[i] = *details
	}
	return results, nil
}
Example No. 17
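// createFilesystemDetailsList returns the details for each of the given
// filesystems and their attachments.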
func createFilesystemDetailsList(
	st storageAccess,
	filesystems []state.Filesystem,
	attachments map[names.FilesystemTag][]state.FilesystemAttachment,
) ([]params.FilesystemDetails, error) {

	if len(filesystems) == 0 {
		return nil, nil
	}
	results := make([]params.FilesystemDetails, len(filesystems))
	for i, f := range filesystems {
		details, err := createFilesystemDetails(st, f, attachments[f.FilesystemTag()])
		if err != nil {
			return nil, errors.Annotatef(
				err, "getting details for %s",
				names.ReadableString(f.FilesystemTag()),
			)
		}
		results[i] = *details
	}
	return results, nil
}
Example No. 18
// storageEntityLife queries the lifecycle state of each specified
// storage entity (volume or filesystem), and then partitions the
// tags by their life: alive, dying, or dead.
func storageEntityLife(ctx *context, tags []names.Tag) (alive, dying, dead []names.Tag, _ error) {
	lifeResults, err := ctx.life.Life(tags)
	if err != nil {
		return nil, nil, nil, errors.Annotate(err, "getting storage entity life")
	}
	for i, result := range lifeResults {
		if result.Error != nil {
			return nil, nil, nil, errors.Annotatef(
				result.Error, "getting life of %s",
				names.ReadableString(tags[i]),
			)
		}
		switch result.Life {
		case params.Alive:
			alive = append(alive, tags[i])
		case params.Dying:
			dying = append(dying, tags[i])
		case params.Dead:
			dead = append(dead, tags[i])
		}
	}
	return alive, dying, dead, nil
}
Example No. 19
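// listStorageDetails returns the details of all storage instances; storage
// filters are not yet supported.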
func (api *API) listStorageDetails(filter params.StorageFilter) ([]params.StorageDetails, error) {
	if filter != (params.StorageFilter{}) {
		// StorageFilter has no fields at the time of writing, but
		// check that no fields are set in case we forget to update
		// this code.
		return nil, errors.NotSupportedf("storage filters")
	}
	stateInstances, err := api.storage.AllStorageInstances()
	if err != nil {
		return nil, common.ServerError(err)
	}
	results := make([]params.StorageDetails, len(stateInstances))
	for i, stateInstance := range stateInstances {
		details, err := createStorageDetails(api.storage, stateInstance)
		if err != nil {
			return nil, errors.Annotatef(
				err, "getting details for %s",
				names.ReadableString(stateInstance.Tag()),
			)
		}
		results[i] = *details
	}
	return results, nil
}
Example No. 20
// createFilesystems creates filesystems with the specified parameters.
func createFilesystems(ctx *context, ops map[names.FilesystemTag]*createFilesystemOp) error {
	filesystemParams := make([]storage.FilesystemParams, 0, len(ops))
	for _, op := range ops {
		filesystemParams = append(filesystemParams, op.args)
	}
	paramsBySource, filesystemSources, err := filesystemParamsBySource(
		ctx.environConfig, ctx.storageDir,
		filesystemParams, ctx.managedFilesystemSource,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var filesystems []storage.Filesystem
	var statuses []params.EntityStatusArgs
	for sourceName, filesystemParams := range paramsBySource {
		logger.Debugf("creating filesystems: %v", filesystemParams)
		filesystemSource := filesystemSources[sourceName]
		validFilesystemParams, validationErrors := validateFilesystemParams(
			filesystemSource, filesystemParams,
		)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    filesystemParams[i].Tag.String(),
				Status: params.StatusError,
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(filesystemParams[i].Tag), err,
			)
		}
		filesystemParams = validFilesystemParams
		if len(filesystemParams) == 0 {
			continue
		}
		results, err := filesystemSource.CreateFilesystems(filesystemParams)
		if err != nil {
			return errors.Annotatef(err, "creating filesystems from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    filesystemParams[i].Tag.String(),
				Status: params.StatusAttaching,
			})
			status := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the filesystem creation.
				reschedule = append(reschedule, ops[filesystemParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				status.Status = params.StatusPending
				status.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(filesystemParams[i].Tag),
					result.Error,
				)
				continue
			}
			filesystems = append(filesystems, *result.Filesystem)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(filesystems) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list filesystems in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing filesystems that we fail
	// to record in state.
	errorResults, err := ctx.filesystemAccessor.SetFilesystemInfo(filesystemsFromStorage(filesystems))
	if err != nil {
		return errors.Annotate(err, "publishing filesystems to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			logger.Errorf(
				"publishing filesystem %s to state: %v",
				filesystems[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range filesystems {
		updateFilesystem(ctx, v)
	}
	return nil
}
Example No. 21
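// constructState builds a mockState whose callbacks record the calls made
// and return the suite's canned storage entities.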
func (s *baseStorageSuite) constructState() *mockState {
	s.unitTag = names.NewUnitTag("mysql/0")
	s.storageTag = names.NewStorageTag("data/0")

	s.storageInstance = &mockStorageInstance{
		kind:       state.StorageKindFilesystem,
		owner:      s.unitTag,
		storageTag: s.storageTag,
	}

	storageInstanceAttachment := &mockStorageAttachment{storage: s.storageInstance}

	s.machineTag = names.NewMachineTag("66")
	s.filesystemTag = names.NewFilesystemTag("104")
	s.volumeTag = names.NewVolumeTag("22")
	s.filesystem = &mockFilesystem{
		tag:     s.filesystemTag,
		storage: &s.storageTag,
	}
	s.filesystemAttachment = &mockFilesystemAttachment{
		filesystem: s.filesystemTag,
		machine:    s.machineTag,
	}
	s.volume = &mockVolume{tag: s.volumeTag, storage: &s.storageTag}
	s.volumeAttachment = &mockVolumeAttachment{
		VolumeTag:  s.volumeTag,
		MachineTag: s.machineTag,
	}

	s.blocks = make(map[state.BlockType]state.Block)
	return &mockState{
		allStorageInstances: func() ([]state.StorageInstance, error) {
			s.calls = append(s.calls, allStorageInstancesCall)
			return []state.StorageInstance{s.storageInstance}, nil
		},
		storageInstance: func(sTag names.StorageTag) (state.StorageInstance, error) {
			s.calls = append(s.calls, storageInstanceCall)
			if sTag == s.storageTag {
				return s.storageInstance, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(sTag))
		},
		storageInstanceAttachments: func(tag names.StorageTag) ([]state.StorageAttachment, error) {
			s.calls = append(s.calls, storageInstanceAttachmentsCall)
			if tag == s.storageTag {
				return []state.StorageAttachment{storageInstanceAttachment}, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(tag))
		},
		storageInstanceFilesystem: func(sTag names.StorageTag) (state.Filesystem, error) {
			s.calls = append(s.calls, storageInstanceFilesystemCall)
			if sTag == s.storageTag {
				return s.filesystem, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(sTag))
		},
		storageInstanceFilesystemAttachment: func(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) {
			s.calls = append(s.calls, storageInstanceFilesystemAttachmentCall)
			if m == s.machineTag && f == s.filesystemTag {
				return s.filesystemAttachment, nil
			}
			return nil, errors.NotFoundf("filesystem attachment %s:%s", m, f)
		},
		storageInstanceVolume: func(t names.StorageTag) (state.Volume, error) {
			s.calls = append(s.calls, storageInstanceVolumeCall)
			if t == s.storageTag {
				return s.volume, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(t))
		},
		unitAssignedMachine: func(u names.UnitTag) (names.MachineTag, error) {
			s.calls = append(s.calls, unitAssignedMachineCall)
			if u == s.unitTag {
				return s.machineTag, nil
			}
			return names.MachineTag{}, errors.NotFoundf("%s", names.ReadableString(u))
		},
		volume: func(tag names.VolumeTag) (state.Volume, error) {
			s.calls = append(s.calls, volumeCall)
			if tag == s.volumeTag {
				return s.volume, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(tag))
		},
		machineVolumeAttachments: func(machine names.MachineTag) ([]state.VolumeAttachment, error) {
			s.calls = append(s.calls, machineVolumeAttachmentsCall)
			if machine == s.machineTag {
				return []state.VolumeAttachment{s.volumeAttachment}, nil
			}
			return nil, nil
		},
		volumeAttachments: func(volume names.VolumeTag) ([]state.VolumeAttachment, error) {
			s.calls = append(s.calls, volumeAttachmentsCall)
			if volume == s.volumeTag {
				return []state.VolumeAttachment{s.volumeAttachment}, nil
			}
			return nil, nil
		},
		allVolumes: func() ([]state.Volume, error) {
			s.calls = append(s.calls, allVolumesCall)
			return []state.Volume{s.volume}, nil
		},
		filesystem: func(tag names.FilesystemTag) (state.Filesystem, error) {
			s.calls = append(s.calls, filesystemCall)
			if tag == s.filesystemTag {
				return s.filesystem, nil
			}
			return nil, errors.NotFoundf("%s", names.ReadableString(tag))
		},
		machineFilesystemAttachments: func(machine names.MachineTag) ([]state.FilesystemAttachment, error) {
			s.calls = append(s.calls, machineFilesystemAttachmentsCall)
			if machine == s.machineTag {
				return []state.FilesystemAttachment{s.filesystemAttachment}, nil
			}
			return nil, nil
		},
		filesystemAttachments: func(filesystem names.FilesystemTag) ([]state.FilesystemAttachment, error) {
			s.calls = append(s.calls, filesystemAttachmentsCall)
			if filesystem == s.filesystemTag {
				return []state.FilesystemAttachment{s.filesystemAttachment}, nil
			}
			return nil, nil
		},
		allFilesystems: func() ([]state.Filesystem, error) {
			s.calls = append(s.calls, allFilesystemsCall)
			return []state.Filesystem{s.filesystem}, nil
		},
		envName: "storagetest",
		addStorageForUnit: func(u names.UnitTag, name string, cons state.StorageConstraints) error {
			s.calls = append(s.calls, addStorageForUnitCall)
			return nil
		},
		getBlockForType: func(t state.BlockType) (state.Block, bool, error) {
			s.calls = append(s.calls, getBlockForTypeCall)
			val, found := s.blocks[t]
			return val, found, nil
		},
	}
}
Example No. 22
// detachVolumes destroys volume attachments with the specified parameters.
func detachVolumes(ctx *context, ops map[params.MachineStorageId]*detachVolumeOp) error {
	volumeAttachmentParams := make([]storage.VolumeAttachmentParams, 0, len(ops))
	for _, op := range ops {
		volumeAttachmentParams = append(volumeAttachmentParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeAttachmentParamsBySource(
		ctx.environConfig, ctx.config.StorageDir, volumeAttachmentParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	var remove []params.MachineStorageId
	for sourceName, volumeAttachmentParams := range paramsBySource {
		logger.Debugf("detaching volumes: %+v", volumeAttachmentParams)
		volumeSource := volumeSources[sourceName]
		errs, err := volumeSource.DetachVolumes(volumeAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "detaching volumes from source %q", sourceName)
		}
		for i, err := range errs {
			p := volumeAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag: p.Volume.String(),
				// TODO(axw) when we support multiple
				// attachments, we'll have to check if
				// there are any other attachments
				// before saying the status "detached".
				Status: params.StatusDetached,
			})
			id := params.MachineStorageId{
				MachineTag:    p.Machine.String(),
				AttachmentTag: p.Volume.String(),
			}
			status := &statuses[len(statuses)-1]
			if err != nil {
				reschedule = append(reschedule, ops[id])
				status.Status = params.StatusDetaching
				status.Info = err.Error()
				logger.Debugf(
					"failed to detach %s from %s: %v",
					names.ReadableString(p.Volume),
					names.ReadableString(p.Machine),
					err,
				)
				continue
			}
			remove = append(remove, id)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.volumeAttachments, id)
	}
	return nil
}
Example No. 23
// destroyVolumes destroys volumes with the specified parameters.
func destroyVolumes(ctx *context, ops map[names.VolumeTag]*destroyVolumeOp) error {
	tags := make([]names.VolumeTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	volumeParams, err := volumeParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.environConfig, ctx.config.StorageDir, volumeParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("destroying volumes from %q: %v", sourceName, volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: params.StatusError,
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		volumeIds := make([]string, len(volumeParams))
		for i, volumeParams := range volumeParams {
			volume, ok := ctx.volumes[volumeParams.Tag]
			if !ok {
				return errors.NotFoundf("volume %s", volumeParams.Tag.Id())
			}
			volumeIds[i] = volume.VolumeId
		}
		errs, err := volumeSource.DestroyVolumes(volumeIds)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := volumeParams[i].Tag
			if err == nil {
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy volume; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: params.StatusDestroying,
				Info:   err.Error(),
			})
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing volumes from state")
	}
	return nil
}
Example No. 24
// createVolumes creates volumes with the specified parameters.
func createVolumes(ctx *context, ops map[names.VolumeTag]*createVolumeOp) error {
	volumeParams := make([]storage.VolumeParams, 0, len(ops))
	for _, op := range ops {
		volumeParams = append(volumeParams, op.args)
	}
	paramsBySource, volumeSources, err := volumeParamsBySource(
		ctx.environConfig, ctx.config.StorageDir, volumeParams,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var volumes []storage.Volume
	var volumeAttachments []storage.VolumeAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, volumeParams := range paramsBySource {
		logger.Debugf("creating volumes: %v", volumeParams)
		volumeSource := volumeSources[sourceName]
		validVolumeParams, validationErrors := validateVolumeParams(volumeSource, volumeParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: params.StatusError,
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(volumeParams[i].Tag), err,
			)
		}
		volumeParams = validVolumeParams
		if len(volumeParams) == 0 {
			continue
		}
		results, err := volumeSource.CreateVolumes(volumeParams)
		if err != nil {
			return errors.Annotatef(err, "creating volumes from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    volumeParams[i].Tag.String(),
				Status: params.StatusAttaching,
			})
			status := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the volume creation.
				reschedule = append(reschedule, ops[volumeParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				status.Status = params.StatusPending
				status.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(volumeParams[i].Tag),
					result.Error,
				)
				continue
			}
			volumes = append(volumes, *result.Volume)
			if result.VolumeAttachment != nil {
				status.Status = params.StatusAttached
				volumeAttachments = append(volumeAttachments, *result.VolumeAttachment)
			}
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(volumes) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list volumes in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing volumes that we fail
	// to record in state.
	errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes))
	if err != nil {
		return errors.Annotate(err, "publishing volumes to state")
	}
	for i, result := range errorResults {
		if result.Error != nil {
			logger.Errorf(
				"publishing volume %s to state: %v",
				volumes[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range volumes {
		updateVolume(ctx, v)
	}
	// Note: the storage provisioner that creates a volume is also
	// responsible for creating the volume attachment. It is therefore
	// safe to set the volume attachment info after the volume info,
	// without leading to the possibility of concurrent, duplicate
	// attachments.
	err = setVolumeAttachmentInfo(ctx, volumeAttachments)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example No. 25
// validateFilesystemMountPoints validates the mount points of filesystems
// being attached to the specified machine. If there are any mount point
// path conflicts, an error will be returned.
func validateFilesystemMountPoints(m *Machine, newFilesystems []filesystemAttachmentTemplate) error {
	attachments, err := m.st.MachineFilesystemAttachments(m.MachineTag())
	if err != nil {
		return errors.Trace(err)
	}
	existing := make(map[names.FilesystemTag]string)
	for _, a := range attachments {
		params, ok := a.Params()
		if ok {
			existing[a.Filesystem()] = params.Location
			continue
		}
		info, err := a.Info()
		if err != nil {
			return errors.Trace(err)
		}
		existing[a.Filesystem()] = info.MountPoint
	}

	storageName := func(
		filesystemTag names.FilesystemTag,
		storageTag names.StorageTag,
	) string {
		if storageTag == (names.StorageTag{}) {
			return names.ReadableString(filesystemTag)
		}
		// We know the tag is valid, so ignore the error.
		storageName, _ := names.StorageName(storageTag.Id())
		return fmt.Sprintf("%q storage", storageName)
	}

	containsPath := func(a, b string) bool {
		a = path.Clean(a) + "/"
		b = path.Clean(b) + "/"
		return strings.HasPrefix(b, a)
	}

	// These sets are expected to be small, so a simple pairwise comparison
	// is used here rather than sorting the paths or building a reverse
	// lookup from location to filesystem.
	for _, template := range newFilesystems {
		newMountPoint := template.params.Location
		for oldFilesystemTag, oldMountPoint := range existing {
			var conflicted, swapOrder bool
			if containsPath(oldMountPoint, newMountPoint) {
				conflicted = true
			} else if containsPath(newMountPoint, oldMountPoint) {
				conflicted = true
				swapOrder = true
			}
			if !conflicted {
				continue
			}

			// Get a helpful identifier for the new filesystem. If it
			// is being created for a storage instance, then use
			// the storage name; otherwise use the filesystem name.
			newStorageName := storageName(template.tag, template.storage)

			// Likewise for the old filesystem, but this time we'll
			// need to consult state.
			oldFilesystem, err := m.st.Filesystem(oldFilesystemTag)
			if err != nil {
				return errors.Trace(err)
			}
			storageTag, err := oldFilesystem.Storage()
			if errors.IsNotAssigned(err) {
				storageTag = names.StorageTag{}
			} else if err != nil {
				return errors.Trace(err)
			}
			oldStorageName := storageName(oldFilesystemTag, storageTag)

			lhs := fmt.Sprintf("mount point %q for %s", oldMountPoint, oldStorageName)
			rhs := fmt.Sprintf("mount point %q for %s", newMountPoint, newStorageName)
			if swapOrder {
				lhs, rhs = rhs, lhs
			}
			return errors.Errorf("%s contains %s", lhs, rhs)
		}
	}
	return nil
}
Example No. 26
// attachFilesystems creates filesystem attachments with the specified parameters.
func attachFilesystems(ctx *context, ops map[params.MachineStorageId]*attachFilesystemOp) error {
	filesystemAttachmentParams := make([]storage.FilesystemAttachmentParams, 0, len(ops))
	for _, op := range ops {
		args := op.args
		if args.Path == "" {
			args.Path = filepath.Join(ctx.config.StorageDir, args.Filesystem.Id())
		}
		filesystemAttachmentParams = append(filesystemAttachmentParams, args)
	}
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(
		ctx.modelConfig,
		ctx.config.StorageDir,
		filesystemAttachmentParams,
		ctx.filesystems,
		ctx.managedFilesystemSource,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var filesystemAttachments []storage.FilesystemAttachment
	var statuses []params.EntityStatusArgs
	for sourceName, filesystemAttachmentParams := range paramsBySource {
		logger.Debugf("attaching filesystems: %+v", filesystemAttachmentParams)
		filesystemSource := filesystemSources[sourceName]
		results, err := filesystemSource.AttachFilesystems(filesystemAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "attaching filesystems from source %q", sourceName)
		}
		for i, result := range results {
			p := filesystemAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    p.Filesystem.String(),
				Status: status.StatusAttached,
			})
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the filesystem attachment.
				id := params.MachineStorageId{
					MachineTag:    p.Machine.String(),
					AttachmentTag: p.Filesystem.String(),
				}
				reschedule = append(reschedule, ops[id])

				// Note: we keep the status as "attaching" to
				// indicate that we will retry. When we distinguish
				// between transient and permanent errors, we will
				// set the status to "error" for permanent errors.
				entityStatus.Status = status.StatusAttaching
				entityStatus.Info = result.Error.Error()
				logger.Debugf(
					"failed to attach %s to %s: %v",
					names.ReadableString(p.Filesystem),
					names.ReadableString(p.Machine),
					result.Error,
				)
				continue
			}
			filesystemAttachments = append(filesystemAttachments, *result.FilesystemAttachment)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := setFilesystemAttachmentInfo(ctx, filesystemAttachments); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example No. 27
// destroyFilesystems destroys filesystems with the specified parameters.
func destroyFilesystems(ctx *context, ops map[names.FilesystemTag]*destroyFilesystemOp) error {
	tags := make([]names.FilesystemTag, 0, len(ops))
	for tag := range ops {
		tags = append(tags, tag)
	}
	filesystemParams, err := filesystemParams(ctx, tags)
	if err != nil {
		return errors.Trace(err)
	}
	paramsBySource, filesystemSources, err := filesystemParamsBySource(
		ctx.modelConfig, ctx.config.StorageDir,
		filesystemParams, ctx.managedFilesystemSource,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var remove []names.Tag
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	for sourceName, filesystemParams := range paramsBySource {
		logger.Debugf("destroying filesystems from %q: %v", sourceName, filesystemParams)
		filesystemSource := filesystemSources[sourceName]
		validFilesystemParams, validationErrors := validateFilesystemParams(filesystemSource, filesystemParams)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    filesystemParams[i].Tag.String(),
				Status: status.StatusError,
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(filesystemParams[i].Tag), err,
			)
		}
		filesystemParams = validFilesystemParams
		if len(filesystemParams) == 0 {
			continue
		}
		filesystemIds := make([]string, len(filesystemParams))
		for i, filesystemParams := range filesystemParams {
			filesystem, ok := ctx.filesystems[filesystemParams.Tag]
			if !ok {
				return errors.NotFoundf("filesystem %s", filesystemParams.Tag.Id())
			}
			filesystemIds[i] = filesystem.FilesystemId
		}
		errs, err := filesystemSource.DestroyFilesystems(filesystemIds)
		if err != nil {
			return errors.Trace(err)
		}
		for i, err := range errs {
			tag := filesystemParams[i].Tag
			if err == nil {
				remove = append(remove, tag)
				continue
			}
			// Failed to destroy filesystem; reschedule and update status.
			reschedule = append(reschedule, ops[tag])
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    tag.String(),
				Status: status.StatusDestroying,
				Info:   err.Error(),
			})
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeEntities(ctx, remove); err != nil {
		return errors.Annotate(err, "removing filesystems from state")
	}
	return nil
}