Example #1
// migrateCharmStorage copies uploaded charms from provider storage
// to environment storage, and then adds the storage path into the
// charm's document in state.
func migrateCharmStorage(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating charms to environment storage")
	charms, err := st.AllCharms()
	if err != nil {
		return err
	}
	storage := storage.NewStorage(st.EnvironUUID(), st.MongoSession())

	// The local and manual providers host storage on the state
	// server's filesystem and serve it over HTTP. The storage worker
	// isn't running yet, so we open the files directly.
	fetchCharmArchive := fetchCharmArchive // default: download from the charm's bundle URL
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		fetchCharmArchive = localstorage{storageDir}.fetchCharmArchive
	}

	storagePaths := make(map[*charm.URL]string)
	for _, ch := range charms {
		if ch.IsPlaceholder() {
			logger.Debugf("skipping %s, placeholder charm", ch.URL())
			continue
		}
		if !ch.IsUploaded() {
			logger.Debugf("skipping %s, not uploaded to provider storage", ch.URL())
			continue
		}
		if charmStoragePath(ch) != "" {
			logger.Debugf("skipping %s, already in environment storage", ch.URL())
			continue
		}
		url := charmBundleURL(ch)
		if url == nil {
			logger.Debugf("skipping %s, has no bundle URL", ch.URL())
			continue
		}
		uuid, err := utils.NewUUID()
		if err != nil {
			return err
		}
		data, err := fetchCharmArchive(url)
		if err != nil {
			return err
		}

		curl := ch.URL()
		storagePath := fmt.Sprintf("charms/%s-%s", curl, uuid)
		logger.Debugf("uploading %s to %q in environment storage", curl, storagePath)
		err = storage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		if err != nil {
			return errors.Annotatef(err, "failed to upload %s to storage", curl)
		}
		storagePaths[curl] = storagePath
	}

	return stateAddCharmStoragePaths(st, storagePaths)
}
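The local-provider branch above swaps in localstorage{storageDir}.fetchCharmArchive. A minimal sketch of what that helper could look like, assuming the archive URL's path maps directly onto a file under the agent's storage directory (the type and method names come from the call site; the path mapping, and the net/url, io/ioutil, and path/filepath imports, are assumptions):

type localstorage struct {
	storageDir string
}

func (s localstorage) fetchCharmArchive(curl *url.URL) ([]byte, error) {
	// The local/manual providers serve storageDir over HTTP, so the
	// URL path is assumed to map onto a file below that directory.
	return ioutil.ReadFile(filepath.Join(s.storageDir, filepath.FromSlash(curl.Path)))
}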
Example #2
// MigrateJobManageNetworking adds the job JobManageNetworking to all
// machines except for:
//
// - machines in a MAAS environment,
// - machines in a Joyent environment,
// - machines in a manual environment,
// - bootstrap node (host machine) in a local environment, and
// - manually provisioned machines.
func MigrateJobManageNetworking(st *State) error {
	// Retrieve the provider.
	envConfig, err := st.EnvironConfig()
	if err != nil {
		return errors.Annotate(err, "failed to read current config")
	}
	envType := envConfig.Type()

	// Check for MAAS, Joyent, or manual (aka null) provider.
	if envType == provider.MAAS ||
		envType == provider.Joyent ||
		provider.IsManual(envType) {
		// Do not add the job for these environment types.
		return nil
	}

	// Iterate over all machines and create operations.
	machinesCollection, closer := st.getRawCollection(machinesC)
	defer closer()

	iter := machinesCollection.Find(nil).Iter()
	defer iter.Close()

	ops := []txn.Op{}
	mdoc := machineDoc{}

	for iter.Next(&mdoc) {
		// Check possible exceptions.
		localID := st.localID(mdoc.Id)
		if localID == "0" && envType == provider.Local {
			// Skip machine 0 in local environment.
			continue
		}
		if strings.HasPrefix(mdoc.Nonce, manualMachinePrefix) {
			// Skip manually provisioned machines in non-manual environments.
			continue
		}
		if hasJob(mdoc.Jobs, JobManageNetworking) {
			// This shouldn't happen during an upgrade, but guard
			// against adding the job twice.
			continue
		}
		// Everything fine, now add job.
		ops = append(ops, txn.Op{
			C:      machinesC,
			Id:     mdoc.DocID,
			Update: bson.D{{"$addToSet", bson.D{{"jobs", JobManageNetworking}}}},
		})
	}

	return st.runRawTransaction(ops)
}
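The loop above relies on a hasJob helper to keep the update idempotent. A minimal sketch, under the assumption that MachineJob is a plain comparable type (which the $addToSet update suggests):

func hasJob(jobs []MachineJob, job MachineJob) bool {
	// Linear scan; machine job lists are short.
	for _, j := range jobs {
		if j == job {
			return true
		}
	}
	return false
}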
Example #3
// migrateCustomImageMetadata copies uploaded image metadata from provider
// storage to environment storage, preserving paths.
func migrateCustomImageMetadata(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating custom image metadata to environment storage")
	estor := newStateStorage(st.EnvironUUID(), st.MongoSession())

	// The local and manual providers host storage on the state
	// server's filesystem and serve it over HTTP. The storage worker
	// isn't running yet, so we open the files directly.
	var pstor storage.StorageReader
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		var err error
		pstor, err = filestorage.NewFileStorageReader(storageDir)
		if err != nil {
			return errors.Annotate(err, "cannot get local filesystem storage reader")
		}
	} else {
		var err error
		pstor, err = environs.LegacyStorage(st)
		if errors.IsNotSupported(err) {
			return nil
		} else if err != nil {
			return errors.Annotate(err, "cannot get provider storage")
		}
	}

	paths, err := pstor.List(storage.BaseImagesPath)
	if err != nil {
		return err
	}
	for _, path := range paths {
		logger.Infof("migrating image metadata at path %q", path)
		data, err := readImageMetadata(pstor, path)
		if err != nil {
			return errors.Annotate(err, "failed to read image metadata")
		}
		err = estor.Put(path, bytes.NewReader(data), int64(len(data)))
		if err != nil {
			return errors.Annotate(err, "failed to write image metadata")
		}
	}
	return nil
}
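readImageMetadata is not shown in this excerpt. A minimal sketch of it, assuming storage.StorageReader exposes Get(name string) (io.ReadCloser, error) as the call sites here suggest, plus an io/ioutil import:

func readImageMetadata(stor storage.StorageReader, path string) ([]byte, error) {
	// Open the object in provider storage and slurp it into memory,
	// mirroring the read-then-Put pattern of the migration loop.
	r, err := stor.Get(path)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}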
Example #4
// migrateToolsStorage copies tools from provider storage to
// environment storage.
func migrateToolsStorage(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating tools to environment storage")

	tstor, err := stateToolsStorage(st)
	if err != nil {
		return errors.Annotate(err, "cannot get tools storage")
	}
	defer tstor.Close()

	// The local and manual providers host storage on the state
	// server's filesystem and serve it over HTTP. The storage worker
	// isn't running yet, so we open the files directly.
	var stor storage.StorageReader
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		var err error
		stor, err = filestorage.NewFileStorageReader(storageDir)
		if err != nil {
			return errors.Annotate(err, "cannot get local filesystem storage reader")
		}
	} else {
		var err error
		stor, err = environs.LegacyStorage(st)
		if errors.IsNotSupported(err) {
			return nil
		} else if err != nil {
			return errors.Annotate(err, "cannot get provider storage")
		}
	}

	// Search provider storage for tools.
	datasource := storage.NewStorageSimpleStreamsDataSource("provider storage", stor, storage.BaseToolsPath)
	toolsList, err := envtools.FindToolsForCloud(
		[]simplestreams.DataSource{datasource},
		simplestreams.CloudSpec{},
		envtools.ReleasedStream,
		-1, -1, tools.Filter{})
	switch err {
	case nil:
		break
	case tools.ErrNoMatches, envtools.ErrNoTools:
		// No tools in provider storage: nothing to do.
		return nil
	default:
		return errors.Annotate(err, "cannot find tools in provider storage")
	}

	for _, agentTools := range toolsList {
		logger.Infof("migrating %v tools to environment storage", agentTools.Version)
		data, err := fetchToolsArchive(stor, envtools.LegacyReleaseDirectory, agentTools)
		if err != nil {
			return errors.Annotatef(err, "failed to fetch %v tools", agentTools.Version)
		}
		err = tstor.AddTools(bytes.NewReader(data), toolstorage.Metadata{
			Version: agentTools.Version,
			Size:    agentTools.Size,
			SHA256:  agentTools.SHA256,
		})
		if err != nil {
			return errors.Annotatef(err, "failed to add %v tools to environment storage", agentTools.Version)
		}
	}
	return nil
}
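fetchToolsArchive is not shown in this excerpt either. A minimal sketch, with a hypothetical object layout ("tools/<dir>/juju-<version>.tgz"); a real implementation would also verify the Size and SHA256 recorded in the tools metadata against what was read:

func fetchToolsArchive(stor storage.StorageReader, toolsDir string, agentTools *tools.Tools) ([]byte, error) {
	// Hypothetical object name; the actual layout may differ.
	r, err := stor.Get(fmt.Sprintf("tools/%s/juju-%v.tgz", toolsDir, agentTools.Version))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}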
Example #5
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start MongoDB server and dial.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, stateWorkerDialOpts)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// We rely on special knowledge here: the storage provider is only
	// ever wanted on one machine, the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)

				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()

				endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
				listener, err := net.Listen("tcp", endpoint)
				if err != nil {
					return nil, err
				}
				return apiserver.NewServer(st, listener, apiserver.ServerConfig{
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
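The &fatalError{...} values above are plain errors that the agent treats as fatal for the whole process rather than as restartable worker failures. A minimal sketch of the type, matching the single-field literals used here; how the runner classifies it as fatal is outside this excerpt:

type fatalError struct {
	Err string
}

func (e *fatalError) Error() string {
	return e.Err
}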