Example #1
File: machine.go  Project: zhouqt/juju
func (a *MachineAgent) uninstallAgent(agentConfig agent.Config) error {
	var errors []error
	agentServiceName := agentConfig.Value(agent.AgentServiceName)
	if agentServiceName == "" {
		// For backwards compatibility, handle lack of AgentServiceName.
		agentServiceName = os.Getenv("UPSTART_JOB")
	}
	if agentServiceName != "" {
		if err := service.NewService(agentServiceName, common.Conf{}).Remove(); err != nil {
			errors = append(errors, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err))
		}
	}
	// Remove the juju-run symlink.
	if err := os.Remove(jujuRun); err != nil && !os.IsNotExist(err) {
		errors = append(errors, err)
	}

	namespace := agentConfig.Value(agent.Namespace)
	if err := mongo.RemoveService(namespace); err != nil {
		errors = append(errors, fmt.Errorf("cannot stop/remove mongo service with namespace %q: %v", namespace, err))
	}
	if err := os.RemoveAll(agentConfig.DataDir()); err != nil {
		errors = append(errors, err)
	}
	if len(errors) == 0 {
		return nil
	}
	return fmt.Errorf("uninstall failed: %v", errors)
}
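The function above collects every cleanup failure instead of aborting on the first one, so a partial uninstall still attempts each remaining step. A minimal standalone sketch of the same accumulate-then-report pattern, using only the standard library (the names are illustrative, not from juju):

package main

import (
	"fmt"
	"os"
)

// cleanupAll runs every step, collecting failures rather than
// stopping at the first one, then reports them together.
func cleanupAll(paths ...string) error {
	var errs []error
	for _, p := range paths {
		// A missing file is tolerated, mirroring the juju-run
		// symlink removal above.
		if err := os.Remove(p); err != nil && !os.IsNotExist(err) {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("cleanup failed: %v", errs)
}

func main() {
	fmt.Println(cleanupAll("/tmp/does-not-exist")) // <nil>
}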
Example #2
func ensureMongoService(agentConfig agent.Config) error {
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return errors.Annotatef(err, "invalid oplog size: %q", oplogSizeString)
		}
	}

	var numaCtlPolicy bool
	if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString)
		}
	}

	si, ok := agentConfig.StateServingInfo()
	if !ok {
		return errors.Errorf("agent config has no state serving info")
	}

	err := mongo.EnsureServiceInstalled(agentConfig.DataDir(),
		si.StatePort,
		oplogSize,
		numaCtlPolicy)
	return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place")
}
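Both values above follow the same convention: an empty config string keeps the zero value so the callee can choose a default, while a malformed string is an error. A small self-contained sketch of that convention (hypothetical helper, not juju code):

package main

import (
	"fmt"
	"strconv"
)

// parseOptionalInt mirrors the oplog-size handling above: an empty
// value keeps the zero default for the callee to interpret, while a
// malformed value is reported as an error.
func parseOptionalInt(s string) (int, error) {
	if s == "" {
		return 0, nil
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, fmt.Errorf("invalid oplog size: %q", s)
	}
	return n, nil
}

func main() {
	fmt.Println(parseOptionalInt(""))    // 0 <nil>: callee computes a default
	fmt.Println(parseOptionalInt("512")) // 512 <nil>
	fmt.Println(parseOptionalInt("big")) // 0 invalid oplog size: "big"
}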
Example #3
func containerManagerConfig(
	containerType instance.ContainerType,
	provisioner *apiprovisioner.State,
	agentConfig agent.Config,
) (container.ManagerConfig, error) {
	// Ask the provisioner for the container manager configuration.
	managerConfigResult, err := provisioner.ContainerManagerConfig(
		params.ContainerManagerConfigParams{Type: containerType},
	)
	if params.IsCodeNotImplemented(err) {
		// We currently don't support upgrading;
		// revert to the old configuration. Note the else below:
		// without it the fallback would be discarded and the
		// error returned anyway.
	} else if err != nil {
		return nil, err
	}
	// If a namespace is specified, that should instead be used as the config name.
	if namespace := agentConfig.Value(agent.Namespace); namespace != "" {
		managerConfigResult.ManagerConfig[container.ConfigName] = namespace
	}
	managerConfig := container.ManagerConfig(managerConfigResult.ManagerConfig)

	return managerConfig, nil
}
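Note that container.ManagerConfig is used here as a plain string map whose config-name key can be overridden by the agent's namespace. A toy sketch of that override step, with a stand-in map type (an assumption, not the juju definition):

package main

import "fmt"

// managerConfig stands in for container.ManagerConfig, which the
// code above treats as a plain map[string]string.
type managerConfig map[string]string

// withNamespace applies the same override as above: a non-empty
// namespace replaces the configured name.
func withNamespace(cfg managerConfig, namespace string) managerConfig {
	if namespace != "" {
		cfg["name"] = namespace
	}
	return cfg
}

func main() {
	cfg := managerConfig{"name": "juju"}
	fmt.Println(withNamespace(cfg, "test-env")) // map[name:test-env]
}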
Example #4
File: bootstrap.go  Project: kapilt/juju
// newEnsureServerParams creates an EnsureServerParams from an agent configuration.
func newEnsureServerParams(agentConfig agent.Config) (mongo.EnsureServerParams, error) {
	// If oplog size is specified in the agent configuration, use that.
	// Otherwise leave the default zero value to indicate to EnsureServer
	// that it should calculate the size.
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid oplog size: %q", oplogSizeString)
		}
	}

	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return mongo.EnsureServerParams{}, fmt.Errorf("agent config has no state serving info")
	}

	params := mongo.EnsureServerParams{
		StateServingInfo: servingInfo,
		DataDir:          agentConfig.DataDir(),
		Namespace:        agentConfig.Value(agent.Namespace),
		OplogSize:        oplogSize,
	}
	return params, nil
}
Example #5
// migrateCharmStorage copies uploaded charms from provider storage
// to environment storage, and then adds the storage path into the
// charm's document in state.
func migrateCharmStorage(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating charms to environment storage")
	charms, err := st.AllCharms()
	if err != nil {
		return err
	}
	storage := storage.NewStorage(st.EnvironUUID(), st.MongoSession())

	// The local and manual providers host storage on the state
	// server's filesystem and serve it via HTTP. The storage worker
	// doesn't run yet, so we just open the files directly.
	fetchCharmArchive := fetchCharmArchive
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		fetchCharmArchive = localstorage{storageDir}.fetchCharmArchive
	}

	storagePaths := make(map[*charm.URL]string)
	for _, ch := range charms {
		if ch.IsPlaceholder() {
			logger.Debugf("skipping %s, placeholder charm", ch.URL())
			continue
		}
		if !ch.IsUploaded() {
			logger.Debugf("skipping %s, not uploaded to provider storage", ch.URL())
			continue
		}
		if charmStoragePath(ch) != "" {
			logger.Debugf("skipping %s, already in environment storage", ch.URL())
			continue
		}
		url := charmBundleURL(ch)
		if url == nil {
			logger.Debugf("skipping %s, has no bundle URL", ch.URL())
			continue
		}
		uuid, err := utils.NewUUID()
		if err != nil {
			return err
		}
		data, err := fetchCharmArchive(url)
		if err != nil {
			return err
		}

		curl := ch.URL()
		storagePath := fmt.Sprintf("charms/%s-%s", curl, uuid)
		logger.Debugf("uploading %s to %q in environment storage", curl, storagePath)
		err = storage.Put(storagePath, bytes.NewReader(data), int64(len(data)))
		if err != nil {
			return errors.Annotatef(err, "failed to upload %s to storage", curl)
		}
		storagePaths[curl] = storagePath
	}

	return stateAddCharmStoragePaths(st, storagePaths)
}
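The assignment fetchCharmArchive := fetchCharmArchive above is deliberate: it shadows the package-level function with a local variable so the local/manual branch can swap in a different fetcher. A self-contained sketch of that shadow-and-swap idiom (illustrative names only):

package main

import "fmt"

// defaultFetch stands in for the package-level fetchCharmArchive.
func defaultFetch(url string) ([]byte, error) {
	return []byte("remote:" + url), nil
}

func migrate(local bool) {
	// Shadow the package-level function with a local variable, as
	// the code above does, so one code path can replace the fetcher.
	fetch := defaultFetch
	if local {
		fetch = func(url string) ([]byte, error) {
			return []byte("local:" + url), nil
		}
	}
	data, _ := fetch("cs:wordpress")
	fmt.Println(string(data))
}

func main() {
	migrate(false) // remote:cs:wordpress
	migrate(true)  // local:cs:wordpress
}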
Example #6
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.NewMachineTag("0").String() ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
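The predicate above enables writing system files unless the agent is machine 0 of the local provider, i.e. the host machine itself. A standalone sketch of the same boolean (assuming the machine-0 tag renders as "machine-0", which is worth verifying against names.NewMachineTag):

package main

import "fmt"

// shouldWrite mirrors the predicate above: write system files unless
// this is machine 0 of the local provider (the host machine itself).
func shouldWrite(tag, providerType string) bool {
	return tag != "machine-0" || providerType != "local"
}

func main() {
	fmt.Println(shouldWrite("machine-0", "local")) // false: host machine, skip
	fmt.Println(shouldWrite("machine-1", "local")) // true
	fmt.Println(shouldWrite("machine-0", "ec2"))   // true
}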
Example #7
func (c *BootstrapCommand) startMongo(addrs []network.Address, agentConfig agent.Config) error {
	logger.Debugf("starting mongo")

	info, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("no state info available")
	}
	// When bootstrapping, we need to allow enough time for mongo
	// to start as there's no retry loop in place.
	// 5 minutes should suffice.
	bootstrapDialOpts := mongo.DialOpts{Timeout: 5 * time.Minute}
	dialInfo, err := mongo.DialInfo(info.Info, bootstrapDialOpts)
	if err != nil {
		return err
	}
	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("agent config has no state serving info")
	}
	// Use localhost to dial the mongo server, because it's running in
	// auth mode and will refuse to perform any operations unless
	// we dial that address.
	dialInfo.Addrs = []string{
		net.JoinHostPort("127.0.0.1", fmt.Sprint(servingInfo.StatePort)),
	}

	logger.Debugf("calling ensureMongoServer")
	err = ensureMongoServer(
		agentConfig.DataDir(),
		agentConfig.Value(agent.Namespace),
		servingInfo,
	)
	if err != nil {
		return err
	}

	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	peerHostPort := net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort))

	return maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: peerHostPort,
	})
}
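The dial address above is built with net.JoinHostPort rather than string concatenation, which matters once IPv6 literals appear. A quick illustration (standard library only):

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.JoinHostPort brackets IPv6 hosts correctly, which plain
	// concatenation would get wrong.
	port := 37017
	fmt.Println(net.JoinHostPort("127.0.0.1", fmt.Sprint(port))) // 127.0.0.1:37017
	fmt.Println(net.JoinHostPort("::1", fmt.Sprint(port)))       // [::1]:37017
}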
Example #8
// NewEnsureServerParams creates an EnsureServerParams from an agent
// configuration.
func NewEnsureServerParams(agentConfig agent.Config) (mongo.EnsureServerParams, error) {
	// If oplog size is specified in the agent configuration, use that.
	// Otherwise leave the default zero value to indicate to EnsureServer
	// that it should calculate the size.
	var oplogSize int
	if oplogSizeString := agentConfig.Value(agent.MongoOplogSize); oplogSizeString != "" {
		var err error
		if oplogSize, err = strconv.Atoi(oplogSizeString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid oplog size: %q", oplogSizeString)
		}
	}

	// If numa ctl preference is specified in the agent configuration, use that.
	// Otherwise leave the default false value to indicate to EnsureServer
	// that numactl should not be used.
	var numaCtlPolicy bool
	if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" {
		var err error
		if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil {
			return mongo.EnsureServerParams{}, fmt.Errorf("invalid numactl preference: %q", numaCtlString)
		}
	}

	si, ok := agentConfig.StateServingInfo()
	if !ok {
		return mongo.EnsureServerParams{}, fmt.Errorf("agent config has no state serving info")
	}

	params := mongo.EnsureServerParams{
		APIPort:        si.APIPort,
		StatePort:      si.StatePort,
		Cert:           si.Cert,
		PrivateKey:     si.PrivateKey,
		CAPrivateKey:   si.CAPrivateKey,
		SharedSecret:   si.SharedSecret,
		SystemIdentity: si.SystemIdentity,

		DataDir:              agentConfig.DataDir(),
		OplogSize:            oplogSize,
		SetNumaControlPolicy: numaCtlPolicy,

		Version: agentConfig.MongoVersion(),
	}
	return params, nil
}
Example #9
// migrateCustomImageMetadata copies uploaded image metadata from provider
// storage to environment storage, preserving paths.
func migrateCustomImageMetadata(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating custom image metadata to environment storage")
	estor := newStateStorage(st.EnvironUUID(), st.MongoSession())

	// The local and manual providers host storage on the state
	// server's filesystem and serve it via HTTP. The storage worker
	// doesn't run yet, so we just open the files directly.
	var pstor storage.StorageReader
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		var err error
		pstor, err = filestorage.NewFileStorageReader(storageDir)
		if err != nil {
			return errors.Annotate(err, "cannot get local filesystem storage reader")
		}
	} else {
		var err error
		pstor, err = environs.LegacyStorage(st)
		if errors.IsNotSupported(err) {
			return nil
		} else if err != nil {
			return errors.Annotate(err, "cannot get provider storage")
		}
	}

	paths, err := pstor.List(storage.BaseImagesPath)
	if err != nil {
		return err
	}
	for _, path := range paths {
		logger.Infof("migrating image metadata at path %q", path)
		data, err := readImageMetadata(pstor, path)
		if err != nil {
			return errors.Annotate(err, "failed to read image metadata")
		}
		err = estor.Put(path, bytes.NewReader(data), int64(len(data)))
		if err != nil {
			return errors.Annotate(err, "failed to write image metadata")
		}
	}
	return nil
}
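When LegacyStorage reports not-supported, the migration above treats it as success: no provider storage means nothing to migrate. A minimal sketch of that tolerate-the-sentinel pattern with stand-in errors (not the juju API):

package main

import (
	"errors"
	"fmt"
)

// errNotSupported stands in for the errors.IsNotSupported case above.
var errNotSupported = errors.New("not supported")

func legacyStorage() (string, error) { return "", errNotSupported }

// migrate returns nil when the provider has no legacy storage at
// all: nothing to migrate is success, not failure.
func migrate() error {
	stor, err := legacyStorage()
	if errors.Is(err, errNotSupported) {
		return nil
	} else if err != nil {
		return fmt.Errorf("cannot get provider storage: %w", err)
	}
	fmt.Println("migrating from", stor)
	return nil
}

func main() {
	fmt.Println(migrate()) // <nil>
}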
Example #10
func containerManagerConfig(
	containerType instance.ContainerType,
	provisioner *apiprovisioner.State,
	agentConfig agent.Config,
) (container.ManagerConfig, error) {
	// Ask the provisioner for the container manager configuration.
	managerConfigResult, err := provisioner.ContainerManagerConfig(
		params.ContainerManagerConfigParams{Type: containerType},
	)
	if err != nil {
		return nil, err
	}
	// If a namespace is specified, that should instead be used as the config name.
	if namespace := agentConfig.Value(agent.Namespace); namespace != "" {
		managerConfigResult.ManagerConfig[container.ConfigName] = namespace
	}
	managerConfig := container.ManagerConfig(managerConfigResult.ManagerConfig)

	return managerConfig, nil
}
Example #11
File: machine.go  Project: zhouqt/juju
func (a *MachineAgent) ensureMongoAdminUser(agentConfig agent.Config) (added bool, err error) {
	stateInfo, ok1 := agentConfig.MongoInfo()
	servingInfo, ok2 := agentConfig.StateServingInfo()
	if !ok1 || !ok2 {
		return false, fmt.Errorf("no state serving info configuration")
	}
	dialInfo, err := mongo.DialInfo(stateInfo.Info, mongo.DefaultDialOpts())
	if err != nil {
		return false, err
	}
	if len(dialInfo.Addrs) > 1 {
		logger.Infof("more than one state server; admin user must exist")
		return false, nil
	}
	return ensureMongoAdminUser(mongo.EnsureAdminUserParams{
		DialInfo:  dialInfo,
		Namespace: agentConfig.Value(agent.Namespace),
		DataDir:   agentConfig.DataDir(),
		Port:      servingInfo.StatePort,
		User:      stateInfo.Tag.String(),
		Password:  stateInfo.Password,
	})
}
Example #12
File: config.go  Project: klyachin/juju
func loadConfig(agentConfig agent.Config) (*config, error) {
	config := &config{
		storageDir:  agentConfig.Value(StorageDir),
		storageAddr: agentConfig.Value(StorageAddr),
		authkey:     agentConfig.Value(StorageAuthKey),
	}

	caCertPEM := agentConfig.Value(StorageCACert)
	if len(caCertPEM) > 0 {
		config.caCertPEM = caCertPEM
	}

	caKeyPEM := agentConfig.Value(StorageCAKey)
	if len(caKeyPEM) > 0 {
		config.caKeyPEM = caKeyPEM
	}

	hostnames := agentConfig.Value(StorageHostnames)
	if len(hostnames) > 0 {
		err := goyaml.Unmarshal([]byte(hostnames), &config.hostnames)
		if err != nil {
			return nil, err
		}
	}

	return config, nil
}
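StorageHostnames above is stored as a single serialized string and unmarshalled into a slice. The same shape with the standard library's encoding/json, since the original uses goyaml (illustrative data):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One config value holds a serialized list; unmarshal it into a
	// string slice, as loadConfig does for hostnames.
	raw := `["host1.example.com", "host2.example.com"]`
	var hostnames []string
	if err := json.Unmarshal([]byte(raw), &hostnames); err != nil {
		panic(err)
	}
	fmt.Println(hostnames) // [host1.example.com host2.example.com]
}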
Example #13
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st api.Connection,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	initLock, err := cmdutil.HookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	modelUUID, err := st.ModelTag()
	if err != nil {
		return err
	}
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	// There may not be a CA certificate private key available, and without
	// it we can't ensure that other Juju nodes can connect securely, so only
	// use an image URL getter if there's a private key.
	var imageURLGetter container.ImageURLGetter
	if agentConfig.Value(agent.AllowsSecureConnection) == "true" {
		cfg, err := pr.ModelConfig()
		if err != nil {
			return errors.Annotate(err, "unable to get environ config")
		}
		imageURLGetter = container.NewImageURLGetter(
			// Explicitly call the non-named constructor so if anyone
			// adds additional fields, this fails.
			container.ImageURLGetterConfig{
				ServerRoot:        st.Addr(),
				ModelUUID:         modelUUID.Id(),
				CACert:            []byte(agentConfig.CACert()),
				CloudimgBaseUrl:   cfg.CloudImageBaseURL(),
				Stream:            cfg.ImageStream(),
				ImageDownloadFunc: container.ImageDownloadURL,
			})
	}
	params := provisioner.ContainerSetupParams{
		Runner:              runner,
		WorkerName:          watcherName,
		SupportedContainers: containers,
		ImageURLGetter:      imageURLGetter,
		Machine:             machine,
		Provisioner:         pr,
		Config:              agentConfig,
		InitLock:            initLock,
	}
	handler := provisioner.NewContainerSetupHandler(params)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		w, err := watcher.NewStringsWorker(watcher.StringsConfig{
			Handler: handler,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot start %s worker", watcherName)
		}
		return w, nil
	})
	return nil
}
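The guard at the top of this function relies on Go operator precedence: && binds tighter than ||, so the condition reads as notFound || (err == nil && dead). A tiny sketch making that grouping explicit (hypothetical helper):

package main

import "fmt"

// terminate mirrors the guard above: a missing machine, or a machine
// that is already Dead, both terminate the agent. Because && binds
// tighter than ||, the expression groups as
// notFound || (err == nil && dead).
func terminate(notFound bool, err error, dead bool) bool {
	return notFound || err == nil && dead
}

func main() {
	fmt.Println(terminate(true, nil, false))  // true: machine gone
	fmt.Println(terminate(false, nil, true))  // true: machine dead
	fmt.Println(terminate(false, nil, false)) // false: keep going
}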
Example #14
// migrateToolsStorage copies tools from provider storage to
// environment storage.
func migrateToolsStorage(st *state.State, agentConfig agent.Config) error {
	logger.Debugf("migrating tools to environment storage")

	tstor, err := stateToolsStorage(st)
	if err != nil {
		return errors.Annotate(err, "cannot get tools storage")
	}
	defer tstor.Close()

	// The local and manual providers host storage on the state
	// server's filesystem and serve it via HTTP. The storage worker
	// doesn't run yet, so we just open the files directly.
	var stor storage.StorageReader
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType == provider.Local || provider.IsManual(providerType) {
		storageDir := agentConfig.Value(agent.StorageDir)
		var err error
		stor, err = filestorage.NewFileStorageReader(storageDir)
		if err != nil {
			return errors.Annotate(err, "cannot get local filesystem storage reader")
		}
	} else {
		var err error
		stor, err = environs.LegacyStorage(st)
		if errors.IsNotSupported(err) {
			return nil
		} else if err != nil {
			return errors.Annotate(err, "cannot get provider storage")
		}
	}

	// Search provider storage for tools.
	datasource := storage.NewStorageSimpleStreamsDataSource("provider storage", stor, storage.BaseToolsPath)
	toolsList, err := envtools.FindToolsForCloud(
		[]simplestreams.DataSource{datasource},
		simplestreams.CloudSpec{},
		envtools.ReleasedStream,
		-1, -1, tools.Filter{})
	switch err {
	case nil:
		break
	case tools.ErrNoMatches, envtools.ErrNoTools:
		// No tools in provider storage: nothing to do.
		return nil
	default:
		return errors.Annotate(err, "cannot find tools in provider storage")
	}

	for _, agentTools := range toolsList {
		logger.Infof("migrating %v tools to environment storage", agentTools.Version)
		data, err := fetchToolsArchive(stor, envtools.LegacyReleaseDirectory, agentTools)
		if err != nil {
			return errors.Annotatef(err, "failed to fetch %v tools", agentTools.Version)
		}
		err = tstor.AddTools(bytes.NewReader(data), toolstorage.Metadata{
			Version: agentTools.Version,
			Size:    agentTools.Size,
			SHA256:  agentTools.SHA256,
		})
		if err != nil {
			return errors.Annotatef(err, "failed to add %v tools to environment storage", agentTools.Version)
		}
	}
	return nil
}
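The switch on err above treats the known no-tools sentinels as a clean no-op and everything else as fatal. A standalone sketch with stand-in sentinel errors (not the real tools/envtools values):

package main

import (
	"errors"
	"fmt"
)

// Sentinels standing in for tools.ErrNoMatches and envtools.ErrNoTools.
var (
	errNoMatches = errors.New("no matches")
	errNoTools   = errors.New("no tools")
)

// classify mirrors the switch above: "nothing there" sentinels mean
// there is simply nothing to migrate; anything else is fatal.
func classify(err error) error {
	switch err {
	case nil:
		return nil
	case errNoMatches, errNoTools:
		return nil // no tools in provider storage: nothing to do
	default:
		return fmt.Errorf("cannot find tools in provider storage: %w", err)
	}
}

func main() {
	fmt.Println(classify(errNoTools))            // <nil>
	fmt.Println(classify(errors.New("timeout"))) // cannot find tools ...
}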
Example #15
// ensureMongoServer ensures that mongo is installed and running,
// and ready for opening a state connection.
func (a *MachineAgent) ensureMongoServer(agentConfig agent.Config) error {
	a.mongoInitMutex.Lock()
	defer a.mongoInitMutex.Unlock()
	if a.mongoInitialized {
		logger.Debugf("mongo is already initialized")
		return nil
	}

	servingInfo, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state serving info")
	}
	namespace := agentConfig.Value(agent.Namespace)

	// When upgrading from a pre-HA-capable environment,
	// we must add machine-0 to the admin database and
	// initiate its replicaset.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	var shouldInitiateMongoServer bool
	var addrs []network.Address
	if isPreHAVersion(agentConfig.UpgradedToVersion()) {
		_, err := a.ensureMongoAdminUser(agentConfig)
		if err != nil {
			return err
		}
		if servingInfo.SharedSecret == "" {
			servingInfo.SharedSecret, err = mongo.GenerateSharedSecret()
			if err != nil {
				return err
			}
			if err = a.ChangeConfig(func(config agent.ConfigSetter) {
				config.SetStateServingInfo(servingInfo)
			}); err != nil {
				return err
			}
			agentConfig = a.CurrentConfig()
		}
		// Note: we set Direct=true in the mongo options because it's
		// possible that we've previously upgraded the mongo server's
		// configuration to form a replicaset, but failed to initiate it.
		st, m, err := openState(agentConfig, mongo.DialOpts{Direct: true})
		if err != nil {
			return err
		}
		if err := st.SetStateServingInfo(servingInfo); err != nil {
			st.Close()
			return fmt.Errorf("cannot set state serving info: %v", err)
		}
		st.Close()
		addrs = m.Addresses()
		shouldInitiateMongoServer = true
	}

	// ensureMongoServer installs/upgrades the upstart config as necessary.
	if err := ensureMongoServer(
		agentConfig.DataDir(),
		namespace,
		servingInfo,
	); err != nil {
		return err
	}
	if !shouldInitiateMongoServer {
		return nil
	}

	// Initiate the replicaset for upgraded environments.
	//
	// TODO(axw) remove this when we no longer need
	// to upgrade from pre-HA-capable environments.
	stateInfo, ok := agentConfig.StateInfo()
	if !ok {
		return fmt.Errorf("state worker was started with no state serving info")
	}
	dialInfo, err := mongo.DialInfo(stateInfo.Info, mongo.DefaultDialOpts())
	if err != nil {
		return err
	}
	peerAddr := mongo.SelectPeerAddress(addrs)
	if peerAddr == "" {
		return fmt.Errorf("no appropriate peer address found in %q", addrs)
	}
	if err := maybeInitiateMongoServer(peergrouper.InitiateMongoParams{
		DialInfo:       dialInfo,
		MemberHostPort: net.JoinHostPort(peerAddr, fmt.Sprint(servingInfo.StatePort)),
		User:           stateInfo.Tag,
		Password:       stateInfo.Password,
	}); err != nil {
		return err
	}
	a.mongoInitialized = true
	return nil
}
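The mutex-plus-flag combination above is a retryable variant of sync.Once: mongoInitialized is only set after everything succeeds, so a failed initialization can be attempted again on the next call. A self-contained sketch (illustrative type, not from juju):

package main

import (
	"fmt"
	"sync"
)

// initOnce mirrors the idiom above: the lock serializes concurrent
// callers and the flag makes the expensive setup run at most once.
type initOnce struct {
	mu   sync.Mutex
	done bool
}

func (i *initOnce) ensure(setup func() error) error {
	i.mu.Lock()
	defer i.mu.Unlock()
	if i.done {
		return nil
	}
	if err := setup(); err != nil {
		return err // unlike sync.Once, a failure can be retried
	}
	i.done = true
	return nil
}

func main() {
	var i initOnce
	_ = i.ensure(func() error { fmt.Println("setup"); return nil })
	_ = i.ensure(func() error { fmt.Println("setup"); return nil }) // no-op
}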