Example #1
func (s *deployerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)

	// After setup, the two machines contain the following units:
	// machine 0 (not authorized): mysql/1 (principal1)
	// machine 1 (authorized): mysql/0 (principal0), logging/0 (subordinate0)

	var err error
	s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits)
	c.Assert(err, gc.IsNil)

	s.machine1, err = s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)

	s.service0 = s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))

	s.service1 = s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging"))
	eps, err := s.State.InferEndpoints([]string{"mysql", "logging"})
	c.Assert(err, gc.IsNil)
	rel, err := s.State.AddRelation(eps...)
	c.Assert(err, gc.IsNil)

	s.principal0, err = s.service0.AddUnit()
	c.Assert(err, gc.IsNil)
	err = s.principal0.AssignToMachine(s.machine1)
	c.Assert(err, gc.IsNil)

	s.principal1, err = s.service0.AddUnit()
	c.Assert(err, gc.IsNil)
	err = s.principal1.AssignToMachine(s.machine0)
	c.Assert(err, gc.IsNil)

	relUnit0, err := rel.Unit(s.principal0)
	c.Assert(err, gc.IsNil)
	err = relUnit0.EnterScope(nil)
	c.Assert(err, gc.IsNil)
	s.subordinate0, err = s.service1.Unit("logging/0")
	c.Assert(err, gc.IsNil)

	// Create a FakeAuthorizer so we can check permissions; it is
	// set up as if machine 1 had logged in.
	s.authorizer = apiservertesting.FakeAuthorizer{
		Tag:          names.MachineTag(s.machine1.Id()),
		LoggedIn:     true,
		MachineAgent: true,
	}

	// Create the resource registry separately to track invocations to
	// Register.
	s.resources = common.NewResources()

	// Create a deployer API for machine 1.
	deployer, err := deployer.NewDeployerAPI(
		s.State,
		s.resources,
		s.authorizer,
	)
	c.Assert(err, gc.IsNil)
	s.deployer = deployer
}
Example #2
// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory.  It returns the new machine, the
// agent's configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, config agent.ConfigSetterWriter, tools *tools.Tools) {

	// Add a machine and ensure it is provisioned.
	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, gc.IsNil)
	inst, md := jujutesting.AssertStartInstance(c, s.Conn.Environ, m.Id())
	c.Assert(m.SetProvisioned(inst.Id(), state.BootstrapNonce, md), gc.IsNil)

	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)

	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, gc.IsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, gc.IsNil)
	tag := names.MachineTag(m.Id())
	if m.IsManager() {
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, gc.IsNil)
		config, tools = s.agentSuite.primeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := config.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		err = s.State.SetStateServingInfo(info)
		c.Assert(err, gc.IsNil)
	} else {
		config, tools = s.agentSuite.primeAgent(c, tag, initialMachinePassword, vers)
	}
	err = config.Write()
	c.Assert(err, gc.IsNil)
	return m, config, tools
}
Example #3
func (task *provisionerTask) populateMachineMaps(ids []string) error {
	task.instances = make(map[instance.Id]instance.Instance)

	instances, err := task.broker.AllInstances()
	if err != nil {
		logger.Errorf("failed to get all instances from broker: %v", err)
		return err
	}
	for _, i := range instances {
		task.instances[i.Id()] = i
	}

	// Update the machines map with new data for each of the machines in the
	// change list.
	// TODO(thumper): update for API server later to get all machines in one go.
	for _, id := range ids {
		machineTag := names.MachineTag(id)
		machine, err := task.machineGetter.Machine(machineTag)
		switch {
		case params.IsCodeNotFoundOrCodeUnauthorized(err):
			logger.Debugf("machine %q not found in state", id)
			delete(task.machines, id)
		case err == nil:
			task.machines[id] = machine
		default:
			logger.Errorf("failed to get machine: %v", err)
		}
	}
	return nil
}
Example #4
func (st *State) checkCanUpgrade(currentVersion, newVersion string) error {
	matchCurrent := "^" + regexp.QuoteMeta(currentVersion) + "-"
	matchNew := "^" + regexp.QuoteMeta(newVersion) + "-"
	// Get all machines and units with a different or empty version.
	sel := bson.D{{"$or", []bson.D{
		{{"tools", bson.D{{"$exists", false}}}},
		{{"$and", []bson.D{
			{{"tools.version", bson.D{{"$not", bson.RegEx{matchCurrent, ""}}}}},
			{{"tools.version", bson.D{{"$not", bson.RegEx{matchNew, ""}}}}},
		}}},
	}}}
	var agentTags []string
	for _, collection := range []*mgo.Collection{st.machines, st.units} {
		var doc struct {
			Id string `bson:"_id"`
		}
		iter := collection.Find(sel).Select(bson.D{{"_id", 1}}).Iter()
		for iter.Next(&doc) {
			switch collection.Name {
			case "machines":
				agentTags = append(agentTags, names.MachineTag(doc.Id))
			case "units":
				agentTags = append(agentTags, names.UnitTag(doc.Id))
			}
		}
		if err := iter.Err(); err != nil {
			return err
		}
	}
	if len(agentTags) > 0 {
		return newVersionInconsistentError(version.MustParse(currentVersion), agentTags)
	}
	return nil
}
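As an aside, the $or/$and selector above reduces to a simple prefix rule: an agent's tools.version is consistent if it starts with either the current or the new version followed by "-". A minimal sketch of the same rule in plain Go, outside Mongo (the version strings below are made-up values):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirror matchCurrent/matchNew from checkCanUpgrade, assuming a
	// hypothetical upgrade from 1.16.0 to 1.16.2.
	matchCurrent := regexp.MustCompile("^" + regexp.QuoteMeta("1.16.0") + "-")
	matchNew := regexp.MustCompile("^" + regexp.QuoteMeta("1.16.2") + "-")
	for _, v := range []string{
		"1.16.0-precise-amd64", // consistent: matches current
		"1.16.2-precise-amd64", // consistent: matches new
		"1.15.1-precise-amd64", // inconsistent: would be reported
	} {
		consistent := matchCurrent.MatchString(v) || matchNew.MatchString(v)
		fmt.Printf("%s consistent=%v\n", v, consistent)
	}
}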
Example #5
// DeployerTag returns the tag of the agent responsible for deploying
// the unit. If no such entity can be determined, false is returned.
func (u *Unit) DeployerTag() (string, bool) {
	if u.doc.Principal != "" {
		return names.UnitTag(u.doc.Principal), true
	} else if u.doc.MachineId != "" {
		return names.MachineTag(u.doc.MachineId), true
	}
	return "", false
}
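For illustration, a minimal runnable sketch of the same fallback logic, with the tag formatting reduced to plain string handling (a simplification; the real names package also validates ids):

package main

import (
	"fmt"
	"strings"
)

// tag approximates the names.UnitTag/names.MachineTag formatting: a kind
// prefix plus the id, with "/" separators dashed.
func tag(kind, id string) string {
	return kind + "-" + strings.Replace(id, "/", "-", -1)
}

// deployerTag mirrors Unit.DeployerTag for a unit with the given
// (possibly empty) principal name and machine id.
func deployerTag(principal, machineId string) (string, bool) {
	if principal != "" {
		return tag("unit", principal), true
	}
	if machineId != "" {
		return tag("machine", machineId), true
	}
	return "", false
}

func main() {
	fmt.Println(deployerTag("mysql/0", "")) // unit-mysql-0 true (subordinate)
	fmt.Println(deployerTag("", "1"))       // machine-1 true (principal)
	fmt.Println(deployerTag("", ""))        // false (unassigned)
}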
Example #6
// FakeAPIInfo holds API information that points at no real state
// server - connecting to it will always fail.  The given machine id
// identifies the machine to be started.
func FakeAPIInfo(machineId string) *api.Info {
	return &api.Info{
		Addrs:    []string{"0.1.2.3:1234"},
		Tag:      names.MachineTag(machineId),
		Password: "******",
		CACert:   testing.CACert,
	}
}
Example #7
// NewProvisionerAPI creates a new server-side ProvisionerAPI facade.
func NewProvisionerAPI(
	st *state.State,
	resources *common.Resources,
	authorizer common.Authorizer,
) (*ProvisionerAPI, error) {
	if !authorizer.AuthMachineAgent() && !authorizer.AuthEnvironManager() {
		return nil, common.ErrPerm
	}
	getAuthFunc := func() (common.AuthFunc, error) {
		isEnvironManager := authorizer.AuthEnvironManager()
		isMachineAgent := authorizer.AuthMachineAgent()
		authEntityTag := authorizer.GetAuthTag()

		return func(tag string) bool {
			if isMachineAgent && tag == authEntityTag {
				// A machine agent can always access its own machine.
				return true
			}
			_, id, err := names.ParseTag(tag, names.MachineTagKind)
			if err != nil {
				return false
			}
			parentId := state.ParentId(id)
			if parentId == "" {
				// All top-level machines are accessible by the
				// environment manager.
				return isEnvironManager
			}
			// All containers with the authenticated machine as a
			// parent are accessible by it.
			return isMachineAgent && names.MachineTag(parentId) == authEntityTag
		}, nil
	}
	// Both provisioner types can watch the environment.
	getCanWatch := common.AuthAlways(true)
	// Only the environment provisioner can read secrets.
	getCanReadSecrets := common.AuthAlways(authorizer.AuthEnvironManager())
	return &ProvisionerAPI{
		Remover:                common.NewRemover(st, false, getAuthFunc),
		StatusSetter:           common.NewStatusSetter(st, getAuthFunc),
		DeadEnsurer:            common.NewDeadEnsurer(st, getAuthFunc),
		PasswordChanger:        common.NewPasswordChanger(st, getAuthFunc),
		LifeGetter:             common.NewLifeGetter(st, getAuthFunc),
		StateAddresser:         common.NewStateAddresser(st),
		APIAddresser:           common.NewAPIAddresser(st, resources),
		ToolsGetter:            common.NewToolsGetter(st, getAuthFunc),
		EnvironWatcher:         common.NewEnvironWatcher(st, resources, getCanWatch, getCanReadSecrets),
		EnvironMachinesWatcher: common.NewEnvironMachinesWatcher(st, resources, getCanReadSecrets),
		InstanceIdGetter:       common.NewInstanceIdGetter(st, getAuthFunc),
		st:                     st,
		resources:              resources,
		authorizer:             authorizer,
		getAuthFunc:            getAuthFunc,
		getCanWatchMachines:    getCanReadSecrets,
	}, nil
}
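For illustration, a minimal sketch of the access rule that getAuthFunc builds, with names.ParseTag and state.ParentId reduced to string handling of the dashed tag form (a simplification, not juju's actual parsing, and it only considers a single level of container nesting):

package main

import (
	"fmt"
	"strings"
)

// canAccess sketches the closure returned by getAuthFunc above.
func canAccess(isMachineAgent, isEnvironManager bool, authTag, tag string) bool {
	if isMachineAgent && tag == authTag {
		// A machine agent can always access its own machine.
		return true
	}
	id := strings.TrimPrefix(tag, "machine-")
	if id == tag {
		return false // not a machine tag
	}
	if !strings.Contains(id, "-") {
		// Top-level machines are accessible only by the environment manager.
		return isEnvironManager
	}
	// Containers are accessible by their parent machine's agent
	// (single nesting level assumed in this sketch).
	parent := id[:strings.Index(id, "-")]
	return isMachineAgent && "machine-"+parent == authTag
}

func main() {
	fmt.Println(canAccess(true, false, "machine-1", "machine-1"))       // true: own machine
	fmt.Println(canAccess(true, false, "machine-1", "machine-1-lxc-0")) // true: own container
	fmt.Println(canAccess(true, false, "machine-1", "machine-2"))       // false
	fmt.Println(canAccess(false, true, "machine-0", "machine-2"))       // true: environ manager
}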
Example #8
func (s *lxcProvisionerSuite) newLxcProvisioner(c *gc.C) provisioner.Provisioner {
	parentMachineTag := names.MachineTag(s.parentMachineId)
	agentConfig := s.AgentConfigForTag(c, parentMachineTag)
	tools, err := s.provisioner.Tools(agentConfig.Tag())
	c.Assert(err, gc.IsNil)
	managerConfig := container.ManagerConfig{container.ConfigName: "juju"}
	broker, err := provisioner.NewLxcBroker(s.provisioner, tools, agentConfig, managerConfig)
	c.Assert(err, gc.IsNil)
	return provisioner.NewContainerProvisioner(instance.LXC, s.provisioner, agentConfig, broker)
}
Example #9
// NewMachineEnvironmentWorker returns a worker.Worker that uses the notify
// watcher returned from the setup.
func NewMachineEnvironmentWorker(api *environment.Facade, agentConfig agent.Config) worker.Worker {
	// We don't write out system files for the local provider on machine zero
	// as that is the host machine.
	writeSystemFiles := (agentConfig.Tag() != names.MachineTag("0") ||
		agentConfig.Value(agent.ProviderType) != provider.Local)
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &MachineEnvironmentWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	return worker.NewNotifyWorker(envWorker)
}
Example #10
func (s *CommonProvisionerSuite) checkStartInstanceCustom(c *gc.C, m *state.Machine, secret string, cons constraints.Value, includeNetworks, excludeNetworks []string, networkInfo []network.Info, waitInstanceId bool) (inst instance.Instance) {
	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.MachineTag("0"))
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.IncludeNetworks, jc.DeepEquals, includeNetworks)
				c.Assert(o.ExcludeNetworks, jc.DeepEquals, excludeNetworks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)

				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, gc.IsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
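Incidentally, the nonce assertions above pin down the nonce format: the provisioning machine's tag, a colon, then a UUID. A small sketch with a made-up UUID value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Format implied by the test: "machine-<id>:<uuid>". The UUID here
	// is a placeholder, not one juju would generate.
	nonce := "machine-0:11111111-2222-3333-4444-555555555555"
	parts := strings.SplitN(nonce, ":", 2)
	fmt.Println(parts[0]) // machine-0 - the provisioning machine's tag
	fmt.Println(parts[1]) // the UUID part
}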
Example #11
// makeMachineConfig produces a valid cloudinit machine config.
func makeMachineConfig(c *gc.C) *cloudinit.MachineConfig {
	machineID := "0"
	return &cloudinit.MachineConfig{
		MachineId:          machineID,
		MachineNonce:       "gxshasqlnng",
		DataDir:            environs.DataDir,
		LogDir:             agent.DefaultLogDir,
		Jobs:               []params.MachineJob{params.JobManageEnviron, params.JobHostUnits},
		CloudInitOutputLog: environs.CloudInitOutputLog,
		Tools:              &tools.Tools{URL: "file://" + c.MkDir()},
		StateInfo: &state.Info{
			CACert:   testing.CACert,
			Addrs:    []string{"127.0.0.1:123"},
			Tag:      names.MachineTag(machineID),
			Password: "******",
		},
		APIInfo: &api.Info{
			CACert: testing.CACert,
			Addrs:  []string{"127.0.0.1:123"},
			Tag:    names.MachineTag(machineID),
		},
		MachineAgentServiceName: "jujud-machine-0",
	}
}
Example #12
func (c *RetryProvisioningCommand) Init(args []string) error {
	if err := c.EnsureEnvName(); err != nil {
		return err
	}
	if len(args) == 0 {
		return fmt.Errorf("no machine specified")
	}
	c.Machines = make([]string, len(args))
	for i, arg := range args {
		if !names.IsMachine(arg) {
			return fmt.Errorf("invalid machine %q", arg)
		}
		c.Machines[i] = names.MachineTag(arg)
	}
	return nil
}
Example #13
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {

	name := names.MachineTag(machineConfig.MachineId)
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Note here that the kvmObjectFactory only returns a valid container
	// object, and doesn't actually construct the underlying kvm container on
	// disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create container directory: %v", err)
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err)
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(machineConfig.Constraints)
	startParams.Arch = version.Current.Arch
	startParams.Series = series
	startParams.Network = network
	startParams.UserDataFile = userDataFilename

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", machineConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err)
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
Example #14
// MachinesWithTransientErrors returns a slice of machines and corresponding status information
// for those machines which have transient provisioning errors.
func (st *State) MachinesWithTransientErrors() ([]*Machine, []params.StatusResult, error) {
	var results params.StatusResults
	err := st.call("MachinesWithTransientErrors", nil, &results)
	if err != nil {
		return nil, nil, err
	}
	machines := make([]*Machine, len(results.Results))
	for i, status := range results.Results {
		if status.Error != nil {
			continue
		}
		machines[i] = &Machine{
			tag:  names.MachineTag(status.Id),
			life: status.Life,
			st:   st,
		}
	}
	return machines, results.Results, nil
}
Example #15
func InitializeState(c ConfigSetter, envCfg *config.Config, machineCfg BootstrapMachineConfig, timeout state.DialOpts, policy state.Policy) (_ *state.State, _ *state.Machine, resultErr error) {
	if c.Tag() != names.MachineTag(BootstrapMachineId) {
		return nil, nil, fmt.Errorf("InitializeState not called with bootstrap machine's configuration")
	}
	servingInfo, ok := c.StateServingInfo()
	if !ok {
		return nil, nil, fmt.Errorf("state serving information not available")
	}
	// N.B. no users are set up when we're initializing the state,
	// so don't use any tag or password when opening it.
	info, ok := c.StateInfo()
	if !ok {
		return nil, nil, fmt.Errorf("stateinfo not available")
	}
	info.Tag = ""
	info.Password = ""

	logger.Debugf("initializing address %v", info.Addrs)
	st, err := state.Initialize(info, envCfg, timeout, policy)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to initialize state: %v", err)
	}
	logger.Debugf("connected to initial state")
	defer func() {
		if resultErr != nil {
			st.Close()
		}
	}()
	servingInfo.SharedSecret = machineCfg.SharedSecret
	c.SetStateServingInfo(servingInfo)
	if err = initAPIHostPorts(c, st, machineCfg.Addresses, servingInfo.APIPort); err != nil {
		return nil, nil, err
	}
	if err := st.SetStateServingInfo(servingInfo); err != nil {
		return nil, nil, fmt.Errorf("cannot set state serving info: %v", err)
	}
	m, err := initUsersAndBootstrapMachine(c, st, machineCfg)
	if err != nil {
		return nil, nil, err
	}
	return st, m, nil
}
Example #16
// NewMachineConfig sets up a basic machine configuration, for a non-bootstrap
// node.  You'll still need to supply more information, but this takes care of
// the fixed entries and the ones that are always needed.
func NewMachineConfig(machineID, machineNonce string, includeNetworks, excludeNetworks []string,
	stateInfo *state.Info, apiInfo *api.Info) *cloudinit.MachineConfig {
	mcfg := &cloudinit.MachineConfig{
		// Fixed entries.
		DataDir:                 DataDir,
		LogDir:                  agent.DefaultLogDir,
		Jobs:                    []params.MachineJob{params.JobHostUnits},
		CloudInitOutputLog:      CloudInitOutputLog,
		MachineAgentServiceName: "jujud-" + names.MachineTag(machineID),

		// Parameter entries.
		MachineId:       machineID,
		MachineNonce:    machineNonce,
		IncludeNetworks: includeNetworks,
		ExcludeNetworks: excludeNetworks,
		StateInfo:       stateInfo,
		APIInfo:         apiInfo,
	}
	return mcfg
}
Example #17
// GetAssignedMachine returns the assigned machine tag (if any) for
// each given unit.
func (f *FirewallerAPI) GetAssignedMachine(args params.Entities) (params.StringResults, error) {
	result := params.StringResults{
		Results: make([]params.StringResult, len(args.Entities)),
	}
	canAccess, err := f.accessUnit()
	if err != nil {
		return params.StringResults{}, err
	}
	for i, entity := range args.Entities {
		var unit *state.Unit
		unit, err = f.getUnit(canAccess, entity.Tag)
		if err == nil {
			var machineId string
			machineId, err = unit.AssignedMachineId()
			if err == nil {
				result.Results[i].Result = names.MachineTag(machineId)
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}
Example #18
// TestCloudInit checks that the output from the various tests
// in cloudinitTests is well formed.
func (*cloudinitSuite) TestCloudInit(c *gc.C) {
	for i, test := range cloudinitTests {
		c.Logf("test %d", i)
		if test.setEnvConfig {
			test.cfg.Config = minimalConfig(c)
		}
		ci := coreCloudinit.New()
		err := cloudinit.Configure(&test.cfg, ci)
		c.Assert(err, gc.IsNil)
		c.Check(ci, gc.NotNil)
		// render the cloudinit config to bytes, and then
		// back to a map so we can introspect it without
		// worrying about internal details of the cloudinit
		// package.
		data, err := ci.Render()
		c.Assert(err, gc.IsNil)

		x := make(map[interface{}]interface{})
		err = goyaml.Unmarshal(data, &x)
		c.Assert(err, gc.IsNil)

		c.Check(x["apt_upgrade"], gc.Equals, true)
		c.Check(x["apt_update"], gc.Equals, true)

		scripts := getScripts(x)
		assertScriptMatch(c, scripts, test.expectScripts, !test.inexactMatch)
		if test.cfg.Config != nil {
			checkEnvConfig(c, test.cfg.Config, x, scripts)
		}
		checkPackage(c, x, "git", true)
		tag := names.MachineTag(test.cfg.MachineId)
		acfg := getAgentConfig(c, tag, scripts)
		c.Assert(acfg, jc.Contains, "AGENT_SERVICE_NAME: jujud-"+tag)
		source := "deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/cloud-tools main"
		needCloudArchive := test.cfg.Tools.Version.Series == "precise"
		checkAptSource(c, x, source, cloudinit.CanonicalCloudArchiveSigningKey, needCloudArchive)
	}
}
Example #19
// MachineTag returns the machine tag of the interface.
func (ni *NetworkInterface) MachineTag() string {
	return names.MachineTag(ni.doc.MachineId)
}
Example #20
func (s *machineSuite) TestMachineTag(c *gc.C) {
	c.Assert(names.MachineTag("10"), gc.Equals, "machine-10")
	// Check a container id.
	c.Assert(names.MachineTag("10/lxc/1"), gc.Equals, "machine-10-lxc-1")
}
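Taken together, these assertions show what the tag construction amounts to: prefix the id with "machine-" and dash the "/" separators of container ids. A minimal hypothetical sketch (the real names.MachineTag also validates the id):

package main

import (
	"fmt"
	"strings"
)

// machineTag is a sketch inferred from the test expectations above; it is
// not juju's implementation.
func machineTag(id string) string {
	return "machine-" + strings.Replace(id, "/", "-", -1)
}

func main() {
	fmt.Println(machineTag("10"))       // machine-10
	fmt.Println(machineTag("10/lxc/1")) // machine-10-lxc-1
}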
Example #21
// StartInstance is specified in the InstanceBroker interface.
func (e *environ) StartInstance(args environs.StartInstanceParams) (instance.Instance, *instance.HardwareCharacteristics, []network.Info, error) {

	defer delay()
	machineId := args.MachineConfig.MachineId
	logger.Infof("dummy startinstance, machine %s", machineId)
	if err := e.checkBroken("StartInstance"); err != nil {
		return nil, nil, nil, err
	}
	estate, err := e.state()
	if err != nil {
		return nil, nil, nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if args.MachineConfig.MachineNonce == "" {
		return nil, nil, nil, fmt.Errorf("cannot start instance: missing machine nonce")
	}
	if _, ok := e.Config().CACert(); !ok {
		return nil, nil, nil, fmt.Errorf("no CA certificate in environment configuration")
	}
	if args.MachineConfig.StateInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	if args.MachineConfig.APIInfo.Tag != names.MachineTag(machineId) {
		return nil, nil, nil, fmt.Errorf("entity tag must match started machine")
	}
	logger.Infof("would pick tools from %s", args.Tools)
	series := args.Tools.OneSeries()

	idString := fmt.Sprintf("%s-%d", e.name, estate.maxId)
	i := &dummyInstance{
		id:           instance.Id(idString),
		addresses:    instance.NewAddresses(idString + ".dns"),
		ports:        make(map[instance.Port]bool),
		machineId:    machineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
	}

	var hc *instance.HardwareCharacteristics
	// To match current system capability, only provide hardware characteristics for
	// environ machines, not containers.
	if state.ParentId(machineId) == "" {
		// We will just assume the instance hardware characteristics exactly matches
		// the supplied constraints (if specified).
		hc = &instance.HardwareCharacteristics{
			Arch:     args.Constraints.Arch,
			Mem:      args.Constraints.Mem,
			RootDisk: args.Constraints.RootDisk,
			CpuCores: args.Constraints.CpuCores,
			CpuPower: args.Constraints.CpuPower,
			Tags:     args.Constraints.Tags,
		}
		// Fill in some expected instance hardware characteristics if constraints not specified.
		if hc.Arch == nil {
			arch := "amd64"
			hc.Arch = &arch
		}
		if hc.Mem == nil {
			mem := uint64(1024)
			hc.Mem = &mem
		}
		if hc.RootDisk == nil {
			disk := uint64(8192)
			hc.RootDisk = &disk
		}
		if hc.CpuCores == nil {
			cores := uint64(1)
			hc.CpuCores = &cores
		}
	}
	// Simulate networks added when requested.
	networkInfo := make([]network.Info, len(args.MachineConfig.IncludeNetworks))
	for i, netName := range args.MachineConfig.IncludeNetworks {
		if strings.HasPrefix(netName, "bad-") {
			// Simulate we didn't get correct information for the network.
			networkInfo[i] = network.Info{
				ProviderId:  network.Id(netName),
				NetworkName: netName,
				CIDR:        "invalid",
			}
		} else {
			networkInfo[i] = network.Info{
				ProviderId:    network.Id(netName),
				NetworkName:   netName,
				CIDR:          fmt.Sprintf("0.%d.2.0/24", i+1),
				InterfaceName: fmt.Sprintf("eth%d", i),
				VLANTag:       i,
				MACAddress:    fmt.Sprintf("aa:bb:cc:dd:ee:f%d", i),
				IsVirtual:     i > 0,
			}
		}
	}
	estate.insts[i.id] = i
	estate.maxId++
	estate.ops <- OpStartInstance{
		Env:             e.name,
		MachineId:       machineId,
		MachineNonce:    args.MachineConfig.MachineNonce,
		Constraints:     args.Constraints,
		IncludeNetworks: args.MachineConfig.IncludeNetworks,
		ExcludeNetworks: args.MachineConfig.ExcludeNetworks,
		NetworkInfo:     networkInfo,
		Instance:        i,
		Info:            args.MachineConfig.StateInfo,
		APIInfo:         args.MachineConfig.APIInfo,
		Secret:          e.ecfg().secret(),
	}
	return i, hc, networkInfo, nil
}
Example #22
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
	start := time.Now()
	name := names.MachineTag(machineConfig.MachineId)
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, err
	}
	logger.Tracef("write cloud-init")
	if manager.createWithClone {
		// If we are using clone, disable the apt-get steps
		machineConfig.DisablePackageCommands = true
	}
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		logger.Errorf("failed to write user data: %v", err)
		return nil, nil, err
	}
	logger.Tracef("write the lxc.conf file")
	configFile, err := writeLxcConfig(network, directory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, nil, err
	}

	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			network,
			machineConfig.AuthorizedKeys,
			machineConfig.AptProxySettings,
		)
		if err != nil {
			return nil, nil, err
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}

		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to acquire lock on template: %v", err)
		}
		defer lock.Unlock()
		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			logger.Errorf("lxc container cloning failed: %v", err)
			return nil, nil, err
		}
	} else {
		// Note here that the lxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc container on
		// disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		// Create the container.
		logger.Tracef("create the container")
		if err := lxcContainer.Create(configFile, defaultTemplate, nil, templateParams); err != nil {
			logger.Errorf("lxc container creation failed: %v", err)
			return nil, nil, err
		}
		logger.Tracef("lxc container created")
	}
	if err := autostartContainer(name); err != nil {
		return nil, nil, err
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, nil, err
	}
	arch := version.Current.Arch
	hardware := &instance.HardwareCharacteristics{
		Arch: &arch,
	}
	logger.Tracef("container %q started: %v", name, time.Now().Sub(start))
	return &lxcInstance{lxcContainer, name}, hardware, nil
}
Example #23
// ConfigureJuju updates the provided cloudinit.Config with configuration
// to initialise a Juju machine agent.
func ConfigureJuju(cfg *MachineConfig, c *cloudinit.Config) error {
	if err := verifyConfig(cfg); err != nil {
		return err
	}

	// Initialise progress reporting. We need to do this separately for
	// runcmd and (possibly, below) for bootcmd, as they may be run in
	// different shell sessions.
	initProgressCmd := cloudinit.InitProgressCmd()
	c.AddRunCmd(initProgressCmd)

	// If we're doing synchronous bootstrap or manual provisioning, then
	// ConfigureBasic won't have been invoked; thus, the output log won't
	// have been set. We don't want to show the log to the user, so simply
	// append to the log file rather than teeing.
	if stdout, _ := c.Output(cloudinit.OutAll); stdout == "" {
		c.SetOutput(cloudinit.OutAll, ">> "+cfg.CloudInitOutputLog, "")
		c.AddBootCmd(initProgressCmd)
		c.AddBootCmd(cloudinit.LogProgressCmd("Logging to %s on remote host", cfg.CloudInitOutputLog))
	}

	if !cfg.DisablePackageCommands {
		AddAptCommands(cfg.AptProxySettings, c)
	}

	// Write out the normal proxy settings so that the settings are
	// sourced by bash, and ssh through that.
	c.AddScripts(
		// We look to see if the proxy line is there already, as
		// the manual provider may have added it. The ubuntu
		// user may not exist (local provider only).
		`([ ! -e /home/ubuntu/.profile ] || grep -q '.juju-proxy' /home/ubuntu/.profile) || ` +
			`printf '\n# Added by juju\n[ -f "$HOME/.juju-proxy" ] && . "$HOME/.juju-proxy"\n' >> /home/ubuntu/.profile`)
	if (cfg.ProxySettings != osenv.ProxySettings{}) {
		exportedProxyEnv := cfg.ProxySettings.AsScriptEnvironment()
		c.AddScripts(strings.Split(exportedProxyEnv, "\n")...)
		c.AddScripts(
			fmt.Sprintf(
				`[ -e /home/ubuntu ] && (printf '%%s\n' %s > /home/ubuntu/.juju-proxy && chown ubuntu:ubuntu /home/ubuntu/.juju-proxy)`,
				shquote(cfg.ProxySettings.AsScriptEnvironment())))
	}

	// Make the lock dir and change the ownership of the lock dir itself to
	// ubuntu:ubuntu from root:root so the juju-run command run as the ubuntu
	// user is able to get access to the hook execution lock (like the uniter
	// itself does.)
	lockDir := path.Join(cfg.DataDir, "locks")
	c.AddScripts(
		fmt.Sprintf("mkdir -p %s", lockDir),
		// We only try to change ownership if there is an ubuntu user
		// defined, and we determine this by the existence of the home dir.
		fmt.Sprintf("[ -e /home/ubuntu ] && chown ubuntu:ubuntu %s", lockDir),
		fmt.Sprintf("mkdir -p %s", cfg.LogDir),
		fmt.Sprintf("chown syslog:adm %s", cfg.LogDir),
	)

	// Make a directory for the tools to live in, then fetch the
	// tools and unarchive them into it.
	var copyCmd string
	if strings.HasPrefix(cfg.Tools.URL, fileSchemePrefix) {
		copyCmd = fmt.Sprintf("cp %s $bin/tools.tar.gz", shquote(cfg.Tools.URL[len(fileSchemePrefix):]))
	} else {
		curlCommand := "curl -sSfw 'tools from %{url_effective} downloaded: HTTP %{http_code}; time %{time_total}s; size %{size_download} bytes; speed %{speed_download} bytes/s '"
		if cfg.DisableSSLHostnameVerification {
			curlCommand += " --insecure"
		}
		copyCmd = fmt.Sprintf("%s -o $bin/tools.tar.gz %s", curlCommand, shquote(cfg.Tools.URL))
		c.AddRunCmd(cloudinit.LogProgressCmd("Fetching tools: %s", copyCmd))
	}
	toolsJson, err := json.Marshal(cfg.Tools)
	if err != nil {
		return err
	}
	c.AddScripts(
		"bin="+shquote(cfg.jujuTools()),
		"mkdir -p $bin",
		copyCmd,
		fmt.Sprintf("sha256sum $bin/tools.tar.gz > $bin/juju%s.sha256", cfg.Tools.Version),
		fmt.Sprintf(`grep '%s' $bin/juju%s.sha256 || (echo "Tools checksum mismatch"; exit 1)`,
			cfg.Tools.SHA256, cfg.Tools.Version),
		fmt.Sprintf("tar zxf $bin/tools.tar.gz -C $bin"),
		fmt.Sprintf("rm $bin/tools.tar.gz && rm $bin/juju%s.sha256", cfg.Tools.Version),
		fmt.Sprintf("printf %%s %s > $bin/downloaded-tools.txt", shquote(string(toolsJson))),
	)

	// We add the machine agent's configuration info
	// before running bootstrap-state so that bootstrap-state
	// has a chance to rewrite it to change the password.
	// It would be cleaner to change bootstrap-state to
	// be responsible for starting the machine agent itself,
	// but this would not be backward compatible.
	machineTag := names.MachineTag(cfg.MachineId)
	_, err = cfg.addAgentInfo(c, machineTag)
	if err != nil {
		return err
	}

	// Add the cloud archive cloud-tools pocket to apt sources
	// for series that need it. This gives us up-to-date LXC,
	// MongoDB, and other infrastructure.
	if !cfg.DisablePackageCommands {
		series := cfg.Tools.Version.Series
		MaybeAddCloudArchiveCloudTools(c, series)
	}

	if cfg.Bootstrap {
		cons := cfg.Constraints.String()
		if cons != "" {
			cons = " --constraints " + shquote(cons)
		}
		var hardware string
		if cfg.HardwareCharacteristics != nil {
			if hardware = cfg.HardwareCharacteristics.String(); hardware != "" {
				hardware = " --hardware " + shquote(hardware)
			}
		}
		c.AddRunCmd(cloudinit.LogProgressCmd("Bootstrapping Juju machine agent"))
		c.AddScripts(
			// The bootstrapping is always run with debug on.
			cfg.jujuTools() + "/jujud bootstrap-state" +
				" --data-dir " + shquote(cfg.DataDir) +
				" --env-config " + shquote(base64yaml(cfg.Config)) +
				" --instance-id " + shquote(string(cfg.InstanceId)) +
				hardware +
				cons +
				" --debug",
		)
	}

	return cfg.addMachineAgentToBoot(c, machineTag, cfg.MachineId)
}
Example #24
func verifyConfig(cfg *MachineConfig) (err error) {
	defer errors.Maskf(&err, "invalid machine configuration")
	if !names.IsMachine(cfg.MachineId) {
		return fmt.Errorf("invalid machine id")
	}
	if cfg.DataDir == "" {
		return fmt.Errorf("missing var directory")
	}
	if cfg.LogDir == "" {
		return fmt.Errorf("missing log directory")
	}
	if len(cfg.Jobs) == 0 {
		return fmt.Errorf("missing machine jobs")
	}
	if cfg.CloudInitOutputLog == "" {
		return fmt.Errorf("missing cloud-init output log path")
	}
	if cfg.Tools == nil {
		return fmt.Errorf("missing tools")
	}
	if cfg.Tools.URL == "" {
		return fmt.Errorf("missing tools URL")
	}
	if cfg.StateInfo == nil {
		return fmt.Errorf("missing state info")
	}
	if len(cfg.StateInfo.CACert) == 0 {
		return fmt.Errorf("missing CA certificate")
	}
	if cfg.APIInfo == nil {
		return fmt.Errorf("missing API info")
	}
	if len(cfg.APIInfo.CACert) == 0 {
		return fmt.Errorf("missing API CA certificate")
	}
	if cfg.MachineAgentServiceName == "" {
		return fmt.Errorf("missing machine agent service name")
	}
	if cfg.Bootstrap {
		if cfg.Config == nil {
			return fmt.Errorf("missing environment configuration")
		}
		if cfg.StateInfo.Tag != "" {
			return fmt.Errorf("entity tag must be blank when starting a state server")
		}
		if cfg.APIInfo.Tag != "" {
			return fmt.Errorf("entity tag must be blank when starting a state server")
		}
		if cfg.StateServingInfo == nil {
			return fmt.Errorf("missing state serving info")
		}
		if len(cfg.StateServingInfo.Cert) == 0 {
			return fmt.Errorf("missing state server certificate")
		}
		if len(cfg.StateServingInfo.PrivateKey) == 0 {
			return fmt.Errorf("missing state server private key")
		}
		if cfg.StateServingInfo.StatePort == 0 {
			return fmt.Errorf("missing state port")
		}
		if cfg.StateServingInfo.APIPort == 0 {
			return fmt.Errorf("missing API port")
		}
		if cfg.SystemPrivateSSHKey == "" {
			return fmt.Errorf("missing system ssh identity")
		}
		if cfg.InstanceId == "" {
			return fmt.Errorf("missing instance-id")
		}
	} else {
		if len(cfg.StateInfo.Addrs) == 0 {
			return fmt.Errorf("missing state hosts")
		}
		if cfg.StateInfo.Tag != names.MachineTag(cfg.MachineId) {
			return fmt.Errorf("entity tag must match started machine")
		}
		if len(cfg.APIInfo.Addrs) == 0 {
			return fmt.Errorf("missing API hosts")
		}
		if cfg.APIInfo.Tag != names.MachineTag(cfg.MachineId) {
			return fmt.Errorf("entity tag must match started machine")
		}
		if cfg.StateServingInfo != nil {
			return fmt.Errorf("state serving info unexpectedly present")
		}
	}
	if cfg.MachineNonce == "" {
		return fmt.Errorf("missing machine nonce")
	}
	return nil
}
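For illustration, the tag rules verifyConfig enforces can be condensed into one check; a minimal sketch with the Info types simplified away:

package main

import "fmt"

// checkTag condenses the bootstrap/non-bootstrap rule above: state and
// API tags must be blank when starting a state server, and must equal
// names.MachineTag(cfg.MachineId) otherwise.
func checkTag(bootstrap bool, tag, machineTag string) error {
	if bootstrap && tag != "" {
		return fmt.Errorf("entity tag must be blank when starting a state server")
	}
	if !bootstrap && tag != machineTag {
		return fmt.Errorf("entity tag must match started machine")
	}
	return nil
}

func main() {
	fmt.Println(checkTag(true, "", "machine-0"))           // <nil>
	fmt.Println(checkTag(true, "machine-0", "machine-0"))  // error: must be blank
	fmt.Println(checkTag(false, "machine-0", "machine-0")) // <nil>
	fmt.Println(checkTag(false, "", "machine-0"))          // error: must match
}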
Example #25
func (a *MachineAgent) Tag() string {
	return names.MachineTag(a.MachineId)
}
Example #26
func (fw *Firewaller) loop() error {
	defer fw.stopWatchers()

	var err error
	var reconciled bool

	fw.environ, err = worker.WaitForEnviron(fw.environWatcher, fw.st, fw.tomb.Dying())
	if err != nil {
		return err
	}
	if fw.environ.Config().FirewallMode() == config.FwGlobal {
		fw.globalMode = true
		fw.globalPortRef = make(map[instance.Port]int)
	}
	for {
		select {
		case <-fw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-fw.environWatcher.Changes():
			if !ok {
				return watcher.MustErr(fw.environWatcher)
			}
			config, err := fw.st.EnvironConfig()
			if err != nil {
				return err
			}
			if err := fw.environ.SetConfig(config); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
		case change, ok := <-fw.machinesWatcher.Changes():
			if !ok {
				return watcher.MustErr(fw.machinesWatcher)
			}
			for _, machineId := range change {
				fw.machineLifeChanged(names.MachineTag(machineId))
			}
			if !reconciled {
				reconciled = true
				var err error
				if fw.globalMode {
					err = fw.reconcileGlobal()
				} else {
					err = fw.reconcileInstances()
				}
				if err != nil {
					return err
				}
			}
		case change := <-fw.unitsChange:
			if err := fw.unitsChanged(change); err != nil {
				return err
			}
		case change := <-fw.portsChange:
			change.unitd.ports = change.ports
			if err := fw.flushUnits([]*unitData{change.unitd}); err != nil {
				return errgo.Annotate(err, "cannot change firewall ports")
			}
		case change := <-fw.exposedChange:
			change.serviced.exposed = change.exposed
			unitds := []*unitData{}
			for _, unitd := range change.serviced.unitds {
				unitds = append(unitds, unitd)
			}
			if err := fw.flushUnits(unitds); err != nil {
				return errgo.Annotate(err, "cannot change firewall ports")
			}
		}
	}
}
Example #27
func agentConfig(machineId, provider string) *mockConfig {
	return &mockConfig{tag: names.MachineTag(machineId), provider: provider}
}
Example #28
// Tag returns a name identifying the machine that is safe to use
// as a file name.  The returned name will be different from the Tag
// of any other entity in the same state.
func (m *Machine) Tag() string {
	return names.MachineTag(m.Id())
}