Example 1
func buildMachineMatcherShims(m *state.Machine, patterns []string) (shims []closurePredicate, _ error) {
	// Look at machine status.
	statusInfo, err := m.Status()
	if err != nil {
		return nil, err
	}
	shims = append(shims, func() (bool, bool, error) { return matchAgentStatus(patterns, statusInfo.Status) })

	// Look at machine addresses. WARNING: Avoid the temptation to
	// bring the append into the loop. The value we would close over
	// will continue to change after the closure is created, and we'd
	// only examine the last element of the loop for all closures.
	var addrs []string
	for _, a := range m.Addresses() {
		addrs = append(addrs, a.Value)
	}
	shims = append(shims, func() (bool, bool, error) { return matchSubnet(patterns, addrs...) })

	// If the machine hosts a unit that matches any of the given
	// criteria, consider the machine a match as well.
	unitShims, err := buildShimsForUnit(m.Units, patterns...)
	if err != nil {
		return nil, err
	}
	shims = append(shims, unitShims...)

	// Units may be able to match the pattern. Ultimately defer to
	// that logic, and guard against breaking the predicate-chain.
	if len(unitShims) == 0 {
		shims = append(shims, func() (bool, bool, error) { return false, true, nil })
	}

	return
}
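The warning in the comment above is the classic Go loop-variable capture pitfall: before Go 1.22, a for loop reused a single loop variable, so closures created inside the loop all observed its final value. A minimal, self-contained sketch of the bug and the conventional shadowing fix (names here are illustrative, not from the Juju code):

package main

import "fmt"

func main() {
	values := []string{"a", "b", "c"}

	// Wrong (pre-Go 1.22 semantics): every closure shares the single
	// loop variable, so each one reports the final element.
	var wrong []func() string
	for _, v := range values {
		wrong = append(wrong, func() string { return v })
	}

	// Right: the shadowing copy gives each closure its own value. This
	// is the effect buildMachineMatcherShims gets by copying addresses
	// into a slice first and closing over the slice exactly once.
	var right []func() string
	for _, v := range values {
		v := v
		right = append(right, func() string { return v })
	}

	for i := range values {
		fmt.Printf("wrong=%s right=%s\n", wrong[i](), right[i]())
	}
}

Under pre-1.22 semantics this prints wrong=c on every line, while right yields a, b, c in turn; Go 1.22 changed loop scoping so both behave the same.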
Example 2
// machineTags returns machine-specific tags to set on the instance.
func (p *ProvisionerAPI) machineTags(m *state.Machine, jobs []multiwatcher.MachineJob) (map[string]string, error) {
	// Names of all units deployed to the machine.
	//
	// TODO(axw) 2015-06-02 #1461358
	// We need a worker that periodically updates
	// instance tags with current deployment info.
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}
	unitNames := make([]string, 0, len(units))
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		unitNames = append(unitNames, unit.Name())
	}
	sort.Strings(unitNames)

	cfg, err := p.st.EnvironConfig()
	if err != nil {
		return nil, errors.Trace(err)
	}
	machineTags := instancecfg.InstanceTags(cfg, jobs)
	if len(unitNames) > 0 {
		machineTags[tags.JujuUnitsDeployed] = strings.Join(unitNames, " ")
	}
	return machineTags, nil
}
Example 3
func getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, err
	}
	// TODO(dimitern) For now, since network names and
	// provider ids are the same, we return what we got
	// from state. In the future, when networks can be
	// added before provisioning, we should convert both
	// slices from juju network names to provider-specific
	// ids before returning them.
	networks, err := m.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	var jobs []params.MachineJob
	for _, job := range m.Jobs() {
		jobs = append(jobs, job.ToParams())
	}
	return &params.ProvisioningInfo{
		Constraints: cons,
		Series:      m.Series(),
		Placement:   m.Placement(),
		Networks:    networks,
		Jobs:        jobs,
	}, nil
}
Example 4
func (api *MachinerAPI) SetMachineAddresses(args params.SetMachinesAddresses) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.MachineAddresses)),
	}
	canModify, err := api.getCanModify()
	if err != nil {
		return results, err
	}
	for i, arg := range args.MachineAddresses {
		tag, err := names.ParseMachineTag(arg.Tag)
		if err != nil {
			results.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		err = common.ErrPerm
		if canModify(tag) {
			var m *state.Machine
			m, err = api.getMachine(tag)
			if err == nil {
				err = m.SetMachineAddresses(arg.Addresses...)
			} else if errors.IsNotFound(err) {
				err = common.ErrPerm
			}
		}
		results.Results[i].Error = common.ServerError(err)
	}
	return results, nil
}
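The err = common.ErrPerm line above is a default-deny pattern: each result starts out as a permission error and is only cleared or replaced once the permission check passes. A minimal sketch of the same shape, with hypothetical names standing in for Juju's types:

package main

import (
	"errors"
	"fmt"
)

var errPerm = errors.New("permission denied")

// process defaults to a permission error and only overwrites it after
// the caller proves it may act, mirroring SetMachineAddresses above.
func process(allowed bool, work func() error) error {
	err := errPerm
	if allowed {
		err = work()
	}
	return err
}

func main() {
	fmt.Println(process(false, func() error { return nil })) // permission denied
	fmt.Println(process(true, func() error { return nil }))  // <nil>
}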
Example 5
File: machiner.go Project: bac/juju
func (api *MachinerAPI) getOneMachineProviderNetworkConfig(m *state.Machine) ([]params.NetworkConfig, error) {
	instId, err := m.InstanceId()
	if err != nil {
		return nil, errors.Trace(err)
	}

	netEnviron, err := networkingcommon.NetworkingEnvironFromModelConfig(
		stateenvirons.EnvironConfigGetter{api.st},
	)
	if errors.IsNotSupported(err) {
		logger.Infof("not updating provider network config: %v", err)
		return nil, nil
	} else if err != nil {
		return nil, errors.Annotate(err, "cannot get provider network config")
	}

	interfaceInfos, err := netEnviron.NetworkInterfaces(instId)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get network interfaces of %q", instId)
	}
	if len(interfaceInfos) == 0 {
		logger.Infof("not updating provider network config: no interfaces returned")
		return nil, nil
	}

	providerConfig := networkingcommon.NetworkConfigFromInterfaceInfo(interfaceInfos)
	logger.Tracef("provider network config instance %q: %+v", instId, providerConfig)

	return providerConfig, nil
}
Example 6
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	// This check in particular leads to tests just hanging
	// indefinitely quite often on i386.
	coretesting.SkipIfI386(c, "lp:1425569")

	var event mock.Event
	s.State.StartSync()
	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Created)
		argsSet := set.NewStrings(event.TemplateArgs...)
		c.Assert(argsSet.Contains("imageURL"), jc.IsTrue)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting the mock container to get created")
	}

	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Started)
		err := machine.Refresh()
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting the mock container to start")
	}

	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
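Both select blocks above use the standard receive-or-timeout idiom, which keeps a test from hanging forever when the expected event never arrives. A minimal sketch with a plain channel and timeout (the channel contents are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string, 1)
	events <- "created"

	select {
	case ev := <-events:
		fmt.Println("got event:", ev)
	case <-time.After(50 * time.Millisecond):
		// In a gocheck test this branch would call c.Fatalf.
		fmt.Println("timed out waiting for event")
	}
}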
Example 7
// newAgent returns a new MachineAgent instance
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
	a := &MachineAgent{}
	s.initAgent(c, a, "--machine-id", m.Id())
	err := a.ReadConfig(m.Tag().String())
	c.Assert(err, gc.IsNil)
	return a
}
Example 8
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	instanceIdSet := make(set.Strings)
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		instanceIds, err := state.ServiceInstances(st, unit.ApplicationName())
		if err != nil {
			return nil, err
		}
		for _, instanceId := range instanceIds {
			instanceIdSet.Add(string(instanceId))
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
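The helper above leans on a set for deduplication (the same instance ID can surface via more than one unit) and on SortedValues for deterministic output. A minimal sketch of the same dedup-and-sort pattern using only the standard library:

package main

import (
	"fmt"
	"sort"
)

func main() {
	raw := []string{"i-2", "i-1", "i-2", "i-3"}

	// Deduplicate via a map-backed set.
	seen := make(map[string]struct{})
	for _, id := range raw {
		seen[id] = struct{}{}
	}

	// Emit in sorted order so tests can compare results deterministically.
	ids := make([]string, 0, len(seen))
	for id := range seen {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	fmt.Println(ids) // [i-1 i-2 i-3]
}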
Example 9
// constructImageConstraint returns model-specific criteria used to look for image metadata.
func (p *ProvisionerAPI) constructImageConstraint(m *state.Machine) (*imagemetadata.ImageConstraint, environs.Environ, error) {
	// If we can determine current region,
	// we want only metadata specific to this region.
	cloud, env, err := p.obtainEnvCloudConfig()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	lookup := simplestreams.LookupParams{
		Series: []string{m.Series()},
		Stream: env.Config().ImageStream(),
	}

	mcons, err := m.Constraints()
	if err != nil {
		return nil, nil, errors.Annotatef(err, "cannot get machine constraints for machine %v", m.MachineTag().Id())
	}

	if mcons.Arch != nil {
		lookup.Arches = []string{*mcons.Arch}
	}
	if cloud != nil {
		lookup.CloudSpec = *cloud
	}

	return imagemetadata.NewImageConstraint(lookup), env, nil
}
Example 10
// runMachineUpdate connects via ssh to the machine and runs the update script
func runMachineUpdate(m *state.Machine, sshArg string) error {
	progress("updating machine: %v\n", m)
	addr := network.SelectPublicAddress(m.Addresses())
	if addr == "" {
		return fmt.Errorf("no appropriate public address found")
	}
	return runViaSsh(addr, sshArg)
}
Example 11
func (s *commonMachineSuite) primeAgentWithMachine(c *gc.C, m *state.Machine, vers version.Binary) (*state.Machine, agent.ConfigSetterWriter, *tools.Tools) {
	pinger, err := m.SetAgentPresence()
	c.Assert(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		c.Assert(worker.Stop(pinger), jc.ErrorIsNil)
	})
	return s.configureMachine(c, m.Id(), vers)
}
Example 12
func (s *assignCleanSuite) assertAssignUnit(c *gc.C, expectedMachine *state.Machine) {
	unit, err := s.wordpress.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	reusedMachine, err := s.assignUnit(unit)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(reusedMachine.Id(), gc.Equals, expectedMachine.Id())
	c.Assert(reusedMachine.Clean(), jc.IsFalse)
}
Example 13
func (s *kvmProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	s.State.StartSync()
	event := s.nextEvent(c)
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
Example 14
func (s *clientSuite) assertRetryProvisioning(c *gc.C, machine *state.Machine) {
	_, err := s.APIState.Client().RetryProvisioning(machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	statusInfo, err := machine.Status()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(statusInfo.Status, gc.Equals, status.Error)
	c.Assert(statusInfo.Message, gc.Equals, "error")
	c.Assert(statusInfo.Data["transient"], jc.IsTrue)
}
Example 15
func (p *ProvisionerAPI) machineEndpointBindings(m *state.Machine) (map[string]string, error) {
	units, err := m.Units()
	if err != nil {
		return nil, errors.Trace(err)
	}

	spacesNamesToProviderIds, err := p.allSpaceNamesToProviderIds()
	if err != nil {
		return nil, errors.Trace(err)
	}

	var combinedBindings map[string]string
	processedServicesSet := set.NewStrings()
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		service, err := unit.Application()
		if err != nil {
			return nil, errors.Trace(err)
		}
		if processedServicesSet.Contains(service.Name()) {
			// Already processed, skip it.
			continue
		}
		bindings, err := service.EndpointBindings()
		if err != nil {
			return nil, errors.Trace(err)
		}
		processedServicesSet.Add(service.Name())

		if len(bindings) == 0 {
			continue
		}
		if combinedBindings == nil {
			combinedBindings = make(map[string]string)
		}

		for endpoint, spaceName := range bindings {
			if spaceName == "" {
				// Skip unspecified bindings, as they won't affect the instance
				// selected for provisioning.
				continue
			}

			spaceProviderId, nameKnown := spacesNamesToProviderIds[spaceName]
			if nameKnown {
				combinedBindings[endpoint] = spaceProviderId
			} else {
				// Technically, this can't happen in practice, as we're
				// validating the bindings during service deployment.
				return nil, errors.Errorf("unknown space %q with no provider ID specified for endpoint %q", spaceName, endpoint)
			}
		}
	}
	return combinedBindings, nil
}
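Two details of the merge above are worth noting: combinedBindings stays nil until the first non-empty bindings map arrives (so an all-empty input yields nil rather than an empty map), and processedServicesSet avoids re-reading bindings when several units of one application share the machine. A minimal sketch of the lazy-init merge, with hypothetical inputs:

package main

import "fmt"

func merge(sources []map[string]string) map[string]string {
	var combined map[string]string
	for _, bindings := range sources {
		if len(bindings) == 0 {
			continue
		}
		if combined == nil {
			// Allocate only once there is something to record, so an
			// all-empty input returns nil rather than an empty map.
			combined = make(map[string]string)
		}
		for k, v := range bindings {
			combined[k] = v
		}
	}
	return combined
}

func main() {
	fmt.Println(merge(nil))                                    // map[] (nil)
	fmt.Println(merge([]map[string]string{{"db": "space-1"}})) // map[db:space-1]
}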
Example 16
// runMachineUpdate connects via ssh to the machine and runs the update script.
func runMachineUpdate(machine *state.Machine, sshArg string) error {
	addr, err := machine.PublicAddress()
	if err != nil {
		if network.IsNoAddress(err) {
			return errors.Annotatef(err, "no appropriate public address found")
		}
		return errors.Trace(err)
	}
	return runViaSSH(addr.Value, sshArg)
}
Example 17
func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, err
	}

	volumes, err := p.machineVolumeParams(m)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// TODO(dimitern) Drop this once we only use spaces for
	// deployments.
	networks, err := m.RequestedNetworks()
	if err != nil {
		return nil, err
	}

	var jobs []multiwatcher.MachineJob
	for _, job := range m.Jobs() {
		jobs = append(jobs, job.ToParams())
	}

	tags, err := p.machineTags(m, jobs)
	if err != nil {
		return nil, errors.Trace(err)
	}

	subnetsToZones, err := p.machineSubnetsAndZones(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot match subnets to zones")
	}

	endpointBindings, err := p.machineEndpointBindings(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine machine endpoint bindings")
	}
	imageMetadata, err := p.availableImageMetadata(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get available image metadata")
	}

	return &params.ProvisioningInfo{
		Constraints:      cons,
		Series:           m.Series(),
		Placement:        m.Placement(),
		Networks:         networks,
		Jobs:             jobs,
		Volumes:          volumes,
		Tags:             tags,
		SubnetsToZones:   subnetsToZones,
		EndpointBindings: endpointBindings,
		ImageMetadata:    imageMetadata,
	}, nil
}
Example 18
func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	s.State.StartSync()
	event := <-s.events
	c.Assert(event.Action, gc.Equals, mock.Created)
	event = <-s.events
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, gc.IsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
Example 19
File: status.go Project: bac/juju
// processMachine retrieves version and status information for the given machine.
// It also returns deprecated legacy status information.
func processMachine(machine *state.Machine) (out params.DetailedStatus) {
	statusInfo, err := common.MachineStatus(machine)
	populateStatusFromStatusInfoAndErr(&out, statusInfo, err)

	out.Life = processLife(machine)

	if t, err := machine.AgentTools(); err == nil {
		out.Version = t.Version.Number.String()
	}
	return
}
Example 20
func (s *ipAddressesStateSuite) addNamedDeviceForMachine(c *gc.C, name string, machine *state.Machine) *state.LinkLayerDevice {
	deviceArgs := state.LinkLayerDeviceArgs{
		Name: name,
		Type: state.EthernetDevice,
	}
	err := machine.SetLinkLayerDevices(deviceArgs)
	c.Assert(err, jc.ErrorIsNil)
	device, err := machine.LinkLayerDevice(name)
	c.Assert(err, jc.ErrorIsNil)
	return device
}
Example 21
// waitRemoved waits for the supplied machine to be removed from state.
func (s *CommonProvisionerSuite) waitRemoved(c *gc.C, m *state.Machine) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		if errors.IsNotFound(err) {
			return true
		}
		c.Assert(err, jc.ErrorIsNil)
		c.Logf("machine %v is still %s", m, m.Life())
		return false
	})
}
Example 22
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
	addrs := network.NewAddresses("0.1.2.3")
	err := machine.SetProviderAddresses(addrs...)
	c.Assert(err, jc.ErrorIsNil)
	// Set the addresses in the environ instance as well so that if the instance poller
	// runs it won't overwrite them.
	instId, err := machine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	insts, err := s.Environ.Instances([]instance.Id{instId})
	c.Assert(err, jc.ErrorIsNil)
	dummy.SetInstanceAddresses(insts[0], addrs)
}
Example 23
func (s *kvmProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	// This check in particular leads to tests just hanging
	// indefinitely quite often on i386.
	coretesting.SkipIfI386(c, "lp:1425569")

	s.State.StartSync()
	event := s.nextEvent(c)
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
Example 24
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
func (s *CommonProvisionerSuite) waitInstanceId(c *gc.C, m *state.Machine, expect instance.Id) {
	s.waitHardwareCharacteristics(c, m, func() bool {
		if actual, err := m.InstanceId(); err == nil {
			c.Assert(actual, gc.Equals, expect)
			return true
		} else if !errors.IsNotProvisioned(err) {
			// We don't expect any errors.
			panic(err)
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
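waitInstanceId and the other wait* helpers in these suites are built on a poll-until-condition shape. Juju's versions are watcher-driven; the sketch below substitutes simple fixed-interval polling with a deadline, and every name in it is illustrative:

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond until it returns true or the timeout elapses,
// reporting whether the condition was met.
func waitFor(cond func() bool, interval, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool {
		return time.Since(start) > 20*time.Millisecond
	}, 5*time.Millisecond, time.Second)
	fmt.Println("condition met:", ok)
}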
Example 25
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
	addrs := []network.Address{
		network.NewAddress("0.1.2.3", network.ScopeUnknown),
	}
	err := machine.SetAddresses(addrs...)
	c.Assert(err, gc.IsNil)
	// Set the addresses in the environ instance as well so that if the instance poller
	// runs it won't overwrite them.
	instId, err := machine.InstanceId()
	c.Assert(err, gc.IsNil)
	insts, err := s.Conn.Environ.Instances([]instance.Id{instId})
	c.Assert(err, gc.IsNil)
	dummy.SetInstanceAddresses(insts[0], addrs)
}
Example 26
// waitForRemovalMark waits for the supplied machine to be marked for removal.
func (s *CommonProvisionerSuite) waitForRemovalMark(c *gc.C, m *state.Machine) {
	w := s.BackingState.WatchMachineRemovals()
	name := fmt.Sprintf("machine %v marked for removal", m)
	s.waitForWatcher(c, w, name, func() bool {
		removals, err := s.BackingState.AllMachineRemovals()
		c.Assert(err, jc.ErrorIsNil)
		for _, removal := range removals {
			if removal == m.Id() {
				return true
			}
		}
		return false
	})
}
Example 27
func (s *CommonProvisionerSuite) checkStartInstanceCustom(c *gc.C, m *state.Machine, secret string, cons constraints.Value, networks []string, networkInfo []network.Info, waitInstanceId bool) (inst instance.Instance) {
	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.NewMachineTag("0").String())
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.Networks, jc.DeepEquals, networks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)

				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, gc.IsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
Example 28
// updateMachineAddresses will update the machine doc to the current addresses
func updateMachineAddresses(machine *state.Machine, privateAddress, publicAddress string) error {
	privateAddressAddress := network.Address{
		Value: privateAddress,
		Type:  network.DeriveAddressType(privateAddress),
	}
	publicAddressAddress := network.Address{
		Value: publicAddress,
		Type:  network.DeriveAddressType(publicAddress),
	}
	if err := machine.SetProviderAddresses(publicAddressAddress, privateAddressAddress); err != nil {
		return errors.Trace(err)
	}
	return nil
}
Example 29
File: run.go Project: imoapps/juju
// remoteParamsForMachine returns a filled in RemoteExec instance
// based on the machine, command and timeout params.  If the machine
// does not have an internal address, the Host is empty. This is caught
// by the function that actually tries to execute the command.
func remoteParamsForMachine(machine *state.Machine, command string, timeout time.Duration) *RemoteExec {
	// magic boolean parameters are bad :-(
	address, ok := network.SelectInternalAddress(machine.Addresses(), false)
	execParams := &RemoteExec{
		ExecParams: ssh.ExecParams{
			Command: command,
			Timeout: timeout,
		},
		MachineId: machine.Id(),
	}
	if ok {
		execParams.Host = fmt.Sprintf("ubuntu@%s", address.Value)
	}
	return execParams
}
Example 30
func newMachineToolWaiter(m *state.Machine) *toolsWaiter {
	w := m.Watch()
	waiter := &toolsWaiter{
		changes: make(chan struct{}, 1),
		watcher: w,
		tooler:  m,
	}
	go func() {
		for range w.Changes() {
			waiter.changes <- struct{}{}
		}
		close(waiter.changes)
	}()
	return waiter
}
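The goroutine above forwards each watcher notification into the waiter's own channel and closes it when the watcher stops, giving consumers a clean end-of-stream. A minimal sketch of that forwarding pattern with plain channels in place of the watcher:

package main

import "fmt"

func main() {
	source := make(chan struct{})
	changes := make(chan struct{}, 1)

	// Forwarder: drain source into changes, then close changes so a
	// ranging consumer terminates once the source is exhausted.
	go func() {
		for range source {
			changes <- struct{}{}
		}
		close(changes)
	}()

	source <- struct{}{}
	source <- struct{}{}
	close(source)

	for range changes {
		fmt.Println("change observed")
	}
}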