Example #1
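Validates Config checking in NewMachiner: a missing MachineAccessor and a missing Tag each produce a distinct "not valid" error, while a config with both set succeeds.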
func (s *MachinerSuite) TestMachinerConfigValidate(c *gc.C) {
	_, err := machiner.NewMachiner(machiner.Config{})
	c.Assert(err, gc.ErrorMatches, "validating config: unspecified MachineAccessor not valid")
	_, err = machiner.NewMachiner(machiner.Config{
		MachineAccessor: &mockMachineAccessor{},
	})
	c.Assert(err, gc.ErrorMatches, "validating config: unspecified Tag not valid")
	_, err = machiner.NewMachiner(machiner.Config{
		MachineAccessor: &mockMachineAccessor{},
		Tag:             names.NewMachineTag("123"),
	})
	c.Assert(err, jc.ErrorIsNil)
}
Example #2
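Exercises the dying-machine path when EnsureDead reports attached storage (CodeMachineHasAttachedStorage): the worker exits cleanly, and the calls recorded against the accessor, watcher, and machine stubs are checked in order.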
func (s *MachinerSuite) TestMachinerStorageAttached(c *gc.C) {
	// Machine is dying. We'll respond to "EnsureDead" by
	// saying that there are still storage attachments;
	// this should not cause an error.
	s.accessor.machine.life = params.Dying
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus
		nil, // Watch
		nil, // Refresh
		nil, // SetStatus
		&params.Error{Code: params.CodeMachineHasAttachedStorage},
	)

	worker := machiner.NewMachiner(s.accessor, s.agentConfig, false)
	s.accessor.machine.watcher.changes <- struct{}{}
	worker.Kill()
	c.Check(worker.Wait(), jc.ErrorIsNil)

	s.accessor.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "Machine",
		Args:     []interface{}{s.agentConfig.Tag()},
	}})

	s.accessor.machine.watcher.CheckCalls(c, []gitjujutesting.StubCall{
		{FuncName: "Changes"}, {FuncName: "Changes"}, {FuncName: "Stop"},
	})

	s.accessor.machine.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "SetMachineAddresses",
		Args: []interface{}{
			network.NewAddresses(
				"255.255.255.255",
				"0.0.0.0",
			),
		},
	}, {
		FuncName: "SetStatus",
		Args: []interface{}{
			params.StatusStarted,
			"",
			map[string]interface{}(nil),
		},
	}, {
		FuncName: "Watch",
	}, {
		FuncName: "Refresh",
	}, {
		FuncName: "Life",
	}, {
		FuncName: "SetStatus",
		Args: []interface{}{
			params.StatusStopped,
			"",
			map[string]interface{}(nil),
		},
	}, {
		FuncName: "EnsureDead",
	}})
}
Example #3
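A test helper that builds a machiner from an APIMachineAccessor, the agent config, and an ignoreAddresses flag.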
func (s *MachinerStateSuite) makeMachiner(ignoreAddresses bool) worker.Worker {
	return machiner.NewMachiner(
		machiner.APIMachineAccessor{s.machinerState},
		agentConfig(s.apiMachine.Tag()),
		ignoreAddresses,
	)
}
Example #4
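Checks that a failure in the final SetStatus(stopped) call is reported as "failed to set status stopped", and verifies the exact sequence of calls made on the mock machine.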
func (s *MachinerSuite) TestMachinerSetStatusStopped(c *gc.C) {
	w, err := machiner.NewMachiner(machiner.Config{
		MachineAccessor: s.accessor,
		Tag:             s.machineTag,
	})
	c.Assert(err, jc.ErrorIsNil)
	s.accessor.machine.life = params.Dying
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus (started)
		nil, // Watch
		nil, // Refresh
		errors.New("cannot set status"), // SetStatus (stopped)
	)
	s.accessor.machine.watcher.changes <- struct{}{}
	err = stopWorker(w)
	c.Assert(
		err, gc.ErrorMatches,
		"machine-123 failed to set status stopped: cannot set status",
	)
	s.accessor.machine.CheckCallNames(c,
		"SetMachineAddresses",
		"SetStatus",
		"Watch",
		"Refresh",
		"Life",
		"SetStatus",
	)
	s.accessor.machine.CheckCall(
		c, 5, "SetStatus",
		params.StatusStopped,
		"",
		map[string]interface{}(nil),
	)
}
Example #5
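Checks that an EnsureDead failure with CodeHasAssignedUnits does not stop the worker; it keeps waiting for further lifecycle events.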
func (s *MachinerSuite) TestMachinerMachineAssignedUnits(c *gc.C) {
	w, err := machiner.NewMachiner(machiner.Config{
		MachineAccessor: s.accessor,
		Tag:             s.machineTag,
	})
	c.Assert(err, jc.ErrorIsNil)
	s.accessor.machine.life = params.Dying
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus
		nil, // Watch
		nil, // Refresh
		nil, // SetStatus
		&params.Error{Code: params.CodeHasAssignedUnits}, // EnsureDead
	)
	s.accessor.machine.watcher.changes <- struct{}{}
	err = stopWorker(w)

	// If EnsureDead fails with "machine has assigned units", then
	// the worker will not fail, but will wait for more events.
	c.Check(err, jc.ErrorIsNil)

	s.accessor.machine.CheckCallNames(c,
		"SetMachineAddresses",
		"SetStatus",
		"Watch",
		"Refresh",
		"Life",
		"SetStatus",
		"EnsureDead",
	)
}
Example #6
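Checks that a machiner created for an unknown machine ("99") terminates with worker.ErrTerminateAgent.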
func (s *MachinerStateSuite) TestNotFoundOrUnauthorized(c *gc.C) {
	mr := machiner.NewMachiner(
		machiner.APIMachineAccessor{s.machinerState},
		agentConfig(names.NewMachineTag("99")),
	)
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
}
Example #7
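The same config-validation test as Example #1, extended to stop the successfully created worker so the suite's patch cleanup does not race with it.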
func (s *MachinerSuite) TestMachinerConfigValidate(c *gc.C) {
	_, err := machiner.NewMachiner(machiner.Config{})
	c.Assert(err, gc.ErrorMatches, "validating config: unspecified MachineAccessor not valid")
	_, err = machiner.NewMachiner(machiner.Config{
		MachineAccessor: &mockMachineAccessor{},
	})
	c.Assert(err, gc.ErrorMatches, "validating config: unspecified Tag not valid")
	w, err := machiner.NewMachiner(machiner.Config{
		MachineAccessor: &mockMachineAccessor{},
		Tag:             names.NewMachineTag("123"),
	})
	c.Assert(err, jc.ErrorIsNil)

	// must stop the worker to prevent a data race when cleanup suite
	// rolls back the patches
	err = stopWorker(w)
	c.Assert(err, jc.ErrorIsNil)
}
Example #8
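A Config-based variant of the not-found/unauthorized test; the machineDead callback must not be invoked, because the cause of the failure is ambiguous.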
func (s *MachinerStateSuite) TestNotFoundOrUnauthorized(c *gc.C) {
	mr, err := machiner.NewMachiner(machiner.Config{
		machiner.APIMachineAccessor{s.machinerState},
		names.NewMachineTag("99"),
		false,
		// the "machineDead" callback should not be invoked
		// because we don't know whether the agent is
		// legitimately not found or unauthorized; we err on
		// the side of caution, in case the password got mucked
		// up, or state got mucked up (e.g. during an upgrade).
		func() error { return errors.New("should not be called") },
	})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
}
Example #9
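A test helper that constructs a machiner from a Config, defaulting the machineDead callback to a no-op when none is supplied.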
func (s *MachinerStateSuite) makeMachiner(
	c *gc.C,
	ignoreAddresses bool,
	machineDead func() error,
) worker.Worker {
	if machineDead == nil {
		machineDead = func() error { return nil }
	}
	w, err := machiner.NewMachiner(machiner.Config{
		machiner.APIMachineAccessor{s.machinerState},
		s.apiMachine.Tag().(names.MachineTag),
		ignoreAddresses,
		machineDead,
	})
	c.Assert(err, jc.ErrorIsNil)
	return w
}
Example #10
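Checks that when Refresh returns CodeNotFound, the worker terminates with ErrTerminateAgent without reporting the machine as dead.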
func (s *MachinerSuite) TestMachinerMachineNotFound(c *gc.C) {
	// Accessing the machine initially yields "not found or unauthorized".
	// We don't know which, so we don't report that the machine is dead.
	var machineDead machineDeathTracker
	w, err := machiner.NewMachiner(machiner.Config{
		s.accessor, s.machineTag, false,
		machineDead.machineDead,
	})
	c.Assert(err, jc.ErrorIsNil)
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus
		nil, // Watch
		&params.Error{Code: params.CodeNotFound}, // Refresh
	)
	s.accessor.machine.watcher.changes <- struct{}{}
	err = stopWorker(w)
	c.Assert(errors.Cause(err), gc.Equals, worker.ErrTerminateAgent)
	c.Assert(bool(machineDead), jc.IsFalse)
}
Example #11
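Checks that a plain error from EnsureDead is surfaced as "failed to set machine to dead".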
func (s *MachinerSuite) TestMachinerMachineEnsureDeadError(c *gc.C) {
	w, err := machiner.NewMachiner(machiner.Config{
		MachineAccessor: s.accessor,
		Tag:             s.machineTag,
	})
	c.Assert(err, jc.ErrorIsNil)
	s.accessor.machine.life = params.Dying
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus
		nil, // Watch
		nil, // Refresh
		nil, // SetStatus
		errors.New("cannot ensure machine is dead"), // EnsureDead
	)
	s.accessor.machine.watcher.changes <- struct{}{}
	err = stopWorker(w)
	c.Check(
		err, gc.ErrorMatches,
		"machine-123 failed to set machine to dead: cannot ensure machine is dead",
	)
}
Example #12
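A minimal test helper using the early two-argument constructor that takes the state accessor and agent config directly.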
func (s *MachinerSuite) makeMachiner() worker.Worker {
	return machiner.NewMachiner(s.machinerState, agentConfig(s.apiMachine.Tag()))
}
Example #13
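An early form of the not-found/unauthorized test using the two-argument constructor.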
func (s *MachinerSuite) TestNotFoundOrUnauthorized(c *gc.C) {
	mr := machiner.NewMachiner(s.machinerState, agentConfig("machine-99"))
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
}
Example #14
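MachineAgent.APIWorker from the jujud machine agent: it opens the API connection, refreshes the agent configuration, and starts the machiner alongside the other API-based workers (upgrader, apiaddressupdater, logger, rsyslog, networker, deployer, provisioner, and so on).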
// APIWorker returns a Worker that connects to the API and starts any
// workers that need an API connection.
func (a *MachineAgent) APIWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()
	st, entity, err := openAPIState(agentConfig, a)
	if err != nil {
		return nil, err
	}
	reportOpenedAPI(st)

	// Check if the network management is disabled.
	envConfig, err := st.Environment().EnvironConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot read environment config: %v", err)
	}
	disableNetworkManagement, _ := envConfig.DisableNetworkManagement()
	if disableNetworkManagement {
		logger.Infof("network management is disabled")
	}

	// Refresh the configuration, since it may have been updated after opening state.
	agentConfig = a.CurrentConfig()
	for _, job := range entity.Jobs() {
		if job.NeedsState() {
			info, err := st.Agent().StateServingInfo()
			if err != nil {
				return nil, fmt.Errorf("cannot get state serving info: %v", err)
			}
			err = a.ChangeConfig(func(config agent.ConfigSetter) error {
				config.SetStateServingInfo(info)
				return nil
			})
			if err != nil {
				return nil, err
			}
			agentConfig = a.CurrentConfig()
			break
		}
	}

	rsyslogMode := rsyslog.RsyslogModeForwarding
	runner := newRunner(connectionIsFatal(st), moreImportant)
	var singularRunner worker.Runner
	for _, job := range entity.Jobs() {
		if job == params.JobManageEnviron {
			rsyslogMode = rsyslog.RsyslogModeAccumulate
			conn := singularAPIConn{st, st.Agent()}
			singularRunner, err = newSingularRunner(runner, conn)
			if err != nil {
				return nil, fmt.Errorf("cannot make singular API Runner: %v", err)
			}
			break
		}
	}

	// Before starting any workers, ensure we record the Juju version this machine
	// agent is running.
	currentTools := &coretools.Tools{Version: version.Current}
	if err := st.Upgrader().SetVersion(agentConfig.Tag().String(), currentTools.Version); err != nil {
		return nil, errors.Annotate(err, "cannot set machine agent version")
	}

	providerType := agentConfig.Value(agent.ProviderType)

	// Run the upgrader and the upgrade-steps worker without waiting for
	// the upgrade steps to complete.
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		return upgrader.NewUpgrader(
			st.Upgrader(),
			agentConfig,
			a.previousAgentVersion,
			a.upgradeWorkerContext.IsUpgradeRunning,
		), nil
	})
	runner.StartWorker("upgrade-steps", func() (worker.Worker, error) {
		return a.upgradeWorkerContext.Worker(a, st, entity.Jobs()), nil
	})

	// All other workers must wait for the upgrade steps to complete
	// before starting.
	a.startWorkerAfterUpgrade(runner, "machiner", func() (worker.Worker, error) {
		return machiner.NewMachiner(st.Machiner(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "apiaddressupdater", func() (worker.Worker, error) {
		return apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), a), nil
	})
	a.startWorkerAfterUpgrade(runner, "logger", func() (worker.Worker, error) {
		return workerlogger.NewLogger(st.Logger(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "machineenvironmentworker", func() (worker.Worker, error) {
		return machineenvironmentworker.NewMachineEnvironmentWorker(st.Environment(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "rsyslog", func() (worker.Worker, error) {
		return newRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslogMode)
	})
	// TODO (mfoord 8/8/2014) improve the way we detect networking capabilities. Bug lp:1354365
	writeNetworkConfig := providerType == "maas"
	if disableNetworkManagement || !writeNetworkConfig {
		a.startWorkerAfterUpgrade(runner, "networker", func() (worker.Worker, error) {
			return newSafeNetworker(st.Networker(), agentConfig, networker.DefaultConfigDir)
		})
	} else if !disableNetworkManagement && writeNetworkConfig {
		a.startWorkerAfterUpgrade(runner, "networker", func() (worker.Worker, error) {
			return newNetworker(st.Networker(), agentConfig, networker.DefaultConfigDir)
		})
	}

	// If not a local provider bootstrap machine, start the worker to
	// manage SSH keys.
	if providerType != provider.Local || a.MachineId != bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "authenticationworker", func() (worker.Worker, error) {
			return authenticationworker.NewWorker(st.KeyUpdater(), agentConfig), nil
		})
	}

	// Perform the operations needed to set up hosting for containers.
	if err := a.setupContainerSupport(runner, st, entity, agentConfig); err != nil {
		cause := errors.Cause(err)
		if params.IsCodeDead(cause) || cause == worker.ErrTerminateAgent {
			return nil, worker.ErrTerminateAgent
		}
		return nil, fmt.Errorf("setting up container support: %v", err)
	}
	for _, job := range entity.Jobs() {
		switch job {
		case params.JobHostUnits:
			a.startWorkerAfterUpgrade(runner, "deployer", func() (worker.Worker, error) {
				apiDeployer := st.Deployer()
				context := newDeployContext(apiDeployer, agentConfig)
				return deployer.NewDeployer(apiDeployer, context), nil
			})
		case params.JobManageEnviron:
			a.startWorkerAfterUpgrade(singularRunner, "environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewEnvironProvisioner(st.Provisioner(), agentConfig), nil
			})
			// TODO(axw) 2013-09-24 bug #1229506
			// Make another job to enable the firewaller. Not all
			// environments are capable of managing ports
			// centrally.
			a.startWorkerAfterUpgrade(singularRunner, "firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st.Firewaller())
			})
			a.startWorkerAfterUpgrade(singularRunner, "charm-revision-updater", func() (worker.Worker, error) {
				return charmrevisionworker.NewRevisionUpdateWorker(st.CharmRevisionUpdater()), nil
			})
		case params.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			// TODO(dimitern): Once all workers moved over to using
			// the API, report "unknown job type" here.
		}
	}
	return newCloseWorker(runner, st), nil // Note: a worker.Runner is itself a worker.Worker.
}
Example #15
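Another version of APIWorker, without the network-management checks or upgrade-context plumbing seen in Example #14, but starting the machiner in the same way.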
// APIWorker returns a Worker that connects to the API and starts any
// workers that need an API connection.
func (a *MachineAgent) APIWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()
	st, entity, err := openAPIState(agentConfig, a)
	if err != nil {
		return nil, err
	}
	reportOpenedAPI(st)

	// Refresh the configuration, since it may have been updated after opening state.
	agentConfig = a.CurrentConfig()

	for _, job := range entity.Jobs() {
		if job.NeedsState() {
			info, err := st.Agent().StateServingInfo()
			if err != nil {
				return nil, fmt.Errorf("cannot get state serving info: %v", err)
			}
			err = a.ChangeConfig(func(config agent.ConfigSetter) {
				config.SetStateServingInfo(info)
			})
			if err != nil {
				return nil, err
			}
			agentConfig = a.CurrentConfig()
			break
		}
	}

	rsyslogMode := rsyslog.RsyslogModeForwarding
	runner := newRunner(connectionIsFatal(st), moreImportant)
	var singularRunner worker.Runner
	for _, job := range entity.Jobs() {
		if job == params.JobManageEnviron {
			rsyslogMode = rsyslog.RsyslogModeAccumulate
			conn := singularAPIConn{st, st.Agent()}
			singularRunner, err = newSingularRunner(runner, conn)
			if err != nil {
				return nil, fmt.Errorf("cannot make singular API Runner: %v", err)
			}
			break
		}
	}

	// Run the upgrader and the upgrade-steps worker without waiting for
	// the upgrade steps to complete.
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		return upgrader.NewUpgrader(st.Upgrader(), agentConfig), nil
	})
	runner.StartWorker("upgrade-steps", func() (worker.Worker, error) {
		return a.upgradeWorker(st, entity.Jobs(), agentConfig), nil
	})

	// All other workers must wait for the upgrade steps to complete
	// before starting.
	a.startWorkerAfterUpgrade(runner, "machiner", func() (worker.Worker, error) {
		return machiner.NewMachiner(st.Machiner(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "apiaddressupdater", func() (worker.Worker, error) {
		return apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), a), nil
	})
	a.startWorkerAfterUpgrade(runner, "logger", func() (worker.Worker, error) {
		return workerlogger.NewLogger(st.Logger(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "machineenvironmentworker", func() (worker.Worker, error) {
		return machineenvironmentworker.NewMachineEnvironmentWorker(st.Environment(), agentConfig), nil
	})
	a.startWorkerAfterUpgrade(runner, "rsyslog", func() (worker.Worker, error) {
		return newRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslogMode)
	})

	// If not a local provider bootstrap machine, start the worker to
	// manage SSH keys.
	providerType := agentConfig.Value(agent.ProviderType)
	if providerType != provider.Local || a.MachineId != bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "authenticationworker", func() (worker.Worker, error) {
			return authenticationworker.NewWorker(st.KeyUpdater(), agentConfig), nil
		})
	}

	// Perform the operations needed to set up hosting for containers.
	if err := a.setupContainerSupport(runner, st, entity, agentConfig); err != nil {
		return nil, fmt.Errorf("setting up container support: %v", err)
	}
	for _, job := range entity.Jobs() {
		switch job {
		case params.JobHostUnits:
			a.startWorkerAfterUpgrade(runner, "deployer", func() (worker.Worker, error) {
				apiDeployer := st.Deployer()
				context := newDeployContext(apiDeployer, agentConfig)
				return deployer.NewDeployer(apiDeployer, context), nil
			})
		case params.JobManageEnviron:
			a.startWorkerAfterUpgrade(singularRunner, "environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewEnvironProvisioner(st.Provisioner(), agentConfig), nil
			})
			// TODO(axw) 2013-09-24 bug #1229506
			// Make another job to enable the firewaller. Not all
			// environments are capable of managing ports
			// centrally.
			a.startWorkerAfterUpgrade(singularRunner, "firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st.Firewaller())
			})
			a.startWorkerAfterUpgrade(singularRunner, "charm-revision-updater", func() (worker.Worker, error) {
				return charmrevisionworker.NewRevisionUpdateWorker(st.CharmRevisionUpdater()), nil
			})
		case params.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			// TODO(dimitern): Once all workers moved over to using
			// the API, report "unknown job type" here.
		}
	}
	return newCloseWorker(runner, st), nil // Note: a worker.Runner is itself a worker.Worker.
}
Example #16
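A test helper that builds a machiner from an APIMachineAccessor and the agent config, without the ignoreAddresses flag used in Example #3.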
func (s *MachinerStateSuite) makeMachiner() worker.Worker {
	return machiner.NewMachiner(
		machiner.APIMachineAccessor{s.machinerState},
		agentConfig(s.apiMachine.Tag()),
	)
}