Example #1
func (a *MachineAgent) uninstallAgent() error {
	// We should only uninstall if the uninstall file is present.
	if !agent.CanUninstall(a) {
		logger.Infof("ignoring uninstall request")
		return nil
	}
	logger.Infof("uninstalling agent")

	agentConfig := a.CurrentConfig()
	var errs []error
	agentServiceName := agentConfig.Value(agent.AgentServiceName)
	if agentServiceName == "" {
		// For backwards compatibility, handle lack of AgentServiceName.
		agentServiceName = os.Getenv("UPSTART_JOB")
	}

	if agentServiceName != "" {
		svc, err := service.DiscoverService(agentServiceName, common.Conf{})
		if err != nil {
			errs = append(errs, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err))
		} else if err := svc.Remove(); err != nil {
			errs = append(errs, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err))
		}
	}

	errs = append(errs, a.removeJujudSymlinks()...)

	// TODO(fwereade): surely this shouldn't be happening here? Once we're
	// at this point we should expect to be killed in short order; if this
	// work is remotely important we should be blocking machine death on
	// its completion.
	insideContainer := container.RunningInContainer()
	if insideContainer {
		// We're running inside LXC, so loop devices may leak. Detach
		// any loop devices that are backed by files on this machine.
		//
		// It is necessary to do this here as well as in container/lxc,
		// as container/lxc needs to check in the container's rootfs
		// to see if the loop device is attached to the container; that
		// will fail if the data-dir is removed first.
		if err := a.loopDeviceManager.DetachLoopDevices("/", agentConfig.DataDir()); err != nil {
			errs = append(errs, err)
		}
	}

	if err := mongo.RemoveService(); err != nil {
		errs = append(errs, errors.Annotate(err, "cannot stop/remove mongo service"))
	}
	if err := os.RemoveAll(agentConfig.DataDir()); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("uninstall failed: %v", errs)
}
Example #2
// NewSimpleContext returns a new SimpleContext, acting on behalf of
// the specified deployer, that deploys unit agents.
// Paths to which agents and tools are installed are relative to dataDir.
func NewSimpleContext(agentConfig agent.Config, api APICalls) *SimpleContext {
	return &SimpleContext{
		api:         api,
		agentConfig: agentConfig,
		discoverService: func(name string, conf common.Conf) (deployerService, error) {
			return service.DiscoverService(name, conf)
		},
		listServices: func() ([]string, error) {
			return service.ListServices()
		},
	}
}
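
For illustration, here is a minimal sketch, not from the Juju source, of how a same-package test might exploit the injected hooks set up by NewSimpleContext; newTestContext and the fake value are hypothetical, and the fake is assumed to satisfy the deployerService interface used above.

// Sketch only: swap the real service-discovery hook for a fake so the
// test never touches the host's init system.
func newTestContext(agentConfig agent.Config, api APICalls, fake deployerService) *SimpleContext {
	ctx := NewSimpleContext(agentConfig, api)
	// Replace the hook installed by the constructor above.
	ctx.discoverService = func(name string, conf common.Conf) (deployerService, error) {
		return fake, nil
	}
	return ctx
}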
Example #3
func (s *discoverySuite) TestDiscoverServiceVersionFallback(c *gc.C) {
	for _, test := range discoveryTests {
		test.log(c)

		test.disableLocalDiscovery(c, s)
		test.setVersion(s)

		svc, err := service.DiscoverService(s.name, s.conf)

		test.checkService(c, svc, err, s.name, s.conf)
	}
}
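
The discoveryTests table iterated above is not shown in this example. Judging from the struct literal in Example #4, an entry carries at least an os, a series and the expected init system; the sketch below is a hypothetical entry, and its concrete values are assumptions.

// Hypothetical table entry, modelled on the discoveryTest literal in
// Example #4; the os, series and init-system values are assumptions.
var windowsDiscoveryTest = discoveryTest{
	os:       jujuos.Windows,
	series:   "win2012r2",
	expected: service.InitSystemWindows,
}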
Example #4
func (s *discoverySuite) TestDiscoverServiceLocalHost(c *gc.C) {
	var localInitSystem string
	var err error
	switch runtime.GOOS {
	case "windows":
		localInitSystem = service.InitSystemWindows
	case "linux":
		localInitSystem, err = service.VersionInitSystem(series.HostSeries())
	}
	c.Assert(err, gc.IsNil)

	test := discoveryTest{
		os:       jujuos.HostOS(),
		series:   series.HostSeries(),
		expected: localInitSystem,
	}
	test.disableVersionDiscovery(s)

	svc, err := service.DiscoverService(s.name, s.conf)
	c.Assert(err, jc.ErrorIsNil)

	test.checkService(c, svc, err, s.name, s.conf)
}
Example #5
		return err
	}
	return nil
}

type agentService interface {
	Stop() error
	Remove() error
}

var mongoRemoveService = func(namespace string) error {
	return mongo.RemoveService(namespace)
}

var discoverService = func(name string) (agentService, error) {
	return service.DiscoverService(name, servicecommon.Conf{})
}

// OpenPorts is specified in the Environ interface.
func (env *localEnviron) OpenPorts(ports []network.PortRange) error {
	return fmt.Errorf("open ports not implemented")
}

// ClosePorts is specified in the Environ interface.
func (env *localEnviron) ClosePorts(ports []network.PortRange) error {
	return fmt.Errorf("close ports not implemented")
}

// Ports is specified in the Environ interface.
func (env *localEnviron) Ports() ([]network.PortRange, error) {
	return nil, nil
}
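
The package-level function variables in the snippet above (mongoRemoveService, discoverService) act as seams for tests. Below is a minimal sketch, assuming a same-package test file and a hypothetical fakeAgentService type, of how the discovery hook might be stubbed and later restored; none of this appears in the Juju source.

// Sketch only: a stub satisfying the agentService interface above,
// plus a helper that swaps it into the discoverService hook.
type fakeAgentService struct {
	stopped bool
	removed bool
}

func (f *fakeAgentService) Stop() error   { f.stopped = true; return nil }
func (f *fakeAgentService) Remove() error { f.removed = true; return nil }

// patchDiscoverService replaces the discoverService hook and returns a
// function that restores the original.
func patchDiscoverService(fake *fakeAgentService) (restore func()) {
	original := discoverService
	discoverService = func(name string) (agentService, error) {
		return fake, nil
	}
	return func() { discoverService = original }
}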
Example #6
// Restore implements the server side of Backups.Restore.
func (a *API) Restore(p params.RestoreArgs) error {

	// Get hold of a backup file Reader
	backup, closer := newBackups(a.st)
	defer closer.Close()

	// Obtain the address of current machine, where we will be performing restore.
	machine, err := a.st.Machine(a.machineID)
	if err != nil {
		return errors.Trace(err)
	}

	addr, err := machine.PrivateAddress()
	if err != nil {
		return errors.Annotatef(err, "error fetching internal address for machine %q", machine)

	}

	publicAddress, err := machine.PublicAddress()
	if err != nil {
		return errors.Annotatef(err, "error fetching public address for machine %q", machine)

	}

	info := a.st.RestoreInfo()
	// Signal to current state and api server that restore will begin
	err = info.SetStatus(state.RestoreInProgress)
	if err != nil {
		return errors.Annotatef(err, "cannot set the server to %q mode", state.RestoreInProgress)
	}
	// Any abnormal termination of this function will mark restore as failed;
	// successful termination will call Exit and never run this.
	defer info.SetStatus(state.RestoreFailed)

	instanceId, err := machine.InstanceId()
	if err != nil {
		return errors.Annotate(err, "cannot obtain instance id for machine to be restored")
	}

	logger.Infof("beginning server side restore of backup %q", p.BackupId)
	// Restore
	restoreArgs := backups.RestoreArgs{
		PrivateAddress: addr.Value,
		PublicAddress:  publicAddress.Value,
		NewInstId:      instanceId,
		NewInstTag:     machine.Tag(),
		NewInstSeries:  machine.Series(),
	}

	oldTagString, err := backup.Restore(p.BackupId, restoreArgs)
	if err != nil {
		return errors.Annotate(err, "restore failed")
	}

	// A backup can be made of any member of an HA array.
	// The files in a backup don't contain purely relativized paths.
	// If the backup was made of the bootstrap node (machine 0), the
	// newly created machine will have the same paths and therefore
	// the startup scripts will fit the new juju. If the backup belongs
	// to a different machine, we need to create a new set of startup
	// scripts and exit with 0 (so that the current script does not try
	// to restart the old juju, which will no longer be there).
	if oldTagString != nil && oldTagString != bootstrapNode {
		srvName := fmt.Sprintf("jujud-%s", oldTagString)
		srv, err := service.DiscoverService(srvName, common.Conf{})
		if err != nil {
			return errors.Annotatef(err, "cannot find %q service", srvName)
		}
		if err := srv.Start(); err != nil {
			return errors.Annotatef(err, "cannot start %q service", srvName)
		}
		// We don't want machine-0 to restart since the new one has a different tag.
		// We started the new one above.
		os.Exit(0)
	}

	// After restoring, the api server needs a forced restart; tomb will not
	// work because we change all of juju's configuration files and mongo too.
	// Exiting with 0 would prevent upstart from respawning the process.

	// NOTE(fwereade): the apiserver needs to be restarted, yes, but
	// this approach is completely broken. The only place it's ever
	// ok to use os.Exit is in a main() func that's *so* simple as to
	// be reasonably left untested.
	//
	// And passing os.Exit in wouldn't make this any better either,
	// just using it subverts the expectations of *everything* else
	// running in the process.
	os.Exit(1)
	return nil
}