Example #1
// Bootstrap is specified in the Environ interface.
func (env *localEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (string, string, environs.BootstrapFinalizer, error) {
	if err := ensureNotRoot(); err != nil {
		return "", "", nil, err
	}

	// Make sure there are tools available for the
	// host's architecture and series.
	if _, err := args.AvailableTools.Match(tools.Filter{
		Arch:   arch.HostArch(),
		Series: version.Current.Series,
	}); err != nil {
		return "", "", nil, err
	}

	cfg, err := env.Config().Apply(map[string]interface{}{
		// Record the bootstrap IP, so the containers know where to go for storage.
		"bootstrap-ip": env.bridgeAddress,
	})
	if err == nil {
		err = env.SetConfig(cfg)
	}
	if err != nil {
		logger.Errorf("failed to apply bootstrap-ip to config: %v", err)
		return "", "", nil, err
	}
	return arch.HostArch(), version.Current.Series, env.finishBootstrap, nil
}
Example #2
func (s *toolsSuite) testFindToolsExact(c *gc.C, t common.ToolsStorageGetter, inStorage bool, develVersion bool) {
	var called bool
	s.PatchValue(common.EnvtoolsFindTools, func(e environs.Environ, major, minor int, stream string, filter coretools.Filter) (list coretools.List, err error) {
		called = true
		c.Assert(filter.Number, gc.Equals, version.Current.Number)
		c.Assert(filter.Series, gc.Equals, version.Current.Series)
		c.Assert(filter.Arch, gc.Equals, arch.HostArch())
		if develVersion {
			c.Assert(stream, gc.Equals, "devel")
		} else {
			c.Assert(stream, gc.Equals, "released")
		}
		return nil, errors.NotFoundf("tools")
	})
	toolsFinder := common.NewToolsFinder(s.State, t, sprintfURLGetter("tools:%s"))
	result, err := toolsFinder.FindTools(params.FindToolsParams{
		Number:       version.Current.Number,
		MajorVersion: -1,
		MinorVersion: -1,
		Series:       version.Current.Series,
		Arch:         arch.HostArch(),
	})
	c.Assert(err, jc.ErrorIsNil)
	if inStorage {
		c.Assert(result.Error, gc.IsNil)
		c.Assert(called, jc.IsFalse)
	} else {
		c.Assert(result.Error, gc.ErrorMatches, "tools not found")
		c.Assert(called, jc.IsTrue)
	}
}
Example #3
// validateUploadAllowed returns an error if an attempt to upload tools should
// not be allowed.
func validateUploadAllowed(env environs.Environ, toolsArch *string) error {
	// Check that the architecture for which we are setting up an
	// environment matches the one from which we are bootstrapping.
	hostArch := arch.HostArch()
	// We can't build tools for a different architecture if one is specified.
	if toolsArch != nil && *toolsArch != hostArch {
		return fmt.Errorf("cannot build tools for %q using a machine running on %q", *toolsArch, hostArch)
	}
	// If no architecture is specified, ensure the target provider supports instances matching our architecture.
	supportedArchitectures, err := env.SupportedArchitectures()
	if err != nil {
		return fmt.Errorf(
			"no packaged tools available and cannot determine environment's supported architectures: %v", err)
	}
	archSupported := false
	for _, arch := range supportedArchitectures {
		if hostArch == arch {
			archSupported = true
			break
		}
	}
	if !archSupported {
		envType := env.Config().Type()
		return errors.Errorf("environment %q of type %s does not support instances running on %q", env.Config().Name(), envType, hostArch)
	}
	return nil
}
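
The arch check above is a plain membership test. For readers who want to poke at it in isolation, here is a minimal standalone sketch of the same idea using only the standard library; the names archSupported and supported are illustrative, not Juju's:

package main

import (
	"fmt"
	"runtime"
)

// archSupported reports whether hostArch appears in the provider's
// supported-architecture list.
func archSupported(hostArch string, supported []string) bool {
	for _, a := range supported {
		if a == hostArch {
			return true
		}
	}
	return false
}

func main() {
	supported := []string{"amd64", "arm64"} // illustrative provider data
	if !archSupported(runtime.GOARCH, supported) {
		fmt.Printf("cannot upload: host arch %q not supported\n", runtime.GOARCH)
		return
	}
	fmt.Println("upload allowed")
}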
Example #4
func (s *toolsSuite) TestFindAvailableToolsCompleteNoValidate(c *gc.C) {
	s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 })

	var allTools tools.List
	for _, series := range series.SupportedSeries() {
		binary := version.Binary{
			Number: version.Current.Number,
			Series: series,
			Arch:   arch.HostArch(),
			OS:     version.Current.OS,
		}
		allTools = append(allTools, &tools.Tools{
			Version: binary,
			URL:     "http://testing.invalid/tools.tar.gz",
		})
	}

	s.PatchValue(bootstrap.FindTools, func(_ environs.Environ, major, minor int, stream string, f tools.Filter) (tools.List, error) {
		return allTools, nil
	})
	env := newEnviron("foo", useDefaultKeys, nil)
	availableTools, err := bootstrap.FindAvailableTools(env, nil, nil, false)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(availableTools, gc.HasLen, len(allTools))
	c.Assert(env.supportedArchitecturesCount, gc.Equals, 0)
}
Example #5
func (e *bootstrapEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (string, string, environs.BootstrapFinalizer, error) {
	e.bootstrapCount++
	e.args = args
	finalizer := func(_ environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		e.finalizerCount++
		e.instanceConfig = icfg
		return nil
	}
	return arch.HostArch(), series.HostSeries(), finalizer, nil
}
Example #6
func (f mockToolsFinder) FindTools(number version.Number, series string, a string) (coretools.List, error) {
	v, err := version.ParseBinary(fmt.Sprintf("%s-%s-%s", number, series, arch.HostArch()))
	if err != nil {
		return nil, err
	}
	if a != "" {
		v.Arch = a
	}
	return coretools.List{&coretools.Tools{Version: v}}, nil
}
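
The binary version string parsed above has the shape number-series-arch. A standalone sketch of composing and splitting such a string; the splitting logic is illustrative, not Juju's version.ParseBinary. Note the number itself may contain dashes (e.g. "1.21-alpha1"), so the split must work from the right:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Compose a binary version string the way mockToolsFinder does.
	v := fmt.Sprintf("%s-%s-%s", "1.21-alpha1", "trusty", "amd64")
	fmt.Println(v) // 1.21-alpha1-trusty-amd64

	// Split it back apart from the right, because the version number
	// itself may contain dashes.
	i := strings.LastIndex(v, "-")
	arch := v[i+1:]
	j := strings.LastIndex(v[:i], "-")
	series := v[j+1 : i]
	number := v[:j]
	fmt.Println(number, series, arch) // 1.21-alpha1 trusty amd64
}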
Example #7
func (s *KVMSuite) createRunningContainer(c *gc.C, name string) kvm.Container {
	kvmContainer := s.ContainerFactory.New(name)
	network := container.BridgeNetworkConfig("testbr0", 0, nil)
	c.Assert(kvmContainer.Start(kvm.StartParams{
		Series:       "quantal",
		Arch:         arch.HostArch(),
		UserDataFile: "userdata.txt",
		Network:      network}), gc.IsNil)
	return kvmContainer
}
Example #8
func (s *localJujuTestSuite) TestConstraintsValidator(c *gc.C) {
	ctx := envtesting.BootstrapContext(c)
	env, err := local.Provider.PrepareForBootstrap(ctx, minimalConfig(c))
	c.Assert(err, jc.ErrorIsNil)
	validator, err := env.ConstraintsValidator()
	c.Assert(err, jc.ErrorIsNil)
	hostArch := arch.HostArch()
	cons := constraints.MustParse(fmt.Sprintf("arch=%s instance-type=foo tags=bar cpu-power=10 cpu-cores=2", hostArch))
	unsupported, err := validator.Validate(cons)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(unsupported, jc.SameContents, []string{"cpu-cores", "cpu-power", "instance-type", "tags"})
}
Example #9
func (s *BootstrapSuite) TestAutoUploadAfterFailedSync(c *gc.C) {
	s.PatchValue(&version.Current.Series, config.LatestLtsSeries())
	s.setupAutoUploadTest(c, "1.7.3", "quantal")
	// Run the command and check that the upload has been run for tools
	// matching the current juju version.
	opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c), envcmd.Wrap(new(BootstrapCommand)), "-e", "devenv")
	c.Assert(<-errc, gc.IsNil)
	c.Check((<-opc).(dummy.OpBootstrap).Env, gc.Equals, "devenv")
	icfg := (<-opc).(dummy.OpFinalizeBootstrap).InstanceConfig
	c.Assert(icfg, gc.NotNil)
	c.Assert(icfg.Tools.Version.String(), gc.Equals, "1.7.3.1-raring-"+arch.HostArch())
}
Example #10
func (s *provisionerSuite) testFindTools(c *gc.C, matchArch bool, apiError, logicError error) {
	var toolsList = coretools.List{&coretools.Tools{Version: version.Current}}
	var called bool
	provisioner.PatchFacadeCall(s, s.provisioner, func(request string, args, response interface{}) error {
		called = true
		c.Assert(request, gc.Equals, "FindTools")
		expected := params.FindToolsParams{
			Number:       version.Current.Number,
			Series:       version.Current.Series,
			MinorVersion: -1,
			MajorVersion: -1,
		}
		if matchArch {
			expected.Arch = arch.HostArch()
		}
		c.Assert(args, gc.Equals, expected)
		result := response.(*params.FindToolsResult)
		result.List = toolsList
		if logicError != nil {
			result.Error = common.ServerError(logicError)
		}
		return apiError
	})

	var a *string
	if matchArch {
		arch := arch.HostArch()
		a = &arch
	}
	apiList, err := s.provisioner.FindTools(version.Current.Number, version.Current.Series, a)
	c.Assert(called, jc.IsTrue)
	if apiError != nil {
		c.Assert(err, gc.Equals, apiError)
	} else if logicError != nil {
		c.Assert(err.Error(), gc.Equals, logicError.Error())
	} else {
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(apiList, jc.SameContents, toolsList)
	}
}
Example #11
func (s *BootstrapSuite) setupAutoUploadTest(c *gc.C, vers, series string) environs.Environ {
	s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c))
	sourceDir := createToolsSource(c, vAll)
	s.PatchValue(&envtools.DefaultBaseURL, sourceDir)

	// Change the tools location to the test location and also patch
	// the current version, ensuring both are restored afterwards.
	// Set the current version to one for which there are no tools,
	// so we can test that an upload is forced.
	s.PatchValue(&version.Current, version.MustParseBinary(vers+"-"+series+"-"+arch.HostArch()))

	// Create home with dummy provider and remove all
	// of its envtools.
	return resetJujuHome(c, "devenv")
}
Example #12
func (s *localJujuTestSuite) TestConstraintsValidatorVocab(c *gc.C) {
	env := s.Prepare(c)
	validator, err := env.ConstraintsValidator()
	c.Assert(err, jc.ErrorIsNil)

	hostArch := arch.HostArch()
	var invalidArch string
	for _, a := range arch.AllSupportedArches {
		if a != hostArch {
			invalidArch = a
			break
		}
	}
	cons := constraints.MustParse(fmt.Sprintf("arch=%s", invalidArch))
	_, err = validator.Validate(cons)
	c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch="+invalidArch+"\nvalid values are:.*")
}
Example #13
// UploadFakeTools uploads fake tools of the architectures in
// s.UploadArches for each LTS release to the specified storage.
func (s *ToolsFixture) UploadFakeTools(c *gc.C, stor storage.Storage, toolsDir, stream string) {
	arches := s.UploadArches
	if len(arches) == 0 {
		arches = []string{arch.HostArch()}
	}
	var versions []version.Binary
	for _, arch := range arches {
		v := version.Current
		v.Arch = arch
		for _, series := range toolsLtsSeries {
			v.Series = series
			versions = append(versions, v)
		}
	}
	_, err := UploadFakeToolsVersions(stor, toolsDir, stream, versions...)
	c.Assert(err, jc.ErrorIsNil)
}
Example #14
// locallyBuildableTools returns the list of tools that
// can be built locally, for series of the same OS.
func locallyBuildableTools() (buildable coretools.List) {
	for _, series := range version.SupportedSeries() {
		if os, err := version.GetOSFromSeries(series); err != nil || os != version.Current.OS {
			continue
		}
		binary := version.Binary{
			Number: version.Current.Number,
			Series: series,
			Arch:   arch.HostArch(),
			OS:     version.Current.OS,
		}
		// Increment the build number so we know it's a development build.
		binary.Build++
		buildable = append(buildable, &coretools.Tools{Version: binary})
	}
	return buildable
}
Example #15
func (s *BootstrapSuite) TestMissingToolsUploadFailedError(c *gc.C) {

	buildToolsTarballAlwaysFails := func(forceVersion *version.Number, stream string) (*sync.BuiltTools, error) {
		return nil, fmt.Errorf("an error")
	}

	s.setupAutoUploadTest(c, "1.7.3", "precise")
	s.PatchValue(&sync.BuildToolsTarball, buildToolsTarballAlwaysFails)

	ctx, err := coretesting.RunCommand(c, envcmd.Wrap(&BootstrapCommand{}), "-e", "devenv")

	c.Check(coretesting.Stderr(ctx), gc.Equals, fmt.Sprintf(`
Bootstrapping environment "devenv"
Starting new instance for initial state server
Building tools to upload (1.7.3.1-raring-%s)
`[1:], arch.HostArch()))
	c.Check(err, gc.ErrorMatches, "failed to bootstrap environment: cannot upload bootstrap tools: an error")
}
Example #16
func (s *bootstrapSuite) TestBootstrapClearsUseSSHStorage(c *gc.C) {
	s.PatchValue(&manualDetectSeriesAndHardwareCharacteristics, func(string) (instance.HardwareCharacteristics, string, error) {
		arch := arch.HostArch()
		return instance.HardwareCharacteristics{Arch: &arch}, "precise", nil
	})
	s.PatchValue(&manualCheckProvisioned, func(string) (bool, error) {
		return false, nil
	})

	// use-sshstorage is initially true.
	cfg := s.env.Config()
	c.Assert(cfg.UnknownAttrs()["use-sshstorage"], jc.IsTrue)

	_, _, _, err := s.env.Bootstrap(envtesting.BootstrapContext(c), environs.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)

	// Bootstrap must set use-sshstorage to false within the environment.
	cfg = s.env.Config()
	c.Assert(cfg.UnknownAttrs()["use-sshstorage"], jc.IsFalse)
}
Example #17
func (s *provisionerSuite) testFindTools(c *gc.C, matchArch bool, apiError, logicError error) {
	var toolsList = coretools.List{&coretools.Tools{Version: version.Current}}
	var called bool
	var a string
	if matchArch {
		// If matchArch is true, set a to the host's arch; otherwise
		// leave it blank.
		a = arch.HostArch()
	}

	provisioner.PatchFacadeCall(s, s.provisioner, func(request string, args, response interface{}) error {
		called = true
		c.Assert(request, gc.Equals, "FindTools")
		expected := params.FindToolsParams{
			Number:       version.Current.Number,
			Series:       series.HostSeries(),
			Arch:         a,
			MinorVersion: -1,
			MajorVersion: -1,
		}
		c.Assert(args, gc.Equals, expected)
		result := response.(*params.FindToolsResult)
		result.List = toolsList
		if logicError != nil {
			result.Error = common.ServerError(logicError)
		}
		return apiError
	})
	apiList, err := s.provisioner.FindTools(version.Current.Number, series.HostSeries(), a)
	c.Assert(called, jc.IsTrue)
	if apiError != nil {
		c.Assert(err, gc.Equals, apiError)
	} else if logicError != nil {
		c.Assert(err.Error(), gc.Equals, logicError.Error())
	} else {
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(apiList, jc.SameContents, toolsList)
	}
}
Example #18
func createContainer(c *gc.C, manager container.Manager, machineId string) instance.Instance {
	machineNonce := "fake-nonce"
	stateInfo := jujutesting.FakeStateInfo(machineId)
	apiInfo := jujutesting.FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", true, nil, stateInfo, apiInfo)
	c.Assert(err, jc.ErrorIsNil)
	network := container.BridgeNetworkConfig("virbr0", 0, nil)

	instanceConfig.Tools = &tools.Tools{
		Version: version.MustParseBinary("2.3.4-foo-bar"),
		URL:     "http://tools.testing.invalid/2.3.4-foo-bar.tgz",
	}
	environConfig := dummyConfig(c)
	err = instancecfg.FinishInstanceConfig(instanceConfig, environConfig)
	c.Assert(err, jc.ErrorIsNil)

	inst, hardware, err := manager.CreateContainer(instanceConfig, "precise", network, nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(hardware, gc.NotNil)
	expected := fmt.Sprintf("arch=%s cpu-cores=1 mem=512M root-disk=8192M", arch.HostArch())
	c.Assert(hardware.String(), gc.Equals, expected)
	return inst
}
Example #19
// SupportedArchitectures is specified on the EnvironCapability interface.
func (*localEnviron) SupportedArchitectures() ([]string, error) {
	localArch := arch.HostArch()
	return []string{localArch}, nil
}
Example #20
const version = "1.21-alpha1"

// The version at which we switched from old-style to new-style numbering.
var switchOverVersion = MustParse("1.19.9")

// lsbReleaseFile is the name of the file that is read in order to determine
// the release version of ubuntu.
var lsbReleaseFile = "/etc/lsb-release"

// Current gives the current version of the system.  If the file
// "FORCE-VERSION" is present in the same directory as the running
// binary, it will override this.
var Current = Binary{
	Number: MustParse(version),
	Series: mustOSVersion(),
	Arch:   arch.HostArch(),
}

var Compiler = runtime.Compiler

func init() {
	toolsDir := filepath.Dir(os.Args[0])
	v, err := ioutil.ReadFile(filepath.Join(toolsDir, "FORCE-VERSION"))
	if err != nil {
		if !os.IsNotExist(err) {
			fmt.Fprintf(os.Stderr, "WARNING: cannot read forced version: %v\n", err)
		}
		return
	}
	Current.Number = MustParse(strings.TrimSpace(string(v)))
}
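
The FORCE-VERSION override in init above is easy to exercise on its own. A minimal standalone sketch of the same pattern, assuming only the standard library; forcedVersion is an illustrative helper, not part of the package:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// forcedVersion returns the trimmed contents of a FORCE-VERSION file
// sitting next to the running binary, or fallback if the file is absent.
func forcedVersion(fallback string) string {
	dir := filepath.Dir(os.Args[0])
	b, err := ioutil.ReadFile(filepath.Join(dir, "FORCE-VERSION"))
	if err != nil {
		if !os.IsNotExist(err) {
			fmt.Fprintf(os.Stderr, "WARNING: cannot read forced version: %v\n", err)
		}
		return fallback
	}
	return strings.TrimSpace(string(b))
}

func main() {
	fmt.Println(forcedVersion("1.21-alpha1"))
}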
Example #21
// CreateContainer creates or clones an LXC container.
func (manager *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
) (inst instance.Instance, _ *instance.HardwareCharacteristics, err error) {
	// Check our preconditions
	if manager == nil {
		panic("manager is nil")
	} else if series == "" {
		panic("series not set")
	} else if networkConfig == nil {
		panic("networkConfig is nil")
	} else if storageConfig == nil {
		panic("storageConfig is nil")
	}

	// Log how long the start took
	defer func(start time.Time) {
		if err == nil {
			logger.Tracef("container %q started: %v", inst.Id(), time.Now().Sub(start))
		}
	}(time.Now())

	name := names.NewMachineTag(instanceConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to create a directory for the container")
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := containerinit.WriteUserData(instanceConfig, networkConfig, directory)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to write user data")
	}

	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			networkConfig,
			instanceConfig.AuthorizedKeys,
			instanceConfig.AptProxySettings,
			instanceConfig.AptMirror,
			instanceConfig.EnableOSRefreshUpdate,
			instanceConfig.EnableOSUpgrade,
			manager.imageURLGetter,
			manager.useAUFS,
		)
		if err != nil {
			return nil, nil, errors.Annotate(err, "failed to retrieve the template to clone")
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}

		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, errors.Annotate(err, "failed to acquire lock on template")
		}
		defer lock.Unlock()

		// Ensure the run-time effective config of the template
		// container has correctly ordered network settings, otherwise
		// Clone() below will fail. This is needed in case we haven't
		// created a new template now but are reusing an existing one.
		// See LP bug #1414016.
		configPath := containerConfigFilename(templateContainer.Name())
		if _, err := reorderNetworkConfig(configPath); err != nil {
			return nil, nil, errors.Annotate(err, "failed to reorder network settings")
		}

		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			return nil, nil, errors.Annotate(err, "lxc container cloning failed")
		}
	} else {
		// Note here that LxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc
		// container on disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		var caCert []byte
		if manager.imageURLGetter != nil {
			arch := arch.HostArch()
			imageURL, err := manager.imageURLGetter.ImageURL(instance.LXC, series, arch)
			if err != nil {
				return nil, nil, errors.Annotatef(err, "cannot determine cached image URL")
			}
			templateParams = append(templateParams, "-T", imageURL)
			caCert = manager.imageURLGetter.CACert()
		}
		err = createContainer(
			lxcContainer,
			directory,
			networkConfig,
			nil,
			templateParams,
			caCert,
		)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
	}

	if err := autostartContainer(name); err != nil {
		return nil, nil, errors.Annotate(err, "failed to configure the container for autostart")
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, errors.Annotate(err, "failed to mount the directory to log to")
	}
	if storageConfig.AllowMount {
		// Add config to allow loop devices to be mounted inside the container.
		if err := allowLoopbackBlockDevices(name); err != nil {
			return nil, nil, errors.Annotate(err, "failed to configure the container for loopback devices")
		}
	}
	// Update the network settings inside the run-time config of the
	// container (e.g. /var/lib/lxc/<name>/config) before starting it.
	netConfig := generateNetworkConfig(networkConfig)
	if err := updateContainerConfig(name, netConfig); err != nil {
		return nil, nil, errors.Annotate(err, "failed to update network config")
	}
	configPath := containerConfigFilename(name)
	logger.Tracef("updated network config in %q for container %q", configPath, name)
	// Ensure the run-time config of the new container has correctly
	// ordered network settings, otherwise Start() below will fail. We
	// need this now because after lxc-create or lxc-clone the initial
	// lxc.conf generated inside createContainer gets merged with
	// other settings (e.g. system-wide overrides, changes made by
	// hooks, etc.) and the result can still be incorrectly ordered.
	// See LP bug #1414016.
	if _, err := reorderNetworkConfig(configPath); err != nil {
		return nil, nil, errors.Annotate(err, "failed to reorder network settings")
	}

	// To speed-up the initial container startup we pre-render the
	// /etc/network/interfaces directly inside the rootfs. This won't
	// work if we use AUFS snapshots, so it's disabled if useAUFS is
	// true (for now).
	if networkConfig != nil && len(networkConfig.Interfaces) > 0 {
		interfacesFile := filepath.Join(LxcContainerDir, name, "rootfs", etcNetworkInterfaces)
		if manager.useAUFS {
			logger.Tracef("not pre-rendering %q when using AUFS-backed rootfs", interfacesFile)
		} else {
			data, err := containerinit.GenerateNetworkConfig(networkConfig)
			if err != nil {
				return nil, nil, errors.Annotatef(err, "failed to generate %q", interfacesFile)
			}
			if err := utils.AtomicWriteFile(interfacesFile, []byte(data), 0644); err != nil {
				return nil, nil, errors.Annotatef(err, "cannot write generated %q", interfacesFile)
			}
			logger.Tracef("pre-rendered network config in %q", interfacesFile)
		}
	}

	// Start the lxc container with the appropriate settings for
	// grabbing the console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")

	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Warningf("container failed to start %v", err)
		// if the container fails to start we should try to destroy it
		// check if the container has been constructed
		if lxcContainer.IsConstructed() {
			// if so, then we need to destroy the leftover container
			if derr := lxcContainer.Destroy(); derr != nil {
				// if an error is reported there is probably a leftover
				// container that the user should clean up manually
				logger.Errorf("container failed to start and failed to destroy: %v", derr)
				return nil, nil, errors.Annotate(err, "container failed to start and failed to destroy: manual cleanup of containers needed")
			}
			logger.Warningf("container failed to start and was destroyed - safe to retry")
			return nil, nil, errors.Wrap(err, instance.NewRetryableCreationError("container failed to start and was destroyed: "+lxcContainer.Name()))
		}
		logger.Warningf("container failed to start: %v", err)
		return nil, nil, errors.Annotate(err, "container failed to start")
	}

	hardware := &instance.HardwareCharacteristics{
		Arch: &version.Current.Arch,
	}

	return &lxcInstance{lxcContainer, name}, hardware, nil
}
Example #22
// FindTools is defined on the ToolsFinder interface.
func (h hostArchToolsFinder) FindTools(v version.Number, series string, _ *string) (tools.List, error) {
	// Override the arch constraint with the arch of the host.
	arch := arch.HostArch()
	return h.f.FindTools(v, series, &arch)
}
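
hostArchToolsFinder is a small decorator over another ToolsFinder. A standalone sketch of the same pattern against a stubbed finder; the finder interface and stubFinder type below are illustrative, not Juju's:

package main

import "fmt"

// finder is a cut-down stand-in for the ToolsFinder interface.
type finder interface {
	FindTools(number, series string, arch *string) ([]string, error)
}

// hostArchFinder decorates another finder, forcing the arch argument
// to the host's architecture just as hostArchToolsFinder does.
type hostArchFinder struct {
	f        finder
	hostArch string
}

func (h hostArchFinder) FindTools(number, series string, _ *string) ([]string, error) {
	a := h.hostArch
	return h.f.FindTools(number, series, &a)
}

// stubFinder echoes back what it was asked for.
type stubFinder struct{}

func (stubFinder) FindTools(number, series string, arch *string) ([]string, error) {
	return []string{fmt.Sprintf("%s-%s-%s", number, series, *arch)}, nil
}

func main() {
	f := hostArchFinder{f: stubFinder{}, hostArch: "amd64"}
	list, _ := f.FindTools("1.21.0", "trusty", nil) // caller's arch is ignored
	fmt.Println(list)                               // [1.21.0-trusty-amd64]
}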
Example #23
// StartInstance is specified in the Broker interface.
func (broker *lxcBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting lxc containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	lxcLogger.Infof("starting lxc container for machineId: %s", machineId)

	// Default to using the host network until we can configure.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = lxc.DefaultLxcBridge
	}

	if !environs.AddressAllocationEnabled() {
		logger.Debugf(
			"address allocation feature flag not enabled; using DHCP for container %q",
			machineId,
		)
	} else {
		logger.Debugf("trying to allocate static IP for container %q", machineId)
		allocatedInfo, err := configureContainerNetwork(
			machineId,
			bridgeDevice,
			broker.api,
			args.NetworkInfo,
			true, // allocate a new address.
			broker.enableNAT,
		)
		if err != nil {
			// It's fine, just ignore it. The effect will be that the
			// container won't have a static address configured.
			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
		} else {
			args.NetworkInfo = allocatedInfo
		}
	}
	network := container.BridgeNetworkConfig(bridgeDevice, broker.defaultMTU, args.NetworkInfo)

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's. We
	// must constrain to the host's architecture for LXC.
	arch := arch.HostArch()
	archTools, err := args.Tools.Match(tools.Filter{
		Arch: arch,
	})
	if err == tools.ErrNoMatches {
		return nil, errors.Errorf(
			"need tools for arch %s, only found %s",
			arch,
			args.Tools.Arches(),
		)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXC
	args.InstanceConfig.Tools = archTools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxcLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}
	storageConfig := &container.StorageConfig{
		AllowMount: config.AllowLXCLoopMounts,
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxcLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig)
	if err != nil {
		lxcLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	lxcLogger.Infof("started lxc container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
Example #24
func (manager *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {

	name := names.NewMachineTag(instanceConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}

	// Set the MachineContainerHostname to match the name returned by virsh list
	instanceConfig.MachineContainerHostname = name

	// Note here that KvmObjectFactory only returns a valid container
	// object, and doesn't actually construct the underlying kvm
	// container on disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to create container directory")
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := containerinit.WriteUserData(instanceConfig, networkConfig, directory)
	if err != nil {
		logger.Infof("machine config api %#v", *instanceConfig.APIInfo)
		err = errors.Annotate(err, "failed to write user data")
		logger.Infof(err.Error())
		return nil, nil, err
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(instanceConfig.Constraints)
	startParams.Arch = arch.HostArch()
	startParams.Series = series
	startParams.Network = networkConfig
	startParams.UserDataFile = userDataFilename

	// If the simplestreams stream requested is anything other than
	// released, update our StartParams to request it.
	if instanceConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + instanceConfig.ImageStream
	}

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", instanceConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		err = errors.Annotate(err, "kvm container creation failed")
		logger.Infof(err.Error())
		return nil, nil, err
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
Example #25
// Make sure a template exists that we can clone from.
func EnsureCloneTemplate(
	backingFilesystem string,
	series string,
	networkConfig *container.NetworkConfig,
	authorizedKeys string,
	aptProxy proxy.Settings,
	aptMirror string,
	enablePackageUpdates bool,
	enableOSUpgrades bool,
	imageURLGetter container.ImageURLGetter,
	useAUFS bool,
) (golxc.Container, error) {
	name := fmt.Sprintf("juju-%s-lxc-template", series)
	containerDirectory, err := container.NewDirectory(name)
	if err != nil {
		return nil, err
	}

	lock, err := AcquireTemplateLock(name, "ensure clone exists")
	if err != nil {
		return nil, err
	}
	defer lock.Unlock()

	lxcContainer := LxcObjectFactory.New(name)
	// Early exit if the container has been constructed before.
	if lxcContainer.IsConstructed() {
		logger.Infof("template exists, continuing")
		return lxcContainer, nil
	}
	logger.Infof("template does not exist, creating")

	userData, err := containerinit.TemplateUserData(
		series,
		authorizedKeys,
		aptProxy,
		aptMirror,
		enablePackageUpdates,
		enableOSUpgrades,
		networkConfig,
	)
	if err != nil {
		logger.Tracef("failed to create template user data for template: %v", err)
		return nil, err
	}
	userDataFilename, err := containerinit.WriteCloudInitFile(containerDirectory, userData)
	if err != nil {
		return nil, err
	}

	templateParams := []string{
		"--debug",                      // Debug errors in the cloud image
		"--userdata", userDataFilename, // Our groovey cloud-init
		"--hostid", name, // Use the container name as the hostid
		"-r", series,
	}
	var caCert []byte
	if imageURLGetter != nil {
		arch := arch.HostArch()
		imageURL, err := imageURLGetter.ImageURL(instance.LXC, series, arch)
		if err != nil {
			return nil, errors.Annotatef(err, "cannot determine cached image URL")
		}
		templateParams = append(templateParams, "-T", imageURL)
		caCert = imageURLGetter.CACert()
	}
	var extraCreateArgs []string
	if backingFilesystem == Btrfs {
		extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
	}

	// Create the container.
	logger.Tracef("create the template container")
	err = createContainer(
		lxcContainer,
		containerDirectory,
		networkConfig,
		extraCreateArgs,
		templateParams,
		caCert,
	)
	if err != nil {
		logger.Errorf("lxc template container creation failed: %v", err)
		return nil, err
	}
	// Make sure that the mount dir has been created.
	logger.Tracef("make the mount dir for the shared logs")
	if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
		logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
		return nil, err
	}

	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(containerDirectory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(containerDirectory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, err
	}
	logger.Infof("template container started, now wait for it to stop")
	// Perhaps we should wait for it to finish, and the question becomes "how
	// long do we wait for it to complete?"

	console, err := os.Open(consoleFile)
	if err != nil {
		// can't listen
		return nil, err
	}

	tailWriter := &logTail{tick: time.Now()}
	consoleTailer := tailer.NewTailer(console, tailWriter, nil)
	defer consoleTailer.Stop()

	// Poll while the container runs: if there has been no output for
	// TemplateStopTimeout and the container is still running, something
	// has probably gone wrong.
	for lxcContainer.IsRunning() {
		if tailWriter.lastTick().Before(time.Now().Add(-TemplateStopTimeout)) {
			logger.Infof("not heard anything from the template log for five minutes")
			return nil, fmt.Errorf("template container %q did not stop", name)
		}
		time.Sleep(time.Second)
	}

	return lxcContainer, nil
}
Example #26
func (s *archSuite) TestHostArch(c *gc.C) {
	a := arch.HostArch()
	c.Assert(arch.IsSupportedArch(a), jc.IsTrue)
}
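
arch.HostArch itself is not listed on this page. A helper like it typically canonicalizes runtime.GOARCH through an alias table; the sketch below illustrates that idea with an assumed alias map, not Juju's actual one:

package main

import (
	"fmt"
	"runtime"
)

// archAliases maps Go's GOARCH names to the canonical names used in
// tool and image metadata. The entries are illustrative only.
var archAliases = map[string]string{
	"386": "i386",
	"arm": "armhf",
}

// hostArch returns the canonical name of the architecture this binary
// was built for, falling back to runtime.GOARCH.
func hostArch() string {
	if canonical, ok := archAliases[runtime.GOARCH]; ok {
		return canonical
	}
	return runtime.GOARCH
}

func main() {
	fmt.Println(hostArch()) // e.g. "amd64"
}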
Example #27
	"path/filepath"
	"runtime"
	"strconv"
	"strings"

	"github.com/juju/juju/juju/arch"
)

const (
	// preallocAlign must divide all preallocated files' sizes.
	preallocAlign = 4096
)

var (
	runtimeGOOS  = runtime.GOOS
	hostWordSize = arch.Info[arch.HostArch()].WordSize

	// zeroes is used by preallocFile to write zeroes to
	// preallocated Mongo data files.
	zeroes = make([]byte, 64*1024)

	minOplogSizeMB = 1024
	maxOplogSizeMB = 50 * 1024

	availSpace   = fsAvailSpace
	preallocFile = doPreallocFile
)

// preallocOplog preallocates the Mongo oplog in the
// specified Mongo database directory.
func preallocOplog(dir string) error {