Example #1
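// TestRemoveContainerDir verifies that RemoveDirectory archives the
// container's directory under the suite's removedDir rather than deleting it.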
func (s *DirectorySuite) TestRemoveContainerDir(c *gc.C) {
	dir, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)
	err = container.RemoveDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(dir, jc.DoesNotExist)
	c.Assert(filepath.Join(s.removedDir, "testing"), jc.IsDirectory)
}
Example #2
File: kvm.go Project: kapilt/juju
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {

	name := names.NewMachineTag(machineConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Note here that the KvmObjectFactory only returns a valid container
	// object, and doesn't actually construct the underlying kvm container on
	// disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create container directory: %v", err)
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err)
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(machineConfig.Constraints)
	startParams.Arch = version.Current.Arch
	startParams.Series = series
	startParams.Network = network
	startParams.UserDataFile = userDataFilename

	// If the Simplestream requested is anything but released, update
	// our StartParams to request it.
	if machineConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + machineConfig.ImageStream
	}

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", machineConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err)
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
Example #3
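// TestRemoveContainerDirWithClash verifies that when a directory of the same
// name already exists under removedDir, the archived copy is renamed with a
// numeric suffix ("testing.1").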
func (s *DirectorySuite) TestRemoveContainerDirWithClash(c *gc.C) {
	dir, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)

	clash := filepath.Join(s.removedDir, "testing")
	err = os.MkdirAll(clash, 0755)
	c.Assert(err, jc.ErrorIsNil)

	err = container.RemoveDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(dir, jc.DoesNotExist)
	c.Assert(filepath.Join(s.removedDir, "testing.1"), jc.IsDirectory)
}
Example #4
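// TestNewContainerDir verifies that NewDirectory creates the container's
// working directory on disk.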
func (*DirectorySuite) TestNewContainerDir(c *gc.C) {
	dir, err := container.NewDirectory("testing")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(dir, jc.IsDirectory)
}
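Examples 1, 3, and 4 together pin down the contract of the directory helpers: container.NewDirectory creates a per-container working directory, and container.RemoveDirectory archives that directory under a removed-containers location, appending a numeric suffix (testing.1) when the name is already taken. The helpers themselves are not shown on this page; the following is only a minimal sketch of behaviour consistent with those tests, in which containerDir and removedContainerDir are assumed base paths rather than the project's actual identifiers.

package container

import (
	"fmt"
	"os"
	"path/filepath"
)

// Assumed base directories for this sketch; the real project derives its
// paths from its own data directory.
var (
	containerDir        = "/var/lib/juju/containers"
	removedContainerDir = "/var/lib/juju/removed-containers"
)

// NewDirectory creates the working directory for the named container and
// returns its path.
func NewDirectory(containerName string) (string, error) {
	directory := filepath.Join(containerDir, containerName)
	if err := os.MkdirAll(directory, 0755); err != nil {
		return "", err
	}
	return directory, nil
}

// RemoveDirectory archives the named container's working directory by moving
// it under removedContainerDir, adding a ".N" suffix when the name clashes
// with an existing entry (the behaviour asserted in Example 3).
func RemoveDirectory(containerName string) error {
	if err := os.MkdirAll(removedContainerDir, 0755); err != nil {
		return err
	}
	source := filepath.Join(containerDir, containerName)
	target := filepath.Join(removedContainerDir, containerName)
	for i := 1; ; i++ {
		if _, err := os.Stat(target); os.IsNotExist(err) {
			break
		}
		target = filepath.Join(removedContainerDir, fmt.Sprintf("%s.%d", containerName, i))
	}
	return os.Rename(source, target)
}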
Example #5
// Make sure a template exists that we can clone from.
func EnsureCloneTemplate(
	backingFilesystem string,
	series string,
	networkConfig *container.NetworkConfig,
	authorizedKeys string,
	aptProxy proxy.Settings,
	aptMirror string,
	enablePackageUpdates bool,
	enableOSUpgrades bool,
	imageURLGetter container.ImageURLGetter,
	useAUFS bool,
	callback func(containerStatus status.Status, info string, data map[string]interface{}) error,
) (_ golxc.Container, err error) {
	name := fmt.Sprintf("juju-%s-lxc-template", series)

	defer func() {
		if err != nil {
			callback(status.StatusProvisioningError, fmt.Sprintf("Creating container: %v", err), nil)
		}
	}()
	containerDirectory, err := container.NewDirectory(name)
	if err != nil {
		return nil, err
	}

	lock, err := AcquireTemplateLock(name, "ensure clone exists")
	if err != nil {
		return nil, err
	}
	defer lock.Unlock()

	lxcContainer := LxcObjectFactory.New(name)
	// Early exit if the container has been constructed before.
	if lxcContainer.IsConstructed() {
		logger.Infof("template exists, continuing")
		return lxcContainer, nil
	}
	logger.Infof("template does not exist, creating")

	callback(status.StatusAllocating, "Creating template container; downloading image may take some time", nil)

	userData, err := containerinit.TemplateUserData(
		series,
		authorizedKeys,
		aptProxy,
		aptMirror,
		enablePackageUpdates,
		enableOSUpgrades,
		networkConfig,
	)
	if err != nil {
		logger.Tracef("failed to create template user data for template: %v", err)
		return nil, err
	}
	userDataFilename, err := containerinit.WriteCloudInitFile(containerDirectory, userData)
	if err != nil {
		return nil, err
	}

	templateParams := []string{
		"--debug",                      // Debug errors in the cloud image
		"--userdata", userDataFilename, // Our groovy cloud-init
		"--hostid", name, // Use the container name as the hostid
		"-r", series,
	}
	var caCert []byte
	if imageURLGetter != nil {
		arch := arch.HostArch()
		imageURL, err := imageURLGetter.ImageURL(instance.LXC, series, arch)
		if err != nil {
			return nil, errors.Annotatef(err, "cannot determine cached image URL")
		}
		templateParams = append(templateParams, "-T", imageURL)
		caCert = imageURLGetter.CACert()
	}
	var extraCreateArgs []string
	if backingFilesystem == Btrfs {
		extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
	}

	// Create the container.
	logger.Tracef("create the template container")
	err = createContainer(
		lxcContainer,
		containerDirectory,
		networkConfig,
		extraCreateArgs,
		templateParams,
		caCert,
	)
	if err != nil {
		logger.Errorf("lxc template container creation failed: %v", err)
		return nil, err
	}
	// Make sure that the mount dir has been created.
	logger.Tracef("make the mount dir for the shared logs")
	if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
		logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
		return nil, err
	}

	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(containerDirectory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(containerDirectory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, err
	}
	logger.Infof("template container started, now wait for it to stop")
	callback(status.StatusAllocating, "Template container created; waiting for cloud-init to complete", nil)
	// Perhaps we should wait for it to finish, and the question becomes "how
	// long do we wait for it to complete?"

	console, err := os.Open(consoleFile)
	if err != nil {
		// can't listen
		return nil, err
	}

	tailWriter := &logTail{tick: time.Now()}
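	// The tailer copies console output into tailWriter, whose lastTick the
	// polling loop below uses to detect a template container that has gone quiet.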
	consoleTailer := tailer.NewTailer(console, tailWriter, nil)
	defer consoleTailer.Stop()

	// Poll while the container is running: if there has been no console
	// output for TemplateStopTimeout and the container is still running,
	// assume something has gone wrong and give up.
	for lxcContainer.IsRunning() {
		if tailWriter.lastTick().Before(time.Now().Add(-TemplateStopTimeout)) {
			logger.Infof("not heard anything from the template log for five minutes")
			callback(status.StatusProvisioningError, "Container creation failed: template container has not stopped", nil)
			return nil, fmt.Errorf("template container %q did not stop", name)
		}
		time.Sleep(time.Second)
	}

	return lxcContainer, nil
}
Example #6
File: kvm.go Project: kat-co/juju
func (manager *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	cons constraints.Value,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
	callback container.StatusCallback,
) (_ instance.Instance, _ *instance.HardwareCharacteristics, err error) {

	name, err := manager.namespace.Hostname(instanceConfig.MachineId)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}

	defer func() {
		if err != nil {
			callback(status.ProvisioningError, fmt.Sprintf("Creating container: %v", err), nil)
		}
	}()

	// Set the MachineContainerHostname to match the name returned by virsh list
	instanceConfig.MachineContainerHostname = name

	// Note here that the kvmObjectFactory only returns a valid container
	// object, and doesn't actually construct the underlying kvm container on
	// disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to create container directory")
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := containerinit.WriteUserData(instanceConfig, networkConfig, directory)
	if err != nil {
		logger.Infof("machine config api %#v", *instanceConfig.APIInfo)
		err = errors.Annotate(err, "failed to write user data")
		logger.Infof(err.Error())
		return nil, nil, err
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(cons)
	startParams.Arch = arch.HostArch()
	startParams.Series = series
	startParams.Network = networkConfig
	startParams.UserDataFile = userDataFilename

	// If the Simplestream requested is anything but released, update
	// our StartParams to request it.
	if instanceConfig.ImageStream != imagemetadata.ReleasedStream {
		startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + instanceConfig.ImageStream
	}

	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to parse hardware")
	}

	callback(status.Allocating, "Creating container; it might take some time", nil)
	logger.Tracef("create the container, constraints: %v", cons)
	if err := kvmContainer.Start(startParams); err != nil {
		err = errors.Annotate(err, "kvm container creation failed")
		logger.Infof(err.Error())
		return nil, nil, err
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
Example #7
// Make sure a template exists that we can clone from.
func EnsureCloneTemplate(
	backingFilesystem string,
	series string,
	network *container.NetworkConfig,
	authorizedKeys string,
	aptProxy proxy.Settings,
) (golxc.Container, error) {
	name := fmt.Sprintf("juju-%s-template", series)
	containerDirectory, err := container.NewDirectory(name)
	if err != nil {
		return nil, err
	}

	lock, err := AcquireTemplateLock(name, "ensure clone exists")
	if err != nil {
		return nil, err
	}
	defer lock.Unlock()

	lxcContainer := LxcObjectFactory.New(name)
	// Early exit if the container has been constructed before.
	if lxcContainer.IsConstructed() {
		logger.Infof("template exists, continuing")
		return lxcContainer, nil
	}
	logger.Infof("template does not exist, creating")

	userData, err := templateUserData(series, authorizedKeys, aptProxy)
	if err != nil {
		logger.Tracef("failed to create template user data for template: %v", err)
		return nil, err
	}
	userDataFilename, err := container.WriteCloudInitFile(containerDirectory, userData)
	if err != nil {
		return nil, err
	}

	configFile, err := writeLxcConfig(network, containerDirectory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, err
	}
	templateParams := []string{
		"--debug",                      // Debug errors in the cloud image
		"--userdata", userDataFilename, // Our groovy cloud-init
		"--hostid", name, // Use the container name as the hostid
		"-r", series,
	}
	var extraCreateArgs []string
	if backingFilesystem == Btrfs {
		extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
	}
	// Create the container.
	logger.Tracef("create the container")
	if err := lxcContainer.Create(configFile, defaultTemplate, extraCreateArgs, templateParams); err != nil {
		logger.Errorf("lxc container creation failed: %v", err)
		return nil, err
	}
	// Make sure that the mount dir has been created.
	logger.Tracef("make the mount dir for the shared logs")
	if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
		logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
		return nil, err
	}

	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(containerDirectory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(containerDirectory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, err
	}
	logger.Infof("template container started, now wait for it to stop")
	// Perhaps we should wait for it to finish, and the question becomes "how
	// long do we wait for it to complete?"

	console, err := os.Open(consoleFile)
	if err != nil {
		// can't listen
		return nil, err
	}

	tailWriter := &logTail{tick: time.Now()}
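	// The tailer copies console output into tailWriter, whose lastTick the
	// polling loop below uses to detect a template container that has gone quiet.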
	consoleTailer := tailer.NewTailer(console, tailWriter, nil)
	defer consoleTailer.Stop()

	// Poll while the container is running: if there has been no console
	// output for TemplateStopTimeout and the container is still running,
	// assume something has gone wrong and give up.
	for lxcContainer.IsRunning() {
		if tailWriter.lastTick().Before(time.Now().Add(-TemplateStopTimeout)) {
			logger.Infof("not heard anything from the template log for five minutes")
			return nil, fmt.Errorf("template container %q did not stop", name)
		}
		time.Sleep(time.Second)
	}

	return lxcContainer, nil
}
Example #8
// CreateContainer creates or clones an LXC container.
func (manager *containerManager) CreateContainer(
	instanceConfig *instancecfg.InstanceConfig,
	series string,
	networkConfig *container.NetworkConfig,
	storageConfig *container.StorageConfig,
) (inst instance.Instance, _ *instance.HardwareCharacteristics, err error) {
	// Check our preconditions
	if manager == nil {
		panic("manager is nil")
	} else if series == "" {
		panic("series not set")
	} else if networkConfig == nil {
		panic("networkConfig is nil")
	} else if storageConfig == nil {
		panic("storageConfig is nil")
	}

	// Log how long the start took
	defer func(start time.Time) {
		if err == nil {
			logger.Tracef("container %q started: %v", inst.Id(), time.Now().Sub(start))
		}
	}(time.Now())

	name := names.NewMachineTag(instanceConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to create a directory for the container")
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := containerinit.WriteUserData(instanceConfig, networkConfig, directory)
	if err != nil {
		return nil, nil, errors.Annotate(err, "failed to write user data")
	}

	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			networkConfig,
			instanceConfig.AuthorizedKeys,
			instanceConfig.AptProxySettings,
			instanceConfig.AptMirror,
			instanceConfig.EnableOSRefreshUpdate,
			instanceConfig.EnableOSUpgrade,
			manager.imageURLGetter,
			manager.useAUFS,
		)
		if err != nil {
			return nil, nil, errors.Annotate(err, "failed to retrieve the template to clone")
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}

		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, errors.Annotate(err, "failed to acquire lock on template")
		}
		defer lock.Unlock()

		// Ensure the run-time effective config of the template
		// container has correctly ordered network settings, otherwise
		// Clone() below will fail. This is needed in case we haven't
		// created a new template now but are reusing an existing one.
		// See LP bug #1414016.
		configPath := containerConfigFilename(templateContainer.Name())
		if _, err := reorderNetworkConfig(configPath); err != nil {
			return nil, nil, errors.Annotate(err, "failed to reorder network settings")
		}

		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			return nil, nil, errors.Annotate(err, "lxc container cloning failed")
		}
	} else {
		// Note here that the LxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc container on
		// disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		var caCert []byte
		if manager.imageURLGetter != nil {
			arch := arch.HostArch()
			imageURL, err := manager.imageURLGetter.ImageURL(instance.LXC, series, arch)
			if err != nil {
				return nil, nil, errors.Annotatef(err, "cannot determine cached image URL")
			}
			templateParams = append(templateParams, "-T", imageURL)
			caCert = manager.imageURLGetter.CACert()
		}
		err = createContainer(
			lxcContainer,
			directory,
			networkConfig,
			nil,
			templateParams,
			caCert,
		)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
	}

	if err := autostartContainer(name); err != nil {
		return nil, nil, errors.Annotate(err, "failed to configure the container for autostart")
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, errors.Annotate(err, "failed to mount the directory to log to")
	}
	if storageConfig.AllowMount {
		// Add config to allow loop devices to be mounted inside the container.
		if err := allowLoopbackBlockDevices(name); err != nil {
			return nil, nil, errors.Annotate(err, "failed to configure the container for loopback devices")
		}
	}
	// Update the network settings inside the run-time config of the
	// container (e.g. /var/lib/lxc/<name>/config) before starting it.
	netConfig := generateNetworkConfig(networkConfig)
	if err := updateContainerConfig(name, netConfig); err != nil {
		return nil, nil, errors.Annotate(err, "failed to update network config")
	}
	configPath := containerConfigFilename(name)
	logger.Tracef("updated network config in %q for container %q", configPath, name)
	// Ensure the run-time config of the new container has correctly
	// ordered network settings, otherwise Start() below will fail. We
	// need this now because after lxc-create or lxc-clone the initial
	// lxc.conf generated inside createContainer gets merged with
	// other settings (e.g. system-wide overrides, changes made by
	// hooks, etc.) and the result can still be incorrectly ordered.
	// See LP bug #1414016.
	if _, err := reorderNetworkConfig(configPath); err != nil {
		return nil, nil, errors.Annotate(err, "failed to reorder network settings")
	}

	// To speed-up the initial container startup we pre-render the
	// /etc/network/interfaces directly inside the rootfs. This won't
	// work if we use AUFS snapshots, so it's disabled if useAUFS is
	// true (for now).
	if networkConfig != nil && len(networkConfig.Interfaces) > 0 {
		interfacesFile := filepath.Join(LxcContainerDir, name, "rootfs", etcNetworkInterfaces)
		if manager.useAUFS {
			logger.Tracef("not pre-rendering %q when using AUFS-backed rootfs", interfacesFile)
		} else {
			data, err := containerinit.GenerateNetworkConfig(networkConfig)
			if err != nil {
				return nil, nil, errors.Annotatef(err, "failed to generate %q", interfacesFile)
			}
			if err := utils.AtomicWriteFile(interfacesFile, []byte(data), 0644); err != nil {
				return nil, nil, errors.Annotatef(err, "cannot write generated %q", interfacesFile)
			}
			logger.Tracef("pre-rendered network config in %q", interfacesFile)
		}
	}

	// Start the lxc container with the appropriate settings for
	// grabbing the console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")

	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Warningf("container failed to start %v", err)
		// if the container fails to start we should try to destroy it
		// check if the container has been constructed
		if lxcContainer.IsConstructed() {
			// if so, then we need to destroy the leftover container
			if derr := lxcContainer.Destroy(); derr != nil {
				// if an error is reported there is probably a leftover
				// container that the user should clean up manually
				logger.Errorf("container failed to start and failed to destroy: %v", derr)
				return nil, nil, errors.Annotate(err, "container failed to start and failed to destroy: manual cleanup of containers needed")
			}
			logger.Warningf("container failed to start and was destroyed - safe to retry")
			return nil, nil, errors.Wrap(err, instance.NewRetryableCreationError("container failed to start and was destroyed: "+lxcContainer.Name()))
		}
		logger.Warningf("container failed to start: %v", err)
		return nil, nil, errors.Annotate(err, "container failed to start")
	}

	hardware := &instance.HardwareCharacteristics{
		Arch: &version.Current.Arch,
	}

	return &lxcInstance{lxcContainer, name}, hardware, nil
}
Example #9
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
	start := time.Now()
	name := names.NewMachineTag(machineConfig.MachineId).String()
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, err
	}
	logger.Tracef("write cloud-init")
	if manager.createWithClone {
		// If we are using clone, disable the apt-get steps
		machineConfig.DisablePackageCommands = true
	}
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		logger.Errorf("failed to write user data: %v", err)
		return nil, nil, err
	}
	logger.Tracef("write the lxc.conf file")
	configFile, err := writeLxcConfig(network, directory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, nil, err
	}

	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			network,
			machineConfig.AuthorizedKeys,
			machineConfig.AptProxySettings,
		)
		if err != nil {
			return nil, nil, err
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}

		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to acquire lock on template: %v", err)
		}
		defer lock.Unlock()
		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			logger.Errorf("lxc container cloning failed: %v", err)
			return nil, nil, err
		}
	} else {
		// Note here that the LxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc container on
		// disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		// Create the container.
		logger.Tracef("create the container")
		if err := lxcContainer.Create(configFile, defaultTemplate, nil, templateParams); err != nil {
			logger.Errorf("lxc container creation failed: %v", err)
			return nil, nil, err
		}
		logger.Tracef("lxc container created")
	}
	if err := autostartContainer(name); err != nil {
		return nil, nil, err
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, nil, err
	}
	arch := version.Current.Arch
	hardware := &instance.HardwareCharacteristics{
		Arch: &arch,
	}
	logger.Tracef("container %q started: %v", name, time.Now().Sub(start))
	return &lxcInstance{lxcContainer, name}, hardware, nil
}