Example #1
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig) (instance.Instance, *instance.HardwareCharacteristics, error) {

	name := names.MachineTag(machineConfig.MachineId)
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Note that KvmObjectFactory only returns a valid container object; it
	// does not construct the underlying KVM container on disk.
	kvmContainer := KvmObjectFactory.New(name)

	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create container directory: %v", err)
	}
	logger.Tracef("write cloud-init")
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err)
	}
	// Create the container.
	startParams := ParseConstraintsToStartParams(machineConfig.Constraints)
	startParams.Arch = version.Current.Arch
	startParams.Series = series
	startParams.Network = network
	startParams.UserDataFile = userDataFilename

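	// Parse the desired hardware characteristics from the start parameters;
	// a parse failure is only logged, since the container can still be started.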
	var hardware instance.HardwareCharacteristics
	hardware, err = instance.ParseHardware(
		fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v",
			startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores))
	if err != nil {
		logger.Warningf("failed to parse hardware: %v", err)
	}

	logger.Tracef("create the container, constraints: %v", machineConfig.Constraints)
	if err := kvmContainer.Start(startParams); err != nil {
		return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err)
	}
	logger.Tracef("kvm container created")
	return &kvmInstance{kvmContainer, name}, &hardware, nil
}
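
As a point of reference, here is a minimal, self-contained sketch of the hardware specification string the KVM path above hands to instance.ParseHardware; the concrete values are hypothetical and only the standard library is used:

package main

import "fmt"

func main() {
	// Same format string used by the KVM CreateContainer above.
	arch, mem, rootDisk, cpuCores := "amd64", 512, 8, 1
	fmt.Printf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v\n",
		arch, mem, rootDisk, cpuCores)
	// Prints: arch=amd64 mem=512M root-disk=8G cpu-cores=1
}
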
Example #2
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
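	// Record when creation began so the total time can be traced at the end.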
	start := time.Now()
	name := names.MachineTag(machineConfig.MachineId)
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, err
	}
	logger.Tracef("write cloud-init")
	if manager.createWithClone {
		// If we are using clone, disable the apt-get steps
		machineConfig.DisablePackageCommands = true
	}
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		logger.Errorf("failed to write user data: %v", err)
		return nil, nil, err
	}
	logger.Tracef("write the lxc.conf file")
	configFile, err := writeLxcConfig(network, directory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, nil, err
	}

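	// Either clone the container from a cached template or create it from scratch.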
	var lxcContainer golxc.Container
	if manager.createWithClone {
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			network,
			machineConfig.AuthorizedKeys,
			machineConfig.AptProxySettings,
		)
		if err != nil {
			return nil, nil, err
		}
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
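		// A copy-on-write snapshot clone is possible on btrfs or via AUFS;
		// off btrfs, AUFS must be requested explicitly as the backing store.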
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}

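		// Serialise clones of this template so concurrent callers cannot corrupt it.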
		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to acquire lock on template: %v", err)
		}
		defer lock.Unlock()
		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			logger.Errorf("lxc container cloning failed: %v", err)
			return nil, nil, err
		}
	} else {
		// Note that LxcObjectFactory only returns a valid container object; it
		// does not construct the underlying lxc container on disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug",                      // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovy cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		// Create the container.
		logger.Tracef("create the container")
		if err := lxcContainer.Create(configFile, defaultTemplate, nil, templateParams); err != nil {
			logger.Errorf("lxc container creation failed: %v", err)
			return nil, nil, err
		}
		logger.Tracef("lxc container created")
	}
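	// Configure the container to start on host boot and give it access to the
	// host's log directory.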
	if err := autostartContainer(name); err != nil {
		return nil, nil, err
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time.  This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, nil, err
	}
	arch := version.Current.Arch
	hardware := &instance.HardwareCharacteristics{
		Arch: &arch,
	}
	logger.Tracef("container %q started: %v", name, time.Now().Sub(start))
	return &lxcInstance{lxcContainer, name}, hardware, nil
}
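
The snapshot/backing-store decision in the clone path above is compact enough to isolate as a standalone sketch; cloneArgs and the plain string values are illustrative stand-ins, not part of the Juju or golxc API:

package main

import "fmt"

// cloneArgs mirrors the extraCloneArgs logic from Example #2.
func cloneArgs(backingFilesystem string, useAUFS bool) []string {
	var args []string
	// A copy-on-write snapshot clone is possible on btrfs or via AUFS.
	if backingFilesystem == "btrfs" || useAUFS {
		args = append(args, "--snapshot")
	}
	// Off btrfs, AUFS must be requested explicitly as the backing store.
	if backingFilesystem != "btrfs" && useAUFS {
		args = append(args, "--backingstore", "aufs")
	}
	return args
}

func main() {
	fmt.Println(cloneArgs("btrfs", false)) // [--snapshot]
	fmt.Println(cloneArgs("ext4", true))   // [--snapshot --backingstore aufs]
	fmt.Println(cloneArgs("ext4", false))  // []
}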