func (s *DirectorySuite) TestRemoveContainerDir(c *gc.C) { dir, err := container.NewDirectory("testing") c.Assert(err, gc.IsNil) err = container.RemoveDirectory("testing") c.Assert(err, gc.IsNil) c.Assert(dir, jc.DoesNotExist) c.Assert(filepath.Join(s.removedDir, "testing"), jc.IsDirectory) }
func (s *DirectorySuite) TestRemoveContainerDirWithClash(c *gc.C) { dir, err := container.NewDirectory("testing") c.Assert(err, gc.IsNil) clash := filepath.Join(s.removedDir, "testing") err = os.MkdirAll(clash, 0755) c.Assert(err, gc.IsNil) err = container.RemoveDirectory("testing") c.Assert(err, gc.IsNil) c.Assert(dir, jc.DoesNotExist) c.Assert(filepath.Join(s.removedDir, "testing.1"), jc.IsDirectory) }
func (manager *containerManager) CreateContainer( machineConfig *cloudinit.MachineConfig, series string, network *container.NetworkConfig) (instance.Instance, *instance.HardwareCharacteristics, error) { name := names.MachineTag(machineConfig.MachineId) if manager.name != "" { name = fmt.Sprintf("%s-%s", manager.name, name) } // Note here that the kvmObjectFacotry only returns a valid container // object, and doesn't actually construct the underlying kvm container on // disk. kvmContainer := KvmObjectFactory.New(name) // Create the cloud-init. directory, err := container.NewDirectory(name) if err != nil { return nil, nil, fmt.Errorf("failed to create container directory: %v", err) } logger.Tracef("write cloud-init") userDataFilename, err := container.WriteUserData(machineConfig, directory) if err != nil { return nil, nil, errors.LoggedErrorf(logger, "failed to write user data: %v", err) } // Create the container. startParams := ParseConstraintsToStartParams(machineConfig.Constraints) startParams.Arch = version.Current.Arch startParams.Series = series startParams.Network = network startParams.UserDataFile = userDataFilename var hardware instance.HardwareCharacteristics hardware, err = instance.ParseHardware( fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v", startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores)) if err != nil { logger.Warningf("failed to parse hardware: %v", err) } logger.Tracef("create the container, constraints: %v", machineConfig.Constraints) if err := kvmContainer.Start(startParams); err != nil { return nil, nil, errors.LoggedErrorf(logger, "kvm container creation failed: %v", err) } logger.Tracef("kvm container created") return &kvmInstance{kvmContainer, name}, &hardware, nil }
// EnsureCloneTemplate makes sure an lxc template container named
// "juju-<series>-template" exists and is ready to be cloned from, creating
// and booting it on first use. The template lock is held for the whole
// operation so concurrent callers cannot race on creation. After the first
// boot the function waits for the template container to stop on its own,
// failing if its console log goes quiet for longer than TemplateStopTimeout.
func EnsureCloneTemplate(
	backingFilesystem string,
	series string,
	network *container.NetworkConfig,
	authorizedKeys string,
	aptProxy osenv.ProxySettings,
) (golxc.Container, error) {
	name := fmt.Sprintf("juju-%s-template", series)
	containerDirectory, err := container.NewDirectory(name)
	if err != nil {
		return nil, err
	}
	// Serialize template creation across processes; released on every return
	// path via the defer below.
	lock, err := AcquireTemplateLock(name, "ensure clone exists")
	if err != nil {
		return nil, err
	}
	defer lock.Unlock()
	lxcContainer := LxcObjectFactory.New(name)
	// Early exit if the container has been constructed before.
	if lxcContainer.IsConstructed() {
		logger.Infof("template exists, continuing")
		return lxcContainer, nil
	}
	logger.Infof("template does not exist, creating")
	userData, err := templateUserData(series, authorizedKeys, aptProxy)
	if err != nil {
		logger.Tracef("failed to create template user data for template: %v", err)
		return nil, err
	}
	userDataFilename, err := container.WriteCloudInitFile(containerDirectory, userData)
	if err != nil {
		return nil, err
	}
	configFile, err := writeLxcConfig(network, containerDirectory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, err
	}
	templateParams := []string{
		"--debug", // Debug errors in the cloud image
		"--userdata", userDataFilename, // Our groovey cloud-init
		"--hostid", name, // Use the container name as the hostid
		"-r", series,
	}
	// Only pass a backing-store flag when the host filesystem is btrfs.
	var extraCreateArgs []string
	if backingFilesystem == Btrfs {
		extraCreateArgs = append(extraCreateArgs, "-B", Btrfs)
	}
	// Create the container.
	logger.Tracef("create the container")
	if err := lxcContainer.Create(configFile, defaultTemplate, extraCreateArgs, templateParams); err != nil {
		logger.Errorf("lxc container creation failed: %v", err)
		return nil, err
	}
	// Make sure that the mount dir has been created.
	logger.Tracef("make the mount dir for the shared logs")
	if err := os.MkdirAll(internalLogDir(name), 0755); err != nil {
		logger.Tracef("failed to create internal /var/log/juju mount dir: %v", err)
		return nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(containerDirectory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(containerDirectory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time. This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, err
	}
	logger.Infof("template container started, now wait for it to stop")
	// Perhaps we should wait for it to finish, and the question becomes "how
	// long do we wait for it to complete?"
	console, err := os.Open(consoleFile)
	if err != nil {
		// can't listen
		return nil, err
	}
	// Tail the console log so we can tell whether the first boot is still
	// producing output; tick starts at "now" so a freshly started container
	// is not immediately considered stalled.
	tailWriter := &logTail{tick: time.Now()}
	consoleTailer := tailer.NewTailer(console, tailWriter, nil)
	defer consoleTailer.Stop()
	// We should wait maybe 1 minute between output?
	// if no output check to see if stopped
	// If we have no output and still running, something has probably gone wrong
	for lxcContainer.IsRunning() {
		// NOTE(review): the log message says "five minutes" but the actual
		// window is TemplateStopTimeout, defined elsewhere — confirm they agree.
		if tailWriter.lastTick().Before(time.Now().Add(-TemplateStopTimeout)) {
			logger.Infof("not heard anything from the template log for five minutes")
			return nil, fmt.Errorf("template container %q did not stop", name)
		}
		time.Sleep(time.Second)
	}
	return lxcContainer, nil
}
func (*DirectorySuite) TestNewContainerDir(c *gc.C) { dir, err := container.NewDirectory("testing") c.Assert(err, gc.IsNil) c.Assert(dir, jc.IsDirectory) }
// CreateContainer creates and starts an lxc container for the given machine.
// It writes the cloud-init user data and lxc config into the container's
// directory, then either clones the shared series template (when
// manager.createWithClone is set) or creates a fresh container from
// defaultTemplate, configures autostart and log-dir mounting, and finally
// boots the container. Returns the instance and hardware characteristics
// holding only the current architecture.
func (manager *containerManager) CreateContainer(
	machineConfig *cloudinit.MachineConfig,
	series string,
	network *container.NetworkConfig,
) (instance.Instance, *instance.HardwareCharacteristics, error) {
	// Used only to log the total creation time at the end.
	start := time.Now()
	name := names.MachineTag(machineConfig.MachineId)
	if manager.name != "" {
		name = fmt.Sprintf("%s-%s", manager.name, name)
	}
	// Create the cloud-init.
	directory, err := container.NewDirectory(name)
	if err != nil {
		return nil, nil, err
	}
	logger.Tracef("write cloud-init")
	if manager.createWithClone {
		// If we are using clone, disable the apt-get steps — the template
		// container has already run them.
		machineConfig.DisablePackageCommands = true
	}
	userDataFilename, err := container.WriteUserData(machineConfig, directory)
	if err != nil {
		logger.Errorf("failed to write user data: %v", err)
		return nil, nil, err
	}
	logger.Tracef("write the lxc.conf file")
	configFile, err := writeLxcConfig(network, directory)
	if err != nil {
		logger.Errorf("failed to write config file: %v", err)
		return nil, nil, err
	}
	var lxcContainer golxc.Container
	if manager.createWithClone {
		// Clone path: make sure the series template exists, then clone it
		// under the template lock so concurrent clones don't race.
		templateContainer, err := EnsureCloneTemplate(
			manager.backingFilesystem,
			series,
			network,
			machineConfig.AuthorizedKeys,
			machineConfig.AptProxySettings,
		)
		if err != nil {
			return nil, nil, err
		}
		templateParams := []string{
			"--debug", // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
		}
		// Snapshot cloning is used for btrfs or AUFS; AUFS on a non-btrfs
		// filesystem additionally needs an explicit backing store.
		var extraCloneArgs []string
		if manager.backingFilesystem == Btrfs || manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--snapshot")
		}
		if manager.backingFilesystem != Btrfs && manager.useAUFS {
			extraCloneArgs = append(extraCloneArgs, "--backingstore", "aufs")
		}
		lock, err := AcquireTemplateLock(templateContainer.Name(), "clone")
		if err != nil {
			return nil, nil, fmt.Errorf("failed to acquire lock on template: %v", err)
		}
		// Held until this function returns, i.e. through container start.
		defer lock.Unlock()
		lxcContainer, err = templateContainer.Clone(name, extraCloneArgs, templateParams)
		if err != nil {
			logger.Errorf("lxc container cloning failed: %v", err)
			return nil, nil, err
		}
	} else {
		// Note here that the lxcObjectFactory only returns a valid container
		// object, and doesn't actually construct the underlying lxc container on
		// disk.
		lxcContainer = LxcObjectFactory.New(name)
		templateParams := []string{
			"--debug", // Debug errors in the cloud image
			"--userdata", userDataFilename, // Our groovey cloud-init
			"--hostid", name, // Use the container name as the hostid
			"-r", series,
		}
		// Create the container.
		logger.Tracef("create the container")
		if err := lxcContainer.Create(configFile, defaultTemplate, nil, templateParams); err != nil {
			logger.Errorf("lxc container creation failed: %v", err)
			return nil, nil, err
		}
		logger.Tracef("lxc container created")
	}
	if err := autostartContainer(name); err != nil {
		return nil, nil, err
	}
	if err := mountHostLogDir(name, manager.logdir); err != nil {
		return nil, nil, err
	}
	// Start the lxc container with the appropriate settings for grabbing the
	// console output and a log file.
	consoleFile := filepath.Join(directory, "console.log")
	lxcContainer.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug)
	logger.Tracef("start the container")
	// We explicitly don't pass through the config file to the container.Start
	// method as we have passed it through at container creation time. This
	// is necessary to get the appropriate rootfs reference without explicitly
	// setting it ourselves.
	if err = lxcContainer.Start("", consoleFile); err != nil {
		logger.Errorf("container failed to start: %v", err)
		return nil, nil, err
	}
	// Only the architecture is known here; other hardware fields are left unset.
	arch := version.Current.Arch
	hardware := &instance.HardwareCharacteristics{
		Arch: &arch,
	}
	logger.Tracef("container %q started: %v", name, time.Now().Sub(start))
	return &lxcInstance{lxcContainer, name}, hardware, nil
}