Code example #1
File: container.go  Project: tomzhang/changes-client
// Creates the rootfs tarball and all other metadata that the lxc-download
// template expects. This allows us to "act" like an image that the lxc-download
// template would download, when in fact it is something entirely different that just
// needs to be treated similarly. The download template expects images to be stored
// on some sort of official server (not s3), but uses cached images when available.
// The image we are creating is to be used as a cached image for the download template.
func (c *Container) CreateImage(snapshot string, clientLog *client.Log) error {
	var err error

	err = c.Stop()
	if err != nil {
		return err
	}

	dest := filepath.Join("/var/cache/lxc/download", c.getImagePath(snapshot))
	clientLog.Writeln(fmt.Sprintf("==> Saving snapshot to %s", dest))
	start := time.Now()

	if err = os.MkdirAll(dest, 0755); err != nil {
		return err
	}

	err = c.createImageMetadata(dest, clientLog)
	if err != nil {
		return err
	}

	err = c.createImageSnapshotID(dest, clientLog)
	if err != nil {
		return err
	}

	err = c.createImageRootFs(dest, clientLog)
	if err != nil {
		return err
	}

	clientLog.Writeln(fmt.Sprintf("==> Snapshot created in %s", time.Since(start)))

	return nil
}
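
The metadata helpers called above (createImageMetadata, createImageSnapshotID) are not included in this listing. As a rough idea only, a snapshot_id writer could be as small as the following sketch; the real implementation, and whether the id comes from a struct field or elsewhere, is an assumption here (the cache does require a snapshot_id file, per the fileList in the next example).

// Hypothetical sketch of createImageSnapshotID -- not the project's actual
// code. It assumes the id only needs to be written verbatim into a
// "snapshot_id" file next to the config and rootfs tarball, and that the
// id is available on the struct as c.Snapshot (requires "io/ioutil").
func (c *Container) createImageSnapshotID(snapshotPath string, clientLog *client.Log) error {
	clientLog.Writeln("==> Writing snapshot_id")
	idPath := filepath.Join(snapshotPath, "snapshot_id")
	return ioutil.WriteFile(idPath, []byte(c.Snapshot+"\n"), 0644)
}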
Code example #2
File: container.go  Project: tomzhang/changes-client
// To avoid the complexity of having a sort-of public host, and to ensure we
// can instead easily store images on S3 (or similar), we attempt to
// sync images in a similar fashion to the LXC image downloader. This means
// that when we attempt to run the image, the download will look for our
// existing cache (that we've correctly populated) and just reference the
// image from there.
func (c *Container) ensureImageCached(snapshot string, clientLog *client.Log) error {
	var err error

	relPath := c.getImagePath(snapshot)
	localPath := filepath.Join("/var/cache/lxc/download", relPath)

	// list of files required to avoid network hit
	fileList := []string{fmt.Sprintf("rootfs.tar.%s", c.Compression), "config", "snapshot_id"}

	missingFiles := false
	for _, name := range fileList {
		if _, err = os.Stat(filepath.Join(localPath, name)); os.IsNotExist(err) {
			missingFiles = true
			break
		}
	}
	if !missingFiles {
		return nil
	}

	if c.S3Bucket == "" {
		return errors.New("Unable to find cached image, and no S3 bucket defined.")
	}

	err = os.MkdirAll(localPath, 0755)
	if err != nil {
		return err
	}

	remotePath := fmt.Sprintf("s3://%s/%s", c.S3Bucket, relPath)

	clientLog.Writeln(fmt.Sprintf("==> Downloading image %s", snapshot))
	// TODO(dcramer): verify env is passed correctly here
	cw := client.NewCmdWrapper([]string{"aws", "s3", "sync", "--quiet", remotePath, localPath}, "", []string{
		"HOME=/root",
	})

	start := time.Now()
	result, err := cw.Run(false, clientLog)
	dur := time.Since(start)

	if err != nil {
		return err
	}
	if !result.Success {
		return errors.New("Failed downloading image")
	}

	clientLog.Writeln(fmt.Sprintf("==> Image downloaded in %s", dur))

	return nil
}
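
getImagePath is also not shown in this listing. Judging from the comment on launchContainer below (the image lives in a folder qualified by arch, dist, release, and snapshot id), a plausible sketch would be:

// Hypothetical sketch of getImagePath -- the real helper is not shown here.
// It assumes the cache path is built from the container's distro, release,
// arch, and the snapshot id, mirroring the lxc-download cache layout.
func (c *Container) getImagePath(snapshot string) string {
	return filepath.Join(c.Dist, c.Release, c.Arch, snapshot)
}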
Code example #3
File: adapter.go  Project: aroravishal/changes-client
// Prepare the environment for future commands. This is run before any
// commands are processed and is run once.
func (a *Adapter) Prepare(clientLog *client.Log) error {
	clientLog.Writeln("LXC version: " + lxc.Version())
	err := a.container.Launch(clientLog)
	if err != nil {
		return err
	}

	workspace := "/home/ubuntu"
	if a.config.Workspace != "" {
		workspace = path.Join(workspace, a.config.Workspace)
	}
	workspace, err = filepath.Abs(path.Join(a.container.RootFs(), strings.TrimLeft(workspace, "/")))
	if err != nil {
		return err
	}
	a.workspace = workspace

	return nil
}
Code example #4
File: adapter.go  Project: tomzhang/changes-client
// Prepare the environment for future commands. This is run before any
// commands are processed and is run once.
func (a *Adapter) Prepare(clientLog *client.Log) error {
	clientLog.Writeln("LXC version: " + lxc.Version())
	err := a.container.Launch(clientLog)
	if err != nil {
		return err
	}

	artifactSource := "/home/ubuntu"
	if a.config.ArtifactSearchPath != "" {
		artifactSource = a.config.ArtifactSearchPath
	}
	artifactSource, err = filepath.Abs(path.Join(a.container.RootFs(), strings.TrimLeft(artifactSource, "/")))
	if err != nil {
		return err
	}
	a.artifactSource = artifactSource

	return nil
}
Code example #5
File: container.go  Project: tomzhang/changes-client
// Compresses the root of the filesystem into the desired compressed tarball.
// The compression here can vary based on flags.
func (c *Container) createImageRootFs(snapshotPath string, clientLog *client.Log) error {
	rootFsTxz := filepath.Join(snapshotPath, fmt.Sprintf("rootfs.tar.%s", c.Compression))

	clientLog.Writeln(fmt.Sprintf("==> Creating rootfs.tar.%s", c.Compression))

	var cw *client.CmdWrapper
	if c.Compression == "xz" {
		cw = client.NewCmdWrapper([]string{"tar", "-Jcf", rootFsTxz, "-C", c.RootFs(), "."}, "", []string{})
	} else {
		cw = client.NewCmdWrapper([]string{"tar", "-cf", rootFsTxz, "-I", "lz4", "-C", c.RootFs(), "."}, "", []string{})
	}
	result, err := cw.Run(false, clientLog)

	if err != nil {
		return err
	}
	if !result.Success {
		return fmt.Errorf("Failed creating rootfs.tar.%s", c.Compression)
	}

	return nil
}
Code example #6
File: container.go  Project: tomzhang/changes-client
// Uploads a snapshot outcome to an s3 bucket, at the same path that
// changes-client will expect to download it from. The snapshot itself
// is just a tarball of the rootfs of the container - compressed with
// either xz for high compression or lz4 for raw speed.
func (c *Container) UploadImage(snapshot string, clientLog *client.Log) error {
	relPath := c.getImagePath(snapshot)
	localPath := filepath.Join("/var/cache/lxc/download", relPath)
	remotePath := fmt.Sprintf("s3://%s/%s", c.S3Bucket, relPath)

	clientLog.Writeln(fmt.Sprintf("==> Uploading image %s", snapshot))
	// TODO(dcramer): verify env is passed correctly here
	cw := client.NewCmdWrapper([]string{"aws", "s3", "sync", "--quiet", localPath, remotePath}, "", []string{})

	start := time.Now()
	result, err := cw.Run(false, clientLog)
	dur := time.Since(start)

	if err != nil {
		return err
	}
	if !result.Success {
		return errors.New("Failed uploading image")
	}
	clientLog.Writeln(fmt.Sprintf("==> Image uploaded in %s", dur))

	return nil
}
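
Together with CreateImage from the first example, a caller that wants to publish a snapshot would build the cached image and then sync it to S3. A minimal sketch of that flow, assuming the caller already holds a configured *Container and *client.Log:

// Minimal sketch of the publish flow using the two methods shown above.
func publishSnapshot(c *Container, snapshot string, clientLog *client.Log) error {
	// CreateImage stops the container and writes config, snapshot_id and
	// the rootfs tarball under /var/cache/lxc/download.
	if err := c.CreateImage(snapshot, clientLog); err != nil {
		return err
	}
	// UploadImage syncs that cache directory to the configured S3 bucket.
	return c.UploadImage(snapshot, clientLog)
}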
Code example #7
func (r *Reporter) PublishArtifacts(cmd client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) {
	if len(cmd.Artifacts) == 0 {
		clientLog.Writeln("==> Skipping artifact collection")
		return
	}

	clientLog.Writeln(fmt.Sprintf("==> Collecting artifacts matching %s", cmd.Artifacts))

	matches, err := a.CollectArtifacts(cmd.Artifacts, clientLog)
	if err != nil {
		clientLog.Writeln(fmt.Sprintf("==> ERROR: " + err.Error()))
		return
	}

	for _, artifact := range matches {
		clientLog.Writeln(fmt.Sprintf("==> Found: %s", artifact))
	}

	r.pushArtifacts(matches)
}
Code example #8
// we want to output the log from running the container
func (s *AdapterSuite) reportLogChunks(clientLog *client.Log) {
	for chunk, ok := clientLog.GetChunk(); ok; chunk, ok = clientLog.GetChunk() {
		log.Print(string(chunk))
	}
}
Code example #9
func (r *Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) {
	if r.bucket == nil {
		return
	}

	if len(cmdCnf.Artifacts) == 0 {
		return
	}

	matches, err := a.CollectArtifacts(cmdCnf.Artifacts, clientLog)
	if err != nil {
		clientLog.Writeln(fmt.Sprintf("[artifactstore] ERROR filtering artifacts: " + err.Error()))
		return
	}

	var wg sync.WaitGroup
	for _, artifact := range matches {
		wg.Add(1)
		go func(artifact string) {
			defer wg.Done()

			clientLog.Writeln(fmt.Sprintf("[artifactstore] Uploading: %s", artifact))
			fileBaseName := filepath.Base(artifact)

			if f, err := os.Open(artifact); err != nil {
				clientLog.Writeln(fmt.Sprintf("[artifactstore] Error opening file for streaming %s: %s", artifact, err))
				return
			} else if stat, err := f.Stat(); err != nil {
				clientLog.Writeln(fmt.Sprintf("[artifactstore] Error stat'ing file for streaming %s: %s", artifact, err))
				return
			} else if sAfct, err := r.bucket.NewStreamedArtifact(fileBaseName, stat.Size()); err != nil {
				clientLog.Writeln(fmt.Sprintf("[artifactstore] Error creating streaming artifact for %s: %s", artifact, err))
				return
			} else {
				// TODO: If possible, avoid reading entire contents of the file into memory, and pass the
				// file io.Reader directly to http.Post.
				//
				// The reason it is done this way is because, using bytes.NewReader() ensures that
				// Content-Length header is set to a correct value. If not, it is left blank. Alternately,
				// we could remove this requirement from the server where Content-Length is verified before
				// starting upload to S3.
				if contents, err := ioutil.ReadAll(f); err != nil {
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error reading file for streaming %s: %s", artifact, err))
					return
				} else if err := sAfct.UploadArtifact(bytes.NewReader(contents)); err != nil {
					// TODO retry if not a terminal error
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error uploading contents of %s", artifact, err))
					return
				}
			}
			clientLog.Writeln(fmt.Sprintf("[artifactstore] Successfully uploaded artifact %s", artifact))
		}(artifact)
	}

	wg.Wait()
}
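
The TODO above buffers the whole file only so Content-Length is known. For reference, net/http sends a correct Content-Length whenever the request's ContentLength field is set explicitly, so a streaming variant is possible in principle; the following is a rough sketch against plain net/http, not the artifactstore client's actual API:

// Rough sketch of streaming a file upload without buffering it in memory.
// uploadURL and the direct use of net/http are assumptions; the real
// artifactstore client is not shown in this listing.
func streamArtifact(uploadURL, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	stat, err := f.Stat()
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", uploadURL, f)
	if err != nil {
		return err
	}
	// Setting ContentLength explicitly lets net/http send the header
	// without reading the whole file first.
	req.ContentLength = stat.Size()

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}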
Code example #10
File: engine.go  Project: tomzhang/changes-client
func reportLogChunks(name string, clientLog *client.Log, r reporter.Reporter) {
	for ch, ok := clientLog.GetChunk(); ok; ch, ok = clientLog.GetChunk() {
		r.PushLogChunk(name, ch)
	}
}
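
This pump is meant to run concurrently with whatever writes to the log. A hedged usage sketch follows; the engine's actual orchestration, and how the log gets closed, are not shown in this listing:

// Hedged usage sketch: drain the log in its own goroutine while commands
// run, then wait for it to finish. The WaitGroup wiring is an assumption.
var wg sync.WaitGroup
wg.Add(1)
go func() {
	defer wg.Done()
	reportLogChunks("console", clientLog, r)
}()
// ... run the commands that write to clientLog, then close the log
//     so GetChunk eventually reports ok == false ...
wg.Wait()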
Code example #11
func (cw *LxcCommand) Run(captureOutput bool, clientLog *client.Log, container *lxc.Container) (*client.CommandResult, error) {
	var err error

	// TODO(dcramer):
	clientLog.Writeln(fmt.Sprintf("==> Executing %s", strings.Join(cw.Args, " ")))

	inreader, inwriter, err := os.Pipe()
	if err != nil {
		return nil, err
	}

	cmdreader, cmdwriter, err := os.Pipe()
	if err != nil {
		return nil, err
	}

	var buffer *bytes.Buffer
	var reader io.Reader = cmdreader

	// If user has requested to buffer command output, tee output to in memory buffer.
	if captureOutput {
		buffer = &bytes.Buffer{}
		reader = io.TeeReader(cmdreader, buffer)
	}

	cmdwriterFd := cmdwriter.Fd()

	inreader.Close()
	inwriter.Close()

	cmdAsUser := generateCommand(cw.Args, cw.User)

	homeDir := getHomeDir(cw.User)

	// we want to ensure that our path is always treated as relative to our
	// home directory
	cwd := filepath.Join(homeDir, cw.Cwd)

	env := []string{
		fmt.Sprintf("USER=%s", cw.User),
		// TODO(dcramer): HOME is pretty hacky here
		fmt.Sprintf("HOME=%s", homeDir),
		fmt.Sprintf("PWD=%s", cwd),
		fmt.Sprintf("DEBIAN_FRONTEND=noninteractive"),
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	}
	env = append(env, cw.Env...)

	// TODO(dcramer): we are currently unable to get the exit status of
	// the command. https://github.com/lxc/go-lxc/issues/9

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		clientLog.WriteStream(reader)
	}()

	log.Printf("[lxc] Executing %s from [%s]", cmdAsUser, cwd)
	ok, err := container.RunCommand(cmdAsUser, lxc.AttachOptions{
		StdinFd:    inwriter.Fd(),
		StdoutFd:   cmdwriterFd,
		StderrFd:   cmdwriterFd,
		Env:        env,
		Cwd:        cwd,
		Arch:       lxc.X86_64,
		Namespaces: -1,
		UID:        -1,
		GID:        -1,
		ClearEnv:   true,
	})
	if err != nil {
		clientLog.Writeln(fmt.Sprintf("Command failed: %s", err.Error()))
		cmdwriter.Close()
		return nil, err
	}

	// Wait 10 seconds for the pipe to close. If it doesn't we give up on actually closing
	// as a child process might be causing things to stick around.
	// XXX: this logic is duplicated in client.CmdWrapper
	timeLimit := time.After(10 * time.Second)
	sem := make(chan struct{}) // lol struct{} is cheaper than bool
	go func() {
		cmdwriter.Close()
		sem <- struct{}{}
	}()

	select {
	case <-timeLimit:
		clientLog.Writeln(fmt.Sprintf("Failed to close all file descriptors! Ignoring and moving on.."))
		break
	case <-sem:
		break
	}

	wg.Wait()

	result := &client.CommandResult{
		Success: ok,
	}

	if captureOutput {
		result.Output = buffer.Bytes()
	} else {
		result.Output = []byte("")
	}
	return result, nil
}
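
A hedged usage sketch of this wrapper follows; the field names are taken from the accesses above (Args, User, Cwd, Env), and any constructor the package provides is not shown here:

// Hedged usage sketch: fields mirror the cw.Args / cw.User / cw.Cwd / cw.Env
// accesses in Run above; whether a dedicated constructor exists is not shown
// in this listing.
cmd := &LxcCommand{
	Args: []string{"bash", "-c", "uname -a"},
	User: "ubuntu",
	Cwd:  "src", // resolved relative to the user's home directory by Run
	Env:  []string{"CI=true"},
}
result, err := cmd.Run(true, clientLog, container)
if err == nil && result.Success {
	log.Printf("[lxc] output: %s", result.Output)
}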
Code example #12
File: container.go  Project: tomzhang/changes-client
// In this phase we actually launch the container that the tests
// will be run in.
//
// There are essentially four cases:
//  - we aren't using a snapshot
//  - we are using a snapshot but don't have it cached
//  - we are using a cached snapshot but don't have a base container
//  - we are using a cached snapshot and have a base container
//
// The first case is clearly different from the latter three, and indeed
// the process it follows is rather different because it doesn't use the
// same template. In the first case, without a snapshot, we use the
// ubuntu template (see /usr/share/lxc/templates) to create a container,
// and destroy it at the end. Only the first run of this will be extremely
// slow to create the container itself, but after that it will be faster,
// although it still must pay a heavy cost for provisioning.
//
// For the latter three cases, it follows a general process saving work
// where work doesn't have to be done. First, it checks if we have a
// snapshot base container or not. If we do, it clones it using overlayfs
// and then starts the resulting container; the existing base container
// is not modified.
//
// If we don't have a base container, then it checks for a compressed
// tarball of the filesystem. This is either a .tar.xz or a .tar.lz4,
// and the compression must match the compression changes-client is
// configured to use. If this file doesn't exist, the client fetches it
// from the given s3 bucket in a folder qualified by its arch, dist,
// release, and snapshot id.
//
// Once we have guaranteed that we have a snapshot image, the snapshot
// image is loaded using the "download" template (or a variant for it
// if we are using the faster lz4 compression). This template will
// require the image already to be cached - as it can't download it
// like normal - so we use --force-cached as a template option. Once
// the base container is up, we proceed as normal, and we leave the
// base container alive so that future runs are fast.
//
// Once the container is started, we mount the container and perform
// basic configurations as well as run the pre-launch script.
func (c *Container) launchContainer(clientLog *client.Log) error {
	var err error

	c.Executor.Clean()

	if c.Snapshot != "" {
		err := c.launchOverlayContainer(clientLog)
		if err != nil {
			return err
		}
	} else {
		log.Print("[lxc] Creating new container")
		base, err := lxc.NewContainer(c.Name, lxc.DefaultConfigPath())
		if err != nil {
			return err
		}
		base.SetVerbosity(lxc.Quiet)
		defer lxc.Release(base)

		clientLog.Writeln(fmt.Sprintf("==> Creating container: %s", c.Name))
		err = base.Create(lxc.TemplateOptions{
			Template: c.Dist,
			Arch:     c.Arch,
			Release:  c.Release,
		})
		if err != nil {
			return err
		}
		clientLog.Writeln(fmt.Sprintf("==> Created container: %s", c.Name))
	}

	c.lxc, err = lxc.NewContainer(c.Name, lxc.DefaultConfigPath())
	if err != nil {
		return err
	}
	c.lxc.SetVerbosity(lxc.Quiet)

	c.Executor.Register(c.Name)

	if c.PreLaunch != "" {
		log.Print("[lxc] Running pre-launch script")
		err = c.runPreLaunch(clientLog)
		if err != nil {
			return err
		}
	}

	log.Print("[lxc] Configuring container options")
	// More or less disable apparmor
	if e := c.lxc.SetConfigItem("lxc.aa_profile", "unconfined"); e != nil {
		return e
	}

	// Allow loop/squashfs in container
	if e := c.lxc.SetConfigItem("lxc.cgroup.devices.allow", "b 7:* rwm"); e != nil {
		return e
	}
	if e := c.lxc.SetConfigItem("lxc.cgroup.devices.allow", "c 10:137 rwm"); e != nil {
		return e
	}

	if e := c.lxc.SetConfigItem("lxc.utsname", fmt.Sprintf("%s-build", c.Name)); e != nil {
		return e
	}

	// the default value for cpu_shares is 1024, so we make a soft assumption
	// that we can just magnify the value based on the number of cpus we're requesting,
	// but it doesn't actually mean we'll get that many cpus
	// http://www.mjmwired.net/kernel/Documentation/scheduler/sched-design-CFS.txt
	if c.CpuLimit != 0 {
		c.lxc.SetCgroupItem("cpu.shares", fmt.Sprintf("%d", c.CpuLimit*1024))
	}

	// http://www.mjmwired.net/kernel/Documentation/cgroups/memory.txt
	if c.MemoryLimit != 0 {
		c.lxc.SetCgroupItem("memory.limit_in_bytes", fmt.Sprintf("%d", c.MemoryLimit))
	}

	// Enable autodev: https://wiki.archlinux.org/index.php/Lxc-systemd
	c.lxc.SetConfigItem("lxc.autodev", "1")
	c.lxc.SetConfigItem("lxc.pts", "1024")
	c.lxc.SetConfigItem("lxc.kmsg", "0")

	if c.BindMounts != nil {
		for _, mount := range c.BindMounts {
			c.lxc.SetConfigItem("lxc.mount.entry", mount.Format())
		}
	}

	clientLog.Writeln("==> Waiting for container to be ready")

	log.Print("[lxc] Starting the container")
	err = c.lxc.Start()
	if err != nil {
		return err
	}

	log.Print("[lxc] Waiting for container to startup networking")
	_, err = c.lxc.WaitIPAddresses(30 * time.Second)
	if err != nil {
		return err
	}

	return nil
}
Code example #13
File: container.go  Project: tomzhang/changes-client
func (c *Container) launchOverlayContainer(clientLog *client.Log) error {
	var base *lxc.Container

	clientLog.Writeln(fmt.Sprintf("==> Acquiring lock on container: %s", c.Snapshot))
	lock, err := c.acquireLock(c.Snapshot)
	if err != nil {
		return err
	}
	defer func() {
		clientLog.Writeln(fmt.Sprintf("==> Releasing lock on container: %s", c.Snapshot))
		lock.Unlock()
	}()

	log.Print("[lxc] Checking for cached snapshot")

	if !c.snapshotIsCached(c.Snapshot) {
		if err := c.ensureImageCached(c.Snapshot, clientLog); err != nil {
			return err
		}

		template := "download"
		if c.Compression != "xz" {
			template = fmt.Sprintf("download-%s", c.Compression)
		}

		clientLog.Writeln(fmt.Sprintf("==> Creating new base container: %s", c.Snapshot))
		clientLog.Writeln(fmt.Sprintf("      Template: %s", template))
		clientLog.Writeln(fmt.Sprintf("      Arch:     %s", c.Arch))
		clientLog.Writeln(fmt.Sprintf("      Distro:   %s", c.Dist))
		clientLog.Writeln(fmt.Sprintf("      Release:  %s", c.Release))
		clientLog.Writeln("    (grab a coffee, this could take a while)")

		start := time.Now()

		base, err = lxc.NewContainer(c.Snapshot, lxc.DefaultConfigPath())
		if err != nil {
			return err
		}
		defer lxc.Release(base)
		log.Print("[lxc] Creating base container")
		// We can't use Arch/Dist/Release/Variant for anything except
		// for the "download" template, so we specify them manually. However,
		// we can't use extraargs to specify arch/dist/release because the
		// lxc go bindings are lame. (Arch/Distro/Release are all required
		// to be passed, but for consistency we just pass all of them in the
		// case that we are using the download template)
		if template == "download" {
			err = base.Create(lxc.TemplateOptions{
				Template:   "download",
				Arch:       c.Arch,
				Distro:     c.Dist,
				Release:    c.Release,
				Variant:    c.Snapshot,
				ForceCache: true,
			})
		} else {
			err = base.Create(lxc.TemplateOptions{
				Template: template,
				ExtraArgs: []string{
					"--arch", c.Arch,
					"--dist", c.Dist,
					"--release", c.Release,
					"--variant", c.Snapshot,
					"--force-cache",
				},
			})
		}
		if err != nil {
			return err
		}
		clientLog.Writeln(fmt.Sprintf("==> Base container online in %s", time.Since(start)))
	} else {
		clientLog.Writeln(fmt.Sprintf("==> Launching existing base container: %s", c.Snapshot))
		log.Print("[lxc] Creating base container")

		start := time.Now()
		base, err = lxc.NewContainer(c.Snapshot, lxc.DefaultConfigPath())
		if err != nil {
			return err
		}
		defer lxc.Release(base)
		clientLog.Writeln(fmt.Sprintf("==> Base container online in %s", time.Since(start)))
	}

	clientLog.Writeln(fmt.Sprintf("==> Clearing lxc cache for base container: %s", c.Snapshot))
	c.removeCachedImage()

	// XXX There must be some odd race condition here as doing `return base.Clone` causes
	// go-lxc to die with a nil-pointer but assigning it to a variable and then returning
	// the variable doesn't. If in the future we see the error again adding a sleep
	// for 0.1 seconds may resolve it (going on the assumption that this part is race-y)
	clientLog.Writeln(fmt.Sprintf("==> Creating overlay container: %s", c.Name))
	err = base.Clone(c.Name, lxc.CloneOptions{
		KeepName: true,
		Snapshot: true,
		Backend:  lxc.Overlayfs,
	})
	if err == nil {
		clientLog.Writeln(fmt.Sprintf("==> Created overlay container:% s", c.Name))
	}
	return err
}
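
snapshotIsCached is not part of this listing. Since the base container is looked up by name under the default LXC config path, a plausible sketch is simply checking whether a container with the snapshot's name is already defined; this is an assumption, not the project's confirmed implementation:

// Hypothetical sketch of snapshotIsCached -- it assumes "cached" means a base
// container named after the snapshot is already defined on this host.
func (c *Container) snapshotIsCached(snapshot string) bool {
	base, err := lxc.NewContainer(snapshot, lxc.DefaultConfigPath())
	if err != nil {
		return false
	}
	defer lxc.Release(base)
	return base.Defined()
}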