Example #1
// newFileReader initializes a file reader for the remote file. The reader takes
// on the offset and size at the time the reader is created. If the underlying
// file changes, one must create a new fileReader.
func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) {
	rd := &fileReader{
		driver: driver,
		path:   path,
	}

	// Grab the size of the layer file, ensuring existence.
	if fi, err := driver.Stat(path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): We really don't care if the file is not
			// actually present for the reader. If the caller needs to know
			// whether or not the file exists, they should issue a stat call
			// on the path. There is still no guarantee, since the file may be
			// gone by the time the reader is created. The only correct
			// behavior is to return a reader that immediately returns EOF.
		default:
			// Any other error we want propagated up the stack.
			return nil, err
		}
	} else {
		if fi.IsDir() {
			return nil, fmt.Errorf("cannot read a directory")
		}

		// Fill in file information
		rd.size = fi.Size()
		rd.modtime = fi.ModTime()
	}

	return rd, nil
}
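A minimal usage sketch, assuming it lives in the same storage package and that fileReader satisfies io.Reader; readRemoteFile is an illustrative helper, not part of the original:
// readRemoteFile drains the remote file at path into memory. A missing file
// yields zero bytes, because newFileReader returns a reader that reports EOF
// immediately in that case.
func readRemoteFile(driver storagedriver.StorageDriver, path string) ([]byte, error) {
	rd, err := newFileReader(driver, path)
	if err != nil {
		return nil, err
	}
	return ioutil.ReadAll(rd)
}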
Example #2
// newFileWriter returns a prepared fileWriter for the driver and path. This
// could be considered similar to an "open" call on a regular filesystem.
func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
	fw := fileWriter{
		driver: driver,
		path:   path,
	}

	if fi, err := driver.Stat(path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// ignore, offset is zero
		default:
			return nil, err
		}
	} else {
		if fi.IsDir() {
			return nil, fmt.Errorf("cannot write to a directory")
		}

		fw.size = fi.Size()
	}

	buffered := bufferedFileWriter{
		fileWriter: fw,
	}
	buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize)

	return &buffered, nil
}
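A minimal usage sketch, assuming bufferedFileWriter satisfies io.WriteCloser, with Close flushing the bufio buffer; putViaWriter is an illustrative helper:
// putViaWriter streams content to path through the buffered writer and
// flushes it by closing.
func putViaWriter(driver storagedriver.StorageDriver, path string, content []byte) error {
	fw, err := newFileWriter(driver, path)
	if err != nil {
		return err
	}
	if _, err := fw.Write(content); err != nil {
		fw.Close()
		return err
	}
	return fw.Close()
}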
Example #3
// WalkWithChildrenFilter traverses a filesystem defined within driver,
// starting from the given path, calling f on each file. The given filter is
// called on each directory's list of children before they are recursively processed.
func WalkWithChildrenFilter(ctx context.Context, driver storageDriver.StorageDriver, from string, filter WalkChildrenFilter, f WalkFn) error {
	children, err := driver.List(ctx, from)
	if err != nil {
		return err
	}
	filter(children)
	for _, child := range children {
		// TODO(stevvooe): Calling driver.Stat for every entry is quite
		// expensive when running against backends with a slow Stat
		// implementation, such as s3. This is very likely a serious
		// performance bottleneck.
		fileInfo, err := driver.Stat(ctx, child)
		if err != nil {
			return err
		}
		err = f(fileInfo)
		skipDir := (err == ErrSkipDir)
		if err != nil && !skipDir {
			return err
		}

		if fileInfo.IsDir() && !skipDir {
			if err := WalkWithChildrenFilter(ctx, driver, child, filter, f); err != nil {
				return err
			}
		}
	}
	return nil
}
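A minimal usage sketch, assuming WalkChildrenFilter is a func([]string) that reorders or prunes the slice in place, WalkFn receives a storageDriver.FileInfo, and the sort package is imported; listAllPaths is illustrative:
// listAllPaths walks the tree under root, visiting each directory's children
// in sorted order, and collects every path it sees.
func listAllPaths(ctx context.Context, driver storageDriver.StorageDriver, root string) ([]string, error) {
	var paths []string
	err := WalkWithChildrenFilter(ctx, driver, root,
		func(children []string) { sort.Strings(children) },
		func(fileInfo storageDriver.FileInfo) error {
			paths = append(paths, fileInfo.Path())
			return nil
		})
	return paths, err
}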
Example #4
// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
	startedAtBytes, err := driver.GetContent(path)
	if err != nil {
		return time.Now(), err
	}
	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
	if err != nil {
		return time.Now(), err
	}
	return startedAt, nil
}
Example #5
// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
	// todo:(richardscothern) - pass in a context
	startedAtBytes, err := driver.GetContent(context.Background(), path)
	if err != nil {
		return time.Now(), err
	}
	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
	if err != nil {
		return time.Now(), err
	}
	return startedAt, nil
}
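A hypothetical inverse of readStartedAtFile, making the RFC3339 round trip explicit; it assumes the driver's PutContent takes a context, as GetContent does in this example:
// writeStartedAtFile records the current UTC time in RFC3339 form so that a
// later purge pass can compare it against its cutoff.
func writeStartedAtFile(driver storageDriver.StorageDriver, path string) error {
	return driver.PutContent(context.Background(), path, []byte(time.Now().UTC().Format(time.RFC3339)))
}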
Example #6
// exists provides a utility method to test whether or not a path exists in
// the given driver.
func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
	if _, err := drv.Stat(ctx, path); err != nil {
		switch err := err.(type) {
		case driver.PathNotFoundError:
			return false, nil
		default:
			return false, err
		}
	}

	return true, nil
}
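A minimal sketch of one way a caller might build on exists, assuming the driver's Delete also takes a context; ensureAbsent is illustrative:
// ensureAbsent deletes path only when it is actually present, so callers
// never see a PathNotFoundError from Delete.
func ensureAbsent(ctx context.Context, drv driver.StorageDriver, path string) error {
	ok, err := exists(ctx, drv, path)
	if err != nil {
		return err
	}
	if !ok {
		return nil
	}
	return drv.Delete(ctx, path)
}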
Example #7
// exists provides a utility method to test whether or not a path exists in
// the given driver.
func exists(driver storagedriver.StorageDriver, path string) (bool, error) {
	if _, err := driver.Stat(path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return false, nil
		default:
			return false, err
		}
	}

	return true, nil
}
Example #8
func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
	dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID})
	if err != nil {
		t.Fatalf("Unable to resolve path")
	}
	if err := d.PutContent(dataPath, []byte("")); err != nil {
		t.Fatalf("Unable to write data file")
	}

	startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID})
	if err != nil {
		t.Fatalf("Unable to resolve path")
	}

	if err := d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
		t.Fatalf("Unable to write startedAt file")
	}
}
Example #9
// writeTestLayer creates a simple test layer in the provided driver under
// tarsum dgst, returning the sha256 digest location. This is implemented
// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
	h := sha256.New()
	rd := io.TeeReader(content, h)

	p, err := ioutil.ReadAll(rd)
	if err != nil {
		return "", err
	}

	blobDigestSHA := digest.NewDigest("sha256", h)

	blobPath, err := pathMapper.path(blobDataPathSpec{
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	ctx := context.Background()
	if err := driver.PutContent(ctx, blobPath, p); err != nil {
		return "", err
	}

	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
		name:   name,
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	if err := driver.PutContent(ctx, layerLinkPath, []byte(dgst)); err != nil {
		return "", err
	}

	return blobDigestSHA, err
}
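A hypothetical test-side call, assuming it runs in the same package where the pathMapper and driver are already constructed; the repository name and digest below are placeholders:
func writeTestLayerExample(driver storagedriver.StorageDriver, pm *pathMapper) (digest.Digest, error) {
	content := []byte("test layer contents")
	// Placeholder digest; a real test would compute the digest of the payload.
	dgst := digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000")
	return writeTestLayer(driver, pm, "foo/bar", dgst, bytes.NewReader(content))
}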
Example #10
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error {
	children, err := driver.List(from)
	if err != nil {
		return err
	}
	for _, child := range children {
		fileInfo, err := driver.Stat(child)
		if err != nil {
			return err
		}
		err = f(fileInfo)
		skipDir := (err == ErrSkipDir)
		if err != nil && !skipDir {
			return err
		}

		if fileInfo.IsDir() && !skipDir {
			if err := Walk(driver, child, f); err != nil {
				return err
			}
		}
	}
	return nil
}
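A minimal sketch of a WalkFn using ErrSkipDir to prune a subtree, assuming the path package is imported; countFiles and the "_uploads" name are illustrative:
// countFiles counts regular files under root, skipping any directory whose
// base name is "_uploads" by returning ErrSkipDir from the WalkFn.
func countFiles(driver storageDriver.StorageDriver, root string) (int, error) {
	count := 0
	err := Walk(driver, root, func(fileInfo storageDriver.FileInfo) error {
		if fileInfo.IsDir() && path.Base(fileInfo.Path()) == "_uploads" {
			return ErrSkipDir
		}
		if !fileInfo.IsDir() {
			count++
		}
		return nil
	})
	return count, err
}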
Example #11
// PurgeUploads deletes files from the upload directory
// created before olderThan.  The list of files deleted and errors
// encountered are returned
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
	log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
	uploadData, errors := getOutstandingUploads(ctx, driver)
	var deleted []string
	for _, uploadData := range uploadData {
		if uploadData.startedAt.Before(olderThan) {
			var err error
			log.Infof("Upload files in %s have older date (%s) than purge date (%s).  Removing upload directory.",
				uploadData.containingDir, uploadData.startedAt, olderThan)
			if actuallyDelete {
				err = driver.Delete(ctx, uploadData.containingDir)
			}
			if err == nil {
				deleted = append(deleted, uploadData.containingDir)
			} else {
				errors = append(errors, err)
			}
		}
	}

	log.Infof("Purge uploads finished.  Num deleted=%d, num errors=%d", len(deleted), len(errors))
	return deleted, errors
}
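A hypothetical dry-run wrapper around PurgeUploads; reportStaleUploads is illustrative:
// reportStaleUploads logs which upload directories would be purged for the
// given age cutoff without deleting anything (actuallyDelete=false).
func reportStaleUploads(ctx context.Context, driver storageDriver.StorageDriver, age time.Duration) {
	wouldDelete, errs := PurgeUploads(ctx, driver, time.Now().Add(-age), false)
	log.Infof("would purge %d upload directories", len(wouldDelete))
	for _, err := range errs {
		log.Infof("error while scanning uploads: %v", err)
	}
}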
Example #12
// newFileWriter returns a prepared fileWriter for the driver and path. This
// could be considered similar to an "open" call on a regular filesystem.
func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileWriter, error) {
	fw := fileWriter{
		driver: driver,
		path:   path,
		ctx:    ctx,
	}

	if fi, err := driver.Stat(ctx, path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// ignore, offset is zero
		default:
			return nil, err
		}
	} else {
		if fi.IsDir() {
			return nil, fmt.Errorf("cannot write to a directory")
		}

		fw.size = fi.Size()
	}

	return &fw, nil
}
Example #13
func build(
	conf *Config,
	storageDriver storagedriver.StorageDriver,
	kubeClient *client.Client,
	fs sys.FS,
	env sys.Env,
	builderKey,
	rawGitSha string) error {

	dockerBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.DockerBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	slugBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.SlugBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}

	appName := conf.App()

	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			log.Info("unable to remove tmpdir %s (%s)", tmpDir, err)
		}
	}()

	client, err := controller.New(conf.ControllerHost, conf.ControllerPort)
	if err != nil {
		return err
	}

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := hooks.GetAppConfig(client, conf.Username, appName)
	if controller.CheckAPICompat(client, err) != nil {
		return err
	}

	log.Debug("got the following config back for app %s: %+v", appName, appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	_, disableCaching := appConf.Values["DEIS_DISABLE_CACHE"]
	slugBuilderInfo := NewSlugBuilderInfo(appName, gitSha.Short(), disableCaching)

	if slugBuilderInfo.DisableCaching() {
		log.Debug("caching disabled for app %s", appName)
		// If cache file exists, delete it
		if _, err := storageDriver.Stat(context.Background(), slugBuilderInfo.CacheKey()); err == nil {
			log.Debug("deleting cache %s for app %s", slugBuilderInfo.CacheKey(), appName)
			if err := storageDriver.Delete(context.Background(), slugBuilderInfo.CacheKey()); err != nil {
				return err
			}
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	appTgzdata, err := ioutil.ReadFile(absAppTgz)
	if err != nil {
		return fmt.Errorf("error while reading file %s: (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s", slugBuilderInfo.TarKey())

	if err := storageDriver.PutContent(context.Background(), slugBuilderInfo.TarKey(), appTgzdata); err != nil {
		return fmt.Errorf("uploading %s to %s (%v)", absAppTgz, slugBuilderInfo.TarKey(), err)
	}

	var pod *api.Pod
	var buildPodName string
	image := appName
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		registryLocation := conf.RegistryLocation
		registryEnv := make(map[string]string)
		if registryLocation != "on-cluster" {
			registryEnv, err = getRegistryDetails(kubeClient, &image, registryLocation, conf.PodNamespace, conf.RegistrySecretPrefix)
			if err != nil {
				return fmt.Errorf("error getting private registry details %s", err)
			}
			image = image + ":git-" + gitSha.Short()
		}
		registryEnv["DEIS_REGISTRY_PROXY_PORT"] = conf.RegistryProxyPort
		registryEnv["DEIS_REGISTRY_LOCATION"] = registryLocation

		pod = dockerBuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			gitSha.Short(),
			slugName,
			conf.StorageType,
			conf.DockerBuilderImage,
			conf.RegistryHost,
			conf.RegistryPort,
			registryEnv,
			dockerBuilderImagePullPolicy,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())

		cacheKey := ""
		if !slugBuilderInfo.DisableCaching() {
			cacheKey = slugBuilderInfo.CacheKey()
		}
		pod = slugbuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugBuilderInfo.PushKey(),
			cacheKey,
			gitSha.Short(),
			buildPackURL,
			conf.StorageType,
			conf.SlugBuilderImage,
			slugBuilderImagePullPolicy,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representaion of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	pw := k8s.NewPodWatcher(kubeClient, "deis")
	stopCh := make(chan struct{})
	defer close(stopCh)
	go pw.Controller.Run(stopCh)

	if err := waitForPod(pw, newPod.Namespace, newPod.Name, conf.SessionIdleInterval(), conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.ParameterCodec)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()

	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	log.Debug(
		"Waiting for the %s/%s pod to end. Checking every %s for %s",
		newPod.Namespace,
		newPod.Name,
		conf.BuilderPodTickDuration(),
		conf.BuilderPodWaitDuration(),
	)
	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(pw, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	log.Debug("Done")
	log.Debug("Checking for builder pod exit code")
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Build pod exited with code %d, stopping build.", state.ExitCode)
		}
	}
	log.Debug("Done")

	procType := deisAPI.ProcessType{}
	if procType, err = getProcFile(storageDriver, tmpDir, slugBuilderInfo.AbsoluteProcfileKey(), bType); err != nil {
		return err
	}

	log.Info("Build complete.")

	quit := progress("...", conf.SessionIdleInterval())
	log.Info("Launching App...")
	if !usingDockerfile {
		image = slugBuilderInfo.AbsoluteSlugObjectKey()
	}
	release, err := hooks.CreateBuild(client, conf.Username, conf.App(), image, gitSha.Short(), procType, usingDockerfile)
	quit <- true
	<-quit
	if controller.CheckAPICompat(client, err) != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}

	log.Info("Done, %s:v%d deployed to Workflow\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit https://deis.com/\n")

	run(repoCmd(repoDir, "git", "gc"))

	return nil
}
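A hypothetical extraction of the cache-clearing step above, isolating the stat-then-delete pattern against the storage driver; clearCache is illustrative:
// clearCache deletes objKey only if it currently exists; a missing object is
// treated as already cleared, mirroring the err == nil guard in build.
func clearCache(sd storagedriver.StorageDriver, objKey string) error {
	if _, err := sd.Stat(context.Background(), objKey); err != nil {
		return nil
	}
	return sd.Delete(context.Background(), objKey)
}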
Example #14
// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go
// Responds to requests using the Request.ResponseChannel
func handleRequest(driver storagedriver.StorageDriver, request Request) {
	switch request.Type {
	case "Version":
		err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion})
		if err != nil {
			panic(err)
		}
	case "GetContent":
		path, _ := request.Parameters["Path"].(string)
		content, err := driver.GetContent(path)
		var response ReadStreamResponse
		if err != nil {
			response = ReadStreamResponse{Error: WrapError(err)}
		} else {
			response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))}
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "PutContent":
		path, _ := request.Parameters["Path"].(string)
		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
		contents, err := ioutil.ReadAll(reader)
		defer reader.Close()
		if err == nil {
			err = driver.PutContent(path, contents)
		}
		response := WriteStreamResponse{
			Error: WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "ReadStream":
		path, _ := request.Parameters["Path"].(string)
		// Depending on serialization method, Offset may be converted to any int/uint type
		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
		reader, err := driver.ReadStream(path, offset)
		var response ReadStreamResponse
		if err != nil {
			response = ReadStreamResponse{Error: WrapError(err)}
		} else {
			response = ReadStreamResponse{Reader: reader}
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "WriteStream":
		path, _ := request.Parameters["Path"].(string)
		// Depending on serialization method, Offset may be converted to any int/uint type
		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
		// Depending on serialization method, Size may be converted to any int/uint type
		size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
		err := driver.WriteStream(path, offset, size, reader)
		response := WriteStreamResponse{
			Error: WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "CurrentSize":
		path, _ := request.Parameters["Path"].(string)
		position, err := driver.CurrentSize(path)
		response := CurrentSizeResponse{
			Position: position,
			Error:    WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "List":
		path, _ := request.Parameters["Path"].(string)
		keys, err := driver.List(path)
		response := ListResponse{
			Keys:  keys,
			Error: WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "Move":
		sourcePath, _ := request.Parameters["SourcePath"].(string)
		destPath, _ := request.Parameters["DestPath"].(string)
		err := driver.Move(sourcePath, destPath)
		response := MoveResponse{
			Error: WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	case "Delete":
		path, _ := request.Parameters["Path"].(string)
		err := driver.Delete(path)
		response := DeleteResponse{
			Error: WrapError(err),
		}
		err = request.ResponseChannel.Send(&response)
		if err != nil {
			panic(err)
		}
	default:
		panic(request)
	}
}
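A small extraction of the reflection-based numeric normalization used above for the "Offset" and "Size" parameters; toInt64 is illustrative and, like the inline code, panics if the value is not convertible:
// toInt64 converts whatever int/uint type the serialization layer produced
// into an int64 via reflection.
func toInt64(v interface{}) int64 {
	return reflect.ValueOf(v).Convert(reflect.TypeOf(int64(0))).Int()
}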
Example #15
func build(
	conf *Config,
	storageDriver storagedriver.StorageDriver,
	kubeClient *client.Client,
	fs sys.FS,
	env sys.Env,
	builderKey,
	rawGitSha string) error {

	dockerBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.DockerBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	slugBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.SlugBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}

	appName := conf.App()

	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			fmt.Errorf("unable to remove tmpdir %s (%s)", tmpDir, err)
		}
	}()

	slugBuilderInfo := NewSlugBuilderInfo(slugName)

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := getAppConfig(conf, builderKey, conf.Username, appName)
	if err != nil {
		return fmt.Errorf("getting app config for %s (%s)", appName, err)
	}
	log.Debug("got the following config back for app %s: %+v", appName, *appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	appTgzdata, err := ioutil.ReadFile(absAppTgz)
	if err != nil {
		return fmt.Errorf("error while reading file %s: (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s", slugBuilderInfo.TarKey())

	if err := storageDriver.PutContent(context.Background(), slugBuilderInfo.TarKey(), appTgzdata); err != nil {
		return fmt.Errorf("uploading %s to %s (%v)", absAppTgz, slugBuilderInfo.TarKey(), err)
	}

	var pod *api.Pod
	var buildPodName string
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		pod = dockerBuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugName,
			conf.StorageType,
			conf.DockerBuilderImage,
			dockerBuilderImagePullPolicy,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		pod = slugbuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugBuilderInfo.PushKey(),
			buildPackURL,
			conf.StorageType,
			conf.SlugBuilderImage,
			slugBuilderImagePullPolicy,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representaion of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.SessionIdleInterval(), conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.Scheme)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()

	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	log.Debug(
		"Waiting for the %s/%s pod to end. Checking every %s for %s",
		newPod.Namespace,
		newPod.Name,
		conf.BuilderPodTickDuration(),
		conf.BuilderPodWaitDuration(),
	)
	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	log.Debug("Done")
	log.Debug("Checking for builder pod exit code")
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Build pod exited with code %d, stopping build.", state.ExitCode)
		}
	}
	log.Debug("Done")

	procType := pkg.ProcessType{}
	if bType == buildTypeProcfile {
		if procType, err = getProcFile(storageDriver, tmpDir, slugBuilderInfo.AbsoluteProcfileKey()); err != nil {
			return err
		}
	}

	log.Info("Build complete.")

	buildHook := createBuildHook(slugBuilderInfo, gitSha, conf.Username, appName, procType, usingDockerfile)
	quit := progress("...", conf.SessionIdleInterval())
	buildHookResp, err := publishRelease(conf, builderKey, buildHook)
	quit <- true
	<-quit
	log.Info("Launching App...")
	if err != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}
	release, ok := buildHookResp.Release["version"]
	if !ok {
		return fmt.Errorf("No release returned from Deis controller")
	}

	log.Info("Done, %s:v%d deployed to Deis\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit http://deis.io\n")

	run(repoCmd(repoDir, "git", "gc"))

	return nil
}