// handleConn handles an individual client connection.
//
// It manages the connection, but passes channels on to `answer()`.
func (s *server) handleConn(conn net.Conn, conf *ssh.ServerConfig) {
	defer conn.Close()
	log.Info("Accepted connection.")
	sshConn, chans, reqs, err := ssh.NewServerConn(conn, conf)
	if err != nil {
		// Handshake failure.
		log.Err("Failed handshake: %s", err)
		return
	}

	// Discard global requests. We're only concerned with channels.
	go ssh.DiscardRequests(reqs)

	condata := sshConnection(conn)

	// Now we handle the channels.
	for incoming := range chans {
		log.Info("Channel type: %s\n", incoming.ChannelType())
		if incoming.ChannelType() != "session" {
			incoming.Reject(ssh.UnknownChannelType, "Unknown channel type")
			continue
		}

		channel, req, err := incoming.Accept()
		if err != nil {
			// Log the failed channel and move on to the next one.
			log.Err("Failed to accept channel: %s", err)
			continue
		}
		go s.answer(channel, req, condata, sshConn)
	}
}
// AuthKey authenticates based on a public key.
func AuthKey(key ssh.PublicKey, cnf *Config) (*ssh.Permissions, error) {
	log.Info("Starting ssh authentication")
	client, err := controller.New(cnf.ControllerHost, cnf.ControllerPort)
	if err != nil {
		return nil, err
	}
	fp := fingerprint(key)
	userInfo, err := hooks.UserFromKey(client, fp)
	if controller.CheckAPICompat(client, err) != nil {
		log.Info("Failed to authenticate user ssh key %s with the controller: %s", fp, err)
		return nil, err
	}

	apps := strings.Join(userInfo.Apps, ", ")
	log.Debug("Key accepted for user %s.", userInfo.Username)
	perm := &ssh.Permissions{
		Extensions: map[string]string{
			"user":        userInfo.Username,
			"fingerprint": fp,
			"apps":        apps,
		},
	}
	return perm, nil
}
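AuthKey relies on a fingerprint helper that is not shown in this section. A minimal sketch of one possible implementation, assuming the controller stores the legacy colon-separated MD5 fingerprint of the public key (requires crypto/md5, fmt, strings, and golang.org/x/crypto/ssh):

// fingerprint returns the colon-separated MD5 fingerprint of an SSH public
// key. Illustrative sketch only; the real helper in this package may hash or
// format the key differently.
func fingerprint(key ssh.PublicKey) string {
	sum := md5.Sum(key.Marshal())
	parts := make([]string, len(sum))
	for i, b := range sum {
		parts[i] = fmt.Sprintf("%02x", b)
	}
	return strings.Join(parts, ":")
}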
func main() {
	if os.Getenv("DEBUG") == "true" {
		pkglog.DefaultLogger.SetDebug(true)
		cookoolog.Level = cookoolog.LogDebug
	}
	pkglog.Debug("Running in debug mode")

	app := cli.NewApp()

	app.Commands = []cli.Command{
		{
			Name:    "server",
			Aliases: []string{"srv"},
			Usage:   "Run the git server",
			Action: func(c *cli.Context) {
				cnf := new(sshd.Config)
				if err := conf.EnvConfig(serverConfAppName, cnf); err != nil {
					pkglog.Err("getting config for %s [%s]", serverConfAppName, err)
					os.Exit(1)
				}

				pkglog.Info("starting fetcher on port %d", cnf.FetcherPort)
				go fetcher.Serve(cnf.FetcherPort)

				pkglog.Info("starting SSH server on %s:%d", cnf.SSHHostIP, cnf.SSHHostPort)
				os.Exit(pkg.Run(cnf.SSHHostIP, cnf.SSHHostPort, "boot"))
			},
		},
		{
			Name:    "git-receive",
			Aliases: []string{"gr"},
			Usage:   "Run the git-receive hook",
			Action: func(c *cli.Context) {
				cnf := new(gitreceive.Config)
				if err := conf.EnvConfig(gitReceiveConfAppName, cnf); err != nil {
					pkglog.Err("Error getting config for %s [%s]", gitReceiveConfAppName, err)
					os.Exit(1)
				}
				cnf.CheckDurations()

				if err := gitreceive.Run(cnf); err != nil {
					pkglog.Err("running git receive hook [%s]", err)
					os.Exit(1)
				}
			},
		},
	}

	app.Run(os.Args)
}
// createRepo creates a new Git repo if it is not present already.
//
// Largely inspired by gitreceived from Flynn.
//
// Returns a bool indicating whether a project was created (true) or already
// existed (false).
func createRepo(repoPath string) (bool, error) {
	createLock.Lock()
	defer createLock.Unlock()

	fi, err := os.Stat(repoPath)
	if err == nil && fi.IsDir() {
		// Nothing to do.
		log.Debug("Directory %s already exists.", repoPath)
		return false, nil
	} else if os.IsNotExist(err) {
		log.Debug("Creating new directory at %s", repoPath)
		// Create directory
		if err := os.MkdirAll(repoPath, 0755); err != nil {
			log.Err("Failed to create repository: %s", err)
			return false, err
		}
		cmd := exec.Command("git", "init", "--bare")
		cmd.Dir = repoPath
		if out, err := cmd.CombinedOutput(); err != nil {
			log.Info("git init output: %s", out)
			return false, err
		}
		return true, nil
	} else if err == nil {
		return false, errors.New("Expected directory, found file.")
	}
	return false, err
}
// Serve starts a native SSH server.
func Serve(
	cfg *ssh.ServerConfig,
	serverCircuit *Circuit,
	gitHomeDir string,
	concurrentPushLock RepositoryLock,
	addr, receivetype string) error {

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	srv := &server{
		gitHome:     gitHomeDir,
		pushLock:    concurrentPushLock,
		receivetype: receivetype,
	}

	closer := make(chan interface{}, 1)
	log.Info("Listening on %s", addr)
	serverCircuit.Close()
	return srv.listen(listener, cfg, closer)
}
// Ping handles a simple test SSH exec.
//
// Returns the string "pong" and exit status 0.
//
// Params:
// - channel (ssh.Channel): The channel to respond on.
// - request (*ssh.Request): The request.
//
func Ping(channel ssh.Channel, req *ssh.Request) error {
	log.Info("PING")
	if _, err := channel.Write([]byte("pong")); err != nil {
		log.Err("Failed to write to channel: %s", err)
	}
	sendExitStatus(0, channel)
	req.Reply(true, nil)
	return nil
}
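sendExitStatus is not shown in this section. A minimal sketch, assuming it sends the standard SSH "exit-status" channel request defined in RFC 4254 section 6.10:

// exitStatusMsg mirrors the payload of the SSH "exit-status" channel request:
// a single uint32 exit code.
type exitStatusMsg struct {
	Status uint32
}

// sendExitStatus reports the command's exit code to the client. Illustrative
// sketch; the helper used elsewhere in this package may differ.
func sendExitStatus(status uint32, channel ssh.Channel) error {
	_, err := channel.SendRequest("exit-status", false, ssh.Marshal(&exitStatusMsg{Status: status}))
	return err
}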
// CheckAPICompat checks for API compatibility errors and warns about them.
func CheckAPICompat(c *deis.Client, err error) error {
	if err == deis.ErrAPIMismatch {
		log.Info("WARNING: SDK and Controller API versions do not match. SDK: %s Controller: %s",
			deis.APIVersion, c.ControllerAPIVersion)

		// API mismatch isn't fatal, so after warning continue on.
		return nil
	}

	return err
}
// listen handles accepting and managing connections. However, since closer
// is len(1), it will not block the sender.
func (s *server) listen(l net.Listener, conf *ssh.ServerConfig, closer chan interface{}) error {
	log.Info("Accepting new connections.")
	defer l.Close()

	// FIXME: Since Accept blocks, closer may not be checked often enough.
	for {
		log.Info("Checking closer.")
		if len(closer) > 0 {
			<-closer
			log.Info("Shutting down SSHD listener.")
			return nil
		}
		conn, err := l.Accept()
		if err != nil {
			log.Err("Error during Accept: %s", err)
			// Accept errors are treated as fatal: shut down the listener.
			return err
		}
		go s.handleConn(conn, conf)
	}
}
// createPreReceiveHook renders preReceiveHookTpl to repoPath/hooks/pre-receive
func createPreReceiveHook(gitHome, repoPath string) error {
	// parse & generate the template anew each receive for each new git home
	var hookByteBuf bytes.Buffer
	if err := preReceiveHookTpl.Execute(&hookByteBuf, map[string]string{"GitHome": gitHome}); err != nil {
		return err
	}

	writePath := filepath.Join(repoPath, "hooks", "pre-receive")
	log.Info("Writing pre-receive hook to %s", writePath)
	if err := ioutil.WriteFile(writePath, hookByteBuf.Bytes(), 0755); err != nil {
		return fmt.Errorf("Cannot write pre-receive hook to %s (%s)", writePath, err)
	}

	return nil
}
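preReceiveHookTpl itself is defined elsewhere. An illustrative stand-in follows, assuming the rendered script hands each pushed ref to the builder's git-receive command; the env var name, binary name, and arguments here are assumptions, not the repository's actual template:

// Illustrative only: a template of the general shape preReceiveHookTpl might
// have. The real script in this repository may invoke a different binary or
// pass different arguments.
var preReceiveHookTpl = template.Must(template.New("pre-receive").Parse(`#!/bin/bash
set -eo pipefail

# Forward every pushed ref to the builder's git-receive hook.
while read oldrev newrev refname; do
	GIT_HOME={{.GitHome}} boot git-receive "$oldrev" "$newrev" "$refname"
done
`))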
// listen handles accepting and managing connections.
func (s *server) listen(l net.Listener, conf *ssh.ServerConfig) error {
	log.Info("Accepting new connections.")
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Err("Error during Accept: %s", err)
			// We shut down the listener if Accept errors.
			return err
		}
		go s.handleConn(conn, conf)
	}
}
// AuthKey authenticates based on a public key.
func AuthKey(key ssh.PublicKey) (*ssh.Permissions, error) {
	log.Info("Starting ssh authentication")
	userInfo, err := controller.UserInfoFromKey(key)
	if err != nil {
		return nil, err
	}

	userInfo.Key = string(ssh.MarshalAuthorizedKey(key))
	apps := strings.Join(userInfo.Apps, ", ")
	log.Debug("Key accepted for user %s.", userInfo.Username)
	perm := &ssh.Permissions{
		Extensions: map[string]string{
			"user":        userInfo.Username,
			"fingerprint": userInfo.Fingerprint,
			"apps":        apps,
		},
	}
	return perm, nil
}
// Receive receives a Git repo.
// This will only work for git-receive-pack.
func Receive(
	repo, operation, gitHome string,
	channel ssh.Channel,
	fingerprint, username, conndata, receivetype string) error {

	log.Info("receiving git repo name: %s, operation: %s, fingerprint: %s, user: %s", repo, operation, fingerprint, username)
	if receivetype == "mock" {
		channel.Write([]byte("OK"))
		return nil
	}
	repoPath := filepath.Join(gitHome, repo)
	log.Info("creating repo directory %s", repoPath)
	if _, err := createRepo(repoPath); err != nil {
		err = fmt.Errorf("Did not create new repo (%s)", err)
		return err
	}

	log.Info("writing pre-receive hook under %s", repoPath)
	if err := createPreReceiveHook(gitHome, repoPath); err != nil {
		err = fmt.Errorf("Did not write pre-receive hook (%s)", err)
		return err
	}

	cmd := exec.Command("git-shell", "-c", fmt.Sprintf("%s '%s'", operation, repo))
	log.Info(strings.Join(cmd.Args, " "))

	var errbuff bytes.Buffer

	cmd.Dir = gitHome
	cmd.Env = []string{
		fmt.Sprintf("RECEIVE_USER=%s", username),
		fmt.Sprintf("RECEIVE_REPO=%s", repo),
		fmt.Sprintf("RECEIVE_FINGERPRINT=%s", fingerprint),
		fmt.Sprintf("SSH_ORIGINAL_COMMAND=%s '%s'", operation, repo),
		fmt.Sprintf("SSH_CONNECTION=%s", conndata),
	}
	cmd.Env = append(cmd.Env, os.Environ()...)

	log.Debug("Working Dir: %s", cmd.Dir)
	log.Debug("Environment: %s", strings.Join(cmd.Env, ","))

	inpipe, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	cmd.Stdout = channel
	cmd.Stderr = io.MultiWriter(channel.Stderr(), &errbuff)

	if err := cmd.Start(); err != nil {
		err = fmt.Errorf("Failed to start git pre-receive hook: %s (%s)", err, errbuff.Bytes())
		return err
	}

	if _, err := io.Copy(inpipe, channel); err != nil {
		err = fmt.Errorf("Failed to write git objects into the git pre-receive hook (%s)", err)
		return err
	}

	fmt.Println("Waiting for git-receive to run.")
	fmt.Println("Waiting for deploy.")
	if err := cmd.Wait(); err != nil {
		err = fmt.Errorf("Failed to run git pre-receive hook: %s (%s)", errbuff.Bytes(), err)
		return err
	}

	if errbuff.Len() > 0 {
		log.Err("Unreported error: %s", errbuff.Bytes())
		return errors.New(errbuff.String())
	}
	log.Info("Deploy complete.")

	return nil
}
func build(
	conf *Config,
	storageDriver storagedriver.StorageDriver,
	kubeClient *client.Client,
	fs sys.FS,
	env sys.Env,
	builderKey,
	rawGitSha string) error {

	dockerBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.DockerBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	slugBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.SlugBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}

	appName := conf.App()

	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			log.Info("unable to remove tmpdir %s (%s)", tmpDir, err)
		}
	}()

	slugBuilderInfo := NewSlugBuilderInfo(slugName)

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := getAppConfig(conf, builderKey, conf.Username, appName)
	if err != nil {
		return fmt.Errorf("getting app config for %s (%s)", appName, err)
	}
	log.Debug("got the following config back for app %s: %+v", appName, *appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	appTgzdata, err := ioutil.ReadFile(absAppTgz)
	if err != nil {
		return fmt.Errorf("error while reading file %s: (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s", slugBuilderInfo.TarKey())

	if err := storageDriver.PutContent(context.Background(), slugBuilderInfo.TarKey(), appTgzdata); err != nil {
		return fmt.Errorf("uploading %s to %s (%v)", absAppTgz, slugBuilderInfo.TarKey(), err)
	}

	var pod *api.Pod
	var buildPodName string
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		pod = dockerBuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugName,
			conf.StorageType,
			conf.DockerBuilderImage,
			dockerBuilderImagePullPolicy,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		pod = slugbuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugBuilderInfo.PushKey(),
			buildPackURL,
			conf.StorageType,
			conf.SlugBuilderImage,
			slugBuilderImagePullPolicy,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representation of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.SessionIdleInterval(), conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.Scheme)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()

	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	log.Debug(
		"Waiting for the %s/%s pod to end. Checking every %s for %s",
		newPod.Namespace,
		newPod.Name,
		conf.BuilderPodTickDuration(),
		conf.BuilderPodWaitDuration(),
	)

	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	log.Debug("Done")
	log.Debug("Checking for builder pod exit code")
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Build pod exited with code %d, stopping build.", state.ExitCode)
		}
	}
	log.Debug("Done")

	procType := pkg.ProcessType{}
	if bType == buildTypeProcfile {
		if procType, err = getProcFile(storageDriver, tmpDir, slugBuilderInfo.AbsoluteProcfileKey()); err != nil {
			return err
		}
	}

	log.Info("Build complete.")
	buildHook := createBuildHook(slugBuilderInfo, gitSha, conf.Username, appName, procType, usingDockerfile)
	quit := progress("...", conf.SessionIdleInterval())
	buildHookResp, err := publishRelease(conf, builderKey, buildHook)
	quit <- true
	<-quit
	log.Info("Launching App...")
	if err != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}
	release, ok := buildHookResp.Release["version"]
	if !ok {
		return fmt.Errorf("No release returned from Deis controller")
	}

	log.Info("Done, %s:v%d deployed to Deis\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit http://deis.io\n")

	run(repoCmd(repoDir, "git", "gc"))

	return nil
}
// answer handles answering requests and channel requests
//
// Currently, an exec must be either "ping", "git-receive-pack" or
// "git-upload-pack". Anything else will result in a failure response. Right
// now, we leave the channel open on failure because it is unclear what the
// correct behavior for a failed exec is.
//
// Support for setting environment variables via `env` has been disabled.
func (s *server) answer(channel ssh.Channel, requests <-chan *ssh.Request, condata string, sshconn *ssh.ServerConn) error {
	defer channel.Close()

	// Answer all the requests on this connection.
	for req := range requests {
		ok := false

		switch req.Type {
		case "env":
			o := &EnvVar{}
			ssh.Unmarshal(req.Payload, o)
			log.Info("Key='%s', Value='%s'\n", o.Name, o.Value)
			req.Reply(true, nil)
		case "exec":
			clean := cleanExec(req.Payload)
			parts := strings.SplitN(clean, " ", 2)

			switch parts[0] {
			case "ping":
				err := Ping(channel, req)
				if err != nil {
					log.Info("Error pinging: %s", err)
				}
				return err
			case "git-receive-pack", "git-upload-pack":
				if len(parts) < 2 {
					log.Info("Expected two-part command.")
					req.Reply(ok, nil)
					break
				}

				repoName, err := cleanRepoName(parts[1])
				if err != nil {
					log.Err("Illegal repo name: %s.", err)
					channel.Stderr().Write([]byte("No repo given"))
					return err
				}
				wrapErr := wrapInLock(s.pushLock, repoName, time.Duration(0), s.runReceive(req, sshconn, channel, repoName, parts, condata))
				if wrapErr == errAlreadyLocked {
					log.Info(multiplePush)
					// The error must be in git format
					if pktErr := gitPktLine(channel, fmt.Sprintf("ERR %v\n", multiplePush)); pktErr != nil {
						log.Err("Failed to write to channel: %s", pktErr)
					}
					sendExitStatus(1, channel)
					req.Reply(false, nil)
					return nil
				}

				var xs uint32
				if wrapErr != nil {
					log.Err("Failed git receive: %v", wrapErr)
					xs = 1
				}
				sendExitStatus(xs, channel)
				return nil
			default:
				log.Info("Illegal command is '%s'\n", clean)
				req.Reply(false, nil)
				return nil
			}

			if err := sendExitStatus(0, channel); err != nil {
				log.Err("Failed to write exit status: %s", err)
			}
			return nil
		default:
			// We simply ignore all of the other cases and leave the
			// channel open to take additional requests.
			log.Info("Received request of type %s\n", req.Type)
			req.Reply(false, nil)
		}
	}
	return nil
}
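The git-format error path above calls gitPktLine, which does not appear in this section. A minimal sketch based on git's documented pkt-line framing (a four-hex-digit length that counts itself, followed by the payload); the helper in this package may differ:

// gitPktLine writes msg to w using git's pkt-line framing: a 4-byte
// hexadecimal length prefix (which includes the prefix itself) followed by
// the payload. An ssh.Channel satisfies io.Writer, so it can be passed here.
func gitPktLine(w io.Writer, msg string) error {
	_, err := fmt.Fprintf(w, "%04x%s", len(msg)+4, msg)
	return err
}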
func build(conf *Config, kubeClient *client.Client, rawGitSha string) error {
	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}
	appName := conf.App()
	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir := filepath.Join(buildDir, gitSha.Short())
	err = os.MkdirAll(tmpDir, 0777)
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}

	slugBuilderInfo := storage.NewSlugBuilderInfo(appName, slugName, gitSha)

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	procType := pkg.ProcessType{}
	if bType == buildTypeProcfile {
		rawProcFile, err := ioutil.ReadFile(fmt.Sprintf("%s/Procfile", tmpDir))
		if err != nil {
			return fmt.Errorf("reading %s/Procfile", tmpDir)
		}
		if err := yaml.Unmarshal(rawProcFile, &procType); err != nil {
			return fmt.Errorf("procfile %s/ProcFile is malformed (%s)", tmpDir, err)
		}
	}

	var pod *api.Pod
	var buildPodName string
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		pod = dockerBuilderPod(
			conf.Debug,
			false,
			buildPodName,
			conf.PodNamespace,
			slugBuilderInfo.TarURL(),
			slugName,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		pod = slugbuilderPod(
			conf.Debug,
			false,
			buildPodName,
			conf.PodNamespace,
			slugBuilderInfo.TarURL(),
			slugBuilderInfo.PushURL(),
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representation of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.Scheme)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()
	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Stopping build.")
		}
	}

	// launch a slugrunner pod to verify that the built slug exists and starts
	buildPodName = slugBuilderPodName(appName+"run", gitSha.Short())
	pod = slugrunnerPod(
		conf.Debug,
		false,
		buildPodName,
		conf.PodNamespace,
		slugBuilderInfo.SlugURL(),
	)
	newPod, err = podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}
	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	log.Info("Build complete.")
	log.Info("Launching app.")
	log.Info("Launching...")

	// buildHook := &pkg.BuildHook{
	// 	Sha:         gitSha.Short(),
	// 	ReceiveUser: conf.Username,
	// 	ReceiveRepo: appName,
	// 	Image:       appName,
	// 	Procfile:    procType,
	// }
	// if !usingDockerfile {
	// 	buildHook.Dockerfile = ""
	// 	// need this to tell the controller what URL to give the slug runner
	// 	buildHook.Image = slugBuilderInfo.PushURL() + "/slug.tgz"
	// } else {
	// 	buildHook.Dockerfile = "true"
	// }
	// buildHookResp, err := publishRelease(conf, builderKey, buildHook)
	// if err != nil {
	// 	return fmt.Errorf("publishing release (%s)", err)
	// }
	// release, ok := buildHookResp.Release["version"]
	// if !ok {
	// 	return fmt.Errorf("No release returned from Deis controller")
	// }
	//
	// log.Info("Done, %s:v%d deployed to Deis\n", appName, release)
	// log.Info("Use 'deis open' to view this application in your browser\n")
	// log.Info("To learn more, use 'deis help' or visit http://deis.io\n")
	//

	gcCmd := repoCmd(repoDir, "git", "gc")
	if err := run(gcCmd); err != nil {
		return fmt.Errorf("cleaning up the repository with %s (%s)", strings.Join(gcCmd.Args, " "), err)
	}

	return nil
}
func build(
	conf *Config,
	storageDriver storagedriver.StorageDriver,
	kubeClient *client.Client,
	fs sys.FS,
	env sys.Env,
	builderKey,
	rawGitSha string) error {

	dockerBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.DockerBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	slugBuilderImagePullPolicy, err := k8s.PullPolicyFromString(conf.SlugBuilderImagePullPolicy)
	if err != nil {
		return err
	}

	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}

	appName := conf.App()

	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			log.Info("unable to remove tmpdir %s (%s)", tmpDir, err)
		}
	}()

	client, err := controller.New(conf.ControllerHost, conf.ControllerPort)
	if err != nil {
		return err
	}

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := hooks.GetAppConfig(client, conf.Username, appName)
	if controller.CheckAPICompat(client, err) != nil {
		return err
	}

	log.Debug("got the following config back for app %s: %+v", appName, appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	_, disableCaching := appConf.Values["DEIS_DISABLE_CACHE"]
	slugBuilderInfo := NewSlugBuilderInfo(appName, gitSha.Short(), disableCaching)

	if slugBuilderInfo.DisableCaching() {
		log.Debug("caching disabled for app %s", appName)
		// If cache file exists, delete it
		if _, err := storageDriver.Stat(context.Background(), slugBuilderInfo.CacheKey()); err == nil {
			log.Debug("deleting cache %s for app %s", slugBuilderInfo.CacheKey(), appName)
			if err := storageDriver.Delete(context.Background(), slugBuilderInfo.CacheKey()); err != nil {
				return err
			}
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	appTgzdata, err := ioutil.ReadFile(absAppTgz)
	if err != nil {
		return fmt.Errorf("error while reading file %s: (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s", slugBuilderInfo.TarKey())

	if err := storageDriver.PutContent(context.Background(), slugBuilderInfo.TarKey(), appTgzdata); err != nil {
		return fmt.Errorf("uploading %s to %s (%v)", absAppTgz, slugBuilderInfo.TarKey(), err)
	}

	var pod *api.Pod
	var buildPodName string
	image := appName
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		registryLocation := conf.RegistryLocation
		registryEnv := make(map[string]string)
		if registryLocation != "on-cluster" {
			registryEnv, err = getRegistryDetails(kubeClient, &image, registryLocation, conf.PodNamespace, conf.RegistrySecretPrefix)
			if err != nil {
				return fmt.Errorf("error getting private registry details %s", err)
			}
			image = image + ":git-" + gitSha.Short()
		}
		registryEnv["DEIS_REGISTRY_PROXY_PORT"] = conf.RegistryProxyPort
		registryEnv["DEIS_REGISTRY_LOCATION"] = registryLocation
		pod = dockerBuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			gitSha.Short(),
			slugName,
			conf.StorageType,
			conf.DockerBuilderImage,
			conf.RegistryHost,
			conf.RegistryPort,
			registryEnv,
			dockerBuilderImagePullPolicy,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		cacheKey := ""
		if !slugBuilderInfo.DisableCaching() {
			cacheKey = slugBuilderInfo.CacheKey()
		}
		pod = slugbuilderPod(
			conf.Debug,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarKey(),
			slugBuilderInfo.PushKey(),
			cacheKey,
			gitSha.Short(),
			buildPackURL,
			conf.StorageType,
			conf.SlugBuilderImage,
			slugBuilderImagePullPolicy,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representation of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	pw := k8s.NewPodWatcher(kubeClient, "deis")
	stopCh := make(chan struct{})
	defer close(stopCh)
	go pw.Controller.Run(stopCh)

	if err := waitForPod(pw, newPod.Namespace, newPod.Name, conf.SessionIdleInterval(), conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.ParameterCodec)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()

	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	log.Debug(
		"Waiting for the %s/%s pod to end. Checking every %s for %s",
		newPod.Namespace,
		newPod.Name,
		conf.BuilderPodTickDuration(),
		conf.BuilderPodWaitDuration(),
	)

	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(pw, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	log.Debug("Done")
	log.Debug("Checking for builder pod exit code")
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Build pod exited with code %d, stopping build.", state.ExitCode)
		}
	}
	log.Debug("Done")

	procType := deisAPI.ProcessType{}
	if procType, err = getProcFile(storageDriver, tmpDir, slugBuilderInfo.AbsoluteProcfileKey(), bType); err != nil {
		return err
	}

	log.Info("Build complete.")
	quit := progress("...", conf.SessionIdleInterval())
	log.Info("Launching App...")

	if !usingDockerfile {
		image = slugBuilderInfo.AbsoluteSlugObjectKey()
	}
	release, err := hooks.CreateBuild(client, conf.Username, conf.App(), image, gitSha.Short(), procType, usingDockerfile)
	quit <- true
	<-quit
	if controller.CheckAPICompat(client, err) != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}

	log.Info("Done, %s:v%d deployed to Workflow\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit https://deis.com/\n")

	run(repoCmd(repoDir, "git", "gc"))

	return nil
}
func build(conf *Config, s3Client *s3.S3, kubeClient *client.Client, builderKey, rawGitSha string) error {
	repo := conf.Repository
	gitSha, err := git.NewSha(rawGitSha)
	if err != nil {
		return err
	}
	appName := conf.App()
	repoDir := filepath.Join(conf.GitHome, repo)
	buildDir := filepath.Join(repoDir, "build")

	slugName := fmt.Sprintf("%s:git-%s", appName, gitSha.Short())
	if err := os.MkdirAll(buildDir, os.ModeDir); err != nil {
		return fmt.Errorf("making the build directory %s (%s)", buildDir, err)
	}

	tmpDir, err := ioutil.TempDir(buildDir, "tmp")
	if err != nil {
		return fmt.Errorf("unable to create tmpdir %s (%s)", buildDir, err)
	}

	slugBuilderInfo := storage.NewSlugBuilderInfo(s3Client.Endpoint, appName, slugName, gitSha)

	// Get the application config from the controller, so we can check for a custom buildpack URL
	appConf, err := getAppConfig(conf, builderKey, conf.Username, appName)
	if err != nil {
		return fmt.Errorf("getting app config for %s (%s)", appName, err)
	}
	log.Debug("got the following config back for app %s: %+v", appName, *appConf)
	var buildPackURL string
	if buildPackURLInterface, ok := appConf.Values["BUILDPACK_URL"]; ok {
		if bpStr, ok := buildPackURLInterface.(string); ok {
			log.Debug("found custom buildpack URL %s", bpStr)
			buildPackURL = bpStr
		}
	}

	// build a tarball from the new objects
	appTgz := fmt.Sprintf("%s.tar.gz", appName)
	gitArchiveCmd := repoCmd(repoDir, "git", "archive", "--format=tar.gz", fmt.Sprintf("--output=%s", appTgz), gitSha.Short())
	gitArchiveCmd.Stdout = os.Stdout
	gitArchiveCmd.Stderr = os.Stderr
	if err := run(gitArchiveCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(gitArchiveCmd.Args, " "), err)
	}
	absAppTgz := fmt.Sprintf("%s/%s", repoDir, appTgz)

	// untar the archive into the temp dir
	tarCmd := repoCmd(repoDir, "tar", "-xzf", appTgz, "-C", fmt.Sprintf("%s/", tmpDir))
	tarCmd.Stdout = os.Stdout
	tarCmd.Stderr = os.Stderr
	if err := run(tarCmd); err != nil {
		return fmt.Errorf("running %s (%s)", strings.Join(tarCmd.Args, " "), err)
	}

	bType := getBuildTypeForDir(tmpDir)
	usingDockerfile := bType == buildTypeDockerfile

	procType := pkg.ProcessType{}
	if bType == buildTypeProcfile {
		rawProcFile, err := ioutil.ReadFile(fmt.Sprintf("%s/Procfile", tmpDir))
		if err != nil {
			return fmt.Errorf("reading %s/Procfile", tmpDir)
		}
		if err := yaml.Unmarshal(rawProcFile, &procType); err != nil {
			return fmt.Errorf("procfile %s/ProcFile is malformed (%s)", tmpDir, err)
		}
	}

	bucketName := "git"
	if err := storage.CreateBucket(s3Client, bucketName); err != nil {
		log.Warn("create bucket error: %+v", err)
	}

	appTgzReader, err := os.Open(absAppTgz)
	if err != nil {
		return fmt.Errorf("opening %s for read (%s)", appTgz, err)
	}

	log.Debug("Uploading tar to %s/%s/%s", s3Client.Endpoint, bucketName, slugBuilderInfo.TarKey())

	if err := storage.UploadObject(s3Client, bucketName, slugBuilderInfo.TarKey(), appTgzReader); err != nil {
		return fmt.Errorf("uploading %s to %s/%s (%v)", absAppTgz, bucketName, slugBuilderInfo.TarKey(), err)
	}

	creds := storage.CredsOK()

	var pod *api.Pod
	var buildPodName string
	if usingDockerfile {
		buildPodName = dockerBuilderPodName(appName, gitSha.Short())
		pod = dockerBuilderPod(
			conf.Debug,
			creds,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarURL(),
			slugName,
		)
	} else {
		buildPodName = slugBuilderPodName(appName, gitSha.Short())
		pod = slugbuilderPod(
			conf.Debug,
			creds,
			buildPodName,
			conf.PodNamespace,
			appConf.Values,
			slugBuilderInfo.TarURL(),
			slugBuilderInfo.PushURL(),
			buildPackURL,
		)
	}

	log.Info("Starting build... but first, coffee!")
	log.Debug("Starting pod %s", buildPodName)
	json, err := prettyPrintJSON(pod)
	if err == nil {
		log.Debug("Pod spec: %v", json)
	} else {
		log.Debug("Error creating json representation of pod spec: %v", err)
	}

	podsInterface := kubeClient.Pods(conf.PodNamespace)

	newPod, err := podsInterface.Create(pod)
	if err != nil {
		return fmt.Errorf("creating builder pod (%s)", err)
	}

	if err := waitForPod(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("watching events for builder pod startup (%s)", err)
	}

	req := kubeClient.Get().Namespace(newPod.Namespace).Name(newPod.Name).Resource("pods").SubResource("log").VersionedParams(
		&api.PodLogOptions{
			Follow: true,
		}, api.Scheme)

	rc, err := req.Stream()
	if err != nil {
		return fmt.Errorf("attempting to stream logs (%s)", err)
	}
	defer rc.Close()

	size, err := io.Copy(os.Stdout, rc)
	if err != nil {
		return fmt.Errorf("fetching builder logs (%s)", err)
	}
	log.Debug("size of streamed logs %v", size)

	// check the state and exit code of the build pod.
	// if the code is not 0 return error
	if err := waitForPodEnd(kubeClient, newPod.Namespace, newPod.Name, conf.BuilderPodTickDuration(), conf.BuilderPodWaitDuration()); err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}
	buildPod, err := kubeClient.Pods(newPod.Namespace).Get(newPod.Name)
	if err != nil {
		return fmt.Errorf("error getting builder pod status (%s)", err)
	}

	for _, containerStatus := range buildPod.Status.ContainerStatuses {
		state := containerStatus.State.Terminated
		if state.ExitCode != 0 {
			return fmt.Errorf("Stopping build.")
		}
	}

	// poll the s3 server to ensure the slug exists
	err = wait.PollImmediate(conf.ObjectStorageTickDuration(), conf.ObjectStorageWaitDuration(), func() (bool, error) {
		exists, err := storage.ObjectExists(s3Client, bucketName, slugBuilderInfo.PushKey())
		if err != nil {
			return false, fmt.Errorf("Checking if object %s/%s exists (%s)", bucketName, slugBuilderInfo.PushKey(), err)
		}
		return exists, nil
	})
	if err != nil {
		return fmt.Errorf("Timed out waiting for object in storage. Aborting build...")
	}

	log.Info("Build complete.")
	log.Info("Launching app.")
	log.Info("Launching...")

	buildHook := &pkg.BuildHook{
		Sha:         gitSha.Short(),
		ReceiveUser: conf.Username,
		ReceiveRepo: appName,
		Image:       appName,
		Procfile:    procType,
	}
	if !usingDockerfile {
		buildHook.Dockerfile = ""
		// need this to tell the controller what URL to give the slug runner
		buildHook.Image = slugBuilderInfo.PushURL() + "/slug.tgz"
	} else {
		buildHook.Dockerfile = "true"
	}
	buildHookResp, err := publishRelease(conf, builderKey, buildHook)
	if err != nil {
		return fmt.Errorf("publishing release (%s)", err)
	}
	release, ok := buildHookResp.Release["version"]
	if !ok {
		return fmt.Errorf("No release returned from Deis controller")
	}

	log.Info("Done, %s:v%d deployed to Deis\n", appName, release)
	log.Info("Use 'deis open' to view this application in your browser\n")
	log.Info("To learn more, use 'deis help' or visit http://deis.io\n")

	gcCmd := repoCmd(repoDir, "git", "gc")
	if err := run(gcCmd); err != nil {
		return fmt.Errorf("cleaning up the repository with %s (%s)", strings.Join(gcCmd.Args, " "), err)
	}

	return nil
}