func (b *Builder) Run() error {
	// teardown will remove the Image and stop and
	// remove the service containers after the
	// build is done running.
	defer b.teardown()

	// setup will create the Image and supporting
	// service containers.
	if err := b.setup(); err != nil {
		return err
	}

	// make sure build state is not nil
	b.BuildState = &BuildState{}
	b.BuildState.ExitCode = 0
	b.BuildState.Started = time.Now().UTC().Unix()

	c := make(chan error, 1)
	go func() {
		c <- b.run()
	}()

	// wait for either a) the job to complete or b) the job to timeout
	select {
	case err := <-c:
		return err
	case <-time.After(b.Timeout):
		log.Errf("time limit exceeded for build %s", b.Build.Name)
		b.BuildState.ExitCode = 124
		b.BuildState.Finished = time.Now().UTC().Unix()
		return nil
	}
}
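// Run only touches three fields of BuildState (Started, Finished, ExitCode),
// which suggests a minimal shape along the lines of the sketch below. This is
// an illustrative assumption, not the package's actual definition, which may
// carry additional fields.
type BuildState struct {
	Started  int64 // unix timestamp when the build began
	Finished int64 // unix timestamp when the build ended
	ExitCode int   // exit code of the build; 124 is used for timeouts
}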
// teardown is a helper function that we can use to
// stop and remove the build container, its supporting image,
// and the supporting service containers.
func (b *Builder) teardown() error {
	defer b.dockerClient.CloseIdleConnections()

	// stop and destroy the container
	if b.container != nil {
		// debugging
		log.Info("removing build container")

		// stop the container, ignore error message
		b.dockerClient.Containers.Stop(b.container.ID, 15)

		// remove the container, ignore error message
		if err := b.dockerClient.Containers.Remove(b.container.ID); err != nil {
			log.Errf("failed to delete build container %s", b.container.ID)
		}
	}

	// stop and destroy the container services
	for i, container := range b.services {
		// debugging
		log.Infof("removing service container %s", b.Build.Services[i])

		// stop the service container, ignore the error
		b.dockerClient.Containers.Stop(container.ID, 15)

		// remove the service container, ignore the error
		if err := b.dockerClient.Containers.Remove(container.ID); err != nil {
			log.Errf("failed to delete service container %s", container.ID)
		}
	}

	// destroy the underlying image
	if b.image != nil {
		// debugging
		log.Info("removing build image")

		if _, err := b.dockerClient.Images.Remove(b.image.ID); err != nil {
			log.Errf("failed to completely delete build image %s. %s", b.image.ID, err.Error())
		}
	}
	return nil
}
func main() {
	log.SetPriority(log.LOG_NOTICE)

	// Parse flags. The only flag that can be passed into the
	// application is the location of the configuration (.toml) file.
	var conf string
	flag.StringVar(&conf, "config", "", "")
	flag.Parse()

	config.Var(&nodes, "worker-nodes")

	// Parse config data. The config data can be stored in a config
	// file (.toml format), in environment variables, or a combination of both.
	config.SetPrefix("DRONE_")
	err := config.Parse(conf)
	if err != nil {
		log.Errf("Unable to parse config: %v", err)
		os.Exit(1)
	}

	// Setup the remote services. We need to execute these to register
	// the remote plugins with the system.
	//
	// NOTE: this cannot be done via init() because they need to be
	// executed after config.Parse
	bitbucket.Register()
	github.Register()
	gitlab.Register()
	gogs.Register()

	caps = map[string]bool{}
	caps[capability.Registration] = *open

	// setup the database and cancel all pending
	// commits in the system.
	db = database.MustConnect(*driver, *datasource)
	go database.NewCommitstore(db).KillCommits()

	// Create the worker pool, director and builders
	workers = pool.New()
	worker = director.New()

	if nodes == nil || len(nodes) == 0 {
		workers.Allocate(docker.New())
		workers.Allocate(docker.New())
	} else {
		for _, node := range nodes {
			if strings.HasPrefix(node, "unix://") {
				workers.Allocate(docker.NewHost(node))
			} else if *dockercert != "" && *dockerkey != "" {
				workers.Allocate(docker.NewHostCertFile(node, *dockercert, *dockerkey))
			} else {
				fmt.Println(DockerTLSWarning)
				workers.Allocate(docker.NewHost(node))
			}
		}
	}

	pub = pubsub.NewPubSub()

	// create handler for static resources
	assets := rice.MustFindBox("app").HTTPBox()
	assetserve := http.FileServer(assets)
	http.Handle("/robots.txt", assetserve)
	http.Handle("/static/", http.StripPrefix("/static", assetserve))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write(assets.MustBytes("index.html"))
	})

	// create the router and add middleware
	mux := router.New()
	mux.Use(middleware.Options)
	mux.Use(ContextMiddleware)
	mux.Use(middleware.SetHeaders)
	mux.Use(middleware.SetUser)
	http.Handle("/api/", mux)

	// start the http server in either http or https mode,
	// depending on whether a certificate was provided.
	if len(*sslcrt) == 0 {
		panic(http.ListenAndServe(*port, nil))
	} else {
		panic(http.ListenAndServeTLS(*port, *sslcrt, *sslkey, nil))
	}
}
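// ContextMiddleware is registered above but not defined in this excerpt. From
// the surrounding setup, its role appears to be exposing the shared services
// (db, workers, pub) to the API handlers. The sketch below is an assumption
// using the common net/http middleware shape (assumed imports: context,
// net/http); the actual router package may expect a different signature.

type ctxKey string

func ContextMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// attach shared services to the request context so downstream
		// handlers can look them up by key.
		ctx := r.Context()
		ctx = context.WithValue(ctx, ctxKey("database"), db)
		ctx = context.WithValue(ctx, ctxKey("workers"), workers)
		ctx = context.WithValue(ctx, ctxKey("pubsub"), pub)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}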
func (b *Builder) setup() error {
	// temp directory to store all files required
	// to generate the Docker image.
	dir, err := ioutil.TempDir("", "drone-")
	if err != nil {
		return err
	}

	// clean up after our mess.
	defer os.RemoveAll(dir)

	// make sure the image isn't empty. this would be bad
	if len(b.Build.Image) == 0 {
		log.Err("Fatal Error, No Docker Image specified")
		return fmt.Errorf("Error: missing Docker image")
	}

	// if we're using an alias for the build name we
	// should substitute it now
	if alias, ok := builders[b.Build.Image]; ok {
		b.Build.Image = alias.Tag
	}

	// if this is a local repository we should copy
	// the source code into our temp directory
	if b.Repo.IsLocal() {
		// this is where we used to use symlinks. We should
		// talk to the docker team about this, since copying
		// the entire repository is slow :(
		//
		// see https://github.com/dotcloud/docker/pull/3567
		//src := filepath.Join(dir, "src")
		//err = os.Symlink(b.Repo.Path, src)
		//if err != nil {
		//	return err
		//}

		src := filepath.Join(dir, "src")
		cmd := exec.Command("cp", "-a", b.Repo.Path, src)
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("Error: Unable to copy repository. %s", err)
		}
	}

	// start all services required for the build
	// that will get linked to the container.
	for _, service := range b.Build.Services {
		// Parse the name of the Docker image
		// and then construct a fully qualified image name
		owner, name, tag := parseImageName(service)
		cname := fmt.Sprintf("%s/%s:%s", owner, name, tag)

		// Get the image info
		img, err := b.dockerClient.Images.Inspect(cname)
		if err != nil {
			// Pull the image if it doesn't exist
			if err := b.dockerClient.Images.Pull(cname); err != nil {
				return fmt.Errorf("Error: Unable to pull image %s", cname)
			}

			img, err = b.dockerClient.Images.Inspect(cname)
			if err != nil {
				return fmt.Errorf("Error: Invalid or unknown image %s", cname)
			}
		}

		// debugging
		log.Infof("starting service container %s", cname)

		// Run the container
		run, err := b.dockerClient.Containers.RunDaemonPorts(cname, img.Config.ExposedPorts)
		if err != nil {
			return err
		}

		// Get the container info
		info, err := b.dockerClient.Containers.Inspect(run.ID)
		if err != nil {
			// on error kill the container since it hasn't yet been
			// added to the array and would therefore not get
			// removed in the defer statement.
			b.dockerClient.Containers.Stop(run.ID, 10)
			b.dockerClient.Containers.Remove(run.ID)
			return err
		}

		// Add the running service to the list
		b.services = append(b.services, info)
	}

	if err := b.writeBuildScript(dir); err != nil {
		return err
	}

	if err := b.writeProxyScript(dir); err != nil {
		return err
	}

	if err := b.writeDockerfile(dir); err != nil {
		return err
	}

	// debugging
	log.Info("creating build image")

	// check for the build container (ie bradrydzewski/go:1.2)
	// and download it if it doesn't already exist or has a :latest tag
	if _, err := b.dockerClient.Images.Inspect(b.Build.Image); err == docker.ErrNotFound || strings.HasSuffix(b.Build.Image, ":latest") {
		// download the image if it doesn't exist
		if err := b.dockerClient.Images.Pull(b.Build.Image); err != nil {
			return fmt.Errorf("Error: Unable to pull image %s. %s", b.Build.Image, err)
		}
	} else if err != nil {
		log.Errf("failed to inspect image %s", b.Build.Image)
	}

	// create the Docker image
	id := createUID()
	if err := b.dockerClient.Images.Build(id, dir); err != nil {
		return err
	}

	// debugging
	log.Infof("copying repository to %s", b.Repo.Dir)

	// get the image details
	b.image, err = b.dockerClient.Images.Inspect(id)
	if err != nil {
		// if we have problems with the image make sure
		// we remove it before we exit
		log.Errf("failed to verify build image %s", id)
		return err
	}
	return nil
}
// TODO this has gotten a bit out of hand. refactor input params
func run(path, identity, sshconfig, dockerhost, dockercert, dockerkey string, publish, deploy, privileged bool) (int, error) {
	dockerClient, err := docker.NewHostCertFile(dockerhost, dockercert, dockerkey)
	if err != nil {
		log.Err(err.Error())
		return EXIT_STATUS, err
	}

	// parse the private environment variables
	envs := getParamMap("DRONE_ENV_")

	// parse the Drone yml file
	s, err := script.ParseBuildFile(script.Inject(path, envs))
	if err != nil {
		log.Err(err.Error())
		return EXIT_STATUS, err
	}

	// inject private environment variables into build script
	for key, val := range envs {
		s.Env = append(s.Env, key+"="+val)
	}

	// drop the publish and deploy sections unless
	// the corresponding flags were provided.
	if !publish {
		s.Publish = nil
	}
	if !deploy {
		s.Deploy = nil
	}

	// get the repository root directory
	dir := filepath.Dir(path)
	code := repo.Repo{
		Name:   filepath.Base(dir),
		Branch: "HEAD", // should we do this?
		Path:   dir,
	}

	// does the local repository match the
	// $GOPATH/src/{package} pattern? This is
	// important so we know the target location
	// where the code should be copied inside
	// the container.
	if gopath, ok := getRepoPath(dir); ok {
		code.Dir = gopath
	} else if gopath, ok := getGoPath(dir); ok {
		// in this case we found a GOPATH and
		// reverse engineered the package path
		code.Dir = gopath
	} else {
		// otherwise just use directory name
		code.Dir = filepath.Base(dir)
	}

	// this is where the code gets uploaded to the container
	// TODO move this code to the build package
	code.Dir = filepath.Join("/var/cache/drone/src", filepath.Clean(code.Dir))

	// ssh key to import into container
	var key []byte
	if len(identity) != 0 {
		key, err = ioutil.ReadFile(identity)
		if err != nil {
			fmt.Printf("[Error] Could not find or read identity file %s\n", identity)
			return EXIT_STATUS, err
		}
	}

	// ssh-config file to import into container
	var sshconfigcontent []byte
	if len(sshconfig) != 0 {
		sshconfigcontent, err = ioutil.ReadFile(sshconfig)
		if err != nil {
			fmt.Printf("[Error] Could not find or read ssh-config file %s\n", sshconfig)
			return EXIT_STATUS, err
		}
	}

	// create and configure the builder
	builder := build.New(dockerClient)
	builder.Build = s
	builder.Repo = &code
	builder.Key = key
	builder.SSHConfig = sshconfigcontent
	builder.Stdout = os.Stdout
	builder.Timeout = 300 * time.Minute
	builder.Privileged = privileged

	// execute the build
	if err := builder.Run(); err != nil {
		log.Errf("Error executing build: %s", err.Error())
		return EXIT_STATUS, err
	}

	fmt.Printf("\nDrone Build Results \033[90m(%s)\033[0m\n", dir)

	// print the build results
	build := builder.Build
	res := builder.BuildState
	duration := time.Duration(res.Finished - res.Started)
	switch {
	case res.ExitCode == 0:
		fmt.Printf(" \033[32m\u2713\033[0m %v \033[90m(%v)\033[0m\n", build.Name, humanizeDuration(duration*time.Second))
	case res.ExitCode != 0:
		fmt.Printf(" \033[31m\u2717\033[0m %v \033[90m(%v)\033[0m\n", build.Name, humanizeDuration(duration*time.Second))
	}

	return res.ExitCode, nil
}
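// The CLI above leans on two helpers that are referenced but not shown:
// getParamMap, which collects environment variables sharing a prefix (here
// "DRONE_ENV_") into a key/value map, and humanizeDuration, which
// pretty-prints the build duration. The following is a minimal sketch of what
// such helpers might look like; it is an assumption, not the project's actual
// code (assumed imports: fmt, os, strings, time).

// getParamMap returns a map of environment variables whose names start with
// the given prefix, with the prefix stripped from the keys.
func getParamMap(prefix string) map[string]string {
	params := map[string]string{}
	for _, env := range os.Environ() {
		parts := strings.SplitN(env, "=", 2)
		if len(parts) != 2 || !strings.HasPrefix(parts[0], prefix) {
			continue
		}
		params[strings.TrimPrefix(parts[0], prefix)] = parts[1]
	}
	return params
}

// humanizeDuration renders a duration with second precision, e.g. "1m5s".
func humanizeDuration(d time.Duration) string {
	return fmt.Sprintf("%v", d-(d%time.Second))
}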