func executePipeline(cmdCtx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions, getter pipelineGetter) (*RunnerShared, error) {
	// Boilerplate
	soft := NewSoftExit(options.GlobalOptions)
	logger := util.RootLogger().WithField("Logger", "Main")
	e, err := core.EmitterFromContext(cmdCtx)
	if err != nil {
		return nil, err
	}
	f := &util.Formatter{options.GlobalOptions.ShowColors}

	// Set up the runner
	r, err := NewRunner(cmdCtx, options, dockerOptions, getter)
	if err != nil {
		return nil, err
	}

	// Main timer
	mainTimer := util.NewTimer()
	timer := util.NewTimer()

	// These will be emitted at the end of the execution, we're going to be
	// pessimistic and report that we failed, unless overridden at the end of the
	// execution.
	fullPipelineFinisher := r.StartFullPipeline(options)
	pipelineArgs := &core.FullPipelineFinishedArgs{}
	defer fullPipelineFinisher.Finish(pipelineArgs)

	buildFinisher := r.StartBuild(options)
	buildFinishedArgs := &core.BuildFinishedArgs{Box: nil, Result: "failed"}
	defer buildFinisher.Finish(buildFinishedArgs)

	// Debug information
	DumpOptions(options)

	// Do some sanity checks before starting
	err = dockerlocal.RequireDockerEndpoint(dockerOptions)
	if err != nil {
		return nil, soft.Exit(err)
	}

	// Start copying code
	logger.Println(f.Info("Executing pipeline"))
	timer.Reset()
	_, err = r.EnsureCode()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	err = r.CleanupOldBuilds()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
	}
	if options.Verbose {
		logger.Printf(f.Success("Copied working dir", timer.String()))
	}

	// Setup environment is still a fairly special step, it needs
	// to start our boxes and get everything set up
	logger.Println(f.Info("Running step", "setup environment"))
	timer.Reset()
	shared, err := r.SetupEnvironment(cmdCtx)
	if shared.box != nil {
		if options.ShouldRemove {
			defer shared.box.Clean()
		}
		defer shared.box.Stop()
	}
	if err != nil {
		logger.Errorln(f.Fail("Step failed", "setup environment", timer.String()))
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	if options.Verbose {
		logger.Printf(f.Success("Step passed", "setup environment", timer.String()))
	}

	// Expand our context object
	box := shared.box
	buildFinishedArgs.Box = box
	pipeline := shared.pipeline
	repoName := pipeline.DockerRepo()
	tag := pipeline.DockerTag()
	message := pipeline.DockerMessage()

	shouldStore := options.ShouldArtifacts

	// TODO(termie): hack for now, probably can be made into a naive class
	var storeStep core.Step
	if shouldStore {
		storeStep = &core.ExternalStep{
			BaseStep: core.NewBaseStep(core.BaseStepOptions{
				Name:    "store",
				Owner:   "wercker",
				Version: util.Version(),
			}),
		}
	}

	e.Emit(core.BuildStepsAdded, &core.BuildStepsAddedArgs{
		Build:      pipeline,
		Steps:      pipeline.Steps(),
		StoreStep:  storeStep,
		AfterSteps: pipeline.AfterSteps(),
	})

	pr := &core.PipelineResult{
		Success:           true,
		FailedStepName:    "",
		FailedStepMessage: "",
	}

	// stepCounter starts at 3, step 1 is "get code", step 2 is "setup
	// environment".
	stepCounter := &util.Counter{Current: 3}
	checkpoint := false
	for _, step := range pipeline.Steps() {
		// we always want to run the wercker-init step to provide some functions
		if !checkpoint && stepCounter.Current > 3 {
			if options.EnableDevSteps && options.Checkpoint != "" {
				logger.Printf(f.Info("Skipping step", step.DisplayName()))
				// start at the one after the checkpoint
				if step.Checkpoint() == options.Checkpoint {
					logger.Printf(f.Info("Found checkpoint", options.Checkpoint))
					checkpoint = true
				}
				stepCounter.Increment()
				continue
			}
		}
		logger.Printf(f.Info("Running step", step.DisplayName()))
		timer.Reset()
		sr, err := r.RunStep(shared, step, stepCounter.Increment())
		if err != nil {
			pr.Success = false
			pr.FailedStepName = step.DisplayName()
			pr.FailedStepMessage = sr.Message
			logger.Printf(f.Fail("Step failed", step.DisplayName(), timer.String()))
			break
		}

		if options.EnableDevSteps && step.Checkpoint() != "" {
			logger.Printf(f.Info("Checkpointing", step.Checkpoint()))
			box.Commit(box.Repository(), fmt.Sprintf("w-%s", step.Checkpoint()), "checkpoint", false)
		}

		if options.Verbose {
			logger.Printf(f.Success("Step passed", step.DisplayName(), timer.String()))
		}
	}

	if options.ShouldCommit {
		_, err = box.Commit(repoName, tag, message, true)
		if err != nil {
			logger.Errorln("Failed to commit:", err.Error())
		}
	}

	// We need to wind the counter to where it should be if we failed a step,
	// so that is the number of steps + get code + setup environment + store
	// TODO(termie): remove all this "order" stuff completely
	stepCounter.Current = len(pipeline.Steps()) + 3

	if pr.Success && options.ShouldArtifacts {
		// At this point the build has effectively passed but we can still mess it
		// up by being unable to deliver the artifacts
		err = func() error {
			sr := &StepResult{
				Success:    false,
				Artifact:   nil,
				Message:    "",
				PackageURL: "",
				ExitCode:   1,
			}
			finisher := r.StartStep(shared, storeStep, stepCounter.Increment())
			defer finisher.Finish(sr)

			pr.FailedStepName = storeStep.Name()
			pr.FailedStepMessage = "Unable to store pipeline output"

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts\n",
			})

			artifact, err := pipeline.CollectArtifact(shared.containerID)
			// Ignore ErrEmptyTarball errors
			if err != util.ErrEmptyTarball {
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				e.Emit(core.Logs, &core.LogsArgs{
					Logs: fmt.Sprintf("Collecting files from %s\n", artifact.GuestPath),
				})

				ignoredDirectories := []string{".git", "node_modules", "vendor", "site-packages"}
				nameEmit := func(path string, info os.FileInfo, err error) error {
					relativePath := strings.TrimPrefix(path, artifact.HostPath)

					if info.IsDir() {
						if util.ContainsString(ignoredDirectories, info.Name()) {
							e.Emit(core.Logs, &core.LogsArgs{
								Logs: fmt.Sprintf(".%s/ (content omitted)\n", relativePath),
							})
							return filepath.SkipDir
						}
						return nil
					}

					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf(".%s\n", relativePath),
					})
					return nil
				}

				err = filepath.Walk(artifact.HostPath, nameEmit)
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				tarInfo, err := os.Stat(artifact.HostTarPath)
				if err != nil {
					if os.IsNotExist(err) {
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: "No artifacts stored",
						})
					} else {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				} else {
					size, unit := util.ConvertUnit(tarInfo.Size())
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Total artifact size: %d %s\n", size, unit),
					})
				}

				if options.ShouldStoreS3 {
					artificer := dockerlocal.NewArtificer(options, dockerOptions)
					err = artificer.Upload(artifact)
					if err != nil {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				}

				sr.PackageURL = artifact.URL()
			} else {
				e.Emit(core.Logs, &core.LogsArgs{
					Logs: "No artifacts found\n",
				})
			}

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts complete\n",
			})

			sr.Success = true
			sr.ExitCode = 0
			return nil
		}()
		if err != nil {
			pr.Success = false
			logger.WithField("Error", err).Error("Unable to store pipeline output")
		}
	} else {
		stepCounter.Increment()
	}

	// We're sending our build finished but we're not done yet,
	// now it's time to run after-steps if we have any
	if pr.Success {
		logger.Println(f.Success("Steps passed", mainTimer.String()))
		buildFinishedArgs.Result = "passed"
	}
	buildFinisher.Finish(buildFinishedArgs)
	pipelineArgs.MainSuccessful = pr.Success

	if len(pipeline.AfterSteps()) == 0 {
		// We're about to end the build, so pull the cache and explode it
		// into the CacheDir
		if !options.DirectMount {
			timer.Reset()
			err = pipeline.CollectCache(shared.containerID)
			if err != nil {
				logger.WithField("Error", err).Error("Unable to store cache")
			}
			if options.Verbose {
				logger.Printf(f.Success("Exported Cache", timer.String()))
			}
		}

		if pr.Success {
			logger.Println(f.Success("Pipeline finished", mainTimer.String()))
		} else {
			logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
		}

		if !pr.Success {
			return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
		}
		return shared, nil
	}

	pipelineArgs.RanAfterSteps = true

	logger.Println(f.Info("Starting after-steps"))
	// The container may have died, either way we'll have a fresh env
	container, err := box.Restart()
	if err != nil {
		logger.Panicln(err)
	}

	newSessCtx, newSess, err := r.GetSession(cmdCtx, container.ID)
	if err != nil {
		logger.Panicln(err)
	}

	newShared := &RunnerShared{
		box:         shared.box,
		pipeline:    shared.pipeline,
		sess:        newSess,
		sessionCtx:  newSessCtx,
		containerID: shared.containerID,
		config:      shared.config,
	}

	// Set up the base environment
	err = pipeline.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	// Add the After-Step parts
	err = pr.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	for _, step := range pipeline.AfterSteps() {
		logger.Println(f.Info("Running after-step", step.DisplayName()))
		timer.Reset()
		_, err := r.RunStep(newShared, step, stepCounter.Increment())
		if err != nil {
			logger.Println(f.Fail("After-step failed", step.DisplayName(), timer.String()))
			break
		}
		logger.Println(f.Success("After-step passed", step.DisplayName(), timer.String()))
	}

	// We're about to end the build, so pull the cache and explode it
	// into the CacheDir
	if !options.DirectMount {
		timer.Reset()
		err = pipeline.CollectCache(newShared.containerID)
		if err != nil {
			logger.WithField("Error", err).Error("Unable to store cache")
		}
		if options.Verbose {
			logger.Printf(f.Success("Exported Cache", timer.String()))
		}
	}

	if pr.Success {
		logger.Println(f.Success("Pipeline finished", mainTimer.String()))
	} else {
		logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
	}

	if !pr.Success {
		return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
	}

	pipelineArgs.AfterStepSuccessful = pr.Success

	return shared, nil
}
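// executePipeline leans heavily on a "pessimistic finisher" pattern: each
// result is initialized to failed, its Finish call is deferred immediately,
// and the result is flipped to passed only if execution reaches the happy
// path. A minimal, self-contained sketch of that pattern follows; all names
// here (sketchResult, sketchFinisher, runSketch) are hypothetical and not
// part of this package's API:
//
//	type sketchResult struct{ Outcome string }
//
//	type sketchFinisher struct{ done bool }
//
//	func (f *sketchFinisher) Finish(r *sketchResult) {
//		if f.done {
//			return // only the first Finish call counts
//		}
//		f.done = true
//		fmt.Println("result:", r.Outcome)
//	}
//
//	func runSketch(work func() error) error {
//		res := &sketchResult{Outcome: "failed"} // pessimistic default
//		fin := &sketchFinisher{}
//		defer fin.Finish(res) // fires on every exit path, even panics
//
//		if err := work(); err != nil {
//			return err // res still reports "failed"
//		}
//		res.Outcome = "passed" // overridden only on success
//		return nil
//	}
//
// This is why buildFinisher.Finish can be called explicitly mid-function
// and again via defer: the explicit call reports the final state early,
// and the deferred call becomes a no-op.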
// EnsureCode makes sure the code is in the ProjectDir.
// NOTE(termie): When launched by kiddie-pool the ProjectPath will be
// set to the location where grappler checked out the code and the copy
// will be a little superfluous, but in the case where this is being
// run in Single Player Mode this copy is necessary to avoid screwing
// with the local dir.
func (p *Runner) EnsureCode() (string, error) {
	projectDir := p.ProjectDir()
	if p.options.DirectMount {
		return projectDir, nil
	}

	// If the target is a tarball, fetch and unpack it
	if p.options.ProjectURL != "" {
		resp, err := util.FetchTarball(p.options.ProjectURL)
		if err != nil {
			return projectDir, err
		}
		err = util.Untargzip(projectDir, resp.Body)
		if err != nil {
			return projectDir, err
		}
	} else {
		// We were pointed at a path with ProjectPath, copy it to projectDir
		ignoreFiles := []string{
			p.options.WorkingDir,
		}

		oldbuilds, _ := filepath.Abs("./_builds")
		oldprojects, _ := filepath.Abs("./_projects")
		oldsteps, _ := filepath.Abs("./_steps")
		oldcache, _ := filepath.Abs("./_cache")
		oldcontainers, _ := filepath.Abs("./_containers")
		deprecatedPaths := []string{
			oldbuilds,
			oldprojects,
			oldsteps,
			oldcache,
			oldcontainers,
		}

		var err error

		// Make sure we don't accidentally recurse or copy extra files
		ignoreFunc := func(src string, files []os.FileInfo) []string {
			ignores := []string{}
			for _, file := range files {
				abspath, err := filepath.Abs(filepath.Join(src, file.Name()))
				if err != nil {
					// Something went sufficiently wrong
					panic(err)
				}
				if util.ContainsString(ignoreFiles, abspath) {
					ignores = append(ignores, file.Name())
				}

				// TODO(termie): remove this warning after a while
				if util.ContainsString(deprecatedPaths, abspath) {
					p.logger.Warnln(fmt.Sprintf("Not ignoring deprecated runtime path, %s. You probably want to delete it so it doesn't get copied into your container. Runtime files are now stored under '.wercker' by default. This message will go away in a future update.", file.Name()))
				}
			}
			return ignores
		}
		copyOpts := &shutil.CopyTreeOptions{Ignore: ignoreFunc, CopyFunction: shutil.Copy}
		// Move any existing projectDir out of the way; the error is
		// deliberately ignored since the directory may not exist yet.
		os.Rename(projectDir, fmt.Sprintf("%s-%s", projectDir, uuid.NewRandom().String()))
		err = shutil.CopyTree(p.options.ProjectPath, projectDir, copyOpts)
		if err != nil {
			return projectDir, err
		}
	}
	return projectDir, nil
}
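// The ignore callback passed to shutil.CopyTree above follows go-shutil's
// convention: for each directory visited it receives the directory path and
// its entries, and returns the base names to skip. A minimal sketch of using
// that hook in isolation; the paths and skipList here are hypothetical (the
// real code derives its list from the runner options):
//
//	skipList := []string{"/tmp/project/.wercker"}
//	ignore := func(src string, files []os.FileInfo) []string {
//		ignores := []string{}
//		for _, file := range files {
//			abspath, err := filepath.Abs(filepath.Join(src, file.Name()))
//			if err != nil {
//				panic(err) // resolution failure is unrecoverable here
//			}
//			if util.ContainsString(skipList, abspath) {
//				ignores = append(ignores, file.Name())
//			}
//		}
//		return ignores
//	}
//	opts := &shutil.CopyTreeOptions{Ignore: ignore, CopyFunction: shutil.Copy}
//	err := shutil.CopyTree("/tmp/project", "/tmp/dest", opts)
//
// Returning base names (not absolute paths) matters: CopyTree matches the
// returned strings against each entry's name within the directory being
// copied, which is why EnsureCode resolves entries to absolute paths first
// and only then maps matches back to file.Name().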