// CopySource links the source into the HostPath staging area.
func (p *Runner) CopySource() error {
	timer := util.NewTimer()
	f := p.formatter

	err := os.MkdirAll(p.options.HostPath(), 0755)
	if err != nil {
		return err
	}

	// Link the path to BuildPath("latest") for easy access
	err = os.RemoveAll(p.options.BuildPath("latest"))
	if err != nil {
		return err
	}
	err = os.Symlink(p.options.HostPath(), p.options.BuildPath("latest"))
	if err != nil {
		return err
	}

	err = os.Symlink(p.ProjectDir(), p.options.HostPath("source"))
	if err != nil {
		return err
	}

	if p.options.Verbose {
		p.logger.Printf(f.Success("Source -> Staging Area", timer.String()))
	}
	return nil
}
// SetupGuest ensures that the guest is prepared to run the pipeline.
func (p *BasePipeline) SetupGuest(sessionCtx context.Context, sess *Session) error {
	sess.HideLogs()
	defer sess.ShowLogs()

	timer := util.NewTimer()
	f := &util.Formatter{p.options.GlobalOptions.ShowColors}

	cmds := []string{}

	if !p.options.DirectMount {
		cmds = append(cmds,
			// Make sure our guest path exists
			fmt.Sprintf(`mkdir -p "%s"`, p.options.GuestPath()),
			// Make sure our base path exists
			fmt.Sprintf(`rm -rf "%s"`, filepath.Dir(p.options.BasePath())),
			fmt.Sprintf(`mkdir -p "%s"`, filepath.Dir(p.options.BasePath())),
			// Copy the source from the mounted directory to the base path
			fmt.Sprintf(`cp -r "%s" "%s"`, p.options.MntPath("source"), p.options.BasePath()),
			// Copy the cache from the mounted directory to the pipeline dir
			fmt.Sprintf(`cp -r "%s" "%s"`, p.options.MntPath("cache"), p.options.GuestPath("cache")),
		)
	}

	// Make sure the output path exists
	cmds = append(cmds, fmt.Sprintf(`mkdir -p "%s"`, p.options.GuestPath("output")))
	cmds = append(cmds, fmt.Sprintf(`chmod a+rx "%s"`, p.options.BasePath()))

	p.logger.Printf(f.Info("Copying source to container"))
	for _, cmd := range cmds {
		exit, _, err := sess.SendChecked(sessionCtx, cmd)
		if err != nil {
			return err
		}
		if exit != 0 {
			return fmt.Errorf("Guest command failed with exit code %d: %s", exit, cmd)
		}
	}

	if p.options.Verbose {
		p.logger.Printf(f.Success("Source+Cache -> Guest", timer.String()))
	}
	return nil
}
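// SetupGuest above only assembles plain shell strings and feeds them through
// the session one at a time. The following is a minimal, self-contained
// sketch of that command construction, using hypothetical hard-coded paths in
// place of the runner's GuestPath/BasePath/MntPath helpers; it only prints
// the commands rather than executing them inside a container.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical stand-ins for p.options.GuestPath(), BasePath(), MntPath().
	guestPath := "/pipeline"
	basePath := "/pipeline/source"
	mntPath := "/mnt"

	cmds := []string{
		fmt.Sprintf(`mkdir -p "%s"`, guestPath),
		fmt.Sprintf(`rm -rf "%s"`, filepath.Dir(basePath)),
		fmt.Sprintf(`mkdir -p "%s"`, filepath.Dir(basePath)),
		fmt.Sprintf(`cp -r "%s" "%s"`, filepath.Join(mntPath, "source"), basePath),
		fmt.Sprintf(`cp -r "%s" "%s"`, filepath.Join(mntPath, "cache"), filepath.Join(guestPath, "cache")),
		fmt.Sprintf(`mkdir -p "%s"`, filepath.Join(guestPath, "output")),
		fmt.Sprintf(`chmod a+rx "%s"`, basePath),
	}
	for _, cmd := range cmds {
		fmt.Println(cmd) // SetupGuest would send each through sess.SendChecked
	}
}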
// AddServices fetches and links the services to the base box.
func (p *Runner) AddServices(ctx context.Context, pipeline core.Pipeline, box core.Box) error {
	f := p.formatter
	timer := util.NewTimer()
	for _, service := range pipeline.Services() {
		timer.Reset()
		if _, err := service.Fetch(ctx, pipeline.Env()); err != nil {
			return err
		}

		box.AddService(service)
		if p.options.Verbose {
			p.logger.Printf(f.Success(fmt.Sprintf("Fetched %s", service.GetName()), timer.String()))
		}
		// TODO(mh): We want to make sure the container is fully running before
		// allowing build steps to run. We may need custom steps which block
		// until the services are running.
	}
	return nil
}
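// The TODO above mentions blocking build steps until services are actually
// running. A hypothetical sketch of one way such a wait could look, polling a
// TCP endpoint until it accepts connections; the address, timeouts, and
// waitForService helper are invented for illustration and none of this exists
// in the runner today.
package main

import (
	"fmt"
	"net"
	"time"
)

// waitForService dials addr repeatedly until it connects or the deadline passes.
func waitForService(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("service at %s not ready after %s", addr, timeout)
}

func main() {
	// e.g. a linked database service exposed by the service container
	if err := waitForService("127.0.0.1:5432", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}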
// CopyCache links the cache into the HostPath staging area.
func (p *Runner) CopyCache() error {
	timer := util.NewTimer()
	f := p.formatter

	err := os.MkdirAll(p.options.CachePath(), 0755)
	if err != nil {
		return err
	}

	err = os.Symlink(p.options.CachePath(), p.options.HostPath("cache"))
	if err != nil {
		return err
	}

	if p.options.Verbose {
		p.logger.Printf(f.Success("Cache -> Staging Area", timer.String()))
	}
	return nil
}
func executePipeline(cmdCtx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions, getter pipelineGetter) (*RunnerShared, error) {
	// Boilerplate
	soft := NewSoftExit(options.GlobalOptions)
	logger := util.RootLogger().WithField("Logger", "Main")
	e, err := core.EmitterFromContext(cmdCtx)
	if err != nil {
		return nil, err
	}
	f := &util.Formatter{options.GlobalOptions.ShowColors}

	// Set up the runner
	r, err := NewRunner(cmdCtx, options, dockerOptions, getter)
	if err != nil {
		return nil, err
	}

	// Main timer
	mainTimer := util.NewTimer()
	timer := util.NewTimer()

	// These will be emitted at the end of the execution, we're going to be
	// pessimistic and report that we failed, unless overridden at the end of the
	// execution.
	fullPipelineFinisher := r.StartFullPipeline(options)
	pipelineArgs := &core.FullPipelineFinishedArgs{}
	defer fullPipelineFinisher.Finish(pipelineArgs)

	buildFinisher := r.StartBuild(options)
	buildFinishedArgs := &core.BuildFinishedArgs{Box: nil, Result: "failed"}
	defer buildFinisher.Finish(buildFinishedArgs)

	// Debug information
	DumpOptions(options)

	// Do some sanity checks before starting
	err = dockerlocal.RequireDockerEndpoint(dockerOptions)
	if err != nil {
		return nil, soft.Exit(err)
	}

	// Start copying code
	logger.Println(f.Info("Executing pipeline"))
	timer.Reset()
	_, err = r.EnsureCode()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	err = r.CleanupOldBuilds()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
	}
	if options.Verbose {
		logger.Printf(f.Success("Copied working dir", timer.String()))
	}

	// Setup environment is still a fairly special step, it needs
	// to start our boxes and get everything set up
	logger.Println(f.Info("Running step", "setup environment"))
	timer.Reset()
	shared, err := r.SetupEnvironment(cmdCtx)
	if shared.box != nil {
		if options.ShouldRemove {
			defer shared.box.Clean()
		}
		defer shared.box.Stop()
	}
	if err != nil {
		logger.Errorln(f.Fail("Step failed", "setup environment", timer.String()))
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	if options.Verbose {
		logger.Printf(f.Success("Step passed", "setup environment", timer.String()))
	}

	// Expand our context object
	box := shared.box
	buildFinishedArgs.Box = box
	pipeline := shared.pipeline
	repoName := pipeline.DockerRepo()
	tag := pipeline.DockerTag()
	message := pipeline.DockerMessage()

	shouldStore := options.ShouldArtifacts

	// TODO(termie): hack for now, probably can be made into a naive class
	var storeStep core.Step
	if shouldStore {
		storeStep = &core.ExternalStep{
			BaseStep: core.NewBaseStep(core.BaseStepOptions{
				Name:    "store",
				Owner:   "wercker",
				Version: util.Version(),
			}),
		}
	}

	e.Emit(core.BuildStepsAdded, &core.BuildStepsAddedArgs{
		Build:      pipeline,
		Steps:      pipeline.Steps(),
		StoreStep:  storeStep,
		AfterSteps: pipeline.AfterSteps(),
	})

	pr := &core.PipelineResult{
		Success:           true,
		FailedStepName:    "",
		FailedStepMessage: "",
	}

	// stepCounter starts at 3; step 1 is "get code", step 2 is "setup
	// environment".
	stepCounter := &util.Counter{Current: 3}
	checkpoint := false
	for _, step := range pipeline.Steps() {
		// we always want to run the wercker-init step to provide some functions
		if !checkpoint && stepCounter.Current > 3 {
			if options.EnableDevSteps && options.Checkpoint != "" {
				logger.Printf(f.Info("Skipping step", step.DisplayName()))
				// start at the one after the checkpoint
				if step.Checkpoint() == options.Checkpoint {
					logger.Printf(f.Info("Found checkpoint", options.Checkpoint))
					checkpoint = true
				}
				stepCounter.Increment()
				continue
			}
		}
		logger.Printf(f.Info("Running step", step.DisplayName()))
		timer.Reset()
		sr, err := r.RunStep(shared, step, stepCounter.Increment())
		if err != nil {
			pr.Success = false
			pr.FailedStepName = step.DisplayName()
			pr.FailedStepMessage = sr.Message
			logger.Printf(f.Fail("Step failed", step.DisplayName(), timer.String()))
			break
		}

		if options.EnableDevSteps && step.Checkpoint() != "" {
			logger.Printf(f.Info("Checkpointing", step.Checkpoint()))
			box.Commit(box.Repository(), fmt.Sprintf("w-%s", step.Checkpoint()), "checkpoint", false)
		}

		if options.Verbose {
			logger.Printf(f.Success("Step passed", step.DisplayName(), timer.String()))
		}
	}

	if options.ShouldCommit {
		_, err = box.Commit(repoName, tag, message, true)
		if err != nil {
			logger.Errorln("Failed to commit:", err.Error())
		}
	}

	// We need to wind the counter to where it should be if we failed a step,
	// that is: the number of steps + get code + setup environment + store.
	// TODO(termie): remove all this "order" stuff completely
	stepCounter.Current = len(pipeline.Steps()) + 3

	if pr.Success && options.ShouldArtifacts {
		// At this point the build has effectively passed but we can still mess it
		// up by being unable to deliver the artifacts
		err = func() error {
			sr := &StepResult{
				Success:    false,
				Artifact:   nil,
				Message:    "",
				PackageURL: "",
				ExitCode:   1,
			}
			finisher := r.StartStep(shared, storeStep, stepCounter.Increment())
			defer finisher.Finish(sr)

			pr.FailedStepName = storeStep.Name()
			pr.FailedStepMessage = "Unable to store pipeline output"

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts\n",
			})

			artifact, err := pipeline.CollectArtifact(shared.containerID)
			// Ignore ErrEmptyTarball errors
			if err != util.ErrEmptyTarball {
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				e.Emit(core.Logs, &core.LogsArgs{
					Logs: fmt.Sprintf("Collecting files from %s\n", artifact.GuestPath),
				})

				ignoredDirectories := []string{".git", "node_modules", "vendor", "site-packages"}
				nameEmit := func(path string, info os.FileInfo, err error) error {
					relativePath := strings.TrimPrefix(path, artifact.HostPath)

					if info.IsDir() {
						if util.ContainsString(ignoredDirectories, info.Name()) {
							e.Emit(core.Logs, &core.LogsArgs{
								Logs: fmt.Sprintf(".%s/ (content omitted)\n", relativePath),
							})
							return filepath.SkipDir
						}
						return nil
					}

					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf(".%s\n", relativePath),
					})
					return nil
				}

				err = filepath.Walk(artifact.HostPath, nameEmit)
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				tarInfo, err := os.Stat(artifact.HostTarPath)
				if err != nil {
					if os.IsNotExist(err) {
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: "No artifacts stored",
						})
					} else {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				} else {
					size, unit := util.ConvertUnit(tarInfo.Size())
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Total artifact size: %d %s\n", size, unit),
					})
				}

				if options.ShouldStoreS3 {
					artificer := dockerlocal.NewArtificer(options, dockerOptions)
					err = artificer.Upload(artifact)
					if err != nil {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				}

				sr.PackageURL = artifact.URL()
			} else {
				e.Emit(core.Logs, &core.LogsArgs{
					Logs: "No artifacts found\n",
				})
			}

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts complete\n",
			})
			sr.Success = true
			sr.ExitCode = 0

			return nil
		}()
		if err != nil {
			pr.Success = false
			logger.WithField("Error", err).Error("Unable to store pipeline output")
		}
	} else {
		stepCounter.Increment()
	}

	// We're sending our build finished but we're not done yet,
	// now is the time to run after-steps if we have any
	if pr.Success {
		logger.Println(f.Success("Steps passed", mainTimer.String()))
		buildFinishedArgs.Result = "passed"
	}
	buildFinisher.Finish(buildFinishedArgs)
	pipelineArgs.MainSuccessful = pr.Success

	if len(pipeline.AfterSteps()) == 0 {
		// We're about to end the build, so pull the cache and explode it
		// into the CacheDir
		if !options.DirectMount {
			timer.Reset()
			err = pipeline.CollectCache(shared.containerID)
			if err != nil {
				logger.WithField("Error", err).Error("Unable to store cache")
			}
			if options.Verbose {
				logger.Printf(f.Success("Exported Cache", timer.String()))
			}
		}

		if pr.Success {
			logger.Println(f.Success("Pipeline finished", mainTimer.String()))
		} else {
			logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
			return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
		}
		return shared, nil
	}

	pipelineArgs.RanAfterSteps = true

	logger.Println(f.Info("Starting after-steps"))
	// The container may have died, either way we'll have a fresh env
	container, err := box.Restart()
	if err != nil {
		logger.Panicln(err)
	}

	newSessCtx, newSess, err := r.GetSession(cmdCtx, container.ID)
	if err != nil {
		logger.Panicln(err)
	}

	newShared := &RunnerShared{
		box:         shared.box,
		pipeline:    shared.pipeline,
		sess:        newSess,
		sessionCtx:  newSessCtx,
		containerID: shared.containerID,
		config:      shared.config,
	}

	// Set up the base environment
	err = pipeline.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	// Add the After-Step parts
	err = pr.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	for _, step := range pipeline.AfterSteps() {
		logger.Println(f.Info("Running after-step", step.DisplayName()))
		timer.Reset()
		_, err := r.RunStep(newShared, step, stepCounter.Increment())
		if err != nil {
			logger.Println(f.Fail("After-step failed", step.DisplayName(), timer.String()))
			break
		}
		logger.Println(f.Success("After-step passed", step.DisplayName(), timer.String()))
	}

	// We're about to end the build, so pull the cache and explode it
	// into the CacheDir
	if !options.DirectMount {
		timer.Reset()
		err = pipeline.CollectCache(newShared.containerID)
		if err != nil {
			logger.WithField("Error", err).Error("Unable to store cache")
		}
		if options.Verbose {
			logger.Printf(f.Success("Exported Cache", timer.String()))
		}
	}

	if pr.Success {
		logger.Println(f.Success("Pipeline finished", mainTimer.String()))
	} else {
		logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
		return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
	}

	pipelineArgs.AfterStepSuccessful = pr.Success

	return shared, nil
}
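// executePipeline above leans on a "pessimistic defer" pattern: the finisher
// args default to failure and the deferred Finish reports whatever state they
// hold when the function unwinds, so every early return is reported without
// extra bookkeeping. A self-contained sketch of the pattern with hypothetical
// types; the real finishers and args live in core and carry much more state.
package main

import "fmt"

// finisher stands in for the emitter-backed finishers returned by StartBuild.
type finisher struct{ name string }

func (f *finisher) Finish(result *string) {
	fmt.Printf("%s finished: %s\n", f.name, *result)
}

func run(fail bool) error {
	result := "failed" // pessimistic default, overridden only on success
	f := &finisher{name: "build"}
	defer f.Finish(&result)

	if fail {
		// Any early return still reports "failed" via the deferred Finish.
		return fmt.Errorf("step failed")
	}

	result = "passed"
	return nil
}

func main() {
	run(true)  // prints: build finished: failed
	run(false) // prints: build finished: passed
}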
// SetupEnvironment does a lot of boilerplate legwork and returns a pipeline,
// box, and session. This is a bit of a long method, but it is pretty much
// the entire "Setup Environment" step.
func (p *Runner) SetupEnvironment(runnerCtx context.Context) (*RunnerShared, error) {
	shared := &RunnerShared{}
	f := &util.Formatter{p.options.GlobalOptions.ShowColors}
	timer := util.NewTimer()

	sr := &StepResult{
		Success:  false,
		Artifact: nil,
		Message:  "",
		ExitCode: 1,
	}

	setupEnvironmentStep := &core.ExternalStep{
		BaseStep: core.NewBaseStep(core.BaseStepOptions{
			Name:    "setup environment",
			Owner:   "wercker",
			Version: util.Version(),
		}),
	}

	finisher := p.StartStep(shared, setupEnvironmentStep, 2)
	defer finisher.Finish(sr)

	if p.options.Verbose {
		p.emitter.Emit(core.Logs, &core.LogsArgs{
			Logs: fmt.Sprintf("Running wercker version: %s\n", util.FullVersion()),
		})
	}

	p.logger.Debugln("Application:", p.options.ApplicationName)

	// Grab our config
	rawConfig, stringConfig, err := p.GetConfig()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.config = rawConfig
	sr.WerckerYamlContents = stringConfig

	// Init the pipeline
	pipeline, err := p.GetPipeline(rawConfig)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	pipeline.InitEnv(p.options.HostEnv)
	shared.pipeline = pipeline

	if p.options.Verbose {
		p.emitter.Emit(core.Logs, &core.LogsArgs{
			Logs: fmt.Sprintf("Using config:\n%s\n", stringConfig),
		})
	}

	// Fetch the box
	timer.Reset()
	box := pipeline.Box()
	_, err = box.Fetch(runnerCtx, pipeline.Env())
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	// TODO(termie): dump some logs about the image
	shared.box = box
	if p.options.Verbose {
		p.logger.Printf(f.Success(fmt.Sprintf("Fetched %s", box.GetName()), timer.String()))
	}

	// Fetch the services and add them to the box
	if err := p.AddServices(runnerCtx, pipeline, box); err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	// Start setting up the pipeline dir
	p.logger.Debugln("Copying source to build directory")
	err = p.CopySource()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	// ... and the cache dir
	p.logger.Debugln("Copying cache to build directory")
	err = p.CopyCache()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	p.logger.Debugln("Steps:", len(pipeline.Steps()))

	// Fetch the steps
	steps := pipeline.Steps()
	for _, step := range steps {
		timer.Reset()
		if _, err := step.Fetch(); err != nil {
			sr.Message = err.Error()
			return shared, err
		}
		if p.options.Verbose {
			p.logger.Printf(f.Success("Prepared step", step.Name(), timer.String()))
		}
	}

	// ... and the after-steps
	afterSteps := pipeline.AfterSteps()
	for _, step := range afterSteps {
		timer.Reset()
		if _, err := step.Fetch(); err != nil {
			sr.Message = err.Error()
			return shared, err
		}
		if p.options.Verbose {
			p.logger.Printf(f.Success("Prepared step", step.Name(), timer.String()))
		}
	}

	// Boot up our main container, it will run the services
	container, err := box.Run(runnerCtx, pipeline.Env())
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.containerID = container.ID

	// Register our signal handler to clean the box up
	// NOTE(termie): we're expecting that this is going to be the last handler
	//               to be run since it calls exit, in the future we might be
	//               able to do something like close the calling context and
	//               short circuit / let the rest of things play out
	boxCleanupHandler := &util.SignalHandler{
		ID: "box-cleanup",
		F: func() bool {
			p.logger.Errorln("Interrupt detected, cleaning up containers and shutting down")
			box.Stop()
			if p.options.ShouldRemove {
				box.Clean()
			}
			os.Exit(1)
			return true
		},
	}
	util.GlobalSigint().Add(boxCleanupHandler)
	util.GlobalSigterm().Add(boxCleanupHandler)

	p.logger.Debugln("Attaching session to base box")
	// Start our session
	sessionCtx, sess, err := p.GetSession(runnerCtx, container.ID)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.sess = sess
	shared.sessionCtx = sessionCtx

	// Some helpful logging
	pipeline.LogEnvironment()

	p.logger.Debugln("Setting up guest (base box)")
	err = pipeline.SetupGuest(sessionCtx, sess)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	err = pipeline.ExportEnvironment(sessionCtx, sess)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	sr.Message = ""
	sr.Success = true
	sr.ExitCode = 0

	return shared, nil
}
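// SetupEnvironment registers its cleanup handler through util.GlobalSigint
// and util.GlobalSigterm. A minimal sketch of the kind of plumbing such
// helpers presumably wrap, using os/signal directly with a hypothetical
// stopBox cleanup function; the real handler registry supports multiple
// handlers with IDs and ordering, which this sketch does not attempt.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func stopBox() {
	// Hypothetical stand-in for box.Stop() / box.Clean().
	fmt.Println("Interrupt detected, cleaning up containers and shutting down")
}

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		stopBox()
		os.Exit(1)
	}()

	// ... the pipeline's main work would continue here; block for the sketch.
	select {}
}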