Example #1: GetApp
func GetApp() *cli.App {
	// logger.SetLevel(logger.DebugLevel)
	// util.RootLogger().SetLevel("debug")
	// util.RootLogger().Formatter = &logger.JSONFormatter{}

	app := cli.NewApp()
	setupUsageFormatter(app)
	app.Author = "Team wercker"
	app.Name = "wercker"
	app.Usage = "build and deploy from the command line"
	app.Email = "*****@*****.**"
	app.Version = util.FullVersion()
	app.Flags = FlagsFor(GlobalFlagSet)
	app.Commands = []cli.Command{
		buildCommand,
		devCommand,
		checkConfigCommand,
		deployCommand,
		detectCommand,
		// inspectCommand,
		loginCommand,
		logoutCommand,
		pullCommand,
		versionCommand,
		documentCommand(app),
	}
	app.Before = func(ctx *cli.Context) error {
		if ctx.GlobalBool("debug") {
			util.RootLogger().Formatter = &util.VerboseFormatter{}
			util.RootLogger().SetLevel("debug")
		} else {
			util.RootLogger().Formatter = &util.TerseFormatter{}
			util.RootLogger().SetLevel("info")
		}
		if ctx.GlobalBool("journal") {
			util.RootLogger().Hooks.Add(&journalhook.JournalHook{})
			util.RootLogger().Out = ioutil.Discard
		}
		// Register the global signal handler
		util.GlobalSigint().Register(os.Interrupt)
		util.GlobalSigterm().Register(unix.SIGTERM)
		return nil
	}
	return app
}
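
A minimal entry point wiring this up might look like the sketch below. It assumes only the standard codegangsta/cli (urfave/cli v1) API that GetApp already uses, and that main lives alongside GetApp; it is an illustration, not part of the wercker source.

package main

import "os"

func main() {
	// GetApp returns the fully configured *cli.App from Example #1;
	// Run parses os.Args and dispatches to the matching subcommand.
	app := GetApp()
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}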
Example #2: executePipeline
func executePipeline(cmdCtx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions, getter pipelineGetter) (*RunnerShared, error) {
	// Boilerplate
	soft := NewSoftExit(options.GlobalOptions)
	logger := util.RootLogger().WithFields(util.LogFields{
		"Logger": "Main",
		"RunID":  options.RunID,
	})
	e, err := core.EmitterFromContext(cmdCtx)
	if err != nil {
		return nil, err
	}
	f := &util.Formatter{options.GlobalOptions.ShowColors}

	// Set up the runner
	r, err := NewRunner(cmdCtx, options, dockerOptions, getter)
	if err != nil {
		return nil, err
	}

	// Main timer
	mainTimer := util.NewTimer()
	timer := util.NewTimer()

	// These will be emitted at the end of the execution; we're going to be
	// pessimistic and report that we failed, unless overridden at the end of
	// the execution.
	fullPipelineFinisher := r.StartFullPipeline(options)
	pipelineArgs := &core.FullPipelineFinishedArgs{}
	defer fullPipelineFinisher.Finish(pipelineArgs)

	buildFinisher := r.StartBuild(options)
	buildFinishedArgs := &core.BuildFinishedArgs{Box: nil, Result: "failed"}
	defer buildFinisher.Finish(buildFinishedArgs)

	// Debug information
	DumpOptions(options)

	// Do some sanity checks before starting
	err = dockerlocal.RequireDockerEndpoint(dockerOptions)
	if err != nil {
		return nil, soft.Exit(err)
	}

	// Make sure that "include-file" is read from the config file before copying code
	r.GetConfig()

	// Start copying code
	logger.Println(f.Info("Executing pipeline"))
	timer.Reset()
	_, err = r.EnsureCode()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	err = r.CleanupOldBuilds()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
	}
	if options.Verbose {
		logger.Printf(f.Success("Copied working dir", timer.String()))
	}

	// Setup environment is still a fairly special step; it needs
	// to start our boxes and get everything set up
	logger.Println(f.Info("Running step", "setup environment"))
	timer.Reset()
	shared, err := r.SetupEnvironment(cmdCtx)
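	// SetupEnvironment always returns a non-nil shared (see Example #3), so
	// check the box before the error: a box may have been started even if
	// setup failed partway through, and it still needs to be cleaned up.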
	if shared.box != nil {
		if options.ShouldRemove {
			defer shared.box.Clean()
		}
		defer shared.box.Stop()
	}
	if err != nil {
		logger.Errorln(f.Fail("Step failed", "setup environment", timer.String()))
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	if options.Verbose {
		logger.Printf(f.Success("Step passed", "setup environment", timer.String()))
	}

	// Once SetupEnvironment has finished we want to register some signal
	// handlers to emit "step ended" if we get killed but aren't fast enough
	// at cleaning up the containers before our grace period ends.
	// Signals are processed LIFO, so we want to register this after the
	// box cleanup
	buildFailedHandler := &util.SignalHandler{
		ID: "build-failed",
		F: func() bool {
			logger.Errorln("Interrupt detected, sending build / pipeline failed")
			fullPipelineFinisher.Finish(pipelineArgs)
			buildFinisher.Finish(buildFinishedArgs)
			return true
		},
	}
	util.GlobalSigint().Add(buildFailedHandler)
	util.GlobalSigterm().Add(buildFailedHandler)

	// Expand our context object
	box := shared.box
	buildFinishedArgs.Box = box
	pipeline := shared.pipeline
	repoName := pipeline.DockerRepo()
	tag := pipeline.DockerTag()
	message := pipeline.DockerMessage()

	shouldStore := options.ShouldArtifacts

	// TODO(termie): hack for now, probably can be made into a naive class
	var storeStep core.Step

	if shouldStore {
		storeStep = &core.ExternalStep{
			BaseStep: core.NewBaseStep(core.BaseStepOptions{
				Name:    "store",
				Owner:   "wercker",
				Version: util.Version(),
				SafeID:  "store",
			}),
		}
	}

	e.Emit(core.BuildStepsAdded, &core.BuildStepsAddedArgs{
		Build:      pipeline,
		Steps:      pipeline.Steps(),
		StoreStep:  storeStep,
		AfterSteps: pipeline.AfterSteps(),
	})

	pr := &core.PipelineResult{
		Success:           true,
		FailedStepName:    "",
		FailedStepMessage: "",
	}

	// stepCounter starts at 3, step 1 is "get code", step 2 is "setup
	// environment".
	stepCounter := &util.Counter{Current: 3}
	checkpoint := false
	for _, step := range pipeline.Steps() {
		// we always want to run the wercker-init step to provide some functions
		if !checkpoint && stepCounter.Current > 3 {
			if options.EnableDevSteps && options.Checkpoint != "" {
				logger.Printf(f.Info("Skipping step", step.DisplayName()))
				// start at the one after the checkpoint
				if step.Checkpoint() == options.Checkpoint {
					logger.Printf(f.Info("Found checkpoint", options.Checkpoint))
					checkpoint = true
				}
				stepCounter.Increment()
				continue
			}
		}
		logger.Printf(f.Info("Running step", step.DisplayName()))
		timer.Reset()
		sr, err := r.RunStep(shared, step, stepCounter.Increment())
		if err != nil {
			pr.Success = false
			pr.FailedStepName = step.DisplayName()
			pr.FailedStepMessage = sr.Message
			logger.Printf(f.Fail("Step failed", step.DisplayName(), timer.String()))
			break
		}

		if options.EnableDevSteps && step.Checkpoint() != "" {
			logger.Printf(f.Info("Checkpointing", step.Checkpoint()))
			box.Commit(box.Repository(), fmt.Sprintf("w-%s", step.Checkpoint()), "checkpoint", false)
		}

		if options.Verbose {
			logger.Printf(f.Success("Step passed", step.DisplayName(), timer.String()))
		}
	}

	if options.ShouldCommit {
		_, err = box.Commit(repoName, tag, message, true)
		if err != nil {
			logger.Errorln("Failed to commit:", err.Error())
		}
	}

	// We need to wind the counter to where it should be if we failed a step,
	// so that is the number of steps + get code + setup environment + store
	// TODO(termie): remove all this "order" stuff completely
	stepCounter.Current = len(pipeline.Steps()) + 3

	if pr.Success && options.ShouldArtifacts {
		// At this point the build has effectively passed but we can still mess it
		// up by being unable to deliver the artifacts

		err = func() error {
			sr := &StepResult{
				Success:    false,
				Artifact:   nil,
				Message:    "",
				PackageURL: "",
				ExitCode:   1,
			}
			finisher := r.StartStep(shared, storeStep, stepCounter.Increment())
			defer finisher.Finish(sr)

			pr.FailedStepName = storeStep.Name()
			pr.FailedStepMessage = "Unable to store pipeline output"

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts\n",
			})

			artifact, err := pipeline.CollectArtifact(shared.containerID)
			// Ignore ErrEmptyTarball errors
			if err != util.ErrEmptyTarball {
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				e.Emit(core.Logs, &core.LogsArgs{
					Logs: fmt.Sprintf("Collecting files from %s\n", artifact.GuestPath),
				})

				ignoredDirectories := []string{".git", "node_modules", "vendor", "site-packages"}
				nameEmit := func(path string, info os.FileInfo, err error) error {
					relativePath := strings.TrimPrefix(path, artifact.HostPath)
					if info == nil {
						return nil
					}

					if info.IsDir() {
						if util.ContainsString(ignoredDirectories, info.Name()) {
							e.Emit(core.Logs, &core.LogsArgs{
								Logs: fmt.Sprintf(".%s/ (content omitted)\n", relativePath),
							})
							return filepath.SkipDir
						}

						return nil
					}

					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf(".%s\n", relativePath),
					})

					return nil
				}

				err = filepath.Walk(artifact.HostPath, nameEmit)
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				tarInfo, err := os.Stat(artifact.HostTarPath)
				if err != nil {
					if os.IsNotExist(err) {
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: "No artifacts stored",
						})
					} else {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				} else {
					size, unit := util.ConvertUnit(tarInfo.Size())
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Total artifact size: %d %s\n", size, unit),
					})
				}

				if options.ShouldStoreS3 {
					artificer := dockerlocal.NewArtificer(options, dockerOptions)
					err = artificer.Upload(artifact)
					if err != nil {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				}

				sr.PackageURL = artifact.URL()
			} else {
				e.Emit(core.Logs, &core.LogsArgs{
					Logs: "No artifacts found\n",
				})
			}

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts complete\n",
			})

			sr.Success = true
			sr.ExitCode = 0

			return nil
		}()
		if err != nil {
			pr.Success = false
			logger.WithField("Error", err).Error("Unable to store pipeline output")
		}
	} else {
		stepCounter.Increment()
	}

	// We're sending our build finished event, but we're not done yet;
	// now it's time to run after-steps if we have any
	if pr.Success {
		logger.Println(f.Success("Steps passed", mainTimer.String()))
		buildFinishedArgs.Result = "passed"
	}
	buildFinisher.Finish(buildFinishedArgs)
	pipelineArgs.MainSuccessful = pr.Success

	if len(pipeline.AfterSteps()) == 0 {
		// We're about to end the build, so pull the cache and explode it
		// into the CacheDir
		if !options.DirectMount {
			timer.Reset()
			err = pipeline.CollectCache(shared.containerID)
			if err != nil {
				logger.WithField("Error", err).Error("Unable to store cache")
			}
			if options.Verbose {
				logger.Printf(f.Success("Exported Cache", timer.String()))
			}
		}

		if pr.Success {
			logger.Println(f.Success("Pipeline finished", mainTimer.String()))
		} else {
			logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
		}

		if !pr.Success {
			return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
		}
		return shared, nil
	}

	pipelineArgs.RanAfterSteps = true

	logger.Println(f.Info("Starting after-steps"))
	// The container may have died; either way we'll have a fresh env
	container, err := box.Restart()
	if err != nil {
		logger.Panicln(err)
	}

	newSessCtx, newSess, err := r.GetSession(cmdCtx, container.ID)
	if err != nil {
		logger.Panicln(err)
	}

	newShared := &RunnerShared{
		box:         shared.box,
		pipeline:    shared.pipeline,
		sess:        newSess,
		sessionCtx:  newSessCtx,
		containerID: shared.containerID,
		config:      shared.config,
	}

	// Set up the base environment
	err = pipeline.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	// Add the After-Step parts
	err = pr.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	for _, step := range pipeline.AfterSteps() {
		logger.Println(f.Info("Running after-step", step.DisplayName()))
		timer.Reset()
		_, err := r.RunStep(newShared, step, stepCounter.Increment())
		if err != nil {
			logger.Println(f.Fail("After-step failed", step.DisplayName(), timer.String()))
			break
		}
		logger.Println(f.Success("After-step passed", step.DisplayName(), timer.String()))
	}

	// We're about to end the build, so pull the cache and explode it
	// into the CacheDir
	if !options.DirectMount {
		timer.Reset()
		err = pipeline.CollectCache(newShared.containerID)
		if err != nil {
			logger.WithField("Error", err).Error("Unable to store cache")
		}
		if options.Verbose {
			logger.Printf(f.Success("Exported Cache", timer.String()))
		}
	}

	if pr.Success {
		logger.Println(f.Success("Pipeline finished", mainTimer.String()))
	} else {
		logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
	}

	if !pr.Success {
		return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
	}

	pipelineArgs.AfterStepSuccessful = pr.Success

	return shared, nil
}
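
Note the deferred finishers at the top of executePipeline: the finished-args default to a failed result and are only flipped on the success path, so any early return still emits a result. A self-contained sketch of that pessimistic-finisher pattern (the names below are illustrative, not the wercker API):

package main

import (
	"errors"
	"fmt"
)

type result struct{ Status string }

// runWithFinisher reports "failed" unless the work completes, mirroring
// how buildFinishedArgs above defaults to Result: "failed".
func runWithFinisher(work func() error) {
	res := &result{Status: "failed"} // pessimistic default
	defer func() { fmt.Println("finished:", res.Status) }()

	if err := work(); err != nil {
		return // the deferred finisher still fires and reports "failed"
	}
	res.Status = "passed" // flipped only on the success path
}

func main() {
	runWithFinisher(func() error { return nil })                      // finished: passed
	runWithFinisher(func() error { return errors.New("step broke") }) // finished: failed
}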
Example #3: Runner.SetupEnvironment
// SetupEnvironment does a lot of boilerplate legwork and returns a pipeline,
// box, and session. This is a bit of a long method, but it is pretty much
// the entire "Setup Environment" step.
func (p *Runner) SetupEnvironment(runnerCtx context.Context) (*RunnerShared, error) {
	shared := &RunnerShared{}
	f := &util.Formatter{p.options.GlobalOptions.ShowColors}
	timer := util.NewTimer()

	sr := &StepResult{
		Success:  false,
		Artifact: nil,
		Message:  "",
		ExitCode: 1,
	}

	setupEnvironmentStep := &core.ExternalStep{
		BaseStep: core.NewBaseStep(core.BaseStepOptions{
			Name:    "setup environment",
			Owner:   "wercker",
			Version: util.Version(),
		}),
	}
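	// This is step 2 of the run: step 1 is "get code", and regular pipeline
	// steps start counting at 3 (see the stepCounter comment in Example #2).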
	finisher := p.StartStep(shared, setupEnvironmentStep, 2)
	defer finisher.Finish(sr)

	if p.options.Verbose {
		p.emitter.Emit(core.Logs, &core.LogsArgs{
			Logs: fmt.Sprintf("Running wercker version: %s\n", util.FullVersion()),
		})
	}

	p.logger.Debugln("Application:", p.options.ApplicationName)

	// Grab our config
	rawConfig, stringConfig, err := p.GetConfig()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.config = rawConfig
	sr.WerckerYamlContents = stringConfig

	// Init the pipeline
	pipeline, err := p.GetPipeline(rawConfig)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	pipeline.InitEnv(p.options.HostEnv)
	shared.pipeline = pipeline

	if p.options.Verbose {
		p.emitter.Emit(core.Logs, &core.LogsArgs{
			Logs: fmt.Sprintf("Using config:\n%s\n", stringConfig),
		})
	}

	// Fetch the box
	timer.Reset()
	box := pipeline.Box()
	_, err = box.Fetch(runnerCtx, pipeline.Env())
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	// TODO(termie): dump some logs about the image
	shared.box = box
	if p.options.Verbose {
		p.logger.Printf(f.Success(fmt.Sprintf("Fetched %s", box.GetName()), timer.String()))
	}

	// Fetch the services and add them to the box
	if err := p.AddServices(runnerCtx, pipeline, box); err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	// Start setting up the pipeline dir
	p.logger.Debugln("Copying source to build directory")
	err = p.CopySource()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	// ... and the cache dir
	p.logger.Debugln("Copying cache to build directory")
	err = p.CopyCache()
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	p.logger.Debugln("Steps:", len(pipeline.Steps()))

	// Fetch the steps
	steps := pipeline.Steps()
	for _, step := range steps {
		timer.Reset()
		if _, err := step.Fetch(); err != nil {
			sr.Message = err.Error()
			return shared, err
		}
		if p.options.Verbose {
			p.logger.Printf(f.Success("Prepared step", step.Name(), timer.String()))
		}
	}

	// ... and the after steps
	afterSteps := pipeline.AfterSteps()
	for _, step := range afterSteps {
		timer.Reset()
		if _, err := step.Fetch(); err != nil {
			sr.Message = err.Error()
			return shared, err
		}

		if p.options.Verbose {
			p.logger.Printf(f.Success("Prepared step", step.Name(), timer.String()))
		}
	}

	// Boot up our main container; it will run the services
	container, err := box.Run(runnerCtx, pipeline.Env())
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.containerID = container.ID

	// Register our signal handler to clean the box up
	// NOTE(termie): we're expecting that this is going to be the last handler
	//               to be run since it calls exit, in the future we might be
	//               able to do something like close the calling context and
	//               short circuit / let the rest of things play out
	boxCleanupHandler := &util.SignalHandler{
		ID: "box-cleanup",
		F: func() bool {
			p.logger.Errorln("Interrupt detected, cleaning up containers and shutting down")
			box.Stop()
			if p.options.ShouldRemove {
				box.Clean()
			}
			os.Exit(1)
			return true
		},
	}
	util.GlobalSigint().Add(boxCleanupHandler)
	util.GlobalSigterm().Add(boxCleanupHandler)

	p.logger.Debugln("Attaching session to base box")
	// Start our session
	sessionCtx, sess, err := p.GetSession(runnerCtx, container.ID)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}
	shared.sess = sess
	shared.sessionCtx = sessionCtx

	// Some helpful logging
	pipeline.LogEnvironment()

	p.logger.Debugln("Setting up guest (base box)")
	err = pipeline.SetupGuest(sessionCtx, sess)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	err = pipeline.ExportEnvironment(sessionCtx, sess)
	if err != nil {
		sr.Message = err.Error()
		return shared, err
	}

	sr.Message = ""
	sr.Success = true
	sr.ExitCode = 0
	return shared, nil
}
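
The box-cleanup handler relies on handlers firing in last-registered-first order (the "processed LIFO" note in Example #2), so the build-failed handler added later runs before the cleanup handler that calls os.Exit. A rough, hypothetical sketch of a LIFO handler stack with the Add/Register shape these examples use; wercker's actual util implementation may differ:

package main

import (
	"fmt"
	"os"
	"os/signal"
)

// handlerStack runs registered handlers in LIFO order when its signal
// arrives; a handler returning false stops the chain, matching how the
// stop-watch handler in Example #4 returns false to halt processing.
// Illustrative only; not the actual wercker util API.
type handlerStack struct{ handlers []func() bool }

func (s *handlerStack) Add(f func() bool) {
	s.handlers = append(s.handlers, f)
}

func (s *handlerStack) Register(sig os.Signal) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, sig)
	go func() {
		for range c {
			for i := len(s.handlers) - 1; i >= 0; i-- {
				if !s.handlers[i]() {
					break
				}
			}
		}
	}()
}

func main() {
	s := &handlerStack{}
	done := make(chan struct{})
	s.Add(func() bool { fmt.Println("box-cleanup runs last"); close(done); return true })
	s.Add(func() bool { fmt.Println("build-failed runs first"); return true })
	s.Register(os.Interrupt)
	fmt.Println("press Ctrl-C to run the handlers in LIFO order")
	<-done
}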
Example #4: WatchStep.Execute
// Execute runs a command and optionally reloads it
func (s *WatchStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}

	// TODO(termie): PACKAGING make this a feature of session and remove
	//               the calls into its struct
	// Start watching our stdout
	stopListening := make(chan struct{})
	defer func() { stopListening <- struct{}{} }()
	go func() {
		for {
			select {
			case line := <-sess.Recv():
				e.Emit(core.Logs, &core.LogsArgs{
					// Hidden: sess.logsHidden,
					Logs: line,
				})
			// We need to make sure we stop eating the stdout from the container
			// promiscuously when we finish our step
			case <-stopListening:
				return
			}
		}
	}()

	// cheating to get containerID
	// TODO(termie): we should deal with this eventually
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	// Set up a signal handler to end our step.
	finishedStep := make(chan struct{})
	stopWatchHandler := &util.SignalHandler{
		ID: "stop-watch",
		// Signal our stuff to stop and finish the step; return false to
		// signify that we've handled the signal and don't process further
		F: func() bool {
			s.logger.Println("Keyboard interrupt detected, finishing step")
			finishedStep <- struct{}{}
			return false
		},
	}
	util.GlobalSigint().Add(stopWatchHandler)
	// NOTE(termie): I think the only way to exit this code is via this
	//               signal handler and the signal monkey removes handlers
	//               after it processes them, so this may be superfluous
	defer util.GlobalSigint().Remove(stopWatchHandler)

	// If we're not going to reload, just run the thing once, synchronously
	if !s.reload {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			return 0, err
		}
		<-finishedStep
		// ignoring errors
		s.killProcesses(containerID, "INT")
		return 0, nil
	}
	f := &util.Formatter{s.options.GlobalOptions.ShowColors}
	s.logger.Info(f.Info("Reloading on file changes"))
	doCmd := func() {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			s.logger.Errorln(err)
			return
		}
		open, err := exposedPortMaps(s.dockerOptions.DockerHost, s.options.PublishPorts)
		if err != nil {
			s.logger.Warnf(f.Info("There was a problem parsing your docker host."), err)
			return
		}
		for _, uri := range open {
			s.logger.Infof(f.Info("Forwarding %s to %s on the container."), uri.HostURI, uri.ContainerPort)
		}
	}

	// Otherwise set up a watcher and do some magic
	watcher, err := s.watch(s.options.ProjectPath)
	if err != nil {
		return -1, err
	}

	debounce := util.NewDebouncer(2 * time.Second)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				s.logger.Debugln("fsnotify event", event.String())
				if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Remove == fsnotify.Remove {
					if !strings.HasPrefix(filepath.Base(event.Name), ".") {
						s.logger.Debug(f.Info("Modified file", event.Name))
						debounce.Trigger()
					}
				}
			case <-debounce.C:
				err := s.killProcesses(containerID, "INT")
				if err != nil {
					s.logger.Panic(err)
					return
				}
				s.logger.Info(f.Info("Reloading"))
				go doCmd()
			case err := <-watcher.Errors:
				s.logger.Error(err)
				done <- struct{}{}
				return
			case <-finishedStep:
				s.killProcesses(containerID, "INT")
				done <- struct{}{}
				return
			}
		}
	}()

	// Run build on first run
	debounce.Trigger()
	<-done
	return 0, nil
}
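
From its usage above, util.NewDebouncer exposes a Trigger method and a channel C that fires once triggers have been quiet for the interval, which is how a burst of fsnotify events collapses into a single reload. A hypothetical, self-contained debouncer with that shape (assumed from usage, not wercker's implementation):

package main

import (
	"fmt"
	"time"
)

// Debouncer coalesces bursts of Trigger calls: C fires once, after the
// triggers have been quiet for the full interval.
// Illustrative only; not the actual wercker util API.
type Debouncer struct {
	C       chan struct{}
	trigger chan struct{}
}

func NewDebouncer(interval time.Duration) *Debouncer {
	d := &Debouncer{
		C:       make(chan struct{}),
		trigger: make(chan struct{}, 1),
	}
	go func() {
		var fire <-chan time.Time // nil until the first trigger
		for {
			select {
			case <-d.trigger:
				fire = time.After(interval) // restart the quiet period
			case <-fire:
				fire = nil
				d.C <- struct{}{}
			}
		}
	}()
	return d
}

// Trigger is non-blocking; overlapping triggers are coalesced.
func (d *Debouncer) Trigger() {
	select {
	case d.trigger <- struct{}{}:
	default:
	}
}

func main() {
	d := NewDebouncer(100 * time.Millisecond)
	d.Trigger()
	d.Trigger() // coalesced into the same event
	<-d.C
	fmt.Println("one reload for a burst of changes")
}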