Code example #1
File: box.go Project: wercker/wercker
// Fetch an image (or update the local copy)
func (b *DockerBox) Fetch(ctx context.Context, env *util.Environment) (*docker.Image, error) {
	// TODO(termie): maybe move the container manipulation outside of here?
	client := b.client

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return nil, err
	}
	authenticator := b.config.Auth.ToAuthenticator(env)
	b.repository = authenticator.Repository(env.Interpolate(b.repository))
	b.Name = fmt.Sprintf("%s:%s", b.repository, b.tag)
	// Shortcut to speed up local dev
	if b.dockerOptions.DockerLocal {
		image, err := client.InspectImage(env.Interpolate(b.Name))
		if err != nil {
			return nil, err
		}
		b.image = image
		return image, nil
	}

	check, err := authenticator.CheckAccess(env.Interpolate(b.repository), auth.Pull)
	if err != nil {
		return nil, fmt.Errorf("Error interacting with this repository: %s %v", env.Interpolate(b.repository), err)
	}
	if !check {
		return nil, fmt.Errorf("Not allowed to interact with this repository: %s", env.Interpolate(b.repository))
	}
	// Create a pipe since we want an io.Reader but Docker expects an io.Writer
	r, w := io.Pipe()
	defer w.Close()

	// Emit statuses in a separate goroutine
	go EmitStatus(e, r, b.options)

	options := docker.PullImageOptions{
		// Change this if we have a private registry
		// Registry:      "docker.tsuru.io",
		OutputStream:  w,
		RawJSONStream: true,
		Repository:    b.repository,
		Tag:           env.Interpolate(b.tag),
	}
	authConfig := docker.AuthConfiguration{
		Username: authenticator.Username(),
		Password: authenticator.Password(),
	}
	err = client.PullImage(options, authConfig)
	if err != nil {
		return nil, err
	}
	image, err := client.InspectImage(env.Interpolate(b.Name))
	if err != nil {
		return nil, err
	}
	b.image = image

	return image, nil
}
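A minimal sketch of calling Fetch, assuming the context already carries an emitter (so core.EmitterFromContext succeeds) and that the box was constructed elsewhere; fetchBoxImage is a hypothetical helper, not confirmed wercker API:

// Sketch only: fetchBoxImage is a hypothetical caller of Fetch.
func fetchBoxImage(ctx context.Context, box *DockerBox, env *util.Environment) error {
	// Fetch pulls (or, with DockerLocal, merely inspects) the image and
	// caches it on the box as b.image.
	image, err := box.Fetch(ctx, env)
	if err != nil {
		return err
	}
	fmt.Println("fetched image:", image.ID)
	return nil
}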
Code example #2
File: runner.go Project: sgoings/wercker
// NewRunner from global options
func NewRunner(ctx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions, getPipeline pipelineGetter) (*Runner, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return nil, err
	}
	logger := util.RootLogger().WithField("Logger", "Runner")
	// h, err := NewLogHandler()
	// if err != nil {
	//   p.logger.WithField("Error", err).Panic("Unable to LogHandler")
	// }
	// h.ListenTo(e)

	if options.Debug {
		dh := core.NewDebugHandler()
		dh.ListenTo(e)
	}

	l, err := event.NewLiteralLogHandler(options)
	if err != nil {
		logger.WithField("Error", err).Panic("Unable to create event.LiteralLogHandler")
	}
	l.ListenTo(e)

	var mh *event.MetricsEventHandler
	if options.ShouldKeenMetrics {
		mh, err = event.NewMetricsHandler(options)
		if err != nil {
			logger.WithField("Error", err).Panic("Unable to create MetricsHandler")
		}
		mh.ListenTo(e)
	}

	var r *event.ReportHandler
	if options.ShouldReport {
		r, err = event.NewReportHandler(options.ReporterHost, options.ReporterKey)
		if err != nil {
			logger.WithField("Error", err).Panic("Unable to create event.ReportHandler")
		}
		r.ListenTo(e)
	}

	return &Runner{
		options:       options,
		dockerOptions: dockerOptions,
		literalLogger: l,
		metrics:       mh,
		reporter:      r,
		getPipeline:   getPipeline,
		logger:        logger,
		emitter:       e,
		formatter:     &util.Formatter{options.GlobalOptions.ShowColors},
	}, nil
}
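A hedged usage sketch for NewRunner; GetBuildPipeline stands in for whatever pipelineGetter the caller supplies and is not a confirmed wercker identifier:

// Sketch only, under the assumptions noted above.
func buildRunner(ctx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions) (*Runner, error) {
	runner, err := NewRunner(ctx, options, dockerOptions, GetBuildPipeline)
	if err != nil {
		return nil, err
	}
	// Every handler constructed in NewRunner listens on the same emitter,
	// so one emitted event fans out to debug, log, metrics and report handlers.
	return runner, nil
}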
Code example #3
File: service.go Project: hughker/wercker
// Run executes the service
func (b *InternalServiceBox) Run(ctx context.Context, env *util.Environment, links []string) (*docker.Container, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return nil, err
	}
	f := &util.Formatter{}

	client, err := NewDockerClient(b.dockerOptions)
	if err != nil {
		return nil, err
	}

	// Import the environment and command
	myEnv := dockerEnv(b.config.Env, env)

	origEntrypoint := b.image.Config.Entrypoint
	origCmd := b.image.Config.Cmd
	cmdInfo := []string{}

	var entrypoint []string
	if b.entrypoint != "" {
		entrypoint, err = shlex.Split(b.entrypoint)
		if err != nil {
			return nil, err
		}
		cmdInfo = append(cmdInfo, entrypoint...)
	} else {
		cmdInfo = append(cmdInfo, origEntrypoint...)
	}

	var cmd []string
	if b.config.Cmd != "" {
		cmd, err = shlex.Split(b.config.Cmd)
		if err != nil {
			return nil, err
		}
		cmdInfo = append(cmdInfo, cmd...)
	} else {
		cmdInfo = append(cmdInfo, origCmd...)
	}

	container, err := client.CreateContainer(
		docker.CreateContainerOptions{
			Name: b.getContainerName(),
			Config: &docker.Config{
				Image:           b.Name,
				Cmd:             cmd,
				Env:             myEnv,
				NetworkDisabled: b.networkDisabled,
				DNS:             b.dockerOptions.DockerDNS,
				Entrypoint:      entrypoint,
			},
		})

	if err != nil {
		return nil, err
	}

	out := []string{}
	for _, part := range cmdInfo {
		if strings.Contains(part, " ") {
			out = append(out, fmt.Sprintf("%q", part))
		} else {
			out = append(out, part)
		}
	}
	if b.options.Verbose {
		b.logger.Println(f.Info(fmt.Sprintf("Starting service %s", b.ShortName), strings.Join(out, " ")))
	}

	err = client.StartContainer(container.ID, &docker.HostConfig{
		DNS:   b.dockerOptions.DockerDNS,
		Links: links,
	})
	if err != nil {
		return nil, err
	}
	b.container = container

	go func() {
		status, err := client.WaitContainer(container.ID)
		if err != nil {
			b.logger.Errorln("Error waiting", err)
		}
		b.logger.Debugln("Service container finished with status code:", status, container.ID)

		if status != 0 {
			var errstream bytes.Buffer
			var outstream bytes.Buffer
			// recv := make(chan string)
			// outputStream := NewReceiver(recv)
			opts := docker.LogsOptions{
				Container:    container.ID,
				Stdout:       true,
				Stderr:       true,
				ErrorStream:  &errstream,
				OutputStream: &outstream,
				RawTerminal:  false,
			}
			err = client.Logs(opts)
			if err != nil {
				b.logger.Panicln(err)
			}
			e.Emit(core.Logs, &core.LogsArgs{
				Stream: fmt.Sprintf("%s-stdout", b.Name),
				Logs:   outstream.String(),
			})
			e.Emit(core.Logs, &core.LogsArgs{
				Stream: fmt.Sprintf("%s-stderr", b.Name),
				Logs:   errstream.String(),
			})
		}
	}()

	return container, nil
}
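Both the entrypoint and the command above go through shlex.Split, which honors shell-style quoting instead of splitting on bare whitespace; a small illustration of the expected behavior:

	// A quoted argument containing spaces stays a single element.
	parts, err := shlex.Split(`redis-server --logfile "redis log.txt"`)
	if err != nil {
		return nil, err
	}
	// parts == []string{"redis-server", "--logfile", "redis log.txt"}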
Code example #4
File: docker.go Project: rlugojr/wercker
// Execute commits the current container and pushes it to the configured
// registry
func (s *DockerPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// TODO(termie): could probably re-use the transport's client
	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	s.logger.WithFields(util.LogFields{
		"Registry":   s.registry,
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Push to registry")

	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	auth := docker.AuthConfiguration{
		Username:      s.username,
		Password:      s.password,
		Email:         s.email,
		ServerAddress: s.authServer,
	}

	if !s.dockerOptions.DockerLocal {
		checkOpts := CheckAccessOptions{
			Auth:       auth,
			Access:     "write",
			Repository: s.repository,
			Registry:   s.registry,
		}

		check, err := client.CheckAccess(checkOpts)
		if err != nil {
			s.logger.Errorln("Error during check access", err)
			return -1, err
		}
		if !check {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}
	s.logger.Debugln("Init env:", s.data)

	config := docker.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		WorkingDir:   s.workingDir,
		User:         s.user,
		Env:          s.env,
		StopSignal:   s.stopSignal,
		Labels:       s.labels,
		ExposedPorts: s.ports,
		Volumes:      s.volumes,
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	commitOpts := docker.CommitContainerOptions{
		Container:  containerID,
		Repository: s.repository,
		Author:     s.author,
		Message:    s.message,
		Run:        &config,
		Tag:        s.tags[0],
	}

	s.logger.Debugln("Commit container:", containerID)
	i, err := client.CommitContainer(commitOpts)
	if err != nil {
		return -1, err
	}
	s.logger.WithField("Image", i).Debug("Commit completed")

	return s.tagAndPush(i.ID, e, client, auth)
}
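tagAndPush itself is not shown here; with fsouza/go-dockerclient, applying the remaining tags to the committed image would look roughly like the sketch below (an assumption about its internals, not the actual wercker implementation):

	// Sketch only: CommitContainer already applied s.tags[0] above, so tag
	// the committed image with every remaining tag before pushing.
	for _, tag := range s.tags[1:] {
		err = client.TagImage(i.ID, docker.TagImageOptions{
			Repo:  s.repository,
			Tag:   tag,
			Force: true,
		})
		if err != nil {
			return -1, err
		}
	}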
Code example #5
File: docker.go Project: rlugojr/wercker
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// At this point we've written the layer to disk; add up the sizes of all
	// the files to include in our JSON metadata
	layerFile, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	var layerSize int64

	layerTar := tar.NewReader(layerFile)
	for {
		hdr, err := layerTar.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}

		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}

		layerSize += hdr.Size
	}

	config := docker.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		ExposedPorts: s.ports,
		Volumes:      s.volumes,
	}

	layerID, err := GenerateDockerID()
	if err != nil {
		return -1, err
	}

	// Make the JSON file we need
	imageJSON := DockerImageJSON{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: DockerImageJSONContainerConfig{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.5",
		Created:       time.Now(),
		ID:            layerID,
		OS:            "linux",
		Size:          layerSize,
		Config:        config,
	}

	jsonOut, err := json.MarshalIndent(imageJSON, "", "  ")
	if err != nil {
		return -1, err
	}
	s.logger.Debugln(string(jsonOut))

	// Write out the files to disk that we are going to care about
	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()
	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}
	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()
	_, err = jsonFile.Write(jsonOut)
	if err != nil {
		return -1, err
	}
	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()
	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.repository)))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	if err != nil {
		return -1, err
	}
	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}
	// layer.tar has an extra folder in it so we have to strip it :/
	tempLayerFile, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.Remove(s.options.HostPath("layer.tar"))
	defer tempLayerFile.Close()

	realLayerFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer realLayerFile.Close()

	tr := tar.NewReader(tempLayerFile)
	tw := tar.NewWriter(realLayerFile)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}
		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}
		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}
		if len(hdr.Name) == 0 {
			continue
		}
		err = tw.WriteHeader(hdr)
		if err != nil {
			return -1, err
		}
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}
	err = tw.Close()
	if err != nil {
		return -1, err
	}

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()
	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	s.logger.WithFields(util.LogFields{
		"Registry":   s.registry,
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Check the auth
	auth := docker.AuthConfiguration{
		Username:      s.username,
		Password:      s.password,
		Email:         s.email,
		ServerAddress: s.authServer,
	}

	if !s.dockerOptions.DockerLocal {
		checkOpts := CheckAccessOptions{
			Auth:       auth,
			Access:     "write",
			Repository: s.repository,
			Registry:   s.registry,
		}

		check, err := client.CheckAccess(checkOpts)
		if err != nil {
			s.logger.Errorln("Error during check access", err)
			return -1, err
		}
		if !check {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()
	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return -1, err
	}
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}
	return s.tagAndPush(layerID, e, client, auth)
}
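The repositories file assembled piecewise above is equivalent to marshaling a two-level map; a sketch of the same output built with encoding/json (one design alternative, not the code as shipped):

	// Sketch only: equivalent to the hand-written JSON above, e.g.
	// {"example/app":{"latest":"<layerID>","v1":"<layerID>"}}
	repos := map[string]map[string]string{
		s.repository: {},
	}
	for _, tag := range s.tags {
		repos[s.repository][tag] = layerID
	}
	out, err := json.Marshal(repos)
	if err != nil {
		return -1, err
	}
	_, err = repositoriesFile.Write(out)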
Code example #6
File: main.go Project: rlugojr/wercker
func executePipeline(cmdCtx context.Context, options *core.PipelineOptions, dockerOptions *dockerlocal.DockerOptions, getter pipelineGetter) (*RunnerShared, error) {
	// Boilerplate
	soft := NewSoftExit(options.GlobalOptions)
	logger := util.RootLogger().WithField("Logger", "Main")
	e, err := core.EmitterFromContext(cmdCtx)
	if err != nil {
		return nil, err
	}
	f := &util.Formatter{options.GlobalOptions.ShowColors}

	// Set up the runner
	r, err := NewRunner(cmdCtx, options, dockerOptions, getter)
	if err != nil {
		return nil, err
	}

	// Main timer
	mainTimer := util.NewTimer()
	timer := util.NewTimer()

	// These will be emitted at the end of the execution; we're going to be
	// pessimistic and report that we failed, unless overridden before the
	// execution finishes.
	fullPipelineFinisher := r.StartFullPipeline(options)
	pipelineArgs := &core.FullPipelineFinishedArgs{}
	defer fullPipelineFinisher.Finish(pipelineArgs)

	buildFinisher := r.StartBuild(options)
	buildFinishedArgs := &core.BuildFinishedArgs{Box: nil, Result: "failed"}
	defer buildFinisher.Finish(buildFinishedArgs)

	// Debug information
	DumpOptions(options)

	// Do some sanity checks before starting
	err = dockerlocal.RequireDockerEndpoint(dockerOptions)
	if err != nil {
		return nil, soft.Exit(err)
	}

	// Start copying code
	logger.Println(f.Info("Executing pipeline"))
	timer.Reset()
	_, err = r.EnsureCode()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	err = r.CleanupOldBuilds()
	if err != nil {
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
	}
	if options.Verbose {
		logger.Printf(f.Success("Copied working dir", timer.String()))
	}

	// Setup environment is still a fairly special step; it needs
	// to start our boxes and get everything set up
	logger.Println(f.Info("Running step", "setup environment"))
	timer.Reset()
	shared, err := r.SetupEnvironment(cmdCtx)
	if shared != nil && shared.box != nil {
		if options.ShouldRemove {
			defer shared.box.Clean()
		}
		defer shared.box.Stop()
	}
	if err != nil {
		logger.Errorln(f.Fail("Step failed", "setup environment", timer.String()))
		e.Emit(core.Logs, &core.LogsArgs{
			Stream: "stderr",
			Logs:   err.Error() + "\n",
		})
		return nil, soft.Exit(err)
	}
	if options.Verbose {
		logger.Printf(f.Success("Step passed", "setup environment", timer.String()))
	}

	// Expand our context object
	box := shared.box
	buildFinishedArgs.Box = box
	pipeline := shared.pipeline
	repoName := pipeline.DockerRepo()
	tag := pipeline.DockerTag()
	message := pipeline.DockerMessage()

	shouldStore := options.ShouldArtifacts

	// TODO(termie): hack for now, probably can be made into a naive class
	var storeStep core.Step

	if shouldStore {
		storeStep = &core.ExternalStep{
			BaseStep: core.NewBaseStep(core.BaseStepOptions{
				Name:    "store",
				Owner:   "wercker",
				Version: util.Version(),
			}),
		}
	}

	e.Emit(core.BuildStepsAdded, &core.BuildStepsAddedArgs{
		Build:      pipeline,
		Steps:      pipeline.Steps(),
		StoreStep:  storeStep,
		AfterSteps: pipeline.AfterSteps(),
	})

	pr := &core.PipelineResult{
		Success:           true,
		FailedStepName:    "",
		FailedStepMessage: "",
	}

	// stepCounter starts at 3, step 1 is "get code", step 2 is "setup
	// environment".
	stepCounter := &util.Counter{Current: 3}
	checkpoint := false
	for _, step := range pipeline.Steps() {
		// we always want to run the wercker-init step to provide some functions
		if !checkpoint && stepCounter.Current > 3 {
			if options.EnableDevSteps && options.Checkpoint != "" {
				logger.Printf(f.Info("Skipping step", step.DisplayName()))
				// start at the one after the checkpoint
				if step.Checkpoint() == options.Checkpoint {
					logger.Printf(f.Info("Found checkpoint", options.Checkpoint))
					checkpoint = true
				}
				stepCounter.Increment()
				continue
			}
		}
		logger.Printf(f.Info("Running step", step.DisplayName()))
		timer.Reset()
		sr, err := r.RunStep(shared, step, stepCounter.Increment())
		if err != nil {
			pr.Success = false
			pr.FailedStepName = step.DisplayName()
			pr.FailedStepMessage = sr.Message
			logger.Printf(f.Fail("Step failed", step.DisplayName(), timer.String()))
			break
		}

		if options.EnableDevSteps && step.Checkpoint() != "" {
			logger.Printf(f.Info("Checkpointing", step.Checkpoint()))
			box.Commit(box.Repository(), fmt.Sprintf("w-%s", step.Checkpoint()), "checkpoint", false)
		}

		if options.Verbose {
			logger.Printf(f.Success("Step passed", step.DisplayName(), timer.String()))
		}
	}

	if options.ShouldCommit {
		_, err = box.Commit(repoName, tag, message, true)
		if err != nil {
			logger.Errorln("Failed to commit:", err.Error())
		}
	}

	// We need to wind the counter to where it should be if we failed a step,
	// so that is the number of steps + get code + setup environment + store
	// TODO(termie): remove all this "order" stuff completely
	stepCounter.Current = len(pipeline.Steps()) + 3

	if pr.Success && options.ShouldArtifacts {
		// At this point the build has effectively passed but we can still mess it
		// up by being unable to deliver the artifacts

		err = func() error {
			sr := &StepResult{
				Success:    false,
				Artifact:   nil,
				Message:    "",
				PackageURL: "",
				ExitCode:   1,
			}
			finisher := r.StartStep(shared, storeStep, stepCounter.Increment())
			defer finisher.Finish(sr)

			pr.FailedStepName = storeStep.Name()
			pr.FailedStepMessage = "Unable to store pipeline output"

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts\n",
			})

			artifact, err := pipeline.CollectArtifact(shared.containerID)
			// Ignore ErrEmptyTarball errors
			if err != util.ErrEmptyTarball {
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				e.Emit(core.Logs, &core.LogsArgs{
					Logs: fmt.Sprintf("Collecting files from %s\n", artifact.GuestPath),
				})

				ignoredDirectories := []string{".git", "node_modules", "vendor", "site-packages"}
				nameEmit := func(path string, info os.FileInfo, err error) error {
					relativePath := strings.TrimPrefix(path, artifact.HostPath)

					if info.IsDir() {
						if util.ContainsString(ignoredDirectories, info.Name()) {
							e.Emit(core.Logs, &core.LogsArgs{
								Logs: fmt.Sprintf(".%s/ (content omitted)\n", relativePath),
							})
							return filepath.SkipDir
						}

						return nil
					}

					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf(".%s\n", relativePath),
					})

					return nil
				}

				err = filepath.Walk(artifact.HostPath, nameEmit)
				if err != nil {
					sr.Message = err.Error()
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
					})
					return err
				}

				tarInfo, err := os.Stat(artifact.HostTarPath)
				if err != nil {
					if os.IsNotExist(err) {
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: "No artifacts stored",
						})
					} else {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				} else {
					size, unit := util.ConvertUnit(tarInfo.Size())
					e.Emit(core.Logs, &core.LogsArgs{
						Logs: fmt.Sprintf("Total artifact size: %d %s\n", size, unit),
					})
				}

				if options.ShouldStoreS3 {
					artificer := dockerlocal.NewArtificer(options, dockerOptions)
					err = artificer.Upload(artifact)
					if err != nil {
						sr.Message = err.Error()
						e.Emit(core.Logs, &core.LogsArgs{
							Logs: fmt.Sprintf("Storing artifacts failed: %s\n", sr.Message),
						})
						return err
					}
				}

				sr.PackageURL = artifact.URL()
			} else {
				e.Emit(core.Logs, &core.LogsArgs{
					Logs: "No artifacts found\n",
				})
			}

			e.Emit(core.Logs, &core.LogsArgs{
				Logs: "Storing artifacts complete\n",
			})

			sr.Success = true
			sr.ExitCode = 0

			return nil
		}()
		if err != nil {
			pr.Success = false
			logger.WithField("Error", err).Error("Unable to store pipeline output")
		}
	} else {
		stepCounter.Increment()
	}

	// We're sending our build-finished event, but we're not done yet;
	// now it's time to run after-steps if we have any
	if pr.Success {
		logger.Println(f.Success("Steps passed", mainTimer.String()))
		buildFinishedArgs.Result = "passed"
	}
	buildFinisher.Finish(buildFinishedArgs)
	pipelineArgs.MainSuccessful = pr.Success

	if len(pipeline.AfterSteps()) == 0 {
		// We're about to end the build, so pull the cache and explode it
		// into the CacheDir
		if !options.DirectMount {
			timer.Reset()
			err = pipeline.CollectCache(shared.containerID)
			if err != nil {
				logger.WithField("Error", err).Error("Unable to store cache")
			}
			if options.Verbose {
				logger.Printf(f.Success("Exported Cache", timer.String()))
			}
		}

		if pr.Success {
			logger.Println(f.Success("Pipeline finished", mainTimer.String()))
		} else {
			logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
		}

		if !pr.Success {
			return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
		}
		return shared, nil
	}

	pipelineArgs.RanAfterSteps = true

	logger.Println(f.Info("Starting after-steps"))
	// The container may have died; either way we'll have a fresh env
	container, err := box.Restart()
	if err != nil {
		logger.Panicln(err)
	}

	newSessCtx, newSess, err := r.GetSession(cmdCtx, container.ID)
	if err != nil {
		logger.Panicln(err)
	}

	newShared := &RunnerShared{
		box:         shared.box,
		pipeline:    shared.pipeline,
		sess:        newSess,
		sessionCtx:  newSessCtx,
		containerID: shared.containerID,
		config:      shared.config,
	}

	// Set up the base environment
	err = pipeline.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	// Add the After-Step parts
	err = pr.ExportEnvironment(newSessCtx, newSess)
	if err != nil {
		return nil, err
	}

	for _, step := range pipeline.AfterSteps() {
		logger.Println(f.Info("Running after-step", step.DisplayName()))
		timer.Reset()
		_, err := r.RunStep(newShared, step, stepCounter.Increment())
		if err != nil {
			logger.Println(f.Fail("After-step failed", step.DisplayName(), timer.String()))
			break
		}
		logger.Println(f.Success("After-step passed", step.DisplayName(), timer.String()))
	}

	// We're about to end the build, so pull the cache and explode it
	// into the CacheDir
	if !options.DirectMount {
		timer.Reset()
		err = pipeline.CollectCache(newShared.containerID)
		if err != nil {
			logger.WithField("Error", err).Error("Unable to store cache")
		}
		if options.Verbose {
			logger.Printf(f.Success("Exported Cache", timer.String()))
		}
	}

	if pr.Success {
		logger.Println(f.Success("Pipeline finished", mainTimer.String()))
	} else {
		logger.Println(f.Fail("Pipeline failed", mainTimer.String()))
	}

	if !pr.Success {
		return nil, fmt.Errorf("Step failed: %s", pr.FailedStepName)
	}

	pipelineArgs.AfterStepSuccessful = pr.Success

	return shared, nil
}
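The pessimistic-finisher pattern near the top of executePipeline is worth isolating: failure defaults are registered and deferred immediately, so any early return still emits a result, and success is recorded only by mutating the args before the defer fires. In miniature:

	// Sketch of the pattern used above.
	buildFinishedArgs := &core.BuildFinishedArgs{Result: "failed"} // assume failure
	buildFinisher := r.StartBuild(options)
	defer buildFinisher.Finish(buildFinishedArgs) // emitted even on early return

	// ... run the pipeline; any `return nil, err` still reports "failed" ...

	buildFinishedArgs.Result = "passed" // reached only on success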
Code example #7
File: box.go Project: umcodemonkey/wercker
// Fetch an image (or update the local copy)
func (b *DockerBox) Fetch(ctx context.Context, env *util.Environment) (*docker.Image, error) {
	// TODO(termie): maybe move the container manipulation outside of here?
	client := b.client

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return nil, err
	}

	// Shortcut to speed up local dev
	if b.dockerOptions.DockerLocal {
		image, err := client.InspectImage(env.Interpolate(b.Name))
		if err != nil {
			return nil, err
		}
		b.image = image
		return image, nil
	}

	// Check for access to this image
	auth := docker.AuthConfiguration{
		Username: env.Interpolate(b.config.Username),
		Password: env.Interpolate(b.config.Password),
	}

	checkOpts := CheckAccessOptions{
		Auth:       auth,
		Access:     "read",
		Repository: env.Interpolate(b.repository),
		Registry:   env.Interpolate(b.config.Registry),
	}

	check, err := client.CheckAccess(checkOpts)
	if err != nil {
		b.logger.Errorln("Error during check access", err)
		return nil, err
	}

	if !check {
		b.logger.Errorln("Not allowed to interact with this repository:", b.repository)
		return nil, fmt.Errorf("Not allowed to interact with this repository: %s", b.repository)
	}

	// Create a pipe since we want an io.Reader but Docker expects an io.Writer
	r, w := io.Pipe()
	defer w.Close()

	// Emit statuses in a separate goroutine
	go EmitStatus(e, r, b.options)

	options := docker.PullImageOptions{
		// Change this if we have a private registry
		// Registry:      "docker.tsuru.io",
		OutputStream:  w,
		RawJSONStream: true,
		Repository:    env.Interpolate(b.repository),
		Tag:           env.Interpolate(b.tag),
	}

	err = client.PullImage(options, auth)
	if err != nil {
		return nil, err
	}

	image, err := client.InspectImage(env.Interpolate(b.Name))
	if err != nil {
		return nil, err
	}
	b.image = image

	return image, nil
}
Code example #8
File: store.go Project: hughker/wercker
// Execute does the actual export and upload of the container
func (s *StoreContainerStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}
	// TODO(termie): could probably re-use the transport's client
	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return -1, err
	}
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	repoName := s.DockerRepo()
	tag := s.DockerTag()
	message := s.DockerMessage()

	commitOpts := docker.CommitContainerOptions{
		Container:  containerID,
		Repository: repoName,
		Tag:        tag,
		Author:     "wercker",
		Message:    message,
	}
	s.logger.Debugln("Commit container:", containerID)
	i, err := client.CommitContainer(commitOpts)
	if err != nil {
		return -1, err
	}
	s.logger.WithField("Image", i).Debug("Commit completed")

	e.Emit(core.Logs, &core.LogsArgs{
		Logs: "Exporting container\n",
	})

	file, err := ioutil.TempFile(s.options.BuildPath(), "export-image-")
	if err != nil {
		s.logger.WithField("Error", err).Error("Unable to create temporary file")
		return -1, err
	}

	hash := sha256.New()
	w := snappystream.NewWriter(io.MultiWriter(file, hash))

	exportImageOptions := docker.ExportImageOptions{
		Name:         repoName,
		OutputStream: w,
	}
	err = client.ExportImage(exportImageOptions)
	if err != nil {
		s.logger.WithField("Error", err).Error("Unable to export image")
		return -1, err
	}

	// The export is done now, so close the temporary file and compute the
	// calculatedHash
	err = file.Close()
	if err != nil {
		return -1, err
	}

	calculatedHash := hex.EncodeToString(hash.Sum(nil))

	s.logger.WithFields(util.LogFields{
		"SHA256":            calculatedHash,
		"TemporaryLocation": file.Name(),
	}).Println("Export image successful")

	key := core.GenerateBaseKey(s.options)
	key = fmt.Sprintf("%s/%s", key, "docker.tar.sz")

	s.artifact = &core.Artifact{
		HostPath:    file.Name(),
		Key:         key,
		Bucket:      s.options.S3Bucket,
		ContentType: "application/x-snappy-framed",
		Meta: map[string]*string{
			"Sha256": &calculatedHash,
		},
	}

	return 0, nil
}
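A consumer of the stored artifact can check integrity against the Sha256 metadata by hashing the compressed file exactly as this step did; a minimal sketch, where artifactPath and expectedSha256 are hypothetical inputs:

	// Sketch only: the Meta["Sha256"] value was computed over the
	// compressed stream, so hash the file as-is, without decompressing.
	f, err := os.Open(artifactPath)
	if err != nil {
		return err
	}
	defer f.Close()
	hash := sha256.New()
	if _, err := io.Copy(hash, f); err != nil {
		return err
	}
	if hex.EncodeToString(hash.Sum(nil)) != expectedSha256 {
		return fmt.Errorf("artifact checksum mismatch")
	}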
Code example #9
File: docker.go Project: wercker/wercker
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// layer.tar has an extra folder in it so we have to strip it :/
	artifactReader, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer artifactReader.Close()

	layerFile, err := os.OpenFile(s.options.HostPath("real_layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	dgst := digest.Canonical.New()
	mwriter := io.MultiWriter(layerFile, dgst.Hash())

	tr := tar.NewReader(artifactReader)
	tw := tar.NewWriter(mwriter)

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}

		if err != nil {
			return -1, err
		}

		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}

		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}

		if len(hdr.Name) == 0 {
			continue
		}

		err = tw.WriteHeader(hdr)
		if err != nil {
			return -1, err
		}
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}

	// Flush the tar trailer so the digest covers the complete layer
	err = tw.Close()
	if err != nil {
		return -1, err
	}

	digest := dgst.Digest()

	config := &container.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		Volumes:      s.volumes,
		ExposedPorts: tranformPorts(s.ports),
	}

	// Make the JSON file we need
	t := time.Now()
	base := image.V1Image{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: container.Config{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.10",
		Created:       t,
		OS:            "linux",
		Config:        config,
	}

	imageJSON := image.Image{
		V1Image: base,
		History: []image.History{{Created: t}},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{layer.DiffID(digest)},
		},
	}

	js, err := imageJSON.MarshalJSON()
	if err != nil {
		return -1, err
	}

	hash := sha256.New()
	hash.Write(js)
	layerID := hex.EncodeToString(hash.Sum(nil))

	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}

	layerFile.Close()

	err = os.Rename(layerFile.Name(), s.options.HostPath("scratch", layerID, "layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()

	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}

	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()

	_, err = jsonFile.Write(js)
	if err != nil {
		return -1, err
	}

	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()

	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.authenticator.Repository(s.repository))))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	if err != nil {
		return -1, err
	}
	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()

	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	// Check the auth
	if !s.dockerOptions.DockerLocal {
		check, err := s.authenticator.CheckAccess(s.repository, auth.Push)
		if err != nil {
			s.logger.Errorln("Error during check access", err)
			return -1, err
		}
		if !check {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	s.repository = s.authenticator.Repository(s.repository)
	s.logger.WithFields(util.LogFields{
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return 1, err
	}

	return s.tagAndPush(layerID, e, client)
}
Code example #10
File: watchstep.go Project: wercker/wercker
// Execute runs a command and optionally reloads it
func (s *WatchStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}

	// TODO(termie): PACKAGING make this a feature of session and remove
	//               the calls into its struct
	// Start watching our stdout
	stopListening := make(chan struct{})
	defer func() { stopListening <- struct{}{} }()
	go func() {
		for {
			select {
			case line := <-sess.Recv():
				e.Emit(core.Logs, &core.LogsArgs{
					// Hidden: sess.logsHidden,
					Logs: line,
				})
			// We need to make sure we stop eating the stdout from the container
			// promiscuously when we finish our step
			case <-stopListening:
				return
			}
		}
	}()

	// cheating to get containerID
	// TODO(termie): we should deal with this eventually
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	// Set up a signal handler to end our step.
	finishedStep := make(chan struct{})
	stopWatchHandler := &util.SignalHandler{
		ID: "stop-watch",
		// Signal our stuff to stop and finish the step, return false to
		// signify that we've handled the signal and don't process further
		F: func() bool {
			s.logger.Println("Keyboard interrupt detected, finishing step")
			finishedStep <- struct{}{}
			return false
		},
	}
	util.GlobalSigint().Add(stopWatchHandler)
	// NOTE(termie): I think the only way to exit this code is via this
	//               signal handler and the signal monkey removes handlers
	//               after it processes them, so this may be superfluous
	defer util.GlobalSigint().Remove(stopWatchHandler)

	// If we're not going to reload just run the thing once, synchronously
	if !s.reload {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			return -1, err
		}
		<-finishedStep
		// ignoring errors
		s.killProcesses(containerID, "INT")
		return 0, nil
	}
	f := &util.Formatter{s.options.GlobalOptions.ShowColors}
	s.logger.Info(f.Info("Reloading on file changes"))
	doCmd := func() {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			s.logger.Errorln(err)
			return
		}
		open, err := exposedPortMaps(s.dockerOptions.DockerHost, s.options.PublishPorts)
		if err != nil {
			s.logger.Warnf(f.Info("There was a problem parsing your docker host."), err)
			return
		}
		for _, uri := range open {
			s.logger.Infof(f.Info("Forwarding %s to %s on the container."), uri.HostURI, uri.ContainerPort)
		}
	}

	// Otherwise set up a watcher and do some magic
	watcher, err := s.watch(s.options.ProjectPath)
	if err != nil {
		return -1, err
	}

	debounce := util.NewDebouncer(2 * time.Second)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				s.logger.Debugln("fsnotify event", event.String())
				if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Remove == fsnotify.Remove {
					if !strings.HasPrefix(filepath.Base(event.Name), ".") {
						s.logger.Debug(f.Info("Modified file", event.Name))
						debounce.Trigger()
					}
				}
			case <-debounce.C:
				err := s.killProcesses(containerID, "INT")
				if err != nil {
					s.logger.Panic(err)
					return
				}
				s.logger.Info(f.Info("Reloading"))
				go doCmd()
			case err := <-watcher.Errors:
				s.logger.Error(err)
				done <- struct{}{}
				return
			case <-finishedStep:
				s.killProcesses(containerID, "INT")
				done <- struct{}{}
				return
			}
		}
	}()

	// Run build on first run
	debounce.Trigger()
	<-done
	return 0, nil
}
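The debouncer is what keeps a burst of fsnotify events from triggering one reload per file; stripped of the watcher, the pattern looks like this (assuming the Trigger/C semantics used above):

	// Sketch only: many Trigger calls inside the quiet period collapse
	// into a single fire on the C channel.
	debounce := util.NewDebouncer(2 * time.Second)
	for i := 0; i < 5; i++ {
		debounce.Trigger()
	}
	<-debounce.C // fires once, after the quiet period following the last Trigger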