// Execute a shell and give it to the user
func (s *ShellStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// cheating to get containerID
	// TODO(termie): we should deal with this eventually
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return -1, err
	}

	code := s.env.Export()
	code = append(code, "cd $WERCKER_SOURCE_DIR")
	code = append(code, s.Code)

	err = client.AttachInteractive(containerID, s.Cmd, code)
	if err != nil {
		return -1, err
	}

	return 0, nil
}
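// Note: the bare type assertion above (repeated in the other steps below)
// panics if the session transport is ever something other than a
// *DockerTransport. A minimal sketch of a defensive variant, assuming the
// same Session/Transport types; illustrative only, not how the package
// currently handles it:
//
//	dt, ok := sess.Transport().(*DockerTransport)
//	if !ok {
//		return -1, fmt.Errorf("step requires a docker transport, got %T", sess.Transport())
//	}
//	containerID := dt.containerID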
// Execute commits the current container and pushes it to the configured
// registry
func (s *DockerPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// TODO(termie): could probably re-use the transport's client
	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	s.logger.WithFields(util.LogFields{
		"Registry":   s.registry,
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Push to registry")

	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	auth := docker.AuthConfiguration{
		Username:      s.username,
		Password:      s.password,
		Email:         s.email,
		ServerAddress: s.authServer,
	}

	if !s.dockerOptions.DockerLocal {
		checkOpts := CheckAccessOptions{
			Auth:       auth,
			Access:     "write",
			Repository: s.repository,
			Registry:   s.registry,
		}

		check, err := client.CheckAccess(checkOpts)
		if err != nil {
			s.logger.Errorln("Error during check access", err)
			return -1, err
		}
		if !check {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	s.logger.Debugln("Init env:", s.data)

	config := docker.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		WorkingDir:   s.workingDir,
		User:         s.user,
		Env:          s.env,
		StopSignal:   s.stopSignal,
		Labels:       s.labels,
		ExposedPorts: s.ports,
		Volumes:      s.volumes,
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	commitOpts := docker.CommitContainerOptions{
		Container:  containerID,
		Repository: s.repository,
		Author:     s.author,
		Message:    s.message,
		Run:        &config,
		Tag:        s.tags[0],
	}

	s.logger.Debugln("Commit container:", containerID)
	i, err := client.CommitContainer(commitOpts)
	if err != nil {
		return -1, err
	}
	s.logger.WithField("Image", i).Debug("Commit completed")

	return s.tagAndPush(i.ID, e, client, auth)
}
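// tagAndPush is defined elsewhere in this package and is not shown here. The
// sketch below only illustrates the rough shape of such a helper using
// go-dockerclient's TagImage and PushImage calls directly; the function name
// and its exact behavior are assumptions for illustration, not the actual
// wercker implementation.
func tagAndPushSketch(imageID, repository string, tags []string, client *docker.Client, auth docker.AuthConfiguration) error {
	for _, tag := range tags {
		// Tag the committed image so the registry knows where it belongs.
		err := client.TagImage(imageID, docker.TagImageOptions{
			Repo:  repository,
			Tag:   tag,
			Force: true,
		})
		if err != nil {
			return err
		}
		// Push each tag; progress output is omitted here for brevity.
		err = client.PushImage(docker.PushImageOptions{
			Name: repository,
			Tag:  tag,
		}, auth)
		if err != nil {
			return err
		}
	}
	return nil
}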
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// At this point we've written the layer to disk, we're going to add up the
	// sizes of all the files to add to our json format, and sha256 the data
	layerFile, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	var layerSize int64

	layerTar := tar.NewReader(layerFile)
	for {
		hdr, err := layerTar.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}
		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}
		layerSize += hdr.Size
	}

	config := docker.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		ExposedPorts: s.ports,
		Volumes:      s.volumes,
	}

	layerID, err := GenerateDockerID()
	if err != nil {
		return -1, err
	}

	// Make the JSON file we need
	imageJSON := DockerImageJSON{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: DockerImageJSONContainerConfig{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.5",
		Created:       time.Now(),
		ID:            layerID,
		OS:            "linux",
		Size:          layerSize,
		Config:        config,
	}

	jsonOut, err := json.MarshalIndent(imageJSON, "", " ")
	if err != nil {
		return -1, err
	}
	s.logger.Debugln(string(jsonOut))

	// Write out the files to disk that we are going to care about
	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()

	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}

	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()

	_, err = jsonFile.Write(jsonOut)
	if err != nil {
		return -1, err
	}

	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()

	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.repository)))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	if err != nil {
		return -1, err
	}

	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}

	// layer.tar has an extra folder in it so we have to strip it :/
	tempLayerFile, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.Remove(s.options.HostPath("layer.tar"))
	defer tempLayerFile.Close()

	realLayerFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer realLayerFile.Close()

	tr := tar.NewReader(tempLayerFile)
	tw := tar.NewWriter(realLayerFile)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}
		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}
		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}
		if len(hdr.Name) == 0 {
			continue
		}
		err = tw.WriteHeader(hdr)
		if err != nil {
			return -1, err
		}
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}
	tw.Close()

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()

	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	s.logger.WithFields(util.LogFields{
		"Registry":   s.registry,
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Check the auth
	auth := docker.AuthConfiguration{
		Username:      s.username,
		Password:      s.password,
		Email:         s.email,
		ServerAddress: s.authServer,
	}

	if !s.dockerOptions.DockerLocal {
		checkOpts := CheckAccessOptions{
			Auth:       auth,
			Access:     "write",
			Repository: s.repository,
			Registry:   s.registry,
		}

		check, err := client.CheckAccess(checkOpts)
		if err != nil {
			s.logger.Errorln("Error during check access", err)
			return -1, err
		}
		if !check {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()

	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return -1, err
	}

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}

	return s.tagAndPush(layerID, e, client, auth)
}
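// For reference, the scratch directory assembled above follows the legacy
// (pre-Docker-1.10) image tarball layout consumed by `docker load`:
//
//	scratch/
//	    repositories          {"<repository>":{"<tag>":"<layerID>", ...}}
//	    <layerID>/
//	        VERSION           "1.0"
//	        json              image metadata (DockerImageJSON above)
//	        layer.tar         filesystem contents of the single layer
//
// A minimal sketch of producing the repositories file with encoding/json
// instead of hand-concatenated fragments; the helper name is hypothetical and
// repository, tags and layerID are assumed to be the same values as in the
// step above:
func writeRepositoriesSketch(path, repository, layerID string, tags []string) error {
	repoMap := map[string]map[string]string{repository: {}}
	for _, tag := range tags {
		repoMap[repository][tag] = layerID
	}
	out, err := json.Marshal(repoMap)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, out, 0644)
}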
// Execute does the actual export and upload of the container
func (s *StoreContainerStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}

	// TODO(termie): could probably re-use the transport's client
	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return -1, err
	}

	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	repoName := s.DockerRepo()
	tag := s.DockerTag()
	message := s.DockerMessage()

	commitOpts := docker.CommitContainerOptions{
		Container:  containerID,
		Repository: repoName,
		Tag:        tag,
		Author:     "wercker",
		Message:    message,
	}

	s.logger.Debugln("Commit container:", containerID)
	i, err := client.CommitContainer(commitOpts)
	if err != nil {
		return -1, err
	}
	s.logger.WithField("Image", i).Debug("Commit completed")

	e.Emit(core.Logs, &core.LogsArgs{
		Logs: "Exporting container\n",
	})

	file, err := ioutil.TempFile(s.options.BuildPath(), "export-image-")
	if err != nil {
		s.logger.WithField("Error", err).Error("Unable to create temporary file")
		return -1, err
	}

	hash := sha256.New()
	w := snappystream.NewWriter(io.MultiWriter(file, hash))

	exportImageOptions := docker.ExportImageOptions{
		Name:         repoName,
		OutputStream: w,
	}
	err = client.ExportImage(exportImageOptions)
	if err != nil {
		s.logger.WithField("Error", err).Error("Unable to export image")
		return -1, err
	}

	// Copy is done now, so close temporary file and set the calculatedHash
	file.Close()

	calculatedHash := hex.EncodeToString(hash.Sum(nil))

	s.logger.WithFields(util.LogFields{
		"SHA256":            calculatedHash,
		"TemporaryLocation": file.Name(),
	}).Println("Export image successful")

	key := core.GenerateBaseKey(s.options)
	key = fmt.Sprintf("%s/%s", key, "docker.tar.sz")

	s.artifact = &core.Artifact{
		HostPath:    file.Name(),
		Key:         key,
		Bucket:      s.options.S3Bucket,
		ContentType: "application/x-snappy-framed",
		Meta: map[string]*string{
			"Sha256": &calculatedHash,
		},
	}

	return 0, nil
}
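// The export above streams the image through a snappy-framed writer into an
// io.MultiWriter, so the SHA-256 is computed over exactly the compressed
// bytes that land in the temporary file (and later in the artifact metadata).
// A minimal standalone sketch of that tee-to-hash pattern using only the
// standard library; the helper name is hypothetical:
func writeAndHashSketch(dst io.Writer, src io.Reader) (string, error) {
	h := sha256.New()
	// Everything copied through w is written to dst and folded into h.
	w := io.MultiWriter(dst, h)
	if _, err := io.Copy(w, src); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}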
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// layer.tar has an extra folder in it so we have to strip it :/
	artifactReader, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer artifactReader.Close()

	layerFile, err := os.OpenFile(s.options.HostPath("real_layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	dgst := digest.Canonical.New()
	mwriter := io.MultiWriter(layerFile, dgst.Hash())

	tr := tar.NewReader(artifactReader)
	tw := tar.NewWriter(mwriter)

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}
		if err != nil {
			return -1, err
		}
		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}
		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}
		if len(hdr.Name) == 0 {
			continue
		}
		err = tw.WriteHeader(hdr)
		if err != nil {
			return -1, err
		}
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}
	// Finish the tarball so the digest covers the complete layer
	err = tw.Close()
	if err != nil {
		return -1, err
	}

	digest := dgst.Digest()

	config := &container.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		Volumes:      s.volumes,
		ExposedPorts: tranformPorts(s.ports),
	}

	// Make the JSON file we need
	t := time.Now()
	base := image.V1Image{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: container.Config{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.10",
		Created:       t,
		OS:            "linux",
		Config:        config,
	}

	imageJSON := image.Image{
		V1Image: base,
		History: []image.History{image.History{Created: t}},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{layer.DiffID(digest)},
		},
	}

	js, err := imageJSON.MarshalJSON()
	if err != nil {
		return -1, err
	}

	hash := sha256.New()
	hash.Write(js)
	layerID := hex.EncodeToString(hash.Sum(nil))

	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}

	layerFile.Close()

	err = os.Rename(layerFile.Name(), s.options.HostPath("scratch", layerID, "layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()

	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}

	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()

	_, err = jsonFile.Write(js)
	if err != nil {
		return -1, err
	}

	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()

	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.authenticator.Repository(s.repository))))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	if err != nil {
		return -1, err
	}

	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()

	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	// Check the auth
	if !s.dockerOptions.DockerLocal {
		check, err := s.authenticator.CheckAccess(s.repository, auth.Push)
		if !check || err != nil {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	s.repository = s.authenticator.Repository(s.repository)
	s.logger.WithFields(util.LogFields{
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return 1, err
	}

	return s.tagAndPush(layerID, e, client)
}
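// A note on the identifiers above, for orientation: this version mirrors
// Docker's 1.10+ content-addressable scheme. The DiffID recorded under
// RootFS is the canonical digest of the uncompressed layer.tar as it was
// streamed through mwriter, and layerID is the SHA-256 of the marshalled
// image config JSON, which is why the same value serves both as the
// directory name in the scratch layout and as the image ID handed to
// tagAndPush.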
// Execute runs a command and optionally reloads it
func (s *WatchStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return -1, err
	}

	// TODO(termie): PACKAGING make this a feature of session and remove
	//               the calls into its struct
	// Start watching our stdout
	stopListening := make(chan struct{})
	defer func() { stopListening <- struct{}{} }()
	go func() {
		for {
			select {
			case line := <-sess.Recv():
				e.Emit(core.Logs, &core.LogsArgs{
					// Hidden: sess.logsHidden,
					Logs: line,
				})
			// We need to make sure we stop eating the stdout from the container
			// promiscuously when we finish our step
			case <-stopListening:
				return
			}
		}
	}()

	// cheating to get containerID
	// TODO(termie): we should deal with this eventually
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	// Set up a signal handler to end our step.
	finishedStep := make(chan struct{})
	stopWatchHandler := &util.SignalHandler{
		ID: "stop-watch",
		// Signal our stuff to stop and finish the step, return false to
		// signify that we've handled the signal and don't process further
		F: func() bool {
			s.logger.Println("Keyboard interrupt detected, finishing step")
			finishedStep <- struct{}{}
			return false
		},
	}
	util.GlobalSigint().Add(stopWatchHandler)
	// NOTE(termie): I think the only way to exit this code is via this
	//               signal handler and the signal monkey removes handlers
	//               after it processes them, so this may be superfluous
	defer util.GlobalSigint().Remove(stopWatchHandler)

	// If we're not going to reload just run the thing once, synchronously
	if !s.reload {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			return 0, err
		}
		<-finishedStep
		// ignoring errors
		s.killProcesses(containerID, "INT")
		return 0, nil
	}

	f := &util.Formatter{s.options.GlobalOptions.ShowColors}
	s.logger.Info(f.Info("Reloading on file changes"))
	doCmd := func() {
		err := sess.Send(ctx, false, "set +e", s.Code)
		if err != nil {
			s.logger.Errorln(err)
			return
		}
		open, err := exposedPortMaps(s.dockerOptions.DockerHost, s.options.PublishPorts)
		if err != nil {
			s.logger.Warnf(f.Info("There was a problem parsing your docker host."), err)
			return
		}
		for _, uri := range open {
			s.logger.Infof(f.Info("Forwarding %s to %s on the container."), uri.HostURI, uri.ContainerPort)
		}
	}

	// Otherwise set up a watcher and do some magic
	watcher, err := s.watch(s.options.ProjectPath)
	if err != nil {
		return -1, err
	}

	debounce := util.NewDebouncer(2 * time.Second)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				s.logger.Debugln("fsnotify event", event.String())
				if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Remove == fsnotify.Remove {
					if !strings.HasPrefix(filepath.Base(event.Name), ".") {
						s.logger.Debug(f.Info("Modified file", event.Name))
						debounce.Trigger()
					}
				}
			case <-debounce.C:
				err := s.killProcesses(containerID, "INT")
				if err != nil {
					s.logger.Panic(err)
					return
				}
				s.logger.Info(f.Info("Reloading"))
				go doCmd()
			case err := <-watcher.Errors:
				s.logger.Error(err)
				done <- struct{}{}
				return
			case <-finishedStep:
				s.killProcesses(containerID, "INT")
				done <- struct{}{}
				return
			}
		}
	}()

	// Run build on first run
	debounce.Trigger()
	<-done

	return 0, nil
}
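// util.NewDebouncer is not shown in this excerpt. Below is a minimal sketch
// of a debouncer with the same Trigger/C surface, assuming it simply delays
// and coalesces rapid triggers into a single tick after the quiet period
// (requires "sync" and "time"); this is an illustrative assumption, not the
// wercker implementation.
type debouncerSketch struct {
	C     chan struct{}
	delay time.Duration
	mu    sync.Mutex
	timer *time.Timer
}

func newDebouncerSketch(delay time.Duration) *debouncerSketch {
	return &debouncerSketch{C: make(chan struct{}, 1), delay: delay}
}

// Trigger (re)starts the countdown; only the last call within the window
// results in a value on C, which is how the watch loop above avoids
// re-running the command once per filesystem event.
func (d *debouncerSketch) Trigger() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.timer != nil {
		d.timer.Stop()
	}
	d.timer = time.AfterFunc(d.delay, func() {
		select {
		case d.C <- struct{}{}:
		default:
		}
	})
}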