func (b *Backup) compressStagedBackup() {
	startString := fmt.Sprintf("%v", b.StartTime)

	var finalfile string
	if b.Config.Acceptance {
		finalfile = "acceptancetest.tar.gz"
	} else {
		finalfile = fmt.Sprintf("%s.consul.snapshot.%s.tar.gz", b.Config.Hostname, startString)
	}

	finalpath := filepath.Join(b.Config.TmpDir, finalfile)
	b.FullFilename = finalpath

	source := []string{b.LocalFilePath}
	err := archiver.TarGz(finalpath, source)
	if err != nil {
		log.Fatalf("[ERR] Unable to write compressed archive to %s: %v", finalpath, err)
	}
}
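For reference, this excerpt and the ones below all call the archiver package-level helpers the same way: destination archive path first, then a slice of source paths. A minimal, self-contained sketch using that same call shape (the file names here are hypothetical):

package main

import (
	"log"

	"github.com/mholt/archiver"
)

func main() {
	// Hypothetical inputs: any existing files or directories work here.
	sources := []string{"backup.dat", "meta.json"}

	// Same call shape as the excerpts: destination archive, then source paths.
	if err := archiver.TarGz("backup.tar.gz", sources); err != nil {
		log.Fatalf("unable to write compressed archive: %v", err)
	}
}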
func main() {
	// First, clean up
	err := os.RemoveAll(buildDir)
	if err != nil {
		log.Fatal(err)
	}
	err = os.RemoveAll(releaseDir)
	if err != nil {
		log.Fatal(err)
	}

	// Then set up
	err = os.MkdirAll(buildDir, 0755)
	if err != nil {
		log.Fatal(err)
	}
	err = os.MkdirAll(releaseDir, 0755)
	if err != nil {
		log.Fatal(err)
	}

	// Perform builds and make archives in parallel; only as many
	// goroutines as we have processors.
	var wg sync.WaitGroup
	var throttle = make(chan struct{}, numProcs())

	for _, p := range platforms {
		wg.Add(1)
		throttle <- struct{}{}

		if p.os == "" || p.arch == "" || p.archive == "" {
			log.Fatalf("Platform OS, architecture, and archive format is required: %+v", p)
		}

		go func(p platform) {
			defer wg.Done()
			defer func() { <-throttle }()

			fmt.Printf("== Building %s\n", p)

			var baseFilename, binFilename string
			baseFilename = fmt.Sprintf("caddy_%s_%s", p.os, p.arch)
			if p.arch == "arm" {
				baseFilename += p.arm
			}
			binFilename = baseFilename + p.binExt

			binPath := filepath.Join(buildDir, binFilename)
			archive := filepath.Join(releaseDir, fmt.Sprintf("%s.%s", baseFilename, p.archive))
			archiveContents := append(distContents, binPath)

			err := build(p, binPath)
			if err != nil {
				log.Fatal(err)
			}

			fmt.Printf("== Compressing %s\n", baseFilename)
			if p.archive == "zip" {
				err := archiver.Zip(archive, archiveContents)
				if err != nil {
					log.Fatal(err)
				}
			} else if p.archive == "tar.gz" {
				err := archiver.TarGz(archive, archiveContents)
				if err != nil {
					log.Fatal(err)
				}
			}
		}(p)
	}

	wg.Wait()
}
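The throttle channel above is a buffered-channel semaphore: sending into it reserves one of numProcs() slots before a goroutine starts, and the deferred receive releases the slot when the goroutine finishes. A minimal sketch of just that pattern, detached from the build logic (the task count and output are illustrative):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	throttle := make(chan struct{}, runtime.NumCPU()) // at most NumCPU tasks run at once

	for i := 0; i < 8; i++ {
		wg.Add(1)
		throttle <- struct{}{} // acquire a slot; blocks when all slots are taken
		go func(n int) {
			defer wg.Done()
			defer func() { <-throttle }() // release the slot when this task finishes
			fmt.Printf("== task %d\n", n)
		}(i)
	}
	wg.Wait()
}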
// Build performs a build job. This function is blocking. If the build
// job succeeds, it will automatically delete itself when it expires.
// If it fails, resources are not automatically cleaned up.
func (b *Build) Build() error {
	// Prepare the build
	builder, err := caddybuild.PrepareBuild(b.Features, false) // TODO: PullLatest (go get -u) DISABLED for stability; updates are manual for now
	defer builder.Teardown() // always perform cleanup
	if err != nil {
		return err
	}
	builder.CommandName = "./build.bash"

	origRepoPath := filepath.Join(os.Getenv("GOPATH"), "src/github.com/mholt/caddy")

	// Perform the build
	if b.GoArch == "arm" {
		var armInt int
		if b.GoARM != "" {
			armInt, err = strconv.Atoi(b.GoARM)
			if err != nil {
				return err
			}
		} else {
			armInt = defaultARM
		}
		err = builder.BuildStaticARM(b.GoOS, armInt, b.OutputFile, origRepoPath)
	} else if b.GoOS == "darwin" {
		// At time of writing, building with CGO_ENABLED=0 for darwin can break stuff:
		// https://www.reddit.com/r/golang/comments/46bd5h/ama_we_are_the_go_contributors_ask_us_anything/d03rmc9
		err = builder.Build(b.GoOS, b.GoArch, b.OutputFile, origRepoPath)
	} else {
		err = builder.BuildStatic(b.GoOS, b.GoArch, b.OutputFile, origRepoPath)
	}
	if err != nil {
		return err
	}

	// File list to include with build, then compress the build
	fileList := []string{
		filepath.Join(CaddyPath, "/dist/README.txt"),
		filepath.Join(CaddyPath, "/dist/LICENSES.txt"),
		filepath.Join(CaddyPath, "/dist/CHANGES.txt"),
		filepath.Join(CaddyPath, "/dist/init"),
		b.OutputFile,
	}
	if b.DownloadFileCompression == CompressZip {
		err = archiver.Zip(b.DownloadFile, fileList)
	} else if b.DownloadFileCompression == CompressTarGz {
		err = archiver.TarGz(b.DownloadFile, fileList)
	} else {
		return fmt.Errorf("unknown compress type %v", b.DownloadFileCompression)
	}
	if err != nil {
		return fmt.Errorf("error compressing: %v", err)
	}

	// Delete uncompressed binary
	err = os.Remove(b.OutputFile)
	if err != nil {
		return err
	}

	// Finalize the build and have it clean itself
	// up after its expiration
	b.finish()

	return nil
}