func TestIrregular_RemexecPs1NotExists(t *testing.T) {
	scriptName := filepath.Join(baseDir, "remexec.ps1")
	tmpScriptName := filepath.Join(baseDir, "remexec.ps1.tmp")
	os.Rename(scriptName, tmpScriptName)
	defer os.Rename(tmpScriptName, scriptName)

	c := capturer.NewStdoutCapturer()
	c.Start()
	rc, err := executeRemoteCommand("pwd")
	output := c.Stop()

	if err != nil {
		t.Fatalf("Error occurred: %s", err)
	}
	if rc != 255 {
		t.Errorf("RC => %d, wants %d", rc, 255)
	}
	if strings.Contains(output, "REX003E") {
		t.Errorf("Output contains unexpected error message.")
		t.Log("Output:")
		t.Log(output)
	}
}
func saveConns(conf *types.Configuration) {
	filename := replaceHome(connectionsPath)
	tmp := filename + ".tmp"
	wr, err := os.Create(tmp)
	p(err, "opening "+filename)
	defer func() {
		if err := os.Rename(tmp, filename); err != nil {
			p(os.Remove(filename), "deleting old connections.xml")
			p(os.Rename(tmp, filename), "overwriting connections.xml")
		}
	}()
	defer wr.Close()

	encoding := unicode.UTF16(unicode.LittleEndian, unicode.ExpectBOM)
	textEncoder := encoding.NewEncoder()
	writer := textEncoder.Writer(wr)

	fmt.Fprintln(writer, `<?xml version="1.0" encoding="utf-16"?>
<!-- ****************************************************************-->
<!-- * *-->
<!-- * PuTTY Configuration Manager save file - All right reserved. *-->
<!-- * *-->
<!-- ****************************************************************-->
<!-- The following lines can be modified at your own risks. -->`)

	encoder := xml.NewEncoder(writer)
	encoder.Indent("", " ")
	p(encoder.Encode(&conf), "encoding xml")
}
func saveTaskList(tasks TaskList) {
	path := *fileFlag
	previous := path + "~"
	temp := path + "~~"
	var serializeError error
	if file, err := os.Create(temp); err == nil {
		defer func() {
			if err = file.Close(); err != nil {
				os.Remove(temp)
			} else {
				if serializeError != nil {
					return
				}
				if _, err := os.Stat(path); err == nil {
					if err = os.Rename(path, previous); err != nil {
						fatal("unable to rename %s to %s", path, previous)
					}
				}
				if err = os.Rename(temp, path); err != nil {
					fatal("unable to rename %s to %s", temp, path)
				}
			}
		}()
		writer := NewJsonIO()
		if serializeError = writer.Serialize(file, tasks); serializeError != nil {
			fatal(serializeError.Error())
		}
	}
}
func uploadChromiumBuild(localOutDir, gsDir, targetPlatform string, gs *GsUtil) error {
	localUploadDir := localOutDir
	if targetPlatform == "Android" {
		localUploadDir = filepath.Join(localUploadDir, "apks")
	} else {
		// Temporarily move the large "gen" and "obj" directories, which are not
		// needed, so that they do not get uploaded to Google Storage. Move them
		// back after the method completes.
		genDir := filepath.Join(localOutDir, "gen")
		genTmpDir := filepath.Join(ChromiumBuildsDir, "gen")
		if err := os.Rename(genDir, genTmpDir); err != nil {
			return fmt.Errorf("Could not rename gen dir: %s", err)
		}
		defer util.Rename(genTmpDir, genDir)

		objDir := filepath.Join(localOutDir, "obj")
		objTmpDir := filepath.Join(ChromiumBuildsDir, "obj")
		if err := os.Rename(objDir, objTmpDir); err != nil {
			return fmt.Errorf("Could not rename obj dir: %s", err)
		}
		defer util.Rename(objTmpDir, objDir)
	}
	return gs.UploadDir(localUploadDir, gsDir, true)
}
func (p *PcapLogger) Archive() {
	newBasename := filepath.Join(p.ArchiveDir, filepath.Base(p.basename))
	os.Rename(p.basename, newBasename)
	for i := 1; i < p.pcapLogNum+1; i++ {
		os.Rename(filepath.Join(p.LogDir, fmt.Sprintf("%s.pcap.%d", p.Flow.String(), i)), fmt.Sprintf("%s.%d", newBasename, i))
	}
}
func TestGitGetter(t *testing.T) {
	if !testHasGit {
		t.Log("git not found, skipping")
		t.Skip()
	}

	g := new(GitGetter)
	dst := tempDir(t)

	// Git doesn't allow nested ".git" directories so we do some hackiness
	// here to get around that...
	moduleDir := filepath.Join(fixtureDir, "basic-git")
	oldName := filepath.Join(moduleDir, "DOTgit")
	newName := filepath.Join(moduleDir, ".git")
	if err := os.Rename(oldName, newName); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Rename(newName, oldName)

	// With a dir that doesn't exist
	if err := g.Get(dst, testModuleURL("basic-git")); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify the main file exists
	mainPath := filepath.Join(dst, "main.tf")
	if _, err := os.Stat(mainPath); err != nil {
		t.Fatalf("err: %s", err)
	}
}
// Delete atomically removes an image from the graph.
func (graph *Graph) Delete(id string) error {
	garbage, err := graph.Garbage()
	if err != nil {
		return err
	}
	graph.idIndex.Delete(id)
	err = os.Rename(graph.imageRoot(id), garbage.imageRoot(id))
	if err != nil {
		// FIXME: this introduces a race condition in Delete() if the image is
		// already present in garbage. Let's store under random names in garbage instead.
		if isNotEmpty(err) {
			Debugf("The image %s is already present in garbage. Removing it.", id)
			if err = os.RemoveAll(garbage.imageRoot(id)); err != nil {
				Debugf("Error while removing the image %s from garbage: %s\n", id, err)
				return err
			}
			Debugf("Image %s removed from garbage", id)
			if err = os.Rename(graph.imageRoot(id), garbage.imageRoot(id)); err != nil {
				return err
			}
			Debugf("Image %s put in the garbage", id)
		} else {
			Debugf("Error putting the image %s to garbage: %s\n", id, err)
		}
		return err
	}
	return nil
}
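The FIXME above suggests avoiding the collision by renaming into the garbage area under a random name instead of the image id. Below is a minimal, self-contained sketch of that idea; the helper name moveToGarbage, the use of ioutil.TempDir to pick a unique directory, and the example paths are illustrative assumptions, not the graph package's actual API.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// moveToGarbage renames src into garbageDir under a freshly created, randomly
// named subdirectory, so two concurrent deletes of the same id cannot collide
// on the destination path. Hypothetical helper for illustration only.
func moveToGarbage(src, garbageDir string) (string, error) {
	// ioutil.TempDir creates a uniquely named directory for us.
	dir, err := ioutil.TempDir(garbageDir, "deleted-")
	if err != nil {
		return "", err
	}
	dst := filepath.Join(dir, filepath.Base(src))
	if err := os.Rename(src, dst); err != nil {
		os.Remove(dir) // clean up the empty placeholder on failure
		return "", err
	}
	return dst, nil
}

func main() {
	// Example usage with throwaway paths.
	tmp, _ := ioutil.TempDir("", "graph-example-")
	defer os.RemoveAll(tmp)

	img := filepath.Join(tmp, "image-1234")
	garbage := filepath.Join(tmp, "garbage")
	os.MkdirAll(img, 0755)
	os.MkdirAll(garbage, 0755)

	dst, err := moveToGarbage(img, garbage)
	fmt.Println(dst, err)
}
```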
func (h *hashRecord) Finish() (string, error) {
	err := h.tmpf.Close()
	if err != nil {
		return "", err
	}

	hs := hex.EncodeToString(h.sh.Sum([]byte{}))
	fn := hashFilename(h.base, hs)

	if h.hashin != "" && h.hashin != hs {
		return "", fmt.Errorf("Invalid hash %v != %v", h.hashin, hs)
	}

	err = os.Rename(h.tmpf.Name(), fn)
	if err != nil {
		os.MkdirAll(filepath.Dir(fn), 0777)
		os.Remove(fn)
		err = os.Rename(h.tmpf.Name(), fn)
		if err != nil {
			log.Printf("Error renaming %v to %v: %v", h.tmpf.Name(), fn, err)
			os.Remove(h.tmpf.Name())
			return "", err
		}
	}

	h.tmpf = nil
	return hs, nil
}
// Upgrade to the given release, saving the previous binary with a ".old" extension.
func upgradeTo(path string, rel Release, archExtra string) error {
	expectedRelease := fmt.Sprintf("syncthing-%s-%s%s-%s.", runtime.GOOS, runtime.GOARCH, archExtra, rel.Tag)
	if debug {
		l.Debugf("expected release asset %q", expectedRelease)
	}
	for _, asset := range rel.Assets {
		if debug {
			l.Debugln("considering release", asset)
		}

		if strings.HasPrefix(asset.Name, expectedRelease) {
			if strings.HasSuffix(asset.Name, ".zip") {
				fname, err := readZip(asset.URL, filepath.Dir(path))
				if err != nil {
					return err
				}

				old := path + ".old"
				os.Remove(old)

				err = os.Rename(path, old)
				if err != nil {
					return err
				}
				err = os.Rename(fname, path)
				if err != nil {
					return err
				}
				return nil
			}
		}
	}

	return ErrVersionUnknown
}
func (t *MemFSTest) RenameOverExistingDirectory() {
	var err error

	// Create two directories, the first non-empty.
	oldPath := path.Join(t.Dir, "foo")
	err = os.MkdirAll(path.Join(oldPath, "child"), 0700)
	AssertEq(nil, err)

	newPath := path.Join(t.Dir, "bar")
	err = os.Mkdir(newPath, 0600)
	AssertEq(nil, err)

	// Renaming over the non-empty directory shouldn't work.
	err = os.Rename(newPath, oldPath)
	ExpectThat(err, Error(MatchesRegexp("not empty|file exists")))

	// As of Go 1.8 this shouldn't work the other way around either (see
	// https://github.com/golang/go/commit/321c312).
	if atLeastGo18 {
		err = os.Rename(oldPath, newPath)
		ExpectThat(err, Error(HasSubstr("file exists")))

		// Both should still be present in the parent listing.
		entries, err := fusetesting.ReadDirPicky(t.Dir)
		AssertEq(nil, err)
		ExpectEq(2, len(entries))
	}
}
func main() {
	// Create a directory.
	os.MkdirAll("testdir", 0777)

	// Rename the directory.
	err := os.Rename("testdir", "newdir")

	// Check for errors.
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Create an empty file.
	file, _ := os.Create("testfile")
	file.Close()

	// Rename the file.
	err = os.Rename("testfile", "newfile")

	// Check for errors.
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
func restorePage(page string) string {
	// 1. Count the deleted versions.
	deletedVers := sortedVersions("deleted/" + page)
	n := len(deletedVers)
	pageAlreadyThere := fileExists(pageFile(page, -1))
	if pageAlreadyThere {
		n++
	}

	// 2. Shift pre-existing old versions up by the deleted version count.
	if n > 0 {
		preExistVers := sortedVersions("old/" + page)
		for _, ver := range preExistVers {
			old := fmt.Sprintf("old/%s.%d", page, ver)
			newname := fmt.Sprintf("old/%s.%d", page, ver+n)
			os.Rename(old, newname)
		}
	}

	// 3. Rename the deleted versions to old.1 through old.(n-1).
	i := 1
	for _, ver := range deletedVers {
		old := fmt.Sprintf("deleted/%s.%d", page, ver)
		newname := fmt.Sprintf("old/%s.%d", page, i)
		os.Rename(old, newname)
		i++
	}

	// 4. Rename the deleted page as appropriate and redirect.
	if pageAlreadyThere {
		os.Rename("deleted/"+page, fmt.Sprintf("old/%s.%d", page, n))
		return fmt.Sprintf("/edit/%s?ver=%d", page, n)
	}
	os.Rename("deleted/"+page, "pages/"+page)
	return "/" + page
}
func install(name string, p []byte) error {
	oldExecPath := name + ".old"
	execDir := filepath.Dir(name)
	part := filepath.Join(execDir, "hk.part")

	err := ioutil.WriteFile(part, p, 0755)
	if err != nil {
		return err
	}
	defer os.Remove(part)

	// Remove the old executable left over from a previous update.
	if err = os.Remove(oldExecPath); err != nil && !os.IsNotExist(err) {
		return err
	}

	// Move the existing executable to a new file in the same directory.
	err = os.Rename(name, oldExecPath)
	if err != nil {
		return err
	}

	// Move the new executable in to become the new program.
	err = os.Rename(part, name)
	if err != nil {
		// Move unsuccessful: restore the old binary.
		_ = os.Rename(oldExecPath, name)
		return err
	}

	// Move successful: remove the old binary (this fails on Windows).
	_ = os.Remove(oldExecPath)
	return nil
}
func Migrate(dataDir string) {
	if !server.FileExists(dataDir) {
		logger.Printf("Data directory '%v' doesn't appear to exist, so aborting migration.", dataDir)
		return
	}
	logger.Printf("Migrating data in %v to version %v", dataDir, DATA_VERSION)
	deployedDataVersion := readDataVersion(dataDir)
	if deployedDataVersion == DATA_VERSION {
		logger.Printf("Actually, data format was already up-to-date, so skipping migration.")
		return
	}

	migrateEntityGraph(dataDir, "person")
	migrateEntityGraph(dataDir, "org")
	migrateEntityGraph(dataDir, "place")

	server.Must(os.Remove(filepath.Join(dataDir, "personGraph.dat")))
	server.Must(os.Remove(filepath.Join(dataDir, "orgGraph.dat")))
	server.Must(os.Remove(filepath.Join(dataDir, "placeGraph.dat")))

	server.Must(os.Rename(filepath.Join(dataDir, "personGraph.dat.tmp"), filepath.Join(dataDir, "personGraph.dat")))
	server.Must(os.Rename(filepath.Join(dataDir, "orgGraph.dat.tmp"), filepath.Join(dataDir, "orgGraph.dat")))
	server.Must(os.Rename(filepath.Join(dataDir, "placeGraph.dat.tmp"), filepath.Join(dataDir, "placeGraph.dat")))

	WriteDataVersion(dataDir)
	logger.Printf("Data in %v successfully migrated to version %v", dataDir, DATA_VERSION)
}
func finishUpload(num int, req *http.Request) error {
	newuploadLock.Lock()
	_, ok := uploads[num]
	if ok {
		delete(uploads, num)
	}
	newuploadLock.Unlock()
	if !ok {
		return fmt.Errorf("no such upload: %d", num)
	}

	finalPath, filename, err := computeFinalPathAndFile(num)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(finalPath, 0755); err != nil {
		return err
	}

	err = os.Rename(path.Join(directory, "tmp", strconv.Itoa(num)), path.Join(finalPath, filename))
	if err != nil {
		return err
	}

	tmpsig := path.Join(directory, "tmp", strconv.Itoa(num)+".asc")
	if _, err := os.Stat(tmpsig); err == nil {
		err = os.Rename(tmpsig, path.Join(finalPath, filename+".asc"))
		if err != nil {
			return err
		}
	}
	return nil
}
// dumps a NeedleMap into a cdb
func DumpNeedleMapToCdb(cdbName string, nm *NeedleMap) error {
	tempnam := cdbName + "t"
	fnames := make([]string, 1, 2)
	adder, closer, err := openTempCdb(tempnam)
	if err != nil {
		return fmt.Errorf("error creating factory: %s", err)
	}
	fnames[0] = tempnam

	elt := cdb.Element{Key: make([]byte, 8), Data: make([]byte, 8)}
	fcount := uint64(0)
	walk := func(key uint64, offset, size uint32) error {
		if fcount >= maxCdbRecCount {
			if err = closer(); err != nil {
				return err
			}
			tempnam = cdbName + "t2"
			if adder, closer, err = openTempCdb(tempnam); err != nil {
				return fmt.Errorf("error creating second factory: %s", err)
			}
			fnames = append(fnames, tempnam)
			fcount = 0
		}
		util.Uint64toBytes(elt.Key, key)
		util.Uint32toBytes(elt.Data[:4], offset)
		util.Uint32toBytes(elt.Data[4:], size)
		fcount++
		return adder(elt)
	}

	// and write out the cdb from there
	err = nm.Visit(func(nv NeedleValue) error {
		return walk(uint64(nv.Key), nv.Offset, nv.Size)
	})
	if err != nil {
		closer()
		return fmt.Errorf("error walking index %s: %s", nm, err)
	}

	// store fileBytes
	data, e := json.Marshal(nm.mapMetric)
	if e != nil {
		return fmt.Errorf("error marshaling metric %s: %s", nm.mapMetric, e)
	}
	if err = adder(cdb.Element{Key: []byte{'M'}, Data: data}); err != nil {
		return err
	}
	if err = closer(); err != nil {
		return err
	}

	os.Remove(cdbName)
	if len(fnames) == 1 {
		return os.Rename(fnames[0], cdbName)
	}
	bn, ext := nakeFilename(cdbName)
	if err = os.Rename(fnames[0], bn+".1"+ext); err != nil {
		return err
	}
	return os.Rename(fnames[1], bn+".2"+ext)
}
func TestGitGetter_GetFile(t *testing.T) {
	if !testHasGit {
		t.Log("git not found, skipping")
		t.Skip()
	}

	g := new(GitGetter)
	dst := tempFile(t)

	// Git doesn't allow nested ".git" directories so we do some hackiness
	// here to get around that...
	moduleDir := filepath.Join(fixtureDir, "basic-git")
	oldName := filepath.Join(moduleDir, "DOTgit")
	newName := filepath.Join(moduleDir, ".git")
	if err := os.Rename(oldName, newName); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Rename(newName, oldName)

	// Download
	if err := g.GetFile(dst, testModuleURL("basic-git/foo.txt")); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify the main file exists
	if _, err := os.Stat(dst); err != nil {
		t.Fatalf("err: %s", err)
	}
	assertContents(t, dst, "Hello\n")
}
// Clear closes the existing WAL and moves away the WAL and snapshot.
func (e *EncryptedRaftLogger) Clear(ctx context.Context) error {
	e.encoderMu.Lock()
	defer e.encoderMu.Unlock()

	if e.wal != nil {
		if err := e.wal.Close(); err != nil {
			log.G(ctx).WithError(err).Error("error closing raft WAL")
		}
	}
	e.snapshotter = nil

	newWALDir, err := ioutil.TempDir(e.StateDir, "wal.")
	if err != nil {
		return err
	}
	os.RemoveAll(newWALDir)
	if err = os.Rename(e.walDir(), newWALDir); err != nil {
		return err
	}

	newSnapDir, err := ioutil.TempDir(e.StateDir, "snap.")
	if err != nil {
		return err
	}
	os.RemoveAll(newSnapDir)
	if err := os.Rename(e.snapDir(), newSnapDir); err != nil {
		return err
	}

	return nil
}
// doRotate rotates all the logs and returns a file opened on the newly vacated filename.
// 'log.name' is renamed to 'log.name.1', 'log.name.1' to 'log.name.2', and so on.
func doRotate(f string, limit int) (*os.File, error) {
	// Create a format string with the correct amount of zero-padding for the limit.
	numFmt := fmt.Sprintf(".%%0%dd", len(fmt.Sprintf("%d", limit)))

	// Get all rotated files and sort them in reverse order.
	list, err := filepath.Glob(fmt.Sprintf("%s.*", f))
	if err != nil {
		return nil, fmt.Errorf("Error rotating logs: %s", err)
	}
	sort.Sort(sort.Reverse(sort.StringSlice(list)))

	for _, file := range list {
		parts := strings.Split(file, ".")
		numPart := parts[len(parts)-1]
		num, err := strconv.Atoi(numPart)
		if err != nil {
			// not a number, don't rotate it
			continue
		}
		if num >= limit {
			// we're at the limit, don't rotate it
			continue
		}
		newName := fmt.Sprintf(strings.Join(parts[:len(parts)-1], ".")+numFmt, num+1)
		// don't check the error because there's nothing we can do
		os.Rename(file, newName)
	}

	if err = os.Rename(f, fmt.Sprintf(f+numFmt, 1)); err != nil {
		if !os.IsNotExist(err) {
			return nil, fmt.Errorf("Error rotating logs: %s", err)
		}
	}
	return os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
}
func (fs *FS) Rename(oldPath string, newPath string, context *fuse.Context) (code fuse.Status) {
	if fs.isFiltered(newPath) {
		return fuse.EPERM
	}
	cOldPath, err := fs.getBackingPath(oldPath)
	if err != nil {
		return fuse.ToStatus(err)
	}
	cNewPath, err := fs.getBackingPath(newPath)
	if err != nil {
		return fuse.ToStatus(err)
	}

	// The Rename may cause a directory to take the place of another directory.
	// That directory may still be in the DirIV cache, clear it.
	fs.CryptFS.DirIVCacheEnc.Clear()

	err = os.Rename(cOldPath, cNewPath)
	if lerr, ok := err.(*os.LinkError); ok && lerr.Err == syscall.ENOTEMPTY {
		// If an empty directory is overwritten we will always get ENOTEMPTY as
		// the "empty" directory will still contain gocryptfs.diriv.
		// Handle that case by removing the target directory and trying again.
		cryptfs.Debug.Printf("Rename: Handling ENOTEMPTY")
		if fs.Rmdir(newPath, context) == fuse.OK {
			err = os.Rename(cOldPath, cNewPath)
		}
	}
	return fuse.ToStatus(err)
}
func TestCacheBadJson(t *testing.T) {
	home, umask := os.Getenv("HOME"), syscall.Umask(0)
	abs, _ := filepath.Abs(".")
	test := filepath.Join(abs, "test")
	defer func() {
		syscall.Umask(umask)
		_ = os.Setenv("HOME", home)
		_ = os.Rename(filepath.Join(test, ".cache", cacheName), filepath.Join(test, ".cache", "bad.json"))
	}()
	_ = os.Setenv("HOME", test)
	_ = os.Rename(filepath.Join(test, ".cache", "bad.json"), filepath.Join(test, ".cache", cacheName))

	buffer := bytes.NewBuffer([]byte{})
	log.SetOutput(buffer)

	file := getCacheFile()
	if !file.Valid {
		t.Fatal("It should be valid")
	}
	if !strings.Contains(buffer.String(), "Decoding of cache file failed") {
		t.Fatal("Wrong log")
	}
}
func (v *VuesController) Post() {
	file := v.Ctx.Input.Param(":files")
	d, f := Emplacement(Root, file)
	fileNotExt := strings.TrimSuffix(f, filepath.Ext(f))
	c := fileNotExt + ".srt"
	pathSrt := path.Dir(d) + "/" + fileNotExt + ".srt"
	if !filepath.HasPrefix(f, ".") {
		finfo, err := os.Stat(d)
		if err != nil {
			check(err)
		} else {
			if !finfo.IsDir() {
				err := os.Rename(d, path.Dir(d)+"/."+f)
				if err != nil {
					check(err)
				}
				if !filepath.HasPrefix(c, ".") {
					_, err = os.Stat(pathSrt)
					if err == nil {
						err := os.Rename(pathSrt, path.Dir(pathSrt)+"/."+c)
						if err != nil {
							check(err)
						}
					}
				}
				v.Redirect("/list/"+path.Dir(file), 302)
			}
		}
	} else {
		// "the file has already been modified!"
		fmt.Println(" le fichier a déjà été modifié !")
		v.Redirect("/list/"+path.Dir(file), 302)
	}
}
func shake(source, dest []shakableFile) {
	for i := len(source) - 1; i > 0; i-- {
		if !source[i].isShaked || !dest[i].isShaked {
			file, err := ioutil.TempFile(filepath.Dir(source[i].filepath), "mvshaker")
			if err != nil {
				panic(err)
			}
			defer os.Remove(file.Name())
			if err := os.Rename(source[i].filepath, file.Name()); err != nil {
				panic(err)
			}
			if err := os.Rename(dest[i].filepath, source[i].filepath); err != nil {
				panic(err)
			}
			if err := os.Rename(file.Name(), dest[i].filepath); err != nil {
				panic(err)
			}
			source[i].isShaked = true
			dest[i].isShaked = true
		}
	}
}
func (upstream *FilesManager) writeApp(app *marathon.App) ([]string, error) {
	var files []string
	for portIndex, port := range app.Ports {
		app_id := PrettifyAppId(app.Id, portIndex, port)
		cfgfile := filepath.Join(upstream.BasePath, app_id+".instances")
		tmpfile := cfgfile + ".tmp"

		err := upstream.writeFile(tmpfile, app_id, portIndex, app)
		if err != nil {
			return files, err
		}
		files = append(files, cfgfile)

		if _, err := os.Stat(cfgfile); os.IsNotExist(err) {
			upstream.Log(fmt.Sprintf("new %v", cfgfile))
			os.Rename(tmpfile, cfgfile)
		} else if !FileIsIdentical(tmpfile, cfgfile) {
			upstream.Log(fmt.Sprintf("refresh %v", cfgfile))
			os.Rename(tmpfile, cfgfile)
		} else {
			// new file is identical to already existing one
			os.Remove(tmpfile)
		}
	}
	return files, nil
}
// Copy imagefile and btrfs file out of the tmpdir
func pullOutImagefiles(d *Daemon, builddir string, fingerprint string) error {
	imagefname := filepath.Join(builddir, fingerprint)
	finalName := shared.VarPath("images", fingerprint)
	err := os.Rename(imagefname, finalName)
	if err != nil {
		return err
	}

	lvsymlink := fmt.Sprintf("%s.lv", imagefname)
	if _, err := os.Stat(lvsymlink); err == nil {
		dst := shared.VarPath("images", fmt.Sprintf("%s.lv", fingerprint))
		return os.Rename(lvsymlink, dst)
	}

	switch d.BackingFs {
	case "btrfs":
		subvol := fmt.Sprintf("%s.btrfs", imagefname)
		dst := shared.VarPath("images", fmt.Sprintf("%s.btrfs", fingerprint))
		if err := os.Rename(subvol, dst); err != nil {
			return err
		}
	}

	return nil
}
func (h *RotatingFileHandler) doRollover() {
	f, err := h.fd.Stat()
	if err != nil {
		return
	}

	if h.maxBytes <= 0 {
		return
	} else if f.Size() < int64(h.maxBytes) {
		return
	}

	if h.backupCount > 0 {
		h.fd.Close()

		for i := h.backupCount - 1; i > 0; i-- {
			sfn := fmt.Sprintf("%s.%d", h.fileName, i)
			dfn := fmt.Sprintf("%s.%d", h.fileName, i+1)
			os.Rename(sfn, dfn)
		}

		dfn := fmt.Sprintf("%s.1", h.fileName)
		os.Rename(h.fileName, dfn)

		h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	}
}
func updateCLI(channel string) {
	manifest, err := getUpdateManifest(channel)
	if err != nil {
		Warn("Error updating CLI")
		PrintError(err)
		return
	}
	if manifest.Version == Version && manifest.Channel == Channel {
		return
	}
	LogIfError(golock.Lock(updateLockPath))
	defer golock.Unlock(updateLockPath)
	Errf("Updating Heroku v4 CLI to %s (%s)... ", manifest.Version, manifest.Channel)
	build := manifest.Builds[runtime.GOOS][runtime.GOARCH]
	// on windows we can't remove an existing file or remove the running binary
	// so we download the file to binName.new
	// move the running binary to binName.old (deleting any existing file first)
	// rename the downloaded file to binName
	if err := downloadBin(binPath+".new", build.URL); err != nil {
		panic(err)
	}
	if fileSha1(binPath+".new") != build.Sha1 {
		panic("SHA mismatch")
	}
	os.Remove(binPath + ".old")
	os.Rename(binPath, binPath+".old")
	if err := os.Rename(binPath+".new", binPath); err != nil {
		panic(err)
	}
	os.Remove(binPath + ".old")
	Errln("done")
}
func rebuild_file(c []child, patch_num int, dest string) string {
	if patch_num == 0 {
		return "restored/.tmp/" + c[0].Name
	}
	err := os.Rename("restored/.tmp/"+c[0].Name, "restored/.tmp/base")
	if err != nil {
		log.Fatal(err)
	}
	for i := 1; i <= patch_num; i++ {
		f, err := os.Open("restored/.tmp/base")
		if err != nil {
			log.Fatal(err)
		}
		m, err := os.OpenFile("restored/.tmp/master", os.O_CREATE|os.O_WRONLY, 0777)
		if err != nil {
			log.Fatal(err)
		}
		p, err := os.Open("restored/.tmp/" + c[i].Name)
		if err != nil {
			log.Fatal(err)
		}
		err = binarydist.Patch(f, m, p)
		if err != nil {
			log.Fatal(err)
		}
		m.Close()
		f.Close()
		p.Close()
		os.Rename("restored/.tmp/master", "restored/.tmp/base")
	}
	return "restored/.tmp/base"
}
func formatFile(filename string, write, backup bool) error {
	data, err := readFile(filename)
	if err != nil {
		return err
	}
	formatted, err := tick.Format(data)
	if err != nil {
		return err
	}
	if write {
		dir := filepath.Dir(filename)
		tmp, err := writeTmpFile(dir, formatted)
		if err != nil {
			return err
		}
		defer os.Remove(tmp)
		if backup {
			err := os.Rename(filename, filename+backupExt)
			if err != nil {
				return err
			}
		}
		err = os.Rename(tmp, filename)
		if err != nil {
			return err
		}
	} else {
		_, err := os.Stdout.Write([]byte(formatted))
		if err != nil {
			return err
		}
	}
	return nil
}
func updateSelf() {
	fmt.Println("Downloading latest version...")
	resp, err := http.Get("https://s3.amazonaws.com/progrium-flynn/flynn-grid/dev/grid-cli_" + runtime.GOOS + "_" + runtime.GOARCH + ".tgz")
	assert(err)
	defer resp.Body.Close()

	z, err := gzip.NewReader(resp.Body)
	assert(err)
	defer z.Close()

	t := tar.NewReader(z)
	hdr, err := t.Next()
	assert(err)
	if hdr.Name != "grid" {
		log.Fatal("grid binary not found in tarball")
	}
	defer t.Next()

	selfpath, err := osext.Executable()
	assert(err)
	info, err := os.Stat(selfpath)
	assert(err)

	assert(os.Rename(selfpath, selfpath+".old"))
	f, err := os.OpenFile(selfpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode().Perm())
	if err != nil {
		assert(os.Rename(selfpath+".old", selfpath))
		assert(err)
	}
	defer f.Close()

	_, err = io.Copy(f, t)
	if err != nil {
		assert(os.Rename(selfpath+".old", selfpath))
		assert(err)
	}
	assert(os.Remove(selfpath + ".old"))
	fmt.Println("Updated.")
}