// Reading a symlink to a file must fail
func TestReadSymlinkedDirectoryToFile(t *testing.T) {
	var err error
	var file *os.File

	if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
		t.Fatalf("failed to create file: %s", err)
	}
	file.Close()

	if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
		t.Errorf("failed to create symlink: %s", err)
	}

	var path string
	if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
		t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
	}

	if path != "" {
		t.Fatalf("path should've been empty: %s", path)
	}

	if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
		t.Errorf("failed to remove file: %s", err)
	}

	if err = os.Remove("/tmp/fileLinkTest"); err != nil {
		t.Errorf("failed to remove symlink: %s", err)
	}
}
func listenStream(netw, addr string) (l net.Listener, err error) {
	var (
		file *os.File
	)

	fd, err := listen(netw, addr)
	if err != nil {
		return nil, err
	}

	// Set backlog size to the maximum
	if err = syscall.Listen(fd, syscall.SOMAXCONN); err != nil {
		syscall.Close(fd)
		return nil, err
	}

	file = os.NewFile(uintptr(fd), filePrefix+strconv.Itoa(os.Getpid()))
	if l, err = net.FileListener(file); err != nil {
		syscall.Close(fd)
		return nil, err
	}

	if err = file.Close(); err != nil {
		syscall.Close(fd)
		l.Close()
		return nil, err
	}

	return l, err
}
func main() {
	var outFile *os.File
	var err error

	if outFile, err = os.Create("create.png"); err != nil {
		println("Error", err.Error())
		return
	}
	defer outFile.Close()

	rect := image.Rect(0, 0, 100, 100)
	rgba := image.NewRGBA64(rect)

	// I'd like to draw "#golang" or the like, but for now just the "#"
	for i := 0; i < 10; i++ {
		rgba.Set(60, (10 + i), image.Black.At(0, 0))
		rgba.Set(65, (10 + i), image.Black.At(0, 0))
		rgba.Set((58 + i), 13, image.Black.At(0, 0))
		rgba.Set((58 + i), 16, image.Black.At(0, 0))
	}

	outImage := rgba.SubImage(rect)
	if err = png.Encode(outFile, outImage); err != nil {
		println("Error", err.Error())
		return
	}
}
func (v *Volume) load(alsoLoadIndex bool) error {
	var e error
	fileName := path.Join(v.dir, v.Id.String())

	if exists, canRead, canWrite, _ := checkFile(fileName + ".dat"); exists && !canRead {
		return fmt.Errorf("cannot read Volume Data file %s.dat", fileName)
	} else if !exists || canWrite {
		v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
	} else if exists && canRead {
		glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
		v.dataFile, e = os.Open(fileName + ".dat")
		v.readOnly = true
	} else {
		return fmt.Errorf("unknown state of Volume Data file %s.dat", fileName)
	}
	if e != nil {
		if !os.IsPermission(e) {
			return fmt.Errorf("cannot load Volume Data %s.dat: %s", fileName, e.Error())
		}
	}

	if v.ReplicaType == CopyNil {
		e = v.readSuperBlock()
	} else {
		e = v.maybeWriteSuperBlock()
	}

	if e == nil && alsoLoadIndex {
		var indexFile *os.File
		if v.readOnly {
			glog.V(1).Infoln("open to read file", fileName+".idx")
			if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil {
				return fmt.Errorf("cannot read Volume Index %s.idx: %s", fileName, e.Error())
			}
			if v.ensureConvertIdxToCdb(fileName) {
				v.nm, e = OpenCdbMap(fileName + ".cdb")
				return e
			}
			if indexFile != nil {
				glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
				if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
					glog.Errorf("error converting %s.idx to %s.cdb: %s", fileName, fileName, e)
				} else {
					indexFile.Close()
					os.Remove(indexFile.Name())
					indexFile = nil
				}
			}
		} else {
			glog.V(1).Infoln("open to write file", fileName+".idx")
			if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil {
				return fmt.Errorf("cannot write Volume Index %s.idx: %s", fileName, e.Error())
			}
		}
		glog.V(0).Infoln("loading file", fileName+".idx", "readonly", v.readOnly)
		if v.nm, e = LoadNeedleMap(indexFile); e != nil {
			glog.V(0).Infoln("loading error:", e)
		}
	}

	return e
}
func (v *Volume) ensureConvertIdxToCdb(fileName string) (cdbCanRead bool) {
	var indexFile *os.File
	var e error

	_, cdbCanRead, cdbCanWrite, cdbModTime := checkFile(fileName + ".cdb")
	_, idxCanRead, _, idxModTime := checkFile(fileName + ".idx")
	if cdbCanRead && cdbModTime.After(idxModTime) {
		return true
	}
	if !cdbCanWrite {
		return false
	}
	if !idxCanRead {
		glog.V(0).Infoln("Can not read file", fileName+".idx!")
		return false
	}

	glog.V(2).Infoln("opening file", fileName+".idx")
	if indexFile, e = os.Open(fileName + ".idx"); e != nil {
		glog.V(0).Infoln("Failed to read file", fileName+".idx !")
		return false
	}
	defer indexFile.Close()

	glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
	if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
		glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, fileName, e.Error())
		return false
	}
	return true
}
func main() {
	var file *os.File
	var outFile *os.File
	var img image.Image
	var err error

	if file, err = os.Open("pkg.png"); err != nil {
		println("Error", err.Error())
		return
	}
	defer file.Close()

	if img, err = png.Decode(file); err != nil {
		println("Error", err.Error())
		return
	}

	if outFile, err = os.Create("out_pkg.jpeg"); err != nil {
		println("Error", err.Error())
		return
	}
	defer outFile.Close()

	option := &jpeg.Options{Quality: 100}
	if err = jpeg.Encode(outFile, img, option); err != nil {
		println("Error", err.Error())
		return
	}
}
func main() {
	var file *os.File
	var err error

	if file, err = os.Open("files/sample.tar.bz2"); err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	reader := tar.NewReader(bzip2.NewReader(file))

	var header *tar.Header
	for {
		header, err = reader.Next()
		if err == io.EOF {
			// end of the archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}

		buf := new(bytes.Buffer)
		if _, err = io.Copy(buf, reader); err != nil {
			log.Fatalln(err)
		}
		if err = ioutil.WriteFile("output/"+header.Name, buf.Bytes(), 0755); err != nil {
			log.Fatal(err)
		}
	}
}
func readLines(path string) (lines []string, err error) {
	var (
		file   *os.File
		part   []byte
		prefix bool
	)
	if file, err = os.Open(path); err != nil {
		return
	}
	defer file.Close()

	reader := bufio.NewReader(file)
	buffer := bytes.NewBuffer(make([]byte, 0))
	for {
		if part, prefix, err = reader.ReadLine(); err != nil {
			break
		}
		buffer.Write(part)
		if !prefix {
			lines = append(lines, buffer.String())
			buffer.Reset()
		}
	}
	if err == io.EOF {
		err = nil
	}
	return
}
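A minimal usage sketch for readLines above; it assumes readLines lives in the same package, and the input path is illustrative:

func main() {
	// "testdata/sample.txt" is a hypothetical input file.
	lines, err := readLines("testdata/sample.txt")
	if err != nil {
		log.Fatalln("readLines failed:", err)
	}
	for i, line := range lines {
		fmt.Printf("%d: %s\n", i+1, line)
	}
}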
func (f *File) Save() (err error) {
	var done sync.Mutex
	done.Lock()
	f.cbs <- func() {
		defer done.Unlock()
		tmpPath := f.path + "." + strconv.FormatInt(rand.Int63(), 10)
		var tmpF *os.File
		tmpF, err = os.Create(tmpPath)
		if err != nil {
			return
		}
		defer tmpF.Close()

		buf := new(bytes.Buffer)
		err = json.NewEncoder(buf).Encode(f.Obj)
		if err != nil {
			return
		}

		// indent
		indentBuf := new(bytes.Buffer)
		err = json.Indent(indentBuf, buf.Bytes(), "", " ")
		if err != nil {
			return
		}

		_, err = tmpF.Write(indentBuf.Bytes())
		if err != nil {
			return
		}

		err = os.Rename(tmpPath, f.path)
		if err != nil {
			return
		}
	}
	done.Lock()
	return
}
// Lock creates a lock file which prevents more than one instance of the
// same node from running (on the same machine).
func (ctx *Context) Lock() (err error) {
	var f *os.File
	var p *os.Process
	var pid int

	lockFile := path.Join(ctx.storageDir, ctx.nodeName+".lock")
	if f, err = os.Open(lockFile); err != nil {
		goto lock
	}
	if _, err = fmt.Fscanf(f, "%d", &pid); err != nil && pid == 0 {
		goto lock
	}
	if p, err = os.FindProcess(pid); err == nil && p != nil {
		// Signal 0 only checks whether the process exists.
		if err = p.Signal(syscall.Signal(0)); err == nil {
			return fmt.Errorf("node '%s' is already running", ctx.NodeName())
		}
	}
lock:
	// Write a lock file.
	if f, err = os.Create(lockFile); err == nil {
		pid := os.Getppid()
		f.Write([]byte(fmt.Sprintf("%d", pid)))
		f.Close()
	}
	return err
}
// Open the file, write to the file, close the file.
// Whichever user is running the function needs write permissions to the file
// or directory if the file does not yet exist.
func (hook *lfsHook) Fire(entry *logrus.Entry) error {
	var (
		fd   *os.File
		path string
		msg  string
		err  error
		ok   bool
	)
	if path, ok = hook.paths[entry.Level]; !ok {
		err = fmt.Errorf("no file provided for loglevel: %d", entry.Level)
		log.Println(err.Error())
		return err
	}

	fd, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		log.Println("failed to open logfile:", path, err)
		return err
	}
	defer fd.Close()

	msg, err = entry.String()
	if err != nil {
		log.Println("failed to generate string for entry:", err)
		return err
	}
	fd.WriteString(msg)
	return nil
}
// persistMetaData atomically writes state to the filesystem
func (d *diskQueue) persistMetaData() error {
	var f *os.File
	var err error

	fileName := d.metaDataFileName()
	tmpFileName := fmt.Sprintf("%s.%d.tmp", fileName, rand.Int())

	// write to tmp file
	f, err = os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}

	_, err = fmt.Fprintf(f, "%d\n%d,%d\n%d,%d\n",
		atomic.LoadInt64(&d.depth),
		d.readFileNum, d.readPos,
		d.writeFileNum, d.writePos)
	if err != nil {
		f.Close()
		return err
	}
	f.Sync()
	f.Close()

	// atomically rename
	return atomicRename(tmpFileName, fileName)
}
// retrieveMetaData initializes state from the filesystem
func (d *diskQueue) retrieveMetaData() error {
	var f *os.File
	var err error

	fileName := d.metaDataFileName()
	f, err = os.OpenFile(fileName, os.O_RDONLY, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	var depth int64
	_, err = fmt.Fscanf(f, "%d\n%d,%d\n%d,%d\n",
		&depth,
		&d.readFileNum, &d.readPos,
		&d.writeFileNum, &d.writePos)
	if err != nil {
		return err
	}
	atomic.StoreInt64(&d.depth, depth)
	d.nextReadFileNum = d.readFileNum
	d.nextReadPos = d.readPos

	return nil
}
func export(file string, connection string) {
	var wadokufile *os.File
	var xmldata []byte
	var err error

	if wadokufile, err = os.Open(file); err != nil {
		log.Fatal("Can't open wadoku XML file: " + err.Error())
	}
	defer wadokufile.Close()

	if xmldata, err = ioutil.ReadAll(wadokufile); err != nil {
		log.Fatal("Can't read wadoku XML file: " + err.Error())
	}

	dict := XMLDict{}
	if err = xml.Unmarshal(xmldata, &dict); err != nil {
		log.Fatal("Can't unmarshal xmldata: " + err.Error())
	}

	if err = saveIntoMongo(dict, connection); err != nil {
		log.Fatal("Can't save entries into mongodb: " + err.Error())
	}
}
func downloadFile(URL string) (fileName string, err error) {
	var file *os.File
	if file, err = ioutil.TempFile(os.TempDir(), "torrent-imageviewer"); err != nil {
		return
	}
	defer func() {
		if err := file.Close(); err != nil {
			log.Printf("Error closing torrent file: %s", err)
		}
	}()

	response, err := http.Get(URL)
	if err != nil {
		return
	}
	defer func() {
		if err := response.Body.Close(); err != nil {
			log.Printf("Error closing torrent file: %s", err)
		}
	}()

	_, err = io.Copy(file, response.Body)
	return file.Name(), err
}
func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) {
	n := fileInfo.Size()
	contentLength := int(n)
	if n != int64(contentLength) {
		f.Close()
		return nil, fmt.Errorf("too big file: %d bytes", n)
	}

	// detect content-type
	ext := fileExtension(fileInfo.Name(), compressed)
	contentType := mime.TypeByExtension(ext)
	if len(contentType) == 0 {
		data, err := readFileHeader(f, compressed)
		if err != nil {
			return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err)
		}
		contentType = http.DetectContentType(data)
	}

	lastModified := fileInfo.ModTime()
	ff := &fsFile{
		h:               h,
		f:               f,
		contentType:     contentType,
		contentLength:   contentLength,
		compressed:      compressed,
		lastModified:    lastModified,
		lastModifiedStr: AppendHTTPDate(nil, lastModified),

		t: time.Now(),
	}
	return ff, nil
}
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func streamingUploadFile(params map[string]string, paramName, path string, w *io.PipeWriter, file *os.File) {
	defer file.Close()
	defer w.Close()

	writer := multipart.NewWriter(w)
	part, err := writer.CreateFormFile(paramName, filepath.Base(path))
	if err != nil {
		log.Fatal(err)
		return
	}

	_, err = io.Copy(part, file)
	if err != nil {
		log.Fatal(err)
		return
	}

	for key, val := range params {
		_ = writer.WriteField(key, val)
	}

	err = writer.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
func doHandleFileConcurrently(file *os.File, targetFolder string, handler FileHandlerForLineInFile) {
	reader := bufio.NewReader(file)
	scanner := bufio.NewScanner(reader)
	var syncStructure []chan bool
	defer file.Close()

	startTime := time.Now()
	for scanner.Scan() {
		c := make(chan bool)
		syncStructure = append(syncStructure, c)
		filenameToBeCopied := scanner.Text()
		go func() {
			doHandle(filenameToBeCopied, targetFolder, handler, c)
		}()
	}

	count := len(syncStructure)
	for i := 0; i < count; i++ {
		<-syncStructure[i]
	}

	endTime := time.Now()
	duration := endTime.Sub(startTime)
	log.Println("Time taken: " + duration.String())
}
// GenerateCookie generates a new random node admin cookie hash and saves
// it to the storage dir in the 'cookie' file. If the force flag is not set,
// the cookie will not be overwritten by further calls to this function.
//
// force - If true, generates a new cookie and overwrites the existing one.
//
// Returns an error if something went wrong.
func (ctx *Context) GenerateCookie(force bool) (err error) {
	if ctx.storageDir == "" {
		return errors.New("can't generate cookie, storage not set")
	}

	var buf = make([]byte, CookieSize)
	var cookieFile *os.File
	cookiePath := path.Join(ctx.storageDir, ctx.nodeName+".cookie")

	if !force {
		cookieFile, err = os.Open(cookiePath)
		if err == nil {
			n, err := io.ReadFull(cookieFile, buf[:])
			if n == CookieSize && err == nil {
				ctx.cookie = string(buf[:])
				cookieFile.Close()
				return nil
			}
		}
	}

	// Generate a new cookie if there's none or the force flag is enabled.
	if _, err = rand.Read(buf[:16]); err != nil {
		return
	}
	hash := sha1.New()
	hash.Write(buf[:16])
	ctx.cookie = fmt.Sprintf("%x", hash.Sum([]byte{}))

	if cookieFile, err = os.Create(cookiePath); err != nil {
		return
	}
	cookieFile.Write([]byte(ctx.cookie))
	cookieFile.Close()
	return
}
// Tries to write a challenge file to each of the directories.
func webrootWriteChallenge(webroots map[string]struct{}, token string, ka []byte) {
	log.Debugf("writing %d webroot challenge files", len(webroots))

	for wr := range webroots {
		os.MkdirAll(wr, 0755) // ignore errors

		fn := filepath.Join(wr, token)
		log.Debugf("writing webroot file %s", fn)

		// Because /var/run/acme/acme-challenge may not exist due to /var/run
		// possibly being a tmpfs, and because that tmpfs is likely to be world
		// writable, there is a risk of following a maliciously crafted symlink to
		// cause a file to be overwritten as root. Open the file using a
		// no-symlinks flag if the OS supports it, but only for /var/run paths; we
		// want to support symlinks for other paths, which are presumably properly
		// controlled.
		//
		// Unfortunately earlier components in the pathname will still be followed
		// if they are symlinks, but it looks like this is the best we can do.
		var f *os.File
		var err error
		if strings.HasPrefix(wr, "/var/run/") {
			f, err = deos.OpenFileNoSymlinks(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		} else {
			f, err = os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		}
		if err != nil {
			log.Infoe(err, "failed to open webroot file ", fn)
			continue
		}
		f.Write(ka)
		f.Close()
	}
}
func uploadDirectory(uploader s3manager.Uploader, file os.File) {
	var wg sync.WaitGroup
	err := filepath.Walk(*filesPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Propagate walk errors; info may be nil here.
			return err
		}
		if !info.IsDir() {
			file, err := os.Open(path)
			if err == nil {
				path := getPathInsideFolder(path, getFolderName(*filesPath))
				wg.Add(1)
				go func() {
					defer file.Close()
					uploadFile(uploader, createKey(path), file)
					wg.Done()
				}()
			} else {
				log.Println("Can't open a file because of: ", err)
			}
		}
		return nil
	})
	wg.Wait()
	if err != nil {
		log.Println("Can't process directory because of:", err)
		return
	}
	log.Println("Directory was successfully uploaded!")
}
// ParseZone will attempt to parse a zone file from the provided filename and return a Zone.
// ParseZone will return an error if the file provided does not exist or could not be properly parsed.
func ParseZone(filename string) (*Zone, error) {
	var zone *Zone
	var err error

	zone = &Zone{
		records: make([]dns.RR, 0),
	}

	// Open the file
	var file *os.File
	file, err = os.Open(filename)
	if err != nil {
		return nil, fmt.Errorf("could not parse zone file \"%s\": \"%s\"", filename, err)
	}
	defer file.Close()

	// Parse the file into records
	var tokens chan *dns.Token
	tokens = dns.ParseZone(file, "", "")
	for token := range tokens {
		if token.Error != nil {
			return nil, fmt.Errorf("could not parse zone file \"%s\": \"%s\"", filename, token.Error)
		}
		zone.records = append(zone.records, token.RR)
	}

	return zone, nil
}
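A minimal usage sketch for ParseZone, assuming it is called from within the same package; the zone file name is illustrative:

func main() {
	// "db.example.org" is a hypothetical zone file path.
	zone, err := ParseZone("db.example.org")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("loaded %d records", len(zone.records))
}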
func writeLines(lines []string, path string) (err error) {
	var (
		file *os.File
	)
	if file, err = os.Create(path); err != nil {
		return
	}
	defer file.Close()

	for _, item := range lines {
		// Assign to the named return so a write failure is reported to the caller.
		if _, err = file.WriteString(strings.TrimSpace(item) + "\n"); err != nil {
			fmt.Println(err)
			break
		}
	}
	return
}
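A minimal round-trip sketch combining writeLines with the readLines helper shown earlier, assuming both are in the same package; the output path is illustrative:

func main() {
	lines := []string{"alpha", "beta", "gamma"}
	// "out.txt" is a hypothetical output path.
	if err := writeLines(lines, "out.txt"); err != nil {
		log.Fatalln("writeLines failed:", err)
	}
	got, err := readLines("out.txt")
	if err != nil {
		log.Fatalln("readLines failed:", err)
	}
	fmt.Println(got)
}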
func (fst fileStorageTorrent) WriteAt(p []byte, off int64) (n int, err error) {
	for _, fi := range fst.fts.info.UpvertedFiles() {
		if off >= fi.Length {
			off -= fi.Length
			continue
		}
		n1 := len(p)
		if int64(n1) > fi.Length-off {
			n1 = int(fi.Length - off)
		}
		name := fst.fts.fileInfoName(fi)
		os.MkdirAll(filepath.Dir(name), 0770)
		var f *os.File
		f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0660)
		if err != nil {
			return
		}
		n1, err = f.WriteAt(p[:n1], off)
		f.Close()
		if err != nil {
			return
		}
		n += n1
		off = 0
		p = p[n1:]
		if len(p) == 0 {
			break
		}
	}
	return
}
func fileWriter(t *testing.T, file *os.File, logs []string) {
	filename := file.Name()
	time.Sleep(1 * time.Second) // wait for Tail to start...
	for _, line := range logs {
		if strings.Contains(line, RotateMarker) {
			log.Println("fileWriter: rename file => file.old")
			os.Rename(filename, filename+".old")
			file.Close()
			file, _ = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
			log.Println("fileWriter: re-opened file")
		} else if strings.Contains(line, TruncateMarker) {
			time.Sleep(1 * time.Second)
			log.Println("fileWriter: truncate(file, 0)")
			os.Truncate(filename, 0)
			file.Seek(int64(0), os.SEEK_SET)
		}
		_, err := file.WriteString(line)
		log.Print("fileWriter: wrote ", line)
		if err != nil {
			log.Println("write failed", err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	file.Close()
}
func (c *client) startDownload(id uint64, outPath string, detachment *pond.Message_Detachment) (cancel func()) {
	killChan := make(chan bool, 1)

	go func() {
		var tmp *os.File
		var err error
		if tmp, err = ioutil.TempFile("" /* default tmp dir */, "pond-download-"); err != nil {
			err = errors.New("failed to create temp file: " + err.Error())
		} else {
			os.Remove(tmp.Name())
			defer tmp.Close()
			err = c.downloadDetachment(c.backgroundChan, tmp, id, *detachment.Url, killChan)
			if err == nil {
				// Assign to the outer err so Seek/save failures are reported below.
				_, err = tmp.Seek(0, 0 /* from start */)
				if err == nil {
					err = saveDecrypted(c.backgroundChan, outPath, id, tmp, detachment, killChan)
				}
			}
		}

		if err == nil {
			c.backgroundChan <- DetachmentComplete{id, nil}
		} else {
			c.backgroundChan <- DetachmentError{id, err}
		}

		tmp.Close()
	}()

	return func() {
		killChan <- true
	}
}
func copyPathToPath(fromPath, toPath string) (err error) {
	srcFileInfo, err := os.Stat(fromPath)
	if err != nil {
		return
	}

	if srcFileInfo.IsDir() {
		err = os.MkdirAll(toPath, srcFileInfo.Mode())
		if err != nil {
			return
		}
	} else {
		var dst *os.File
		dst, err = fileutils.Create(toPath)
		if err != nil {
			return
		}
		defer dst.Close()

		dst.Chmod(srcFileInfo.Mode())

		err = fileutils.CopyPathToWriter(fromPath, dst)
	}
	return err
}
func (c *client) startUpload(id uint64, inPath string) (cancel func()) {
	killChan := make(chan bool, 1)

	go func() {
		var detachment *pond.Message_Detachment
		var tmp *os.File
		var err error
		if tmp, err = ioutil.TempFile("" /* default tmp dir */, "pond-upload-"); err != nil {
			err = errors.New("failed to create temp file: " + err.Error())
		} else {
			os.Remove(tmp.Name())
			defer tmp.Close()
			detachment, err = saveEncrypted(c.rand, c.backgroundChan, tmp, id, inPath, killChan)
			if err == nil {
				err = c.uploadDetachment(c.backgroundChan, tmp, id, killChan)
			}
		}

		if err == nil {
			detachment.Url = proto.String(c.buildDetachmentURL(id))
			c.log.Printf("Finished upload of %s", *detachment.Url)
			c.backgroundChan <- DetachmentComplete{id, detachment}
		} else {
			c.backgroundChan <- DetachmentError{id, err}
		}

		tmp.Close()
	}()

	return func() {
		killChan <- true
	}
}
// FindLoopDeviceFor returns a loopback device file for the specified file,
// which is the backing file of a loopback device.
func FindLoopDeviceFor(file *os.File) *os.File {
	stat, err := file.Stat()
	if err != nil {
		return nil
	}
	targetInode := stat.Sys().(*syscall.Stat_t).Ino
	targetDevice := stat.Sys().(*syscall.Stat_t).Dev

	for i := 0; true; i++ {
		path := fmt.Sprintf("/dev/loop%d", i)

		file, err := os.OpenFile(path, os.O_RDWR, 0)
		if err != nil {
			if os.IsNotExist(err) {
				return nil
			}

			// Ignore all errors until the first not-exist;
			// we want to continue looking for the file.
			continue
		}

		dev, inode, err := getLoopbackBackingFile(file)
		if err == nil && dev == targetDevice && inode == targetInode {
			return file
		}
		file.Close()
	}

	return nil
}
func OpenACI(location, sigLocation string) (types.ACIdentifier, *os.File, *os.File, error) {
	var asc *os.File

	// Signature override
	if sigLocation != "" {
		if sf, err := OpenLocation(sigLocation); err != nil {
			return "", nil, nil, err
		} else {
			asc = sf
		}
	}

	if app := tryAppFromString(location); app != nil {
		// Proper ACIdentifier given, let's do discovery
		if aci, asc, err := discoverACI(*app, asc); err != nil {
			return app.Name, nil, nil, err
		} else {
			return app.Name, aci, asc, nil
		}
	} else {
		if aci, err := OpenLocation(location); err != nil {
			if asc != nil {
				asc.Close()
			}
			return "", nil, nil, err
		} else {
			return "", aci, asc, nil
		}
	}
}