func sendTTYToCommand(commandUsock *unixsocket.Usock, clientFile *os.File, err error) error {
	if err != nil {
		return err
	}
	return commandUsock.WriteFD(int(clientFile.Fd()))
}
func listenStream(netw, addr string) (l net.Listener, err error) {
	var file *os.File
	fd, err := listen(netw, addr)
	if err != nil {
		return nil, err
	}
	// Set the backlog size to the maximum.
	if err = syscall.Listen(fd, syscall.SOMAXCONN); err != nil {
		syscall.Close(fd)
		return nil, err
	}
	file = os.NewFile(uintptr(fd), filePrefix+strconv.Itoa(os.Getpid()))
	if l, err = net.FileListener(file); err != nil {
		syscall.Close(fd)
		return nil, err
	}
	if err = file.Close(); err != nil {
		syscall.Close(fd)
		l.Close()
		return nil, err
	}
	return l, err
}
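// A minimal usage sketch for listenStream above. It assumes the listen()
// helper and the filePrefix constant (not shown here) exist in the same
// package; the address and the Accept loop are illustrative only.
func exampleListenStream() {
	l, err := listenStream("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Println("accept:", err)
			continue
		}
		// A real server would hand conn off to a handler goroutine.
		conn.Close()
	}
}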
func getTarFileBytes(file *os.File, path string) ([]byte, error) {
	_, err := file.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("error seeking file: %v", err)
	}
	var fileBytes []byte
	fileWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == path {
			fileBytes, err = ioutil.ReadAll(t.TarStream)
			if err != nil {
				return err
			}
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, fileWalker); err != nil {
		return nil, err
	}
	if fileBytes == nil {
		return nil, fmt.Errorf("file %q not found", path)
	}
	return fileBytes, nil
}
func setFileLock(f *os.File, lock bool) error {
	how := syscall.LOCK_UN
	if lock {
		how = syscall.LOCK_EX
	}
	return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
}
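// A minimal usage sketch for setFileLock above: take an exclusive
// non-blocking flock on a lock file, do some work, then release it.
// The file name is illustrative.
func exampleSetFileLock() error {
	f, err := os.OpenFile("/tmp/example.lock", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := setFileLock(f, true); err != nil {
		// Another process holds the lock (EWOULDBLOCK) or flock failed.
		return err
	}
	defer setFileLock(f, false) // unlock
	// ... critical section ...
	return nil
}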
func main() {
	var outFile *os.File
	var err error
	if outFile, err = os.Create("create.png"); err != nil {
		fmt.Println("Error", err)
		return
	}
	defer outFile.Close()
	rect := image.Rect(0, 0, 100, 100)
	rgba := image.NewRGBA64(rect)
	// I'd like to draw "#golang" or something, but for now just the "#".
	for i := 0; i < 10; i++ {
		rgba.Set(60, (10 + i), image.Black.At(0, 0))
		rgba.Set(65, (10 + i), image.Black.At(0, 0))
		rgba.Set((58 + i), 13, image.Black.At(0, 0))
		rgba.Set((58 + i), 16, image.Black.At(0, 0))
	}
	outImage := rgba.SubImage(rect)
	if err = png.Encode(outFile, outImage); err != nil {
		fmt.Println("Error", err)
		return
	}
}
func dirList(w http.ResponseWriter, f *os.File) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if dirs, err := f.Readdir(-1); err == nil {
		files := make([]map[string]string, len(dirs)+1)
		files[0] = map[string]string{
			"name":  "..",
			"href":  "..",
			"size":  "-",
			"mtime": "-",
		}
		for i, d := range dirs {
			href := d.Name()
			if d.IsDir() {
				href += "/"
			}
			files[i+1] = map[string]string{
				"name":  d.Name(),
				"href":  href,
				"size":  formatSize(d),
				"mtime": d.ModTime().Format("2006-01-02 15:04:05"),
			}
		}
		reloadCfg.dirListTmpl.Execute(w, map[string]interface{}{
			"dir":   f.Name(),
			"files": files,
		})
	}
}
// Reading a symlink to a file must fail.
func TestReadSymlinkedDirectoryToFile(t *testing.T) {
	var err error
	var file *os.File
	if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
		t.Fatalf("failed to create file: %s", err)
	}
	file.Close()
	if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
		t.Errorf("failed to create symlink: %s", err)
	}
	var path string
	if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
		t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
	}
	if path != "" {
		t.Fatalf("path should've been empty: %s", path)
	}
	if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
		t.Errorf("failed to remove file: %s", err)
	}
	if err = os.Remove("/tmp/fileLinkTest"); err != nil {
		t.Errorf("failed to remove symlink: %s", err)
	}
}
func main() {
	var file *os.File
	var err error
	if file, err = os.Open("files/sample.tar.bz2"); err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	reader := tar.NewReader(bzip2.NewReader(file))
	var header *tar.Header
	for {
		header, err = reader.Next()
		if err == io.EOF {
			// End of the archive.
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		buf := new(bytes.Buffer)
		if _, err = io.Copy(buf, reader); err != nil {
			log.Fatalln(err)
		}
		if err = ioutil.WriteFile("output/"+header.Name, buf.Bytes(), 0755); err != nil {
			log.Fatal(err)
		}
	}
}
func doHandleFileConcurrently(file *os.File, targetFolder string, handler FileHandlerForLineInFile) {
	reader := bufio.NewReader(file)
	scanner := bufio.NewScanner(reader)
	var syncStructure []chan bool
	defer file.Close()
	startTime := time.Now()
	for scanner.Scan() {
		c := make(chan bool)
		syncStructure = append(syncStructure, c)
		filenameToBeCopied := scanner.Text()
		go func() {
			doHandle(filenameToBeCopied, targetFolder, handler, c)
		}()
	}
	count := len(syncStructure)
	for i := 0; i < count; i++ {
		<-syncStructure[i]
	}
	endTime := time.Now()
	duration := endTime.Sub(startTime)
	log.Println("Time taken: " + duration.String())
}
// UploadReleaseAsset creates an asset by uploading a file into a release repository.
// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
//
// GitHub API docs: http://developer.github.com/v3/repos/releases/#upload-a-release-asset
func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
	u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
	u, err := addOptions(u, opt)
	if err != nil {
		return nil, nil, err
	}
	stat, err := file.Stat()
	if err != nil {
		return nil, nil, err
	}
	if stat.IsDir() {
		return nil, nil, errors.New("the asset to upload can't be a directory")
	}
	mediaType := mime.TypeByExtension(filepath.Ext(file.Name()))
	req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType)
	if err != nil {
		return nil, nil, err
	}
	asset := new(ReleaseAsset)
	resp, err := s.client.Do(req, asset)
	if err != nil {
		return nil, resp, err
	}
	return asset, resp, err
}
func (v *Volume) load(alsoLoadIndex bool) error {
	var e error
	fileName := path.Join(v.dir, v.Id.String())
	if exists, canRead, canWrite, _ := checkFile(fileName + ".dat"); exists && !canRead {
		return fmt.Errorf("cannot read Volume Data file %s.dat", fileName)
	} else if !exists || canWrite {
		v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
	} else if exists && canRead {
		glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
		v.dataFile, e = os.Open(fileName + ".dat")
		v.readOnly = true
	} else {
		return fmt.Errorf("unknown state of Volume Data file %s.dat", fileName)
	}
	if e != nil {
		if !os.IsPermission(e) {
			return fmt.Errorf("cannot load Volume Data %s.dat: %s", fileName, e.Error())
		}
	}
	if v.ReplicaType == CopyNil {
		e = v.readSuperBlock()
	} else {
		e = v.maybeWriteSuperBlock()
	}
	if e == nil && alsoLoadIndex {
		var indexFile *os.File
		if v.readOnly {
			glog.V(1).Infoln("open to read file", fileName+".idx")
			if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil {
				return fmt.Errorf("cannot read Volume Index %s.idx: %s", fileName, e.Error())
			}
			if v.ensureConvertIdxToCdb(fileName) {
				v.nm, e = OpenCdbMap(fileName + ".cdb")
				return e
			}
			if indexFile != nil {
				glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
				if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
					glog.Errorf("error converting %s.idx to %s.cdb: %s", fileName, fileName, e)
				} else {
					indexFile.Close()
					os.Remove(indexFile.Name())
					indexFile = nil
				}
			}
		} else {
			glog.V(1).Infoln("open to write file", fileName+".idx")
			if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil {
				return fmt.Errorf("cannot write Volume Index %s.idx: %s", fileName, e.Error())
			}
		}
		glog.V(0).Infoln("loading file", fileName+".idx", "readonly", v.readOnly)
		if v.nm, e = LoadNeedleMap(indexFile); e != nil {
			glog.V(0).Infoln("loading error:", e)
		}
	}
	return e
}
func (v *Volume) ensureConvertIdxToCdb(fileName string) (cdbCanRead bool) {
	var indexFile *os.File
	var e error
	_, cdbCanRead, cdbCanWrite, cdbModTime := checkFile(fileName + ".cdb")
	_, idxCanRead, _, idxModTime := checkFile(fileName + ".idx")
	if cdbCanRead && cdbModTime.After(idxModTime) {
		return true
	}
	if !cdbCanWrite {
		return false
	}
	if !idxCanRead {
		glog.V(0).Infoln("cannot read file", fileName+".idx!")
		return false
	}
	glog.V(2).Infoln("opening file", fileName+".idx")
	if indexFile, e = os.Open(fileName + ".idx"); e != nil {
		glog.V(0).Infoln("failed to read file", fileName+".idx!")
		return false
	}
	defer indexFile.Close()
	glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
	if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
		glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, fileName, e.Error())
		return false
	}
	return true
}
// FindLoopDeviceFor returns a loopback device file for the specified file,
// which is the backing file of a loopback device.
func FindLoopDeviceFor(file *os.File) *os.File {
	stat, err := file.Stat()
	if err != nil {
		return nil
	}
	targetInode := stat.Sys().(*syscall.Stat_t).Ino
	targetDevice := stat.Sys().(*syscall.Stat_t).Dev
	for i := 0; true; i++ {
		path := fmt.Sprintf("/dev/loop%d", i)
		file, err := os.OpenFile(path, os.O_RDWR, 0)
		if err != nil {
			if os.IsNotExist(err) {
				return nil
			}
			// Ignore all errors until the first not-exist;
			// we want to continue looking for the file.
			continue
		}
		dev, inode, err := getLoopbackBackingFile(file)
		if err == nil && dev == targetDevice && inode == targetInode {
			return file
		}
		file.Close()
	}
	return nil
}
// SetCapacity reloads the size for the loopback device.
func SetCapacity(file *os.File) error {
	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
		logrus.Errorf("Error loopbackSetCapacity: %s", err)
		return ErrSetCapacity
	}
	return nil
}
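// A hedged usage sketch combining FindLoopDeviceFor and SetCapacity above:
// after growing a loopback backing file, locate its /dev/loopN device and ask
// the kernel to pick up the new size. This assumes the loopback helpers in
// this package and root privileges; the path is illustrative.
func refreshLoopCapacity(backingPath string) error {
	backing, err := os.Open(backingPath)
	if err != nil {
		return err
	}
	defer backing.Close()
	loop := FindLoopDeviceFor(backing)
	if loop == nil {
		return fmt.Errorf("no loop device found for %s", backingPath)
	}
	defer loop.Close()
	return SetCapacity(loop)
}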
func copyPathToPath(fromPath, toPath string) (err error) {
	srcFileInfo, err := os.Stat(fromPath)
	if err != nil {
		return
	}
	if srcFileInfo.IsDir() {
		err = os.MkdirAll(toPath, srcFileInfo.Mode())
		if err != nil {
			return
		}
	} else {
		var dst *os.File
		dst, err = fileutils.Create(toPath)
		if err != nil {
			return
		}
		defer dst.Close()
		dst.Chmod(srcFileInfo.Mode())
		err = fileutils.CopyPathToWriter(fromPath, dst)
	}
	return err
}
func getConnection(local *os.File) (int, error) {
	var data [4]byte
	control := make([]byte, 4*256)
	// n, oobn, recvflags, from, errno - todo: error checking.
	_, oobn, _, _, err := syscall.Recvmsg(int(local.Fd()), data[:], control[:], 0)
	if err != nil {
		return 0, err
	}
	message := *(*syscall.Cmsghdr)(unsafe.Pointer(&control[0]))
	fd := *(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&control[0])) + syscall.SizeofCmsghdr))
	if message.Type != 1 {
		return 0, fmt.Errorf("getConnection: recvmsg returned wrong control type: %d", message.Type)
	}
	if oobn <= syscall.SizeofCmsghdr {
		return 0, fmt.Errorf("getConnection: too short control message. Length: %d", oobn)
	}
	if fd < 0 {
		return 0, fmt.Errorf("getConnection: fd < 0: %d", fd)
	}
	return int(fd), nil
}
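// A hedged counterpart sketch for getConnection above: the sending side passes
// a file descriptor over an already-connected Unix domain socket using an
// SCM_RIGHTS control message. The socket setup is assumed to exist elsewhere.
func sendConnection(local *os.File, connFd int) error {
	rights := syscall.UnixRights(connFd) // SCM_RIGHTS control message carrying connFd
	dummy := []byte{0}                   // at least one byte of regular payload
	return syscall.Sendmsg(int(local.Fd()), dummy, rights, nil, 0)
}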
// signFile will search for the file and sign it.
// It always returns nil as an error.
func signFile(c *cli.Context) error {
	if c.Args().First() == "" {
		log.Fatal("Please give the file to sign", 1)
	}
	fileName := c.Args().First()
	groupToml := c.String(optionGroup)
	file, err := os.Open(fileName)
	log.ErrFatal(err, "Couldn't read file to be signed:")
	sig, err := sign(file, groupToml)
	log.ErrFatal(err, "Couldn't create signature:")
	log.Lvl3(sig)
	var outFile *os.File
	outFileName := c.String("out")
	if outFileName != "" {
		outFile, err = os.Create(outFileName)
		log.ErrFatal(err, "Couldn't create signature file:")
	} else {
		outFile = os.Stdout
	}
	writeSigAsJSON(sig, outFile)
	if outFileName != "" {
		log.Lvl2("Signature written to:", outFile.Name())
	}
	// else keep Stdout uncluttered
	return nil
}
// Lock creates a lock file which prevents opening more than one instance
// of the same node (on the same machine).
func (ctx *Context) Lock() (err error) {
	var f *os.File
	var p *os.Process
	var pid int
	lockFile := path.Join(ctx.storageDir, ctx.nodeName+".lock")
	if f, err = os.Open(lockFile); err != nil {
		goto lock
	}
	if _, err = fmt.Fscanf(f, "%d", &pid); err != nil && pid == 0 {
		goto lock
	}
	if p, err = os.FindProcess(pid); err == nil && p != nil {
		if err = p.Signal(syscall.Signal(0)); err == nil {
			return fmt.Errorf("node '%s' is already running", ctx.NodeName())
		}
	}
lock:
	// Write a lock file containing this process's pid.
	if f, err = os.Create(lockFile); err == nil {
		pid := os.Getpid()
		f.Write([]byte(fmt.Sprintf("%d", pid)))
		f.Close()
	}
	return nil
}
func fileWriter(t *testing.T, file *os.File, logs []string) {
	filename := file.Name()
	time.Sleep(1 * time.Second) // wait for Tail to start...
	for _, line := range logs {
		if strings.Contains(line, RotateMarker) {
			log.Println("fileWriter: rename file => file.old")
			os.Rename(filename, filename+".old")
			file.Close()
			file, _ = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
			log.Println("fileWriter: re-opened file")
		} else if strings.Contains(line, TruncateMarker) {
			time.Sleep(1 * time.Second)
			log.Println("fileWriter: truncate(file, 0)")
			os.Truncate(filename, 0)
			file.Seek(int64(0), os.SEEK_SET)
		}
		_, err := file.WriteString(line)
		log.Print("fileWriter: wrote ", line)
		if err != nil {
			log.Println("write failed", err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	file.Close()
}
func uploadDirectory(uploader s3manager.Uploader, file os.File) {
	var wg sync.WaitGroup
	err := filepath.Walk(*filesPath, func(path string, info os.FileInfo, err error) error {
		if !info.IsDir() {
			file, err := os.Open(path)
			if err == nil {
				path := getPathInsideFolder(path, getFolderName(*filesPath))
				wg.Add(1)
				go func() {
					defer file.Close()
					uploadFile(uploader, createKey(path), file)
					wg.Done()
				}()
			} else {
				log.Println("Can't open a file because of:", err)
			}
		}
		return nil
	})
	wg.Wait()
	if err != nil {
		log.Println("Can't process directory because of:", err)
		return
	}
	log.Println("Directory was successfully uploaded!")
}
func main() {
	filename := flag.String("s", "./log", "log file")
	flag.Parse()
	filename1 := "./1.log"
	filename2 := "./2.log"
	filename3 := "./3.log"
	filename4 := "./4.log"
	var (
		fd  *os.File
		fds [4]*os.File
		err error
	)
	fd, err = os.OpenFile(*filename, os.O_RDWR, 0660)
	fds[0], err = os.OpenFile(filename1, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0660)
	fds[1], err = os.OpenFile(filename2, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0660)
	fds[2], err = os.OpenFile(filename3, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0660)
	fds[3], err = os.OpenFile(filename4, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0660)
	buff := make([]byte, 2048)
	for i := 0; i < 4; i++ {
		_, err = fd.Read(buff)
		if err != nil {
			return
		}
		go func() {
			for i := 0; i < 4; i++ {
				go write_buff(fds[i], buff)
			}
		}()
	}
}
func (f *File) Save() (err error) {
	var done sync.Mutex
	done.Lock()
	f.cbs <- func() {
		defer done.Unlock()
		tmpPath := f.path + "." + strconv.FormatInt(rand.Int63(), 10)
		var tmpF *os.File
		tmpF, err = os.Create(tmpPath)
		if err != nil {
			return
		}
		defer tmpF.Close()
		buf := new(bytes.Buffer)
		err = json.NewEncoder(buf).Encode(f.Obj)
		if err != nil {
			return
		}
		// indent
		indentBuf := new(bytes.Buffer)
		err = json.Indent(indentBuf, buf.Bytes(), "", " ")
		if err != nil {
			return
		}
		_, err = tmpF.Write(indentBuf.Bytes())
		if err != nil {
			return
		}
		err = os.Rename(tmpPath, f.path)
		if err != nil {
			return
		}
	}
	done.Lock()
	return
}
// newFileEncoder creates a new encoder with current file offset for the page writer.
func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
	offset, err := f.Seek(0, os.SEEK_CUR)
	if err != nil {
		return nil, err
	}
	return newEncoder(f, prevCrc, int(offset)), nil
}
// CreatePool is the programmatic example of "dmsetup create".
// It creates a device with the specified poolName, data and metadata file and block size.
func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
	task, err := TaskCreateNamed(deviceCreate, poolName)
	if task == nil {
		return err
	}
	size, err := GetBlockDeviceSize(dataFile)
	if err != nil {
		return fmt.Errorf("devicemapper: Can't get data size %s", err)
	}
	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
	if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
		return fmt.Errorf("devicemapper: Can't add target %s", err)
	}
	var cookie uint
	var flags uint16
	flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
	if err := task.setCookie(&cookie, flags); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}
	defer UdevWait(&cookie)
	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
	}
	return nil
}
func getLogReader(logfile string, logf *os.File) (*bufio.Reader, error) {
	var rdr *bufio.Reader
	// Is this a gzip file?
	if path.Ext(logfile) == gzipext {
		gzrdr, err := gzip.NewReader(logf)
		if err != nil {
			return nil, err
		}
		rdr = bufio.NewReader(gzrdr)
	} else {
		// See if the file has shrunk. If so, read from the beginning.
		fi, err := logf.Stat()
		if err != nil {
			return nil, err
		}
		if fi.Size() < pos {
			pos = 0
		}
		logf.Seek(pos, os.SEEK_SET)
		fmt.Printf("Starting read at offset %d\n", pos)
		rdr = bufio.NewReader(logf)
	}
	return rdr, nil
}
func writeLines(lines []string, path string) (err error) {
	var file *os.File
	if file, err = os.Create(path); err != nil {
		return
	}
	defer file.Close()
	for _, item := range lines {
		// Assign to the named return so a write error is propagated to the caller.
		_, err = file.WriteString(strings.TrimSpace(item) + "\n")
		if err != nil {
			fmt.Println(err)
			break
		}
	}
	return
}
// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}
		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}
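// A minimal usage sketch for flock above: take an exclusive POSIX record lock
// on a database file, waiting up to five seconds. ErrTimeout is assumed to be
// defined elsewhere in the package; the file name is illustrative.
func exampleFlock() error {
	f, err := os.OpenFile("/tmp/example.db", os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := flock(f, true, 5*time.Second); err != nil {
		// ErrTimeout if another process held the lock for too long.
		return err
	}
	// ... exclusive access to the file ...
	return nil
}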
func readLines(path string) (lines []string, err error) {
	var (
		file   *os.File
		part   []byte
		prefix bool
	)
	if file, err = os.Open(path); err != nil {
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	buffer := bytes.NewBuffer(make([]byte, 0))
	for {
		if part, prefix, err = reader.ReadLine(); err != nil {
			break
		}
		buffer.Write(part)
		if !prefix {
			lines = append(lines, buffer.String())
			buffer.Reset()
		}
	}
	if err == io.EOF {
		err = nil
	}
	return
}
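// A minimal usage sketch tying readLines and writeLines (defined above)
// together: copy the trimmed lines of one file into another. The file
// names are illustrative.
func exampleReadWriteLines() error {
	lines, err := readLines("input.txt")
	if err != nil {
		return err
	}
	return writeLines(lines, "output.txt")
}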
func getParent(file *os.File, imgID string) (string, error) {
	var parent string
	_, err := file.Seek(0, 0)
	if err != nil {
		return "", fmt.Errorf("error seeking file: %v", err)
	}
	jsonPath := filepath.Join(imgID, "json")
	parentWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == jsonPath {
			jsonb, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading layer json: %v", err)
			}
			var dockerData types.DockerImageData
			if err := json.Unmarshal(jsonb, &dockerData); err != nil {
				return fmt.Errorf("error unmarshaling layer data: %v", err)
			}
			parent = dockerData.Parent
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, parentWalker); err != nil {
		return "", err
	}
	return parent, nil
}
func (fst fileStorageTorrent) WriteAt(p []byte, off int64) (n int, err error) {
	for _, fi := range fst.fts.info.UpvertedFiles() {
		if off >= fi.Length {
			off -= fi.Length
			continue
		}
		n1 := len(p)
		if int64(n1) > fi.Length-off {
			n1 = int(fi.Length - off)
		}
		name := fst.fts.fileInfoName(fi)
		os.MkdirAll(filepath.Dir(name), 0770)
		var f *os.File
		f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0660)
		if err != nil {
			return
		}
		n1, err = f.WriteAt(p[:n1], off)
		f.Close()
		if err != nil {
			return
		}
		n += n1
		off = 0
		p = p[n1:]
		if len(p) == 0 {
			break
		}
	}
	return
}