// replicationSlaveFullSync performs a full sync and, if it succeeds, returns
// the sync offset for later backlog syncing.
func (h *Handler) replicationSlaveFullSync(c *conn) (syncOffset int64, resp redis.Resp, err error) {
	// After bgsave we must send this RDB to the slave, so we don't allow
	// anyone else to bgsave before the full sync is done.
	if ok := h.bgSaveSem.AcquireTimeout(time.Minute); !ok {
		resp, err = toRespErrorf("wait others do bgsave timeout")
		return
	}
	defer h.bgSaveSem.Release()

	// now begin full sync
	h.counters.syncFull.Add(1)
	var rdb *os.File
	rdb, syncOffset, err = h.replicationBgSave()
	if err != nil {
		resp, err = toRespError(err)
		return
	}
	defer rdb.Close()

	// send the RDB to the slave
	st, _ := rdb.Stat()
	rdbSize := st.Size()
	if err = c.writeRDBFrom(rdbSize, rdb); err != nil {
		// the RDB stream is broken mid-transfer, so the connection is no
		// longer usable; close it here
		log.Errorf("slave %s sync rdb err - %s", c, err)
		c.Close()
		return
	}
	return syncOffset, nil, nil
}
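// A minimal sketch of the kind of semaphore bgSaveSem could be (its real type
// is not shown in this snippet): a one-token buffered channel with a timed
// acquire, so a slave sync gives up instead of blocking forever.
type semaphore chan struct{}

func newSemaphore() semaphore { return make(semaphore, 1) }

// AcquireTimeout takes the token, or fails after waiting d.
func (s semaphore) AcquireTimeout(d time.Duration) bool {
	select {
	case s <- struct{}{}:
		return true
	case <-time.After(d):
		return false
	}
}

// Release returns the token; it must follow a successful AcquireTimeout.
func (s semaphore) Release() { <-s }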
func OpenFileOrStdDev(path string, write bool) *os.File {
	var fp *os.File
	var err error
	if path == "stdin" {
		fp = os.Stdin
	} else if path == "stdout" {
		fp = os.Stdout
	} else {
		if write {
			fp, err = os.Create(CleanPath(path))
		} else {
			fp, err = os.Open(CleanPath(path))
		}
	}
	if err != nil {
		log.Fatalln(err)
	}
	stat, statErr := fp.Stat()
	if statErr != nil {
		log.Fatalln(statErr) // was logging the wrong error variable
	}
	if stat.IsDir() {
		log.Fatalf("%s: is a directory\n", path)
	}
	return fp
}
// UploadReleaseAsset creates an asset by uploading a file into a release repository.
// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
//
// GitHub API docs: http://developer.github.com/v3/repos/releases/#upload-a-release-asset
func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
	u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
	u, err := addOptions(u, opt)
	if err != nil {
		return nil, nil, err
	}

	stat, err := file.Stat()
	if err != nil {
		return nil, nil, err
	}
	if stat.IsDir() {
		return nil, nil, errors.New("the asset to upload can't be a directory")
	}

	mediaType := mime.TypeByExtension(filepath.Ext(file.Name()))
	req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType)
	if err != nil {
		return nil, nil, err
	}

	asset := new(ReleaseAsset)
	resp, err := s.client.Do(req, asset)
	if err != nil {
		return nil, resp, err
	}
	return asset, resp, err
}
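// A minimal usage sketch (the client value, owner/repo names, and release id 1
// are placeholders for illustration): open a local file and attach it to a
// release, using the file's base name as the asset name.
func uploadAssetExample(client *Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	opt := &UploadOptions{Name: filepath.Base(path)}
	_, _, err = client.Repositories.UploadReleaseAsset("owner", "repo", 1, opt, f)
	return err
}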
func main() {
	var APIOptions = oss.GetDefaultAPIOptioins()
	APIOptions.AccessID = AccessKeyID
	APIOptions.SecretAccessKey = AccessKeySecret
	var OSSAPI, err = oss.NewAPI(APIOptions)
	if err != nil {
		log.Fatal(err)
	}

	var bucket = "ossgosdklargefile"
	var object = "largefile.bin"
	var bufSize = int64(1024 * 1024 * 2) // 2 MB per part
	var file = "test.bin"
	var fp *os.File

	if err = OSSAPI.PutBucket(bucket, oss.ACLPublicReadWrite, "", nil); err != nil {
		log.Printf("%s\n", err)
	}

	var multi *oss.MultipartUpload
	if multi, err = OSSAPI.NewMultipartUpload(bucket, object, nil); err != nil {
		log.Printf("%s\n", err)
	}

	if fp, err = os.Open(file); err != nil {
		log.Fatal(err)
	}
	defer fp.Close()

	// Round the part count up so the final short part is included.
	var stat, _ = fp.Stat()
	var fileLength = stat.Size()
	var filePart = int(fileLength / bufSize)
	if int64(filePart)*bufSize < fileLength {
		filePart = filePart + 1
	}

	var rd io.Reader
	var parts = make([]oss.Part, filePart)
	var etag string
	for i := 1; i <= filePart; i++ {
		// Each UploadPart consumes at most bufSize bytes from the file.
		rd = io.LimitReader(fp, bufSize)
		if etag, err = multi.UploadPart(i, rd); err != nil {
			log.Printf("%s\n", err)
		}
		log.Printf("PartNumber: %d, ETag: %s\n", i, etag)
		parts[i-1] = oss.Part{
			PartNumber: i,
			ETag:       etag,
		}
	}

	var result oss.CompleteMultipartUploadResult
	if err = multi.CompleteUpload(parts, &result); err != nil {
		log.Printf("%s\n", err)
	}
	log.Printf("Upload result: %v\n", result)
}
func getSize(f *os.File) (int64, error) {
	fi, err := f.Stat()
	if err != nil {
		return 0, err
	}
	return fi.Size(), nil
}
//----------------------------------------------- parse & load a game data file into dictionary
func parse(file *os.File) {
	// CSV reader
	csv_reader := csv.NewReader(file)
	records, err := csv_reader.ReadAll()
	if err != nil {
		ERR("cannot parse csv file.", file.Name(), err)
		return
	}

	// reject empty files
	if len(records) == 0 {
		ERR("csv file is empty", file.Name())
		return
	}

	// derive the table name from the file name, minus the extension
	fi, err := file.Stat()
	if err != nil {
		ERR("cannot stat the file", file.Name())
		return
	}
	tblname := strings.TrimSuffix(fi.Name(), path.Ext(file.Name()))

	// record the data; the first row is the header, so start from the second row
	for line := 1; line < len(records); line++ {
		for field := 1; field < len(records[line]); field++ {
			// the first field of each record is the row index
			_set(tblname, records[line][0], records[0][field], records[line][field])
		}
	}
}
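// A minimal usage sketch (loadTable is a hypothetical caller) showing the CSV
// shape parse expects: row 0 is the header, and column 0 of each data row is
// the row index. For a file named monsters.csv containing
//
//	id,name,hp
//	1,slime,10
//
// parse stores _set("monsters", "1", "name", "slime") and
// _set("monsters", "1", "hp", "10").
func loadTable(filepath string) {
	f, err := os.Open(filepath)
	if err != nil {
		ERR("cannot open csv file", filepath, err)
		return
	}
	defer f.Close()
	parse(f)
}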
func GetFileSize(file *os.File) (size int64, err error) {
	var fi os.FileInfo
	if fi, err = file.Stat(); err == nil {
		size = fi.Size()
	}
	return
}
// NewBufferFile maps a file to shared memory and returns a handle to the shared memory buffer
func NewBufferFile(file *os.File, size, prot int) (Buffer, error) {
	fi, err := file.Stat()
	if err != nil {
		return nil, err
	}
	sys := fi.Sys().(*syscall.Stat_t)
	if sys.Size != int64(size) {
		return nil, errWrongSize
	}

	// Dup to allow file parameter to be closed regardless
	fd, err := syscall.Dup(int(file.Fd()))
	if err != nil {
		return nil, err
	}

	const flags = syscall.MAP_SHARED
	b, err := syscall.Mmap(fd, 0, size, prot, flags)
	if err != nil {
		return nil, err
	}

	// localFile is nil because fd is from somewhere else
	buf := &sharedBuffer{os.NewFile(uintptr(fd), ""), nil, b, stackToKeep()}
	runtime.SetFinalizer(buf, (*sharedBuffer).finalize)
	return buf, nil
}
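// A minimal usage sketch (mapScratchFile is hypothetical): NewBufferFile
// requires the backing file to be exactly `size` bytes, so create and truncate
// it first; the Dup inside NewBufferFile makes closing f safe afterwards.
func mapScratchFile(path string, size int) (Buffer, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if err := f.Truncate(int64(size)); err != nil {
		return nil, err
	}
	return NewBufferFile(f, size, syscall.PROT_READ|syscall.PROT_WRITE)
}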
func (g *GraphTool) tarCp(srcName string, tw *tar.Writer) (int64, error) {
	var (
		src *os.File
		err error
	)
	if src, err = os.Open(srcName); err != nil {
		return 0, err
	}
	defer src.Close()

	// Hint to the kernel that the file will be read sequentially (fadvise
	// takes FADV_* constants, not MADV_*); failure is only logged since
	// this is merely an optimization.
	srcStat, err := src.Stat()
	if err != nil {
		g.logger.Error(err.Error())
	} else if err := unix.Fadvise(int(src.Fd()), 0, srcStat.Size(), unix.FADV_SEQUENTIAL); err != nil {
		g.logger.Error(err.Error())
	}

	n, err := io.Copy(tw, src)
	if err != nil {
		g.logger.Error(err.Error())
		return n, err
	}
	return n, nil
}
func isTTY(file *os.File) bool {
	stat, err := file.Stat()
	if err != nil {
		log.Fatal(err)
	}
	return (stat.Mode() & os.ModeCharDevice) != 0
}
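// A minimal usage sketch (colorize is hypothetical): only emit ANSI escape
// codes when stdout is actually a terminal, not a pipe or a file.
func colorize(s string) string {
	if isTTY(os.Stdout) {
		return "\x1b[32m" + s + "\x1b[0m"
	}
	return s
}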
func customReader() ([]byte, int) {
	var file *os.File
	var err error
	if section == nil {
		file, err = os.Open(inputSampleFilename)
		if err != nil {
			panic(err)
		}
		fi, err := file.Stat()
		if err != nil {
			panic(err)
		}
		section = io.NewSectionReader(file, 0, fi.Size())
	}
	b := make([]byte, IO_BUFFER_SIZE)
	n, err := section.Read(b)
	if err != nil {
		fmt.Println("section.Read():", err)
		file.Close()
	}
	return b, n
}
func ReadFile(name string) (b []byte, e error) {
	var (
		raw  []byte
		err  error
		file *os.File
		fi   os.FileInfo
	)
	file, err = os.Open(name)
	if err != nil {
		return raw, err
	}
	defer file.Close()
	fi, err = file.Stat()
	if err != nil {
		return raw, err
	}
	raw = make([]byte, fi.Size())
	// ReadFull keeps reading until the buffer is filled; a bare Read may
	// return fewer bytes than the file size.
	_, err = io.ReadFull(file, raw)
	if err != nil {
		return raw, err
	}
	return raw, err
}
func (c *Conn) StoreBlob(file *os.File) (string, error) {
	reader := hashed.NewSha1FileReader(file)
	info, err := file.Stat()
	if err != nil {
		return "", err
	}
	size := info.Size()
	if err := c.WriteJSONFrame(request{"store", "", size, false}); err != nil {
		return "", err
	}
	if err := c.WriteBlobFrameFrom(reader, size); err != nil {
		return "", err
	}
	key := util.HexHash(reader)
	if err := c.WriteJSONFrame(request{Key: key}); err != nil {
		return "", err
	}
	var res response
	if err := c.ReadJSONFrame(&res); err != nil {
		return "", err
	}
	if res.Val != "ok" {
		return "", responseError(res)
	}
	return key, nil
}
func LoadNeedleMap(file *os.File) *NeedleMap {
	nm := NewNeedleMap(file)
	bytes := make([]byte, 16*RowsToRead)
	count, e := nm.indexFile.Read(bytes)
	if count > 0 {
		fstat, _ := file.Stat()
		log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
	}
	for count > 0 && e == nil {
		// each index entry is 16 bytes: 8-byte key, 4-byte offset, 4-byte size
		for i := 0; i < count; i += 16 {
			key := util.BytesToUint64(bytes[i : i+8])
			offset := util.BytesToUint32(bytes[i+8 : i+12])
			size := util.BytesToUint32(bytes[i+12 : i+16])
			if offset > 0 {
				nm.m.Set(Key(key), offset, size)
				//log.Println("reading key", key, "offset", offset, "size", size)
				nm.fileCounter++
			} else {
				nm.m.Delete(Key(key))
				//log.Println("removing key", key)
				nm.deletionCounter++
			}
		}
		count, e = nm.indexFile.Read(bytes)
	}
	return nm
}
func RawToFixed(f *os.File, options *VHDOptions) {
	info, err := f.Stat()
	check(err)
	size := uint64(info.Size())
	header := CreateFixedHeader(size, options)
	check(binary.Write(f, binary.BigEndian, header)) // was silently ignoring the write error
}
// isInteractive returns true if the provided file appears to be a character device
func isInteractive(file *os.File) bool {
	fileInfo, err := file.Stat()
	if err != nil {
		return false
	}
	return fileInfo.Mode()&os.ModeCharDevice != 0
}
// readTombstoneV2 reads the second version of tombstone files, which are
// capable of storing keys and the time range over which points for a key
// were deleted. This format is binary.
func (t *Tombstoner) readTombstoneV2(f *os.File) ([]Tombstone, error) {
	// Skip header, already checked earlier
	if _, err := f.Seek(4, os.SEEK_SET); err != nil {
		return nil, err
	}
	n := int64(4)

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := fi.Size()

	tombstones := []Tombstone{}
	var (
		min, max int64
		key      string
	)
	b := make([]byte, 4096)
	for {
		if n >= size {
			return tombstones, nil
		}

		if _, err = f.Read(b[:4]); err != nil {
			return nil, err
		}
		n += 4
		keyLen := int(binary.BigEndian.Uint32(b[:4]))
		if keyLen > len(b) {
			b = make([]byte, keyLen)
		}

		if _, err := f.Read(b[:keyLen]); err != nil {
			return nil, err
		}
		key = string(b[:keyLen])
		n += int64(keyLen)

		if _, err := f.Read(b[:8]); err != nil {
			return nil, err
		}
		n += 8
		min = int64(binary.BigEndian.Uint64(b[:8]))

		if _, err := f.Read(b[:8]); err != nil {
			return nil, err
		}
		n += 8
		max = int64(binary.BigEndian.Uint64(b[:8]))

		tombstones = append(tombstones, Tombstone{
			Key: key,
			Min: min,
			Max: max,
		})
	}
}
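// A minimal sketch of the record layout readTombstoneV2 consumes (this writer
// is hypothetical, not the library's own): after the 4-byte header, each entry
// is a big-endian uint32 key length, the key bytes, then the min and max times
// as big-endian 64-bit values.
func writeTombstoneV2Entry(w io.Writer, ts Tombstone) error {
	var buf [8]byte
	binary.BigEndian.PutUint32(buf[:4], uint32(len(ts.Key)))
	if _, err := w.Write(buf[:4]); err != nil {
		return err
	}
	if _, err := io.WriteString(w, ts.Key); err != nil {
		return err
	}
	binary.BigEndian.PutUint64(buf[:8], uint64(ts.Min))
	if _, err := w.Write(buf[:8]); err != nil {
		return err
	}
	binary.BigEndian.PutUint64(buf[:8], uint64(ts.Max))
	_, err := w.Write(buf[:8])
	return err
}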
func (gh *GitHub) UploadReleaseAsset(owner, repository string, release int, name string, file *os.File) (*github.ReleaseAsset, *github.Response, error) {
	url_, err := url.Parse(fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repository, release))
	if err != nil {
		return nil, nil, err
	}
	query := url_.Query()
	query.Add("name", name)
	url_.RawQuery = query.Encode()

	stat, err := file.Stat()
	if err != nil {
		return nil, nil, err
	}
	if stat.IsDir() {
		return nil, nil, errors.New("invalid asset: is a directory")
	}

	rq, err := gh.Client.NewUploadRequest(url_.String(), file, stat.Size(), "application/octet-stream")
	if err != nil {
		return nil, nil, err
	}

	asset := new(github.ReleaseAsset)
	rp, err := gh.Client.Do(rq, asset)
	if err != nil {
		return nil, rp, err
	}
	return asset, rp, err
}
func (rr *RemoteRepository) Spool(packageName string, file *os.File) (revision *RevisionInfo, err error) {
	statInfo, err := file.Stat()
	if err != nil {
		fmt.Println("Error stating file", err)
		return
	}
	revisionId, err := buildRevisionId(file)
	if err != nil {
		fmt.Println("Failed to build revision id")
		return
	}
	revision = &RevisionInfo{packageName, revisionId}

	// Splice the revision id between the base name and the extension,
	// e.g. pkg.tar.gz -> pkg.<revisionId>.tar.gz (assumes the name contains a dot).
	fileName := statInfo.Name()
	dot := strings.Index(fileName, ".")
	nameBase := fileName[:dot]
	s3Path := fmt.Sprintf("%s.%s.%s", nameBase, revisionId, fileName[dot+1:])

	err = rr.bucket.PutReader(s3Path, file, statInfo.Size(), "application/octet-stream", s3.Private)
	if err != nil {
		fmt.Println("Failed to PUT revision:", err)
		return
	}
	return
}
func (r *Runner) uploadToS3(file *os.File, b *Build, boundary string) string {
	name := fmt.Sprintf("%s-build-%s-%s.txt", b.ID, b.Commit, time.Now().Format("2006-01-02-15-04-05"))
	url := fmt.Sprintf("https://s3.amazonaws.com/%s/%s", logBucket, name)

	if _, err := file.Seek(0, os.SEEK_SET); err != nil {
		log.Printf("failed to seek log file: %s\n", err)
		return ""
	}
	stat, err := file.Stat()
	if err != nil {
		log.Printf("failed to get log file size: %s\n", err)
		return ""
	}

	log.Printf("uploading build log to S3: %s\n", url)
	if err := s3attempts.Run(func() error {
		contentType := "multipart/mixed; boundary=" + boundary
		acl := "public-read"
		_, err := r.s3.PutObject(&s3.PutObjectRequest{
			Key:           &name,
			Body:          file,
			Bucket:        &logBucket,
			ACL:           &acl,
			ContentType:   &contentType,
			ContentLength: typeconv.Int64Ptr(stat.Size()),
		})
		return err
	}); err != nil {
		log.Printf("failed to upload build output to S3: %s\n", err)
	}
	return url
}
// BuildMetadata generates the metadata for a backup archive file.
func BuildMetadata(file *os.File) (*Metadata, error) {
	// Extract the file size.
	fi, err := file.Stat()
	if err != nil {
		return nil, errors.Trace(err)
	}
	size := fi.Size()

	// Extract the timestamp.
	timestamp := fileTimestamp(fi)

	// Get the checksum.
	hasher := sha1.New()
	_, err = io.Copy(hasher, file)
	if err != nil {
		return nil, errors.Trace(err)
	}
	rawsum := hasher.Sum(nil)
	checksum := base64.StdEncoding.EncodeToString(rawsum)

	// Build the metadata.
	meta := NewMetadata()
	meta.Started = time.Time{}
	meta.Origin = UnknownOrigin()
	err = meta.MarkComplete(size, checksum)
	if err != nil {
		return nil, errors.Trace(err)
	}
	meta.Finished = &timestamp
	return meta, nil
}
func newFile(f *os.File, maxSize int64, pgBits uint) (*file, error) {
	if maxSize < 0 {
		panic("internal error")
	}

	// Clamp the page size to at least the system page size, rounded to a
	// whole multiple of it.
	pgSize := 1 << pgBits
	switch {
	case sysPage > pgSize:
		pgBits = uint(mathutil.Log2Uint64(uint64(sysPage)))
	default:
		pgBits = uint(mathutil.Log2Uint64(uint64(pgSize / sysPage * sysPage)))
	}
	pgSize = 1 << pgBits
	fi := &file{
		f: f,
		m: fileMap{},
		maxPages: int(mathutil.MinInt64(
			1024,
			mathutil.MaxInt64(maxSize/int64(pgSize), 1)),
		),
		pgBits: pgBits,
		pgMask: pgSize - 1,
		pgSize: pgSize,
	}
	info, err := f.Stat()
	if err != nil {
		return nil, err
	}

	if err = fi.Truncate(info.Size()); err != nil {
		return nil, err
	}
	return fi, nil
}
func (gateway Gateway) NewRequestForFile(method, path, accessToken string, body *os.File) (req *Request, apiErr error) {
	progressReader := NewProgressReader(body, gateway.ui, 5*time.Second)
	progressReader.Seek(0, 0)

	fileStats, err := body.Stat()
	if err != nil {
		apiErr = errors.NewWithError(T("Error getting file info"), err)
		return
	}

	request, err := http.NewRequest(method, path, progressReader)
	if err != nil {
		apiErr = errors.NewWithError(T("Error building request"), err)
		return
	}

	fileSize := fileStats.Size()
	progressReader.SetTotalSize(fileSize)
	request.ContentLength = fileSize

	return gateway.newRequest(request, accessToken, progressReader)
}
func ReadLastNLines(f *os.File, lines int) (string, error) {
	if lines <= 0 {
		return "", fmt.Errorf("invalid line count")
	}

	stat, err := f.Stat()
	if err != nil {
		return "", err
	}
	if stat.Size() == 0 {
		// mmap of a zero-length file fails with EINVAL
		return "", nil
	}

	data, err := syscall.Mmap(int(f.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		return "", err
	}
	defer syscall.Munmap(data)

	// If the file does not end in '\n', the partial last line still counts.
	if data[len(data)-1] != '\n' {
		lines--
	}

	// Scan backwards counting newlines; return everything after the newline
	// that precedes the requested number of lines.
	for i := len(data) - 1; i >= 0; i-- {
		if data[i] == '\n' {
			lines--
		}
		if lines < 0 {
			return string(data[i+1:]), nil
		}
	}
	return string(data), nil
}
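// A minimal usage sketch (tailLog is hypothetical): fetch the last 10 lines
// of a log file.
func tailLog(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	return ReadLastNLines(f, 10)
}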
func (a *Archive) AddFile(path string, f *os.File) error {
	info, err := f.Stat()
	if err != nil {
		return err
	}
	return a.AddInfoFile(path, info, f)
}
// ToTempFile writes an action into a generated temporary file and returns its filename
func (a Action) ToTempFile() (filename string, err error) {
	var (
		data []byte
		fd   *os.File
		fi   os.FileInfo
	)
	data, err = json.Marshal(a)
	if err != nil {
		return
	}
	fd, err = ioutil.TempFile("", "migaction_")
	if err != nil {
		return
	}
	defer fd.Close() // deferred only after the error check, so fd is never nil
	_, err = fd.Write(data)
	if err != nil {
		return
	}
	fi, err = fd.Stat()
	if err != nil {
		return
	}
	filename = fmt.Sprintf("%s/%s", os.TempDir(), fi.Name())
	return
}
// FindLoopDeviceFor returns a loopback device file for the specified file,
// which is the backing file of a loop device.
func FindLoopDeviceFor(file *os.File) *os.File {
	stat, err := file.Stat()
	if err != nil {
		return nil
	}
	targetInode := stat.Sys().(*syscall.Stat_t).Ino
	targetDevice := stat.Sys().(*syscall.Stat_t).Dev

	for i := 0; true; i++ {
		path := fmt.Sprintf("/dev/loop%d", i)

		file, err := os.OpenFile(path, os.O_RDWR, 0)
		if err != nil {
			if os.IsNotExist(err) {
				return nil
			}

			// Ignore all errors until the first not-exist:
			// we want to continue looking for the file
			continue
		}

		dev, inode, err := getLoopbackBackingFile(file)
		if err == nil && dev == targetDevice && inode == targetInode {
			return file
		}
		file.Close()
	}
	return nil
}
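// A minimal sketch of what getLoopbackBackingFile could look like (the real
// helper is not shown in this snippet): query the loop device's status via
// the LOOP_GET_STATUS64 ioctl from golang.org/x/sys/unix, which reports the
// device and inode of the backing file.
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
	info, err := unix.IoctlLoopGetStatus64(int(file.Fd()))
	if err != nil {
		return 0, 0, err
	}
	return info.Device, info.Inode, nil
}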
func (c *client) getFileSize(file *os.File) (int64, error) {
	fi, err := file.Stat()
	if err != nil {
		return 0, err
	}
	return fi.Size(), nil
}
func getLogReader(logfile string, logf *os.File) (*bufio.Reader, error) {
	var rdr *bufio.Reader

	// Is this a gzip file?
	if path.Ext(logfile) == gzipext {
		gzrdr, err := gzip.NewReader(logf)
		if err != nil {
			return nil, err
		}
		rdr = bufio.NewReader(gzrdr)
	} else {
		// See if the file has shrunk. If so, read from the beginning.
		fi, err := logf.Stat()
		if err != nil {
			return nil, err
		}
		if fi.Size() < pos {
			pos = 0
		}
		logf.Seek(pos, os.SEEK_SET)
		fmt.Printf("Starting read at offset %d\n", pos)
		rdr = bufio.NewReader(logf)
	}
	return rdr, nil
}
func main() {
	flag.Usage = usage
	flag.Parse()

	var fd *os.File
	switch flag.NArg() {
	case 0:
		fd = os.Stdin
	case 1:
		var err error
		fd, err = os.Open(flag.Arg(0))
		ck(err)
		defer fd.Close()

		fi, err := fd.Stat()
		ck(err)
		if fi.IsDir() {
			ck(fmt.Errorf("%v: is a directory", flag.Arg(0)))
		}
	default:
		usage()
	}

	r := bufio.NewReader(fd)
	w := bufio.NewWriter(os.Stdout)
	defer w.Flush()
	if *dflag {
		decode(r, w)
	} else {
		encode(r, w)
	}
}