func (o *openFileHandle) Write(request *fuse.WriteRequest, response *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	log.Printf("Request: %+v\nObject: %+v", request, o)
	start := request.Offset
	writeData := request.Data
	if writeData == nil {
		return fuse.ENOENT
	}
	lenData := int(start) + len(writeData)
	if lenData > len(o.buffer) {
		// Grow the buffer: allocate a new slice with the required length,
		// copy the old contents, then copy in the new data.
		newbfr := make([]byte, lenData)
		copy(newbfr, o.buffer)
		response.Size = copy(newbfr[start:lenData], writeData)
		//log.Printf("before copying to o.buffer: %s", newbfr)
		o.buffer = newbfr
	} else {
		response.Size = copy(o.buffer[start:lenData], writeData)
	}
	log.Printf("Buffer: %s", o.buffer)
	log.Printf("write response size: %v", response.Size)
	return nil
}
func (h *handle) Write(ctx context.Context, request *fuse.WriteRequest, response *fuse.WriteResponse) (retErr error) {
	defer func() {
		if retErr == nil {
			protolion.Debug(&FileWrite{&h.f.Node, string(request.Data), request.Offset, errorToString(retErr)})
		} else {
			protolion.Error(&FileWrite{&h.f.Node, string(request.Data), request.Offset, errorToString(retErr)})
		}
	}()
	if h.w == nil {
		w, err := h.f.fs.apiClient.PutFileWriter(
			h.f.File.Commit.Repo.Name, h.f.File.Commit.ID, h.f.File.Path, h.f.delimiter(), h.f.fs.handleID)
		if err != nil {
			return err
		}
		h.w = w
	}
	// repeated is how many bytes of this write have already been sent in a
	// previous call to Write. Why does the OS send us the same data twice in
	// different calls? Good question; this behavior has only been observed on
	// OS X, not on Linux. For example, if the cursor is already at 4096 and
	// the kernel re-sends the same 4096 bytes at offset 0, repeated is 4096,
	// the slice below is empty, and nothing is written twice.
	repeated := h.cursor - int(request.Offset)
	if repeated < 0 {
		return fmt.Errorf("gap in bytes written (OpenNonSeekable should make this impossible)")
	}
	written, err := h.w.Write(request.Data[repeated:])
	if err != nil {
		return err
	}
	response.Size = written + repeated
	h.cursor += written
	if h.f.size < request.Offset+int64(written) {
		h.f.size = request.Offset + int64(written)
	}
	return nil
}
func (w *Writes) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	n, err := w.buf.Write(req.Data)
	resp.Size = n
	if err != nil {
		return err
	}
	return nil
}
func (fh *FileHandle) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	fmt.Printf("Write %s - %d at %d\n", fh.FileRef.Path, len(req.Data), req.Offset)
	size, err := fh.File.WriteAt(req.Data, req.Offset)
	resp.Size = size
	return err
}
func (w *Writes) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	n, err := w.buf.Write(req.Data)
	resp.Size = n
	if err != nil {
		// TODO hiding error
		return fuse.EIO
	}
	return nil
}
// bufferWrite applies the write to the file's in-memory buffer, but only if
// the underlying key has been deleted; it reports whether it handled the write.
func (file *consulFile) bufferWrite(req *fuse.WriteRequest, resp *fuse.WriteResponse) bool {
	file.Mutex.Lock()
	defer file.Mutex.Unlock()
	if !file.Deleted {
		return false
	}
	file.Buf = doWrite(req.Offset, req.Data, file.Buf)
	resp.Size = len(req.Data)
	return true
}
func (f *file) Write(ctx context.Context, request *fuse.WriteRequest, response *fuse.WriteResponse) error {
	written, err := pfsutil.PutFile(f.fs.apiClient, f.fs.repositoryName, f.commitID, f.path, request.Offset, bytes.NewReader(request.Data))
	if err != nil {
		return err
	}
	response.Size = written
	if f.size < request.Offset+int64(written) {
		f.size = request.Offset + int64(written)
	}
	return nil
}
// Write implements the fs.HandleWriter interface for ResetCachesFile.
func (f *ResetCachesFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.fs.log.CDebugf(ctx, "ResetCachesFile Write")
	defer func() { f.fs.reportErr(ctx, libkbfs.WriteMode, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	f.fs.config.ResetCaches()
	resp.Size = len(req.Data)
	return nil
}
func (f File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	lock.Lock()
	defer lock.Unlock()
	log.Debug("Write(%s,%d)", f.GetPath(), len(req.Data))
	data := req.Data
	bo := uint64(req.Offset) / f.Fs.blockSize // index of the first block touched
	ro := uint64(req.Offset) % f.Fs.blockSize // offset of the write within that block
	var written uint64
	toWriteInTheFirstBlock := f.Fs.blockSize - ro
	if toWriteInTheFirstBlock > uint64(len(data)) {
		toWriteInTheFirstBlock = uint64(len(data))
	}
	if ro > 0 {
		// The write starts in the middle of a block: read the block, splice
		// in the new data, and write it back.
		block, err := f.getBlock(int(bo + 1))
		if err != nil {
			return err
		}
		log.Debug(">> write-chunk.1:%d[%d:%d]", bo+1, 0, toWriteInTheFirstBlock)
		if err := f.setBlock(int(bo+1), append(block[0:ro], data[:toWriteInTheFirstBlock]...)); err != nil {
			return err
		}
		bo = bo + 1
		written = toWriteInTheFirstBlock
	}
	var i = 0
	for len(data)-int(written)-int(f.Fs.blockSize) >= int(f.Fs.blockSize) {
		log.Debug(">> write-chunk.2:%d[%d:%d]; rem=%d", int(bo)+1+i, written, written+f.Fs.blockSize, uint64(len(data))-written)
		err := f.setBlock(int(bo)+1+i, data[written:written+f.Fs.blockSize])
		if err != nil {
			return err
		}
		written += f.Fs.blockSize
		i++
	}
	if len(data)-int(written) > 0 {
		log.Debug(">> write-chunk.3:%d[%d:%d]", int(bo)+1+i, written, len(data))
		err := f.setBlock(int(bo)+1+i, data[written:])
		if err != nil {
			return err
		}
		written = uint64(len(data))
	}
	err := f.doTruncate(uint64(req.Offset) + uint64(len(req.Data)))
	if err != nil {
		log.Error(err.Error())
		return err
	}
	resp.Size = int(written)
	return f.set_mtime()
}
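// Worked example for the block arithmetic above (assuming a hypothetical
// blockSize of 4096, which is not stated in this section): a 2192-byte write
// at offset 6000 gives bo=1 and ro=1904, so chunk.1 splices all 2192 bytes
// into block index 2 (bo+1) starting at byte 1904; the full-block loop and
// chunk.3 are skipped, doTruncate is called with 8192 (offset plus length),
// and resp.Size is 2192.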
func (h *Handle) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	n, err := h.sink.Write(req.Data)
	resp.Size = n
	h.File.Written(n)
	if err != nil {
		log.Println("ERROR writing chunk:", err)
		return fuse.EIO
	}
	return nil
}
func (f *file) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	f.parent.fs.mu.Lock()
	defer f.parent.fs.mu.Unlock()
	n, err := f.blob.WriteAt(req.Data, req.Offset)
	resp.Size = n
	if err != nil {
		log.Printf("write error: %v", err)
		return fuse.EIO
	}
	return nil
}
// Write pushes data to a swift object.
// If we detect that we are writing more data than the configured
// segment size, then the first object we were writing to is moved
// to the segment container and named according to DLO conventions.
// Remaining data is split into segments sequentially until the
// file handle release is called. If we are overwriting an object,
// we handle segment deletion and object creation.
func (fh *ObjectHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	// Truncate the file if:
	// - this is the first write after creation
	// - this is the first write after opening an existing file
	if (!fh.create && !fh.truncated) || (fh.create && !fh.target.writing) {
		if err := fh.truncate(); err != nil {
			return err
		}
	}

	// Now that we are writing to this file, make sure no lock can
	// be acquired until:
	// - all write operations have been processed
	// - this file handle has been freed
	fh.target.writing = true

	// Write the first segment, or a file smaller than the segment size.
	if fh.uploaded+uint64(len(req.Data)) <= uint64(SegmentSize) {
		if _, err := fh.wd.Write(req.Data); err != nil {
			return err
		}
		fh.uploaded += uint64(len(req.Data))
		fh.target.so.Bytes += int64(len(req.Data))
		goto EndWrite
	}

	// Data written on this writer will be larger than a segment size.
	// Close the current object, move it to the segment container if this
	// is the first time this happens, then open the next segment and
	// start writing to it.
	if fh.uploaded+uint64(len(req.Data)) > uint64(SegmentSize) {
		// Close the current segment.
		if !fh.wroteSegment {
			if err := fh.moveToSegment(); err != nil {
				return err
			}
		}
		fh.wd.Close()
		// Open the next segment.
		fh.wd, err = initSegment(fh.target.cs.Name, fh.segmentPrefix, &fh.segmentID, fh.target.so, req.Data, &fh.uploaded)
		if err != nil {
			return err
		}
		goto EndWrite
	}

EndWrite:
	resp.Size = len(req.Data)
	return nil
}
// Write implements the fs.HandleWriter interface for File.
func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.folder.fs.log.CDebugf(ctx, "File Write sz=%d", len(req.Data))
	defer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if err := f.folder.fs.config.KBFSOps().Write(ctx, f.node, req.Data, req.Offset); err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
func (f *FuseDevice) Write(fReq *fuse.WriteRequest, fResp *fuse.WriteResponse, i fs.Intr) fuse.Error {
	println("write request, datasize: ", len(fReq.Data))
	// FIXME: handle aborted writes
	req := newWriteReq(fReq.Data)
	f.device.writeReqs <- req
	err := <-req.response
	if err == nil {
		fResp.Size = len(fReq.Data)
	}
	println("return on write: ", err)
	return err
}
// Write implements the HandleWriter interface. It is called on *every* write
// (DirectIO mode) to allow this module to handle consistency itself. The
// current strategy is to read the file, change the written portions, then
// write it back atomically. If the key was updated between the read and the
// write, try again.
func (file *consulFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	for attempts := 0; attempts < MaxWriteAttempts; attempts++ {
		if file.bufferWrite(req, resp) {
			return nil
		}
		pair, _, err := file.ConsulFS.Consul.Get(ctx, file.Key, nil)
		if file.bufferWrite(req, resp) {
			return nil
		}
		if err == ErrCanceled {
			return fuse.EINTR
		} else if err != nil {
			file.ConsulFS.Logger.WithFields(logrus.Fields{
				"key":           file.Key,
				logrus.ErrorKey: err,
			}).Error("consul read error")
			return fuse.EIO
		}
		if pair == nil {
			return fuse.ENOENT
		}
		pair.Value = doWrite(req.Offset, req.Data, pair.Value)

		// Write it back!
		success, _, err := file.ConsulFS.Consul.CAS(ctx, pair, nil)
		if file.bufferWrite(req, resp) {
			return nil
		}
		if err == ErrCanceled {
			return fuse.EINTR
		} else if err != nil {
			file.ConsulFS.Logger.WithFields(logrus.Fields{
				"key":           file.Key,
				logrus.ErrorKey: err,
			}).Error("consul write error")
			return fuse.EIO
		}
		if success {
			resp.Size = len(req.Data)
			return nil
		}
		file.ConsulFS.Logger.WithField("key", file.Key).Warning("write did not succeed")
	}
	file.ConsulFS.Logger.WithField("key", file.Key).Error("unable to perform timely write; aborting")
	return fuse.EIO
}
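// The doWrite helper used by bufferWrite and Write above is not shown in this
// section. A minimal sketch of such an offset-splice helper might look like
// the following; the name doWriteSketch and its body are an illustrative
// assumption, not the repository's actual implementation.
func doWriteSketch(offset int64, data, buf []byte) []byte {
	end := int(offset) + len(data)
	if end > len(buf) {
		// Grow the buffer so the write fits, preserving existing contents.
		grown := make([]byte, end)
		copy(grown, buf)
		buf = grown
	}
	// Splice the new data in at the requested offset.
	copy(buf[offset:], data)
	return buf
}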
func (fh FileHandle) Write(ctx context.Context, req *bfuse.WriteRequest, resp *bfuse.WriteResponse) error {
	log.Printf("Write offset %d size %d", req.Offset, len(req.Data))
	if fh.h == nil {
		return EBADF
	}
	if err := fh.h.PWrite(req.Offset, req.Data); err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
func (f *file) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.dirty = dirty
	n, err := f.blob.WriteAt(req.Data, req.Offset)
	resp.Size = n
	if err != nil {
		log.Printf("write error: %v", err)
		return fuse.EIO
	}
	return nil
}
func (f *file) Write(ctx context.Context, request *fuse.WriteRequest, response *fuse.WriteResponse) (retErr error) {
	defer func() {
		protolog.Info(&FileWrite{&f.Node, errorToString(retErr)})
	}()
	written, err := pfsutil.PutFile(f.fs.apiClient, f.File.Commit.Repo.Name, f.File.Commit.Id, f.File.Path, request.Offset, bytes.NewReader(request.Data))
	if err != nil {
		return err
	}
	response.Size = written
	if f.size < request.Offset+int64(written) {
		f.size = request.Offset + int64(written)
	}
	return nil
}
// Write implements the fs.HandleWriter interface for RekeyFile.
func (f *RekeyFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.folder.fs.log.CDebugf(ctx, "RekeyFile Write")
	defer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	err = f.folder.fs.config.KBFSOps().Rekey(ctx, f.folder.getFolderBranch().Tlf)
	if err != nil {
		return err
	}
	f.folder.fs.NotificationGroupWait()
	resp.Size = len(req.Data)
	return nil
}
// Write implements the fs.HandleWriter interface for RekeyFile.
func (f *RekeyFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	ctx = NewContextWithOpID(ctx, f.folder.fs.log)
	f.folder.fs.log.CDebugf(ctx, "RekeyFile Write")
	defer func() { f.folder.fs.reportErr(ctx, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	err = f.folder.fs.config.KBFSOps().Rekey(ctx, f.folder.folderBranch.Tlf)
	if err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
// Write implements the fs.HandleWriter interface for ReclaimQuotaFile. Note
// that a write triggers quota reclamation, but does not wait for it to
// finish. If you want to wait, write to SyncFromServerFileName.
func (f *ReclaimQuotaFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.folder.fs.log.CDebugf(ctx, "ReclaimQuotaFile Write")
	defer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	err = libkbfs.ForceQuotaReclamationForTesting(f.folder.fs.config, f.folder.getFolderBranch())
	if err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
// Write implements the fs.HandleWriter interface for UnstageFile.
func (f *UnstageFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.folder.fs.log.CDebugf(ctx, "UnstageFile Write")
	defer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	err = f.folder.fs.config.KBFSOps().UnstageForTesting(ctx, f.folder.getFolderBranch())
	if err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
func (h *mutFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, res *fuse.WriteResponse) error {
	if h.tmp == nil {
		log.Printf("Write called on camli mutFileHandle without a tempfile set")
		return fuse.EIO
	}
	n, err := h.tmp.WriteAt(req.Data, req.Offset)
	log.Printf("mutFileHandle.Write(%q, %d bytes at %d, flags %v) = %d, %v", h.f.fullPath(), len(req.Data), req.Offset, req.Flags, n, err)
	if err != nil {
		log.Println("mutFileHandle.Write:", err)
		return fuse.EIO
	}
	res.Size = n
	h.f.setSizeAtLeast(req.Offset + int64(n))
	return nil
}
// HandleWriter
func (f *File) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
	f.mu.Lock()
	defer f.mu.Unlock()
	newLen := req.Offset + int64(len(req.Data))
	if newLen > int64(maxInt) {
		return fuse.Errno(syscall.EFBIG)
	}
	// Overwrite in place, then append whatever extends past the current end.
	// Note: this assumes req.Offset <= len(f.data); a write that starts past
	// the current end would panic when slicing f.data below.
	n := copy(f.data[req.Offset:], req.Data)
	if n < len(req.Data) {
		f.data = append(f.data, req.Data[n:]...)
	}
	resp.Size = len(req.Data)
	return nil
}
func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	// Expand the buffer if necessary.
	newLen := req.Offset + int64(len(req.Data))
	if newLen > int64(maxInt) {
		return fuse.Errno(syscall.EFBIG)
	}
	if newLen := int(newLen); newLen > len(f.data) {
		f.data = append(f.data, make([]byte, newLen-len(f.data))...)
	}
	n := copy(f.data[req.Offset:], req.Data)
	resp.Size = n
	return nil
}
// Write implements the fs.HandleWriter interface for UpdatesFile.
func (f *UpdatesFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	ctx = NewContextWithOpID(ctx, f.folder.fs.log)
	f.folder.fs.log.CDebugf(ctx, "UpdatesFile (enable: %t) Write", f.enable)
	defer func() { f.folder.fs.reportErr(ctx, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	f.folder.updateMu.Lock()
	defer f.folder.updateMu.Unlock()
	if f.enable {
		if f.folder.updateChan == nil {
			return errors.New("Updates are already enabled")
		}
		err = libkbfs.RestartCRForTesting(f.folder.fs.config, f.folder.folderBranch)
		if err != nil {
			return err
		}
		f.folder.updateChan <- struct{}{}
		close(f.folder.updateChan)
		f.folder.updateChan = nil
	} else {
		if f.folder.updateChan != nil {
			return errors.New("Updates are already disabled")
		}
		f.folder.updateChan, err = libkbfs.DisableUpdatesForTesting(f.folder.fs.config, f.folder.folderBranch)
		if err != nil {
			return err
		}
		err = libkbfs.DisableCRForTesting(f.folder.fs.config, f.folder.folderBranch)
		if err != nil {
			return err
		}
	}
	resp.Size = len(req.Data)
	return nil
}
func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	f.Lock()
	defer f.Unlock()
	log.Printf("(*File) Write, req=%q, path=%s", req, filepath.Base(f.path))
	if f.handler == nil {
		log.Println("Write: File should be open, aborting request")
		return fuse.ENOTSUP
	}
	n, err := f.handler.WriteAt(req.Data, req.Offset)
	if err != nil {
		log.Println("Write ERR: ", err)
		return err
	}
	resp.Size = n
	return nil
}
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	glog.Infof("Entered Write\n")
	// TODO: Check if we need to add something here for playlists and drop directories.
	if fh.r == nil {
		if fh.f.name == ".description" {
			glog.Errorf("Not allowed to write description file.\n")
			// TODO: Allow writing the description file.
			return nil
		}
		return fuse.EIO
	}
	glog.Infof("Writing file: %s.\n", fh.r.Name())
	if _, err := fh.r.Seek(req.Offset, 0); err != nil {
		return err
	}
	n, err := fh.r.Write(req.Data)
	resp.Size = n
	return err
}
// Write data to the file handle.
func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	fs.Debug(fh.remote, "WriteFileHandle.Write len=%d", len(req.Data))
	fh.mu.Lock()
	defer fh.mu.Unlock()
	if fh.closed {
		fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", errClosedFileHandle)
		return errClosedFileHandle
	}
	fh.writeCalled = true
	// FIXME should probably check the file isn't being seeked?
	n, err := fh.pipeWriter.Write(req.Data)
	resp.Size = n
	fh.file.written(int64(n))
	if err != nil {
		fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", err)
		return err
	}
	fs.Debug(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
	return nil
}
// Write implements the fs.HandleWriter interface for SyncFromServerFile.
func (f *SyncFromServerFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	f.folder.fs.log.CDebugf(ctx, "SyncFromServerFile Write")
	defer func() { f.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if len(req.Data) == 0 {
		return nil
	}
	// Use a context with a nil CtxAppIDKey value so that notifications
	// generated from this sync won't be discarded.
	syncCtx := context.WithValue(ctx, CtxAppIDKey, nil)
	err = f.folder.fs.config.KBFSOps().SyncFromServerForTesting(syncCtx, f.folder.getFolderBranch())
	if err != nil {
		return err
	}
	f.folder.fs.NotificationGroupWait()
	resp.Size = len(req.Data)
	return nil
}