Example 1
func (srv *Srv) read(req *Req) {
	tc := req.Tc
	fid := req.Fid
	if tc.Count+ninep.IOHDRSZ > req.Conn.Msize {
		req.RespondError(Etoolarge)
		return
	}

	if (fid.Type & ninep.QTAUTH) != 0 {
		var n int

		rc := req.Rc
		err := ninep.InitRread(rc, tc.Count)
		if err != nil {
			req.RespondError(err)
			return
		}

		if op, ok := (req.Conn.Srv.ops).(AuthOps); ok {
			n, err = op.AuthRead(fid, tc.Offset, rc.Data)
			if err != nil {
				req.RespondError(err)
				return
			}

			ninep.SetRreadCount(rc, uint32(n))
			req.Respond()
		} else {
			req.RespondError(Enotimpl)
		}

		return
	}

	if !fid.opened || (fid.Omode&3) == ninep.OWRITE {
		req.RespondError(Ebaduse)
		return
	}

	if (fid.Type & ninep.QTDIR) != 0 {
		fid.Lock()
		if tc.Offset == 0 {
			fid.Diroffset = 0
		} else if tc.Offset != fid.Diroffset {
			// This used to be an error, at this
			// level. But maybe the provider can handle
			// offsets that change. In one version of 9p
			// we were able to support arbitrary
			// offsets. At the least, we're going to let
			// the provider decide if this is an error.
			fid.Diroffset = tc.Offset
		}
		fid.Unlock()
	}

	(req.Conn.Srv.ops).(ReqOps).Read(req)
}
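The final line above hands the request to the file server's own ReqOps.Read method. A minimal sketch of what such a handler can look like, assuming the ninep and srv packages used throughout these examples are imported; the echoFS type and its data field are hypothetical:

// echoFS is a hypothetical server type embedding srv.Srv; only Read is sketched.
type echoFS struct {
	srv.Srv
	data []byte
}

// Read serves fs.data starting at the requested offset, following the same
// InitRread / SetRreadCount / Respond pattern as the examples here.
func (fs *echoFS) Read(req *srv.Req) {
	tc, rc := req.Tc, req.Rc
	if err := ninep.InitRread(rc, tc.Count); err != nil {
		req.RespondError(err)
		return
	}
	n := 0
	if tc.Offset < uint64(len(fs.data)) {
		n = copy(rc.Data, fs.data[tc.Offset:])
	}
	ninep.SetRreadCount(rc, uint32(n))
	req.Respond()
}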
Example 2
func (f *NullFS) Read(r *srv.Req) {
	var count int

	ninep.InitRread(r.Rc, r.Tc.Count)
	fid := r.Fid.Aux.(*Fid)

	if fid.Qid.Path == Qroot {
		var dirents []byte
		for path, v := range dirQids {
			d := &ninep.Dir{
				Qid:  *v,
				Type: uint16(v.Type),
				Mode: uint32(v.Type) | v.Version,
				Name: path,
				Uid:  "root",
				Gid:  "root",
			}
			b := ninep.PackDir(d, true)
			dirents = append(dirents, b...)
			count += len(b)
		}

		// TODO: put this boilerplate into a helper function.
		switch {
		case r.Tc.Offset > uint64(len(dirents)):
			count = 0
		case len(dirents[r.Tc.Offset:]) > int(r.Tc.Count):
			count = int(r.Tc.Count)
		default:
			count = len(dirents[r.Tc.Offset:])
		}

		if count == 0 && int(r.Tc.Offset) < len(dirents) && len(dirents) > 0 {
			r.RespondError(&ninep.Error{Err: "too small read size for dir entry", Errornum: ninep.EINVAL})
			return
		}
		copy(r.Rc.Data, dirents[r.Tc.Offset:int(r.Tc.Offset)+count])
	} else if fid.Qid.Path == Qzero {
		count = int(r.Tc.Count)
	}
	ninep.SetRreadCount(r.Rc, uint32(count))
	r.Respond()
}
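The TODO in the example above asks for the offset/count boilerplate to be pulled into a helper. One possible shape for it, mirroring the switch statement exactly (the name clampDirRead is hypothetical):

// clampDirRead returns how many bytes of the packed directory entries to send
// for a read of at most max bytes starting at offset.
func clampDirRead(dirents []byte, offset uint64, max uint32) int {
	switch {
	case offset > uint64(len(dirents)):
		return 0
	case len(dirents[offset:]) > int(max):
		return int(max)
	default:
		return len(dirents[offset:])
	}
}

The caller would then set count = clampDirRead(dirents, r.Tc.Offset, r.Tc.Count) before the too-small-read check and the copy into r.Rc.Data.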
Example 3
// Write handles writes for writable files and always succeeds.
// Only the null file has write permission, so we don't bother checking Path.
func (f *NullFS) Write(r *srv.Req) {
	// Writes to the null device always succeed, so report the full requested count.
	count := r.Tc.Count
	ninep.SetRreadCount(r.Rc, count)
	r.Respond()
}
Example 4
func (*Fsrv) Read(req *Req) {
	var i, n int
	var err error

	fid := req.Fid.Aux.(*FFid)
	f := fid.F
	tc := req.Tc
	rc := req.Rc
	ninep.InitRread(rc, tc.Count)

	if f.Mode&ninep.DMDIR != 0 {
		// directory
		f.Lock()
		if tc.Offset == 0 {
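			// Offset 0 starts a fresh directory read: count the children
			// first, then snapshot them into fid.dirs for this and later reads.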
			var g *File
			for n, g = 0, f.cfirst; g != nil; n, g = n+1, g.next {
			}

			fid.dirs = make([]*File, n)
			for n, g = 0, f.cfirst; g != nil; n, g = n+1, g.next {
				fid.dirs[n] = g
			}
		}

		n = 0
		b := rc.Data
		// only return whole entries.
		for i = 0; i < len(fid.dirs); i++ {
			g := fid.dirs[i]
			g.Lock()
			if (g.flags & Fremoved) != 0 {
				g.Unlock()
				continue
			}

			nd := ninep.PackDir(&g.Dir, req.Conn.Dotu)
			g.Unlock()

			if len(nd) > len(b) {
				break
			}
			copy(b, nd)
			b = b[len(nd):]
			n += len(nd)
		}
		fid.dirs = fid.dirs[i:]
		f.Unlock()
	} else {
		// file
		if rop, ok := f.Ops.(FReadOp); ok {
			n, err = rop.Read(fid, rc.Data, tc.Offset)
			if err != nil {
				req.RespondError(err)
				return
			}
		} else {
			req.RespondError(Eperm)
			return
		}
	}

	ninep.SetRreadCount(rc, uint32(n))
	req.Respond()
}
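The type assertion in the file branch above implies an FReadOp interface whose Read method takes the fid, a destination buffer, and an offset, and returns a byte count and an error. A minimal sketch of an implementation, assuming FFid and File are exported from the srv package as in the other examples; the staticFile type and its content field are hypothetical:

// staticFile serves a fixed byte slice through Fsrv.
type staticFile struct {
	srv.File
	content []byte
}

// Read copies out whatever remains past offset; a count of 0 signals EOF.
func (f *staticFile) Read(fid *srv.FFid, buf []byte, offset uint64) (int, error) {
	if offset >= uint64(len(f.content)) {
		return 0, nil
	}
	return copy(buf, f.content[offset:]), nil
}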
Example 5
func (u *Ufs) Read(req *srv.Req) {
	dbg := u.Debuglevel&srv.DbgLogFcalls != 0
	fid := req.Fid.Aux.(*Fid)
	tc := req.Tc
	rc := req.Rc
	err := fid.stat()
	if err != nil {
		req.RespondError(err)
		return
	}

	ninep.InitRread(rc, tc.Count)
	var count int
	var e error
	if fid.st.IsDir() {
		if tc.Offset == 0 {
			var e error
			// If we got here, it was open. Can't really seek
			// in most cases, just close and reopen it.
			fid.file.Close()
			if fid.file, e = os.OpenFile(fid.path, omode2uflags(req.Fid.Omode), 0); e != nil {
				req.RespondError(toError(e))
				return
			}

			if fid.dirs, e = fid.file.Readdir(-1); e != nil {
				req.RespondError(toError(e))
				return
			}

			if dbg {
				log.Printf("Read: read %d entries", len(fid.dirs))
			}
			fid.dirents = nil
			fid.direntends = nil
			for i := 0; i < len(fid.dirs); i++ {
				path := fid.path + "/" + fid.dirs[i].Name()
				st, err := dir2Dir(path, fid.dirs[i], req.Conn.Dotu, req.Conn.Srv.Upool)
				if err != nil {
					if dbg {
						log.Printf("dbg: stat of %v: %v", path, err)
					}
					continue
				}
				if dbg {
					log.Printf("Stat: %v is %v", path, st)
				}
				b := ninep.PackDir(st, req.Conn.Dotu)
				fid.dirents = append(fid.dirents, b...)
				count += len(b)
				fid.direntends = append(fid.direntends, count)
				if dbg {
					log.Printf("fid.direntends is %v\n", fid.direntends)
				}
			}
		}

		switch {
		case tc.Offset > uint64(len(fid.dirents)):
			count = 0
		case len(fid.dirents[tc.Offset:]) > int(tc.Count):
			count = int(tc.Count)
		default:
			count = len(fid.dirents[tc.Offset:])
		}

		if dbg {
			log.Printf("readdir: count %v @ offset %v", count, tc.Offset)
		}
		nextend := sort.SearchInts(fid.direntends, int(tc.Offset)+count)
		if nextend < len(fid.direntends) {
			if fid.direntends[nextend] > int(tc.Offset)+count {
				if nextend > 0 {
					count = fid.direntends[nextend-1] - int(tc.Offset)
				} else {
					count = 0
				}
			}
		}
		if dbg {
			log.Printf("readdir: count adjusted %v @ offset %v", count, tc.Offset)
		}
		if count == 0 && int(tc.Offset) < len(fid.dirents) && len(fid.dirents) > 0 {
			req.RespondError(&ninep.Error{Err: "too small read size for dir entry", Errornum: ninep.EINVAL})
			return
		}
		copy(rc.Data, fid.dirents[tc.Offset:int(tc.Offset)+count])
	} else {
		count, e = fid.file.ReadAt(rc.Data, int64(tc.Offset))
		if e != nil && e != io.EOF {
			req.RespondError(toError(e))
			return
		}
	}

	ninep.SetRreadCount(rc, uint32(count))
	req.Respond()
}
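The sort.SearchInts step above is what keeps directory replies aligned to whole entries: fid.direntends records the byte offset just past each packed entry, and a reply may only end on one of those boundaries. The same adjustment written as a standalone helper (hypothetical name, using the standard sort package already imported by this example):

// clampToEntryBoundary trims count so that offset+count lands exactly on an
// entry boundary from direntends; 0 means the read cannot hold even the next
// whole entry.
func clampToEntryBoundary(direntends []int, offset, count int) int {
	next := sort.SearchInts(direntends, offset+count)
	if next < len(direntends) && direntends[next] > offset+count {
		if next > 0 {
			return direntends[next-1] - offset
		}
		return 0
	}
	return count
}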