func (clnt *Clnt) recv() {
	var err error
	var buf []byte

	pos := 0
	for {
		// Connect can change the client Msize.
		clntmsize := int(atomic.LoadUint32(&clnt.Msize))
		if len(buf) < clntmsize {
			b := make([]byte, clntmsize*8)
			copy(b, buf[0:pos])
			buf = b
		}

		n, oerr := clnt.conn.Read(buf[pos:])
		if oerr != nil || n == 0 {
			// A Read may legally return (0, nil); guard so we never call
			// Error() on a nil error.
			if oerr == nil {
				err = &ninep.Error{"unexpected eof", ninep.EIO}
			} else {
				err = &ninep.Error{oerr.Error(), ninep.EIO}
			}
			clnt.Lock()
			clnt.err = err
			clnt.Unlock()
			goto closed
		}

		pos += n
		for pos > 4 {
			sz, _ := ninep.Gint32(buf)
			if pos < int(sz) {
				if len(buf) < int(sz) {
					b := make([]byte, atomic.LoadUint32(&clnt.Msize)*8)
					copy(b, buf[0:pos])
					buf = b
				}

				break
			}

			// Note: err here shadows the outer err; on failure clnt.err is
			// set before the goto, and the closed path falls back to it.
			fc, err, fcsize := ninep.Unpack(buf, clnt.Dotu)
			clnt.Lock()
			if err != nil {
				clnt.err = err
				clnt.conn.Close()
				clnt.Unlock()
				goto closed
			}

			if clnt.Debuglevel > 0 {
				clnt.logFcall(fc)
				if clnt.Debuglevel&DbgPrintPackets != 0 {
					log.Println("}-}", clnt.Id, fmt.Sprint(fc.Pkt))
				}

				if clnt.Debuglevel&DbgPrintFcalls != 0 {
					log.Println("}}}", clnt.Id, fc.String())
				}
			}

			// Find the pending request whose tag matches the response.
			var r *Req
			for r = clnt.reqfirst; r != nil; r = r.next {
				if r.Tc.Tag == fc.Tag {
					break
				}
			}

			if r == nil {
				clnt.err = &ninep.Error{"unexpected response", ninep.EINVAL}
				clnt.conn.Close()
				clnt.Unlock()
				goto closed
			}

			// Good clean fun. There's a race where you can get the response
			// BEFORE the loop in send() thinks it is done with the request.
			// So we have to block on it being sent, because we really can't
			// dequeue any more requests until this one is wrapped up.
			// TODO: consider a goroutine per fid for this, if we need it.
			// The reason I feel this is safe is that if we got a tag back
			// for a request we sent, then r.Sent should be written. If we
			// got a tag back for a request we did not send, we won't find
			// it here anyway.
			<-r.Sent
			r.Rc = fc

			// Unlink r from the doubly-linked request queue.
			switch {
			case r.next == nil && r.prev == nil:
				clnt.reqlast = nil
				clnt.reqfirst = nil

			case r.next == nil:
				clnt.reqlast = r.prev
				r.prev.next = nil
				r.prev = nil

			case r.prev == nil:
				clnt.reqfirst = r.next
				r.next.prev = nil
				r.next = nil

			default:
				r.next.prev = r.prev
				r.prev.next = r.next
				r.next = nil
				r.prev = nil
			}
			clnt.Unlock()

			// In 9P an R-message type is always the matching T-message
			// type + 1; anything else must be an Rerror.
			if r.Tc.Type != r.Rc.Type-1 {
				if r.Rc.Type != ninep.Rerror {
					r.Err = &ninep.Error{"invalid response", ninep.EINVAL}
				} else if r.Err == nil {
					r.Err = &ninep.Error{r.Rc.Error, r.Rc.Errornum}
				}
			}

			if r.Done != nil {
				r.Done <- r
			}

			// Slide past the consumed message. The grow branches above copy
			// into fresh buffers, so the backing array cannot grow without
			// bound.
			pos -= fcsize
			buf = buf[fcsize:]
		}
	}

closed:
	clnt.Lock()
	// Drain all sent notifications from the send goroutine so that it can
	// accept the done notification below. This is necessary in cases where
	// conn.Read returned an error and the normal <-r.Sent is never reached.
	for r := clnt.reqfirst; r != nil; r = r.next {
		<-r.Sent
	}
	clnt.Unlock()
	clnt.done <- true

	/* send error to all pending requests */
	clnt.Lock()
	r := clnt.reqfirst
	clnt.reqfirst = nil
	clnt.reqlast = nil
	if err == nil {
		err = clnt.err
	}
	clnt.Unlock()
	for r != nil {
		next := r.next
		r.Err = err
		r.next = nil
		r.prev = nil
		if r.Done != nil {
			r.Done <- r
		}
		r = next
	}

	// Unlink the client from the global client list.
	clnts.Lock()
	if clnt.prev != nil {
		clnt.prev.next = clnt.next
		clnt.prev = nil
	} else {
		clnts.clntList = clnt.next
	}

	if clnt.next != nil {
		clnt.next.prev = clnt.prev
		clnt.next = nil
	} else {
		clnts.clntLast = clnt.prev
	}
	clnts.Unlock()

	if sop, ok := (interface{}(clnt)).(StatsOps); ok {
		sop.statsUnregister()
	}
}
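// sentHandshakeSketch is an illustrative sketch (not part of go9p) of the
// <-r.Sent pattern used in (*Clnt).recv above: a response can arrive before
// the goroutine that wrote the request has finished its bookkeeping, so the
// receiver blocks on a per-request channel that the sender writes exactly
// once after the request is fully on the wire. The names and the buffered
// channel here are assumptions made for the sketch only.
func sentHandshakeSketch() {
	sent := make(chan bool, 1) // per-request; buffered so the sender never blocks

	go func() {
		// ... write the request bytes to the connection here ...
		sent <- true // signal: the request is fully sent
	}()

	// Receiver side: even if the response arrives first, wait for the
	// sender before dequeuing, so the request state is consistent.
	<-sent
}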
func (conn *Conn) recv() {
	var err error
	var n int

	buf := make([]byte, conn.Msize*8)
	pos := 0
	for {
		if len(buf) < int(conn.Msize) {
			b := make([]byte, conn.Msize*8)
			copy(b, buf[0:pos])
			buf = b
		}

		n, err = conn.conn.Read(buf[pos:])
		if err != nil || n == 0 {
			conn.close()
			return
		}

		pos += n
		for pos > 4 {
			sz, _ := ninep.Gint32(buf)
			if sz > conn.Msize {
				log.Println("bad client connection: ", conn.conn.RemoteAddr())
				conn.conn.Close()
				conn.close()
				return
			}

			if pos < int(sz) {
				if len(buf) < int(sz) {
					b := make([]byte, conn.Msize*8)
					copy(b, buf[0:pos])
					buf = b
				}

				break
			}

			fc, err, fcsize := ninep.Unpack(buf, conn.Dotu)
			if err != nil {
				log.Printf("invalid packet: %v %v", err, buf)
				conn.conn.Close()
				conn.close()
				return
			}

			tag := fc.Tag
			req := new(Req)
			// Reuse a recycled response fcall from rchan if one is
			// available, otherwise allocate a fresh one.
			select {
			case req.Rc = <-conn.rchan:
			default:
				req.Rc = ninep.NewFcall(conn.Msize)
			}

			req.Conn = conn
			req.Tc = fc

			if conn.Debuglevel > 0 {
				conn.logFcall(req.Tc)
				if conn.Debuglevel&DbgPrintPackets != 0 {
					log.Println(">->", conn.Id, fmt.Sprint(req.Tc.Pkt))
				}

				if conn.Debuglevel&DbgPrintFcalls != 0 {
					log.Println(">>>", conn.Id, req.Tc.String())
				}
			}

			conn.Lock()
			conn.nreqs++
			conn.tsz += uint64(fc.Size)
			conn.npend++
			if conn.npend > conn.maxpend {
				conn.maxpend = conn.npend
			}

			// Queue the request on its tag. Only the first request for a
			// given tag is processed immediately; later ones with the same
			// tag wait until the earlier one completes.
			req.next = conn.Reqs[tag]
			conn.Reqs[tag] = req
			process := req.next == nil
			if req.next != nil {
				req.next.prev = req
			}
			conn.Unlock()

			if process {
				// Tversion may change some attributes of the connection, so
				// we block on it. Otherwise, we may loop back to reading and
				// that is a race. This fix brought to you by the race
				// detector.
				if req.Tc.Type == ninep.Tversion {
					req.process()
				} else {
					go req.process()
				}
			}

			buf = buf[fcsize:]
			pos -= fcsize
		}
	}
}
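// frameComplete is an illustrative sketch (not a go9p API) of the framing
// logic both recv loops above rely on: a 9P message begins with a 4-byte
// little-endian size field that counts the whole message, including the
// size field itself, which is why the loops wait for pos > 4 before trying
// to parse. Real code should keep using ninep.Gint32 and ninep.Unpack.
func frameComplete(buf []byte) (sz int, ok bool) {
	if len(buf) < 4 {
		return 0, false // size prefix not fully received yet
	}
	// Little-endian decode of the size prefix, as ninep.Gint32 does.
	sz = int(buf[0]) | int(buf[1])<<8 | int(buf[2])<<16 | int(buf[3])<<24
	return sz, len(buf) >= sz
}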