Example #1
File: zappy_test.go  Project: keysonZZZ/kmg
// ZappyMustUnCompress decompresses a zappy-encoded block, panicking on any
// decode error instead of returning it.
func ZappyMustUnCompress(inb []byte) (outb []byte) {
	outb, err := zappy.Decode(nil, inb)
	if err != nil {
		panic(err)
	}
	return outb
}
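A minimal round-trip sketch of the helper above, assuming the zappy package is github.com/cznic/zappy, whose Encode and Decode both accept an optional destination buffer (nil is valid) and return an appropriately sized slice:

package main

import (
	"bytes"
	"fmt"

	"github.com/cznic/zappy"
)

func main() {
	src := bytes.Repeat([]byte("hello zappy "), 32)

	// Compress; a nil destination lets Encode allocate one.
	enc, err := zappy.Encode(nil, src)
	if err != nil {
		panic(err)
	}

	// Decompress the same way ZappyMustUnCompress does, keeping the error.
	dec, err := zappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}

	// Prints the original size, the compressed size, and true.
	fmt.Println(len(src), len(enc), bytes.Equal(src, dec))
}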
Example #2
// Get returns the data content of a block referred to by handle or an error if
// any.  The returned slice may be a sub-slice of buf if buf was large enough
// to hold the entire content.  Otherwise, a newly allocated slice will be
// returned.  It is valid to pass a nil buf.
//
// If the content was stored using compression then it is transparently
// returned decompressed.
//
// Handle must have been obtained initially from Alloc and must be still valid,
// otherwise invalid data may be returned without detecting the error.
func (a *Allocator) Get(buf []byte, handle int64) (b []byte, err error) {
	buf = buf[:cap(buf)]
	if n, ok := a.m[handle]; ok {
		a.lru.moveToFront(n)
		b = need(len(n.b), buf)
		copy(b, n.b)
		a.expHit++
		a.hit++
		return
	}

	a.expMiss++
	a.miss++
	if a.miss > 10 && len(a.m) < 500 {
		if 100*a.hit/a.miss < 95 {
			a.cacheSz++
		}
		a.hit, a.miss = 0, 0
	}
	defer func(h int64) {
		if err == nil {
			a.cadd(b, h)
		}
	}(handle)

	first := bufs.GCache.Get(16)
	defer bufs.GCache.Put(first)
	relocated := false
	relocSrc := handle
reloc:
	if handle <= 0 || handle > maxHandle {
		return nil, &ErrINVAL{"Allocator.Get: handle out of limits", handle}
	}

	off := h2off(handle)
	if err = a.read(first, off); err != nil {
		return
	}

	switch tag := first[0]; tag {
	default:
		dlen := int(tag)
		atoms := n2atoms(dlen)
		switch atoms {
		case 1:
			switch tag := first[15]; tag {
			default:
				return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
			case tagNotCompressed:
				b = need(dlen, buf)
				copy(b, first[1:])
				return
			case tagCompressed:
				return zappy.Decode(buf, first[1:dlen+1])
			}
		default:
			cc := bufs.GCache.Get(1)
			defer bufs.GCache.Put(cc)
			dlen := int(tag)
			atoms := n2atoms(dlen)
			tailOff := off + 16*int64(atoms) - 1
			if err = a.read(cc, tailOff); err != nil {
				return
			}

			switch tag := cc[0]; tag {
			default:
				return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
			case tagNotCompressed:
				b = need(dlen, buf)
				off += 1
				if err = a.read(b, off); err != nil {
					b = buf[:0]
				}
				return
			case tagCompressed:
				zbuf := bufs.GCache.Get(dlen)
				defer bufs.GCache.Put(zbuf)
				off += 1
				if err = a.read(zbuf, off); err != nil {
					return buf[:0], err
				}

				return zappy.Decode(buf, zbuf)
			}
		}
	case 0:
		return buf[:0], nil
	case tagUsedLong:
		cc := bufs.GCache.Get(1)
		defer bufs.GCache.Put(cc)
		dlen := m2n(int(first[1])<<8 | int(first[2]))
		atoms := n2atoms(dlen)
		tailOff := off + 16*int64(atoms) - 1
		if err = a.read(cc, tailOff); err != nil {
			return
		}

		switch tag := cc[0]; tag {
		default:
			return nil, &ErrILSEQ{Type: ErrTailTag, Off: off, Arg: int64(tag)}
		case tagNotCompressed:
			b = need(dlen, buf)
			off += 3
			if err = a.read(b, off); err != nil {
				b = buf[:0]
			}
			return
		case tagCompressed:
			zbuf := bufs.GCache.Get(dlen)
			defer bufs.GCache.Put(zbuf)
			off += 3
			if err = a.read(zbuf, off); err != nil {
				return buf[:0], err
			}

			return zappy.Decode(buf, zbuf)
		}
	case tagFreeShort, tagFreeLong:
		return nil, &ErrILSEQ{Type: ErrExpUsedTag, Off: off, Arg: int64(tag)}
	case tagUsedRelocated:
		if relocated {
			return nil, &ErrILSEQ{Type: ErrUnexpReloc, Off: off, Arg: relocSrc}
		}

		handle = b2h(first[1:])
		relocated = true
		goto reloc
	}
}
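Get documents that the returned slice may be a sub-slice of the caller's buf whenever cap(buf) is large enough, so a single scratch buffer can be reused across many reads. Below is a minimal sketch of that contract, built around a hypothetical need helper playing the same role as the one called above (an assumption, not the library's actual code):

package main

import "fmt"

// need returns a slice of length n, reusing buf's backing array when its
// capacity suffices; otherwise it allocates a fresh slice.
func need(n int, buf []byte) []byte {
	if cap(buf) >= n {
		return buf[:n]
	}
	return make([]byte, n)
}

func main() {
	scratch := make([]byte, 0, 64)

	small := need(16, scratch)  // fits: a sub-slice of scratch, no allocation
	large := need(128, scratch) // too big for scratch: freshly allocated

	fmt.Println(len(small), cap(small) == cap(scratch)) // 16 true
	fmt.Println(len(large), cap(large) >= 128)          // 128 true
}

This is also why passing a nil buf is valid: cap(nil) is 0, so every read simply allocates a fresh result slice.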
Example #3
// verifyUsed validates a used block at handle h and reports whether its
// content is compressed, its data length, the number of atoms it spans and,
// for relocated blocks, the relocation target. With fast set, the padding and
// content checks are skipped.
func (a *Allocator) verifyUsed(h, totalAtoms int64, tag byte, buf, ubuf []byte, log func(error) bool, fast bool) (compressed bool, dlen int, atoms, link int64, err error) {
	var (
		padding  int
		doff     int64
		padZeros [15]byte
		tailBuf  [16]byte
	)

	switch tag {
	default: // Short used
		dlen = int(tag)
		atoms = int64((dlen+1)/16) + 1
		padding = 15 - (dlen+1)%16
		doff = h2off(h) + 1
	case tagUsedLong:
		off := h2off(h) + 1
		var b2 [2]byte
		if err = a.read(b2[:], off); err != nil {
			return
		}

		dlen = m2n(int(b2[0])<<8 | int(b2[1]))
		atoms = int64((dlen+3)/16) + 1
		padding = 15 - (dlen+3)%16
		doff = h2off(h) + 3
	case tagUsedRelocated:
		dlen = 7
		atoms = 1
		padding = 7
		doff = h2off(h) + 1
	case tagFreeShort, tagFreeLong:
		panic("internal error")
	}

	if fast {
		if tag == tagUsedRelocated {
			dlen = 0
			if err = a.read(buf[:7], doff); err != nil {
				return
			}

			link = b2h(buf)
		}

		return false, dlen, atoms, link, nil
	}

	if ok := h+atoms-1 <= totalAtoms; !ok { // invalid last block
		err = &ErrILSEQ{Type: ErrVerifyUsedSpan, Off: h2off(h), Arg: atoms}
		log(err)
		return
	}

	tailsz := 1 + padding
	off := h2off(h) + 16*atoms - int64(tailsz)
	if err = a.read(tailBuf[:tailsz], off); err != nil {
		return false, 0, 0, 0, err
	}

	if ok := bytes.Equal(padZeros[:padding], tailBuf[:padding]); !ok {
		err = &ErrILSEQ{Type: ErrVerifyPadding, Off: h2off(h)}
		log(err)
		return
	}

	var cc byte
	switch cc = tailBuf[padding]; cc {
	default:
		err = &ErrILSEQ{Type: ErrTailTag, Off: h2off(h)}
		log(err)
		return
	case tagCompressed:
		compressed = true
		if tag == tagUsedRelocated {
			err = &ErrILSEQ{Type: ErrTailTag, Off: h2off(h)}
			log(err)
			return
		}

		fallthrough
	case tagNotCompressed:
		if err = a.read(buf[:dlen], doff); err != nil {
			return false, 0, 0, 0, err
		}
	}

	if cc == tagCompressed {
		if ubuf, err = zappy.Decode(ubuf, buf[:dlen]); err != nil || len(ubuf) > maxRq {
			err = &ErrILSEQ{Type: ErrDecompress, Off: h2off(h)}
			log(err)
			return
		}

		dlen = len(ubuf)
	}

	if tag == tagUsedRelocated {
		link = b2h(buf)
		if link == 0 {
			err = &ErrILSEQ{Type: ErrNullReloc, Off: h2off(h)}
			log(err)
			return
		}

		if link > totalAtoms { // invalid last block
			err = &ErrILSEQ{Type: ErrRelocBeyondEOF, Off: h2off(h), Arg: link}
			log(err)
			return
		}
	}

	return
}
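The block geometry that verifyUsed checks for a short used block is: 1 tag byte, dlen data bytes, zero padding, and a 1-byte tail tag, rounded up to whole 16-byte atoms. The standalone sketch below simply mirrors those formulas to make the layout easier to see:

package main

import "fmt"

func main() {
	// atoms and padding follow the short-used-block case in verifyUsed:
	// total size is 16*atoms = 1 (tag) + dlen (data) + padding + 1 (tail tag).
	for _, dlen := range []int{1, 14, 15, 17, 200} {
		atoms := (dlen+1)/16 + 1
		padding := 15 - (dlen+1)%16
		fmt.Printf("dlen=%3d atoms=%2d padding=%2d total=%3d bytes\n",
			dlen, atoms, padding, 16*atoms)
	}
}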
Example #4
// onUpdate is the connection's main event loop: it reads UDP packets
// (optionally zappy-decompressed), feeds them into KCP with optional FEC
// reconstruction, serves pending reads and writes, and drives periodic KCP
// updates until the connection is closed.
func (c *Conn) onUpdate() {
	recvChan := make(chan []byte)
	go func() {
		for {
			n, addr, err := c.conn.ReadFrom(c.tmp)
			//debug("want read!", n, addr, err)
			// Generic non-address related errors.
			if addr == nil && err != nil {
				if err.(net.Error).Timeout() {
					continue
				} else {
					break
				}
			}
			var b []byte
			if *bCompress {
				_b, _er := zappy.Decode(nil, c.tmp[:n])
				if _er != nil {
					log.Println("decompress fail", _er.Error())
					go c.Close()
				}
				//log.Println("decompress", len(_b), n)
				b = _b
			} else {
				b = make([]byte, n)
				copy(b, c.tmp[:n])
			}
			select {
			case recvChan <- b:
			case <-c.quit:
				return
			}
		}
	}()
	ping := make(chan struct{})
	pingC := 0

	updateChan := time.NewTicker(20 * time.Millisecond)
	waitList := [](chan bool){}
	recoverChan := make(chan bool)
	var waitRecvCache *cache
	go func() {
		select {
		case ping <- struct{}{}:
		case <-c.quit:
		}

	}()
	processRecv := func() {
		if waitRecvCache != nil {
			ca := *waitRecvCache
			const buffSize = CacheBuffSize
			for {
				hr := ikcp.Ikcp_recv(c.kcp, c.tmp2, buffSize)

				if hr > 0 {
					action := c.tmp2[0]
					if action == Data {
						waitRecvCache = nil
						copy(ca.b, c.tmp2[1:hr])
						hr--
						if c.decode != nil {
							d := c.decode(ca.b[:hr])
							copy(ca.b, d)
							hr = int32(len(d))
						}
						select {
						case ca.c <- int(hr):
						case <-c.quit:
						}
					} else {
						continue
					}
				}
				break
			}
		}
	}
out:
	for {
		select {
		case <-ping:
			pingC++
			if pingC >= 4 {
				pingC = 0
				go c.Ping()
				if c.fecR != nil {
					curr := time.Now().Unix()
					for id, info := range c.fecRCacheTbl {
						if curr >= info.overTime {
							delete(c.fecRCacheTbl, id)
							if c.fecRecvId <= id {
								c.fecRecvId = id + 1
							}
							//log.Println("timeout after del", id, len(c.fecRCacheTbl))
						}
					}
				}
			}
			if time.Now().Unix() > c.overTime {
				log.Println("overtime close", c.LocalAddr().String(), c.RemoteAddr().String())
				go c.Close()
			} else {
				time.AfterFunc(300*time.Millisecond, func() {
					select {
					case ping <- struct{}{}:
					case <-c.quit:
					}
				})
			}
		case cache := <-c.readChan:
			for {
				const buffSize = CacheBuffSize
				hr := ikcp.Ikcp_recv(c.kcp, c.tmp2, buffSize)
				if hr > 0 {
					action := c.tmp2[0]
					if action == Data {
						copy(cache.b, c.tmp2[1:hr])
						hr--
						if c.decode != nil {
							d := c.decode(cache.b[:hr])
							copy(cache.b, d)
							hr = int32(len(d))
						}
						select {
						case cache.c <- int(hr):
						case <-c.quit:
						}
					} else {
						continue
					}
				} else {
					waitRecvCache = &cache
				}
				break
			}
		case b := <-recvChan:
			c.overTime = time.Now().Unix() + 30
			if c.fecR != nil {
				if len(b) <= 7 {
					break
				}
				id := uint(int(b[2]) | (int(b[3]) << 8) | (int(b[4]) << 16) | (int(b[5]) << 24))
				var seq = uint(b[6])
				_len := int(b[0]) | (int(b[1]) << 8)
				//log.Println("recv chan", len(b), _len, id, seq, c.fecRecvId)
				if id < c.fecRecvId {
					//log.Println("drop id for noneed", id, seq)
					break
				}
				if seq < uint(c.fecDataShards) {
					ikcp.Ikcp_input(c.kcp, b[7:], _len)
					//log.Println("direct input udp", id, seq, _len)
				}
				if seq >= uint(c.fecDataShards+c.fecParityShards) {
					log.Println("-ds and -ps must be equal on both sides")
					go c.Close()
					break
				}
				tbl, have := c.fecRCacheTbl[id]
				if !have {
					tbl = &fecInfo{make([][]byte, c.fecDataShards+c.fecParityShards), time.Now().Unix() + 15}
					c.fecRCacheTbl[id] = tbl
				}
				if tbl.bytes[seq] != nil {
					//dup, drop
					break
				} else {
					tbl.bytes[seq] = b
				}
				count := 0
				reaL := 0
				for _, v := range tbl.bytes {
					if v != nil {
						count++
						if reaL < len(v) {
							reaL = len(v)
						}
					}
				}
				if count >= c.fecDataShards {
					markTbl := make(map[int]bool, len(tbl.bytes))
					for _seq, _b := range tbl.bytes {
						if _b != nil {
							markTbl[_seq] = true
						}
					}
					bNeedRebuild := false
					for i, v := range tbl.bytes {
						if i >= c.fecDataShards {
							bNeedRebuild = true
						}
						if v != nil {
							if len(v) < reaL {
								_b := make([]byte, reaL)
								copy(_b, v)
								tbl.bytes[i] = _b
							}
						}
					}
					if bNeedRebuild {
						er := (*c.fecR).Reconstruct(tbl.bytes)
						if er != nil {
							//log.Println("Reconstruct fail", er.Error())
						} else {
							//log.Println("Reconstruct ok, input", id)
							for i := 0; i < c.fecDataShards; i++ {
								if _, have := markTbl[i]; !have {
									_len := int(tbl.bytes[i][0]) | (int(tbl.bytes[i][1]) << 8)
									ikcp.Ikcp_input(c.kcp, tbl.bytes[i][7:], int(_len))
									//log.Println("fec input for mark ok", i, id, _len)
								}
							}
						}
					}
					delete(c.fecRCacheTbl, id)
					//log.Println("after del", id, len(c.fecRCacheTbl))
					if c.fecRecvId <= id {
						c.fecRecvId = id + 1
					}
				}
			} else {
				ikcp.Ikcp_input(c.kcp, b, len(b))
			}
			processRecv()
		case <-recoverChan:
			for _, r := range waitList {
				log.Println("recover writing data")
				select {
				case r <- true:
				case <-c.quit:
				}
			}
			waitList = [](chan bool){}
		case s := <-c.checkCanWrite:
			if !c.closed {
				if ikcp.Ikcp_waitsnd(c.kcp) > dataLimit {
					log.Println("wait for data limit")
					waitList = append(waitList, s)
					var f func()
					f = func() {
						n := ikcp.Ikcp_waitsnd(c.kcp)
						if n <= dataLimit/2 {
							select {
							case <-c.quit:
								log.Println("recover writing data quit")
							case recoverChan <- true:
							}
						} else {
							time.AfterFunc(40*time.Millisecond, f)
						}
					}
					time.AfterFunc(20*time.Millisecond, f)
					log.Println("wait for data limitover")
				} else {
					select {
					case s <- true:
					case <-c.quit:
					}
				}
			}
		case s := <-c.sendChan:
			b := []byte(s)
			ikcp.Ikcp_send(c.kcp, b, len(b))
		case <-updateChan.C:
			if c.closed {
				break out
			}
			ikcp.Ikcp_update(c.kcp, uint32(iclock()))
		case <-c.quit:
			break out
		}
	}
	updateChan.Stop()
}