Example #1
// Get key-value pairs.
func (ht *HashTable) Get(key, limit uint64) (keys, vals []uint64) {
	// This function is partially inlined in chunkcol.go
	var count, entry, bucket uint64 = 0, 0, ht.HashKey(key)
	if limit == 0 {
		keys = make([]uint64, 0, 10)
		vals = make([]uint64, 0, 10)
	} else {
		keys = make([]uint64, 0, limit)
		vals = make([]uint64, 0, limit)
	}
	for {
		entryAddr := bucket*BUCKET_SIZE + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
		if ht.File.Buf[entryAddr] == ENTRY_VALID {
			if entryKey == key {
				keys = append(keys, entryKey)
				vals = append(vals, entryVal)
				if count++; count == limit {
					return
				}
			}
		} else if entryKey == 0 && entryVal == 0 {
			return
		}
		if entry++; entry == PER_BUCKET {
			entry = 0
			if bucket = ht.NextBucket(bucket); bucket == 0 {
				return
			}
		}
	}
}
Example #2
// Return all entries in the hash table.
func (ht *HashTable) GetAll(limit uint64) (keys, vals []uint64) {
	prealloc := limit
	if prealloc == 0 {
		prealloc = INITIAL_BUCKETS * PER_BUCKET / 2
	}
	keys = make([]uint64, 0, prealloc)
	vals = make([]uint64, 0, prealloc)
	counter := uint64(0)
	for head := uint64(0); head < uint64(math.Pow(2, float64(HASH_BITS))); head++ {
		var entry, bucket uint64 = 0, head
		for {
			entryAddr := bucket*BUCKET_SIZE + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
			entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
			entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
			if ht.File.Buf[entryAddr] == ENTRY_VALID {
				counter++
				keys = append(keys, entryKey)
				vals = append(vals, entryVal)
				if counter == limit {
					return
				}
			} else if entryKey == 0 && entryVal == 0 {
				break
			}
			if entry++; entry == PER_BUCKET {
				entry = 0
				if bucket = ht.NextBucket(bucket); bucket == 0 {
					return
				}
			}
		}
	}
	return
}
Example #3
// Uint64 decodes a uint64 from the buffer
func (d *Dec) Uint64() uint64 {
	if d.err != nil {
		return 0
	}
	if d.i >= len(d.decbuf) || d.i < 0 /*overflow*/ {
		d.err = errNoDecData
		return 0
	}
	d.lng = int(d.decbuf[d.i])
	// if d.lng <= 0 {
	// 	d.err = errDecode
	// 	return 0
	// }
	d.i++
	d.lst = d.i + d.lng
	if d.lst > len(d.decbuf) {
		d.err = errDecodeNotEnoughtData
		return 0
	}
	var x uint64
	var i int
	if d.lst == len(d.decbuf) {
		x, i = binary.Uvarint(d.decbuf[d.i:])
	} else {
		x, i = binary.Uvarint(d.decbuf[d.i:d.lst])
	}
	if i <= 0 {
		d.err = errDecode
		return 0
	}
	d.i = d.lst
	return x
}
Example #4
func (d *int64Decoder) decodeRLE() {
	if len(d.bytes) == 0 {
		return
	}

	var i, n int

	// Next 8 bytes is the starting value
	first := binary.BigEndian.Uint64(d.bytes[i : i+8])
	i += 8

	// Next 1-10 bytes is the delta value
	value, n := binary.Uvarint(d.bytes[i:])

	i += n

	// Last 1-10 bytes is how many times the value repeats
	count, n := binary.Uvarint(d.bytes[i:])

	// Store the first value and delta value so we do not need to allocate
	// a large values slice.  We can compute the value at position d.i on
	// demand.
	d.rleFirst = first
	d.rleDelta = value
	d.n = int(count) + 1
	d.i = 0

	// We've processed all the bytes
	d.bytes = nil
}
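The layout described in the comments above (8 big-endian bytes for the first value, a uvarint delta, a uvarint repeat count) also pins down the write side. A minimal sketch of a matching encoder, assuming the usual encoding/binary import; the function name and signature are illustrative, not from the original project:

// encodeRLE is a hypothetical counterpart to decodeRLE above: 8 big-endian
// bytes for the starting value, then the delta and the repeat count as
// uvarints. count is the total number of values and must be >= 1, because
// the decoder reports int(storedCount) + 1 values.
func encodeRLE(first, delta uint64, count int) []byte {
	buf := make([]byte, 8+2*binary.MaxVarintLen64)
	binary.BigEndian.PutUint64(buf[:8], first)
	i := 8
	i += binary.PutUvarint(buf[i:], delta)
	i += binary.PutUvarint(buf[i:], uint64(count-1))
	return buf[:i]
}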
Example #5
// dec decodes the encoding s into z.
func (z *nstate) dec(s string) {
	b := []byte(s)
	i, n := binary.Uvarint(b)
	if n <= 0 {
		bug()
	}
	b = b[n:]
	z.partial = rune(i)
	i, n = binary.Uvarint(b)
	if n <= 0 {
		bug()
	}
	b = b[n:]
	z.flag = flags(i)
	z.q.Reset()
	last := ^uint32(0)
	for len(b) > 0 {
		i, n = binary.Uvarint(b)
		if n <= 0 {
			bug()
		}
		b = b[n:]
		last += uint32(i)
		z.q.Add(last)
	}
}
Example #6
func ReadPacket(conn io.Reader) (uint64, []byte, error) {
	buf := make([]byte, 65536)
	size := make([]byte, 256)
	tmp := make([]byte, 1)
	sizen := 0
	n := 0
	length := -1
	for {
		read, err := conn.Read(tmp)
		if read > 0 && err == nil {
			buf[n] = tmp[0]
			n++
			if length >= 0 {
				if n >= length {
					break
				}
			} else if (tmp[0] & 0x80) == 0 {
				len2, _ := binary.Uvarint(buf[0:n])
				length = int(len2)
				copy(size, buf[:n])
				sizen = n
				n = 0
			}
		} else if err != nil {
			return 0, append(size[:sizen], buf[:n]...), err
		}
	}
	id, n2 := binary.Uvarint(buf)
	return id, buf[n2:n], nil
}
Example #7
// Return the physical ID of document specified by primary key ID.
func (col *ChunkCol) GetPhysicalID(id uint64) (physID uint64, err error) {
	// This function is called so often that we better inline the hash table key scan.
	var entry, bucket uint64 = 0, col.PK.HashKey(id)
	for {
		entryAddr := bucket*chunkfile.BUCKET_SIZE + chunkfile.BUCKET_HEADER_SIZE + entry*chunkfile.ENTRY_SIZE
		entryKey, _ := binary.Uvarint(col.PK.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(col.PK.File.Buf[entryAddr+11 : entryAddr+21])
		if col.PK.File.Buf[entryAddr] == chunkfile.ENTRY_VALID {
			if entryKey == id {
				var docMap map[string]interface{}
				if col.Read(entryVal, &docMap) == nil {
					if err == nil && uid.PKOfDoc(docMap, false) == id {
						return entryVal, nil
					}
				}
			}
		} else if entryKey == 0 && entryVal == 0 {
			return 0, errors.New(fmt.Sprintf("Cannot find physical ID of %d", id))
		}
		if entry++; entry == chunkfile.PER_BUCKET {
			entry = 0
			if bucket = col.PK.NextBucket(bucket); bucket == 0 {
				return 0, errors.New(fmt.Sprintf("Cannot find physical ID of %d", id))
			}
		}
	}
	return 0, errors.New(fmt.Sprintf("Cannot find physical ID of %s", id))
}
Example #8
func UnmarshallHandshakePacket(buf []byte, addr *net.UDPAddr) (packet *HandshakePacket, err error) {
	if buf[0] != byte(HANDSHAKE_PACKET_TYPE) {
		err = fmt.Errorf("Invalid packet type %d", buf[0])
		return
	}
	tlvRecordLength, _ := binary.Uvarint(buf[2:4])
	header := &HandshakeHeader{
		tlvRecordLength: uint16(tlvRecordLength),
	}
	packet = &HandshakePacket{
		header:     header,
		peerAddr:   addr,
		TLVRecords: make(map[TLVRecordType]*TLVRecord),
	}
	for pointer := 4; pointer < int(tlvRecordLength); {
		typeValue, _ := binary.Uvarint(buf[pointer : pointer+2])
		lengthValue, _ := binary.Uvarint(buf[pointer+2 : pointer+4])
		bodyBuf := make([]byte, lengthValue)
		copy(bodyBuf, buf[pointer+4:(pointer+4+int(lengthValue))])
		record := &TLVRecord{
			Type:   TLVRecordType(typeValue),
			Length: uint16(lengthValue),
			Body:   bodyBuf,
		}
		packet.TLVRecords[record.Type] = record
		pointer = pointer + int(lengthValue) + 4
	}
	return
}
Example #9
File: index.go Project: s1na/fetch
func readMetaData(inPath string) {
	inFile, err := os.Open(inPath)
	if err != nil {
		panic(err)
	}
	defer inFile.Close()
	var buf []byte = make([]byte, 8)
	var posBuf []byte = make([]byte, 8)
	inFile.Read(buf)
	tmp, _ := binary.Uvarint(buf[:4])
	totalTerms = int(tmp)
	tmp, _ = binary.Uvarint(buf[4:])
	totalDocs = int(tmp)

	buf = make([]byte, 4)
	docInfos = make([]*DocInfo, totalDocs)
	for i := 0; i < totalDocs; i++ {
		inFile.Read(buf)
		tmp, _ = binary.Uvarint(buf)
		inFile.Read(posBuf)
		pos, _ := binary.Uvarint(posBuf)
		docInfos[i] = &DocInfo{length: int(tmp), pos: int64(pos)}

		docLenAvg += float64(tmp)
	}
	docLenAvg = docLenAvg / float64(totalDocs)
}
Example #10
// Remove specific key-value pair.
func (ht *HashTable) Remove(key, limit uint64, filter func(uint64, uint64) bool) {
	var count, entry, bucket uint64 = 0, 0, ht.hashKey(key)
	region := bucket / HASH_TABLE_REGION_SIZE
	mutex := ht.regionRWMutex[region]
	mutex.Lock()
	for {
		entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
		if ht.File.Buf[entryAddr] == ENTRY_VALID {
			if entryKey == key && filter(entryKey, entryVal) {
				ht.File.Buf[entryAddr] = ENTRY_INVALID
				if count++; count == limit {
					mutex.Unlock()
					return
				}
			}
		} else if entryKey == 0 && entryVal == 0 {
			mutex.Unlock()
			return
		}
		if entry++; entry == ht.PerBucket {
			mutex.Unlock()
			entry = 0
			if bucket = ht.nextBucket(bucket); bucket == 0 {
				return
			}
			region = bucket / HASH_TABLE_REGION_SIZE
			mutex = ht.regionRWMutex[region]
			mutex.Lock()
		}
	}
}
Example #11
func compareByOrigin(path1, path2 *Path) *Path {
	//	Select the best path based on origin attribute.
	//
	//	IGP is preferred over EGP; EGP is preferred over Incomplete.
	//	If both paths have same origin, we return None.
	log.Debugf("enter compareByOrigin")
	_, attribute1 := path1.getPathAttr(bgp.BGP_ATTR_TYPE_ORIGIN)
	_, attribute2 := path2.getPathAttr(bgp.BGP_ATTR_TYPE_ORIGIN)

	if attribute1 == nil || attribute2 == nil {
		log.WithFields(log.Fields{
			"Topic":   "Table",
			"Key":     "compareByOrigin",
			"Origin1": attribute1,
			"Origin2": attribute2,
		}).Error("can't compare origin because it's not present")
		return nil
	}

	origin1, n1 := binary.Uvarint(attribute1.(*bgp.PathAttributeOrigin).Value)
	origin2, n2 := binary.Uvarint(attribute2.(*bgp.PathAttributeOrigin).Value)
	log.Debugf("compareByOrigin -- origin1: %d(%d), origin2: %d(%d)", origin1, n1, origin2, n2)

	// If both paths have same origins
	if origin1 == origin2 {
		return nil
	} else if origin1 < origin2 {
		return path1
	} else {
		return path2
	}
}
Example #12
func (d *decoder) decodeRLE(b []byte) {
	var i, n int

	// Lower 4 bits hold the base-10 exponent so we can scale the values back up
	mod := int64(math.Pow10(int(b[i] & 0xF)))
	i += 1

	// Next 8 bytes is the starting timestamp
	first := binary.BigEndian.Uint64(b[i : i+8])
	i += 8

	// Next 1-10 bytes is our (scaled down by factor of 10) run length values
	value, n := binary.Uvarint(b[i:])

	// Scale the value back up
	value *= uint64(mod)
	i += n

	// Last 1-10 bytes is how many times the value repeats
	count, n := binary.Uvarint(b[i:])

	// Reconstruct the original values now
	deltas := make([]uint64, count)
	for i := range deltas {
		deltas[i] = value
	}

	// Reverse the delta-encoding
	deltas[0] = first
	for i := 1; i < len(deltas); i++ {
		deltas[i] = deltas[i-1] + deltas[i]
	}

	d.ts = deltas
}
Example #13
// Pull a value from the buffer and put it into a reflective Value.
func (de *decoder) value(wiretype int, buf []byte,
	val reflect.Value) ([]byte, error) {

	// Break out the value from the buffer based on the wire type
	var v uint64
	var n int
	var vb []byte
	switch wiretype {
	case 0: // varint
		v, n = binary.Uvarint(buf)
		if n <= 0 {
			return nil, errors.New("bad protobuf varint value")
		}
		buf = buf[n:]

	case 5: // 32-bit
		if len(buf) < 4 {
			return nil, errors.New("bad protobuf 64-bit value")
		}
		v = uint64(buf[0]) |
			uint64(buf[1])<<8 |
			uint64(buf[2])<<16 |
			uint64(buf[3])<<24
		buf = buf[4:]

	case 1: // 64-bit
		if len(buf) < 8 {
			return nil, errors.New("bad protobuf 64-bit value")
		}
		v = uint64(buf[0]) |
			uint64(buf[1])<<8 |
			uint64(buf[2])<<16 |
			uint64(buf[3])<<24 |
			uint64(buf[4])<<32 |
			uint64(buf[5])<<40 |
			uint64(buf[6])<<48 |
			uint64(buf[7])<<56
		buf = buf[8:]

	case 2: // length-delimited
		v, n = binary.Uvarint(buf)
		if n <= 0 || v > uint64(len(buf)-n) {
			return nil, errors.New(
				"bad protobuf length-delimited value")
		}
		vb = buf[n : n+int(v)]
		buf = buf[n+int(v):]

	default:
		return nil, errors.New("unknown protobuf wire-type")
	}

	// We've gotten the value out of the buffer,
	// now put it into the appropriate reflective Value.
	if err := de.putvalue(wiretype, val, v, vb); err != nil {
		return nil, err
	}

	return buf, nil
}
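The wiretype passed into value above normally comes from the field tag that precedes each value on the wire: the tag is itself a uvarint whose low three bits are the wire type and whose remaining bits are the field number. A minimal sketch of that split, assuming the standard encoding/binary and errors imports; readTag is an illustrative helper name, not part of the decoder above:

// readTag decodes one protobuf field tag (tag = fieldNum<<3 | wiretype) and
// returns the remaining buffer, so the result can be fed to value().
func readTag(buf []byte) (fieldNum int, wiretype int, rest []byte, err error) {
	tag, n := binary.Uvarint(buf)
	if n <= 0 {
		return 0, 0, nil, errors.New("bad protobuf field tag")
	}
	return int(tag >> 3), int(tag & 7), buf[n:], nil
}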
Example #14
func ReadPong(rd io.Reader) (*Pong, error) {
	r := bufio.NewReader(rd)
	nl, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, errors.New("could not read length")
	}

	pl := make([]byte, nl)
	_, err = io.ReadFull(r, pl)
	if err != nil {
		return nil, errors.New("could not read length given by length header")
	}

	// packet id
	_, n := binary.Uvarint(pl)
	if n <= 0 {
		return nil, errors.New("could not read packet id")
	}

	// string varint
	_, n2 := binary.Uvarint(pl[n:])
	if n2 <= 0 {
		return nil, errors.New("could not read string varint")
	}

	var pong Pong
	if err := json.Unmarshal(pl[n+n2:], &pong); err != nil {
		return nil, errors.New("could not read pong json")
	}

	return &pong, nil
}
Example #15
func (d *TimeDecoder) decodeRLE(b []byte) {
	var i, n int

	// Lower 4 bits hold the base-10 exponent so we can scale the values back up
	mod := int64(math.Pow10(int(b[i] & 0xF)))
	i++

	// Next 8 bytes is the starting timestamp
	first := binary.BigEndian.Uint64(b[i : i+8])
	i += 8

	// Next 1-10 bytes is our (scaled down by factor of 10) run length values
	value, n := binary.Uvarint(b[i:])

	// Scale the value back up
	value *= uint64(mod)
	i += n

	// Last 1-10 bytes is how many times the value repeats
	count, _ := binary.Uvarint(b[i:])

	d.v = int64(first - value)
	d.rleDelta = int64(value)

	d.i = -1
	d.n = int(count)
}
Example #16
// Return all entries in the hash table.
func (ht *HashTable) GetAll(limit uint64) (keys, vals []uint64) {
	keys = make([]uint64, 0, 100)
	vals = make([]uint64, 0, 100)
	counter := uint64(0)
	ht.File.Sync.RLock()
	defer ht.File.Sync.RUnlock()
	for head := uint64(0); head < uint64(math.Pow(2, float64(ht.HashBits))); head++ {
		var entry, bucket uint64 = 0, head
		for {
			entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
			entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
			entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
			if ht.File.Buf[entryAddr] == ENTRY_VALID {
				counter++
				keys = append(keys, entryKey)
				vals = append(vals, entryVal)
				if counter == limit {
					return
				}
			} else if entryKey == 0 && entryVal == 0 {
				break
			}
			if entry++; entry == ht.PerBucket {
				entry = 0
				if bucket = ht.nextBucket(bucket); bucket == 0 {
					return
				}
			}
		}
	}
	return
}
Example #17
func (p pkg) String() string {
	stream, b := binary.Uvarint(p[offset1:offset2])
	if b <= 0 {
		return ""
	}
	size, b := binary.Uvarint(p[offset2:offset3])
	if b <= 0 {
		return ""
	}
	if stream != 0 {
		end := offset3 + 10
		if end > len(p) {
			end = len(p)
		}
		return fmt.Sprintf("Pkg: Stream: %v Size: %v (%v) - %v", stream, size, len(p)-offset3, []byte(p[offset3:end]))
	}
	cmd, b := binary.Uvarint(p[offset3:offset4])
	if b <= 0 {
		return ""
	}
	param, b := binary.Uvarint(p[offset4:offset5])
	if b <= 0 {
		return ""
	}
	return fmt.Sprintf("Pkg: Stream: %v Size: %v Cmd: %v Param: %v", stream, size, ctrlCommands(cmd), param)
}
Example #18
// Get key-value pairs.
func (ht *HashTable) Get(key, limit uint64, filter func(uint64, uint64) bool) (keys, vals []uint64) {
	var count, entry, bucket uint64 = 0, 0, ht.hashKey(key)
	if limit == 0 {
		keys = make([]uint64, 0, 10)
		vals = make([]uint64, 0, 10)
	} else {
		keys = make([]uint64, 0, limit)
		vals = make([]uint64, 0, limit)
	}
	ht.File.Sync.RLock()
	defer ht.File.Sync.RUnlock()
	for {
		entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
		if ht.File.Buf[entryAddr] == ENTRY_VALID {
			if entryKey == key && filter(entryKey, entryVal) {
				keys = append(keys, entryKey)
				vals = append(vals, entryVal)
				if count++; count == limit {
					return
				}
			}
		} else if entryKey == 0 && entryVal == 0 {
			return
		}
		if entry++; entry == ht.PerBucket {
			entry = 0
			if bucket = ht.nextBucket(bucket); bucket == 0 {
				return
			}
		}
	}
}
Example #19
// Remove specific key-value pair.
func (ht *HashTable) Remove(key, limit uint64, filter func(uint64, uint64) bool) {
	var count, entry, bucket uint64 = 0, 0, ht.hashKey(key)
	ht.File.Sync.Lock()
	defer ht.File.Sync.Unlock()
	for {
		entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
		if ht.File.Buf[entryAddr] == ENTRY_VALID {
			if entryKey == key && filter(entryKey, entryVal) {
				ht.File.Buf[entryAddr] = ENTRY_INVALID
				if count++; count == limit {
					return
				}
			}
		} else if entryKey == 0 && entryVal == 0 {
			return
		}
		if entry++; entry == ht.PerBucket {
			entry = 0
			if bucket = ht.nextBucket(bucket); bucket == 0 {
				return
			}
		}
	}
}
Example #20
// rec is: varint(stringId) varint(userId) varint(langId) string
func (s *EncoderDecoderState) decodeNewTranslation(rec []byte, time time.Time) error {
	var stringId, userId, langId uint64
	var n int

	langId, n = binary.Uvarint(rec)
	panicIf(n <= 0 || n == len(rec), "decodeNewTranslation() langId")
	panicIf(!s.validLangId(int(langId)), "decodeNewTranslation(): !s.validLangId()")
	rec = rec[n:]

	userId, n = binary.Uvarint(rec)
	panicIf(n == len(rec), "decodeNewTranslation() userId")
	panicIf(!s.validUserId(int(userId)), "decodeNewTranslation(): !s.validUserId()")
	rec = rec[n:]

	stringId, n = binary.Uvarint(rec)
	panicIf(n == 0 || n == len(rec), "decodeNewTranslation() stringId")
	panicIf(!s.validStringId(int(stringId)), fmt.Sprintf("decodeNewTranslation(): !s.validStringId(%v)", stringId))
	rec = rec[n:]

	translation := string(rec)
	s.addTranslationRec(int(langId), int(userId), int(stringId), translation, time)
	if logging {
		fmt.Printf("decodeNewTranslation(): %v, %v, %v, %s\n", langId, userId, stringId, translation)
	}
	return nil
}
Example #21
func CountTimestamps(b []byte) int {
	if len(b) == 0 {
		return 0
	}

	// Encoding type is stored in the 4 high bits of the first byte
	encoding := b[0] >> 4
	switch encoding {
	case timeUncompressed:
		// Uncompressed timestamps are just 8 bytes each
		return len(b[1:]) / 8
	case timeCompressedRLE:
		// First 9 bytes are the starting timestamp and scaling factor, skip over them
		i := 9
		// Next 1-10 bytes is our (scaled down by factor of 10) run length values
		_, n := binary.Uvarint(b[9:])
		i += n
		// Last 1-10 bytes is how many times the value repeats
		count, _ := binary.Uvarint(b[i:])
		return int(count)
	case timeCompressedPackedSimple:
		// First 9 bytes are the starting timestamp and scaling factor, skip over them
		dec := simple8b.NewDecoder(b[9:])
		count := 1
		// Count the deltas
		for dec.Next() {
			count++
		}
		return count
	default:
		return 0
	}
}
Example #22
// Remove specific key-value pair.
func (ht *HashTable) Remove(key, val uint64) {
	var entry, bucket uint64 = 0, ht.hashKey(key)
	region := bucket / HASH_TABLE_REGION_SIZE
	mutex := ht.regionRWMutex[region]
	mutex.Lock()
	for {
		entryAddr := bucket*ht.BucketSize + BUCKET_HEADER_SIZE + entry*ENTRY_SIZE
		if entryAddr > ht.File.Append-ENTRY_SIZE {
			mutex.Unlock()
			return
		}
		entryKey, _ := binary.Uvarint(ht.File.Buf[entryAddr+1 : entryAddr+11])
		entryVal, _ := binary.Uvarint(ht.File.Buf[entryAddr+11 : entryAddr+21])
		if ht.File.Buf[entryAddr] == ENTRY_VALID {
			if entryKey == key && entryVal == val {
				ht.File.Buf[entryAddr] = ENTRY_INVALID
				mutex.Unlock()
				return
			}
		} else if entryKey == 0 && entryVal == 0 {
			mutex.Unlock()
			return
		}
		if entry++; entry == ht.PerBucket {
			mutex.Unlock()
			entry = 0
			if bucket = ht.nextBucket(bucket); bucket == 0 || bucket >= ht.File.Append-BUCKET_HEADER_SIZE {
				return
			}
			region = bucket / HASH_TABLE_REGION_SIZE
			mutex = ht.regionRWMutex[region]
			mutex.Lock()
		}
	}
}
Example #23
func client(addr, name string, hangie bool) {
	/*Starts a client, initiates a connection*/
	conn, err := net.Dial(network, addr+port)
	var succ int
	if err != nil {
		fmt.Println("My name is", name, "I couldn't join the server. I am leaving :(")
		return
	}
	buffer := make([]byte, buffer_size)
	/*First we need to send our name and receive number of clients running*/
	copy(buffer, []byte(name))
	_, err = write_deadline(conn, waiting_time, buffer)
	if nil != err {
		log.Println("Failed to send my name to server")
	}
	_, err = read_deadline(conn, waiting_time, buffer)
	if nil != err {
		log.Println("Failed to receive number of clients")
	} else {
		num_clients, succ := binary.Uvarint(buffer)
		if succ > 0 {
			fmt.Println("My name is", name, num_clients, "clients were served including me")
		}
	}
	/*Now we are sending some number of requests*/
	if !hangie { //good client
		for j := 0; j < 2*max_requests; j++ {
			buffer = make([]byte, buffer_size)
			number := uint64(rand.Uint32() % 10000)
			fmt.Println("My name is", name, "I am sending number", number, "on attempt", j)
			binary.PutUvarint(buffer, number)
			_, err = write_deadline(conn, waiting_time, buffer)
			if nil != err {
				log.Println("Failed to write to server")
				continue //we hope to recover in the future
			}
		}
		for j := 0; j < 2*max_requests; j++ {
			_, err = read_deadline(conn, waiting_time, buffer)
			if nil != err {
				log.Println("This is", name, "Failed to read from server on", j, "attempt")
				continue //we hope to recover in the future
			}
			var number uint64
			number, succ = binary.Uvarint(buffer)
			if succ < 1 {
				fmt.Println("My name is", name, "I failed to get a sensible answer from server on attempt", j, "!")
			} else {
				fmt.Println("My name is", name, "I have got the number", number, "on attempt", j)
			}
		}
	} else { //terrible client, deserving to be dropped
		for {
			//fmt.Println("My name is", name, "I am trying to hang the server")
			time.Sleep(time.Second)
		}
	}
	defer conn.Close()
}
Example #24
// decodeBlockHandle returns the block handle encoded at the start of src, as
// well as the number of bytes it occupies. It returns zero if given invalid
// input.
func decodeBlockHandle(src []byte) (blockHandle, int) {
	offset, n := binary.Uvarint(src)
	length, m := binary.Uvarint(src[n:])
	if n == 0 || m == 0 {
		return blockHandle{}, 0
	}
	return blockHandle{offset, length}, n + m
}
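decodeBlockHandle relies on the fact that the second return value is binary.Uvarint's only error signal: n > 0 is the number of bytes consumed, n == 0 means the buffer was too short, and n < 0 means the value overflowed 64 bits. A small checked-read sketch along the same lines, assuming the standard encoding/binary and errors imports; readUvarint is an illustrative name, not an API of the package above:

// readUvarint turns Uvarint's n <= 0 results into explicit errors instead of
// silently yielding zero values.
func readUvarint(buf []byte) (uint64, []byte, error) {
	val, n := binary.Uvarint(buf)
	switch {
	case n > 0:
		return val, buf[n:], nil
	case n == 0:
		return 0, buf, errors.New("uvarint: buffer too small")
	default:
		return 0, buf, errors.New("uvarint: value overflows 64 bits")
	}
}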
Example #25
// Process incoming UDP packet.
// ConnListen'es synchronization channel used to tell him that he is
// free to receive new packets. Authenticated and decrypted packets
// will be written to the interface immediately (except heartbeat ones).
func (p *Peer) PktProcess(data []byte, tap io.Writer, ready chan struct{}) bool {
	p.size = len(data)
	copy(p.buf, Emptiness)
	copy(p.tag[:], data[p.size-poly1305.TagSize:])
	copy(p.buf[S20BS:], data[NonceSize:p.size-poly1305.TagSize])
	salsa20.XORKeyStream(
		p.buf[:S20BS+p.size-poly1305.TagSize],
		p.buf[:S20BS+p.size-poly1305.TagSize],
		data[:NonceSize],
		p.Key,
	)
	copy(p.keyAuth[:], p.buf[:SSize])
	if !poly1305.Verify(p.tag, data[:p.size-poly1305.TagSize], p.keyAuth) {
		ready <- struct{}{}
		p.FramesUnauth++
		return false
	}

	// Check if received nonce is known to us in either of two buckets.
	// If yes, then this is ignored duplicate.
	// Check from the oldest bucket, as in most cases this will result
	// in constant time check.
	// If Bucket0 is filled, then it becomes Bucket1.
	p.NonceCipher.Decrypt(p.buf, data[:NonceSize])
	ready <- struct{}{}
	p.nonceRecv, _ = binary.Uvarint(p.buf[:NonceSize])
	if _, p.nonceFound = p.nonceBucket1[p.nonceRecv]; p.nonceFound {
		p.FramesDup++
		return false
	}
	if _, p.nonceFound = p.nonceBucket0[p.nonceRecv]; p.nonceFound {
		p.FramesDup++
		return false
	}
	p.nonceBucket0[p.nonceRecv] = struct{}{}
	p.nonceBucketN++
	if p.nonceBucketN == NonceBucketSize {
		p.nonceBucket1 = p.nonceBucket0
		p.nonceBucket0 = make(map[uint64]struct{}, NonceBucketSize)
		p.nonceBucketN = 0
	}

	p.FramesIn++
	p.BytesIn += int64(p.size)
	p.LastPing = time.Now()
	p.NonceRecv = p.nonceRecv
	p.pktSize, _ = binary.Uvarint(p.buf[S20BS : S20BS+PktSizeSize])
	if p.pktSize == 0 {
		p.HeartbeatRecv++
		return true
	}
	p.frame = p.buf[S20BS+PktSizeSize : S20BS+PktSizeSize+p.pktSize]
	p.BytesPayloadIn += int64(p.pktSize)
	tap.Write(p.frame)
	return true
}
Example #26
func (p *bInfo) decodeFrom(b []byte) (int, error) {
	var n, m int
	p.offset, n = binary.Uvarint(b)
	if n > 0 {
		p.size, m = binary.Uvarint(b[n:])
	}
	if n <= 0 || m <= 0 {
		return 0, errors.ErrCorrupt("bad block handle")
	}
	return n + m, nil
}
Example #27
func (s *cpuSample) Load() (err error) {
	f, err := os.OpenFile(cachePath, os.O_RDONLY, 0660)
	if err != nil {
		return err
	}
	buf := make([]byte, 24)
	f.Read(buf)
	s.user, _ = binary.Uvarint(buf)
	s.system, _ = binary.Uvarint(buf[8:])
	s.idle, _ = binary.Uvarint(buf[16:])
	return f.Close()
}
Example #28
File: key.go Project: nesv/salve
func BlurKeyHash(hash []byte) (i uint64, err error) {
	if len(hash) != 20 {
		err = ErrBadHashSize
		return
	}

	a, _ := binary.Uvarint(hash[0:8])
	b, _ := binary.Uvarint(hash[8:16])
	c, _ := binary.Uvarint(hash[16:20])

	i = a ^ b ^ c
	return
}
Example #29
func Patch(old, patch []byte) ([]byte, error) {
	// The header is two varints for old size and new size.
	sz1, n1 := binary.Uvarint(patch)
	if n1 >= len(patch) {
		return nil, errCorruptedDeltaHeader
	}
	sz2, n2 := binary.Uvarint(patch[n1:])

	if sz1 != uint64(len(old)) {
		return nil, errOldVersionSizeMismatch
	}
	newer := make([]byte, 0, sz2)

	p := n1 + n2
	for p < len(patch) {
		b := patch[p]
		p++
		if b&0x80 != 0 {
			// copy some data from old.
			var offset, length uint32
			for i := uint(0); i < 4; i++ {
				if b&(1<<i) != 0 {
					offset |= uint32(patch[p]) << (8 * i)
					p++
				}
			}
			for i := uint(0); i < 3; i++ {
				if b&(0x10<<i) != 0 {
					length |= uint32(patch[p]) << (8 * i)
					p++
				}
			}
			if length == 0 {
				length = 1 << 16
			}
			// TODO: guard against index panics.
			newer = append(newer, old[int64(offset):int64(offset)+int64(length)]...)
		} else if b != 0 {
			// copy some data from patch
			newer = append(newer, patch[p:p+int(b)]...)
			p += int(b)
		} else {
			return nil, errUnexpectedNulByte
		}
	}

	if uint64(len(newer)) != sz2 {
		return nil, errNewVersionSizeMismatch
	}
	return newer, nil
}
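Because the header is just two uvarints and a literal insert is a nonzero command byte equal to the number of literal bytes that follow, a whole-content replacement patch can be built by hand. A usage sketch under those assumptions (both sizes here are below 0x80, so each encodes as a single varint byte); it is not taken from the original package:

	old := []byte("hello")
	replacement := []byte("world!")
	// Header: Uvarint(len(old)), Uvarint(len(replacement)); then one
	// literal-insert command byte carrying the replacement bytes.
	patch := []byte{byte(len(old)), byte(len(replacement)), byte(len(replacement))}
	patch = append(patch, replacement...)
	updated, err := Patch(old, patch)
	if err != nil || string(updated) != "world!" {
		panic("unexpected patch result")
	}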
Example #30
func UnmarshalIdRefsBunch2(buf []byte, idRefs []element.IdRefs) []element.IdRefs {
	length, n := binary.Uvarint(buf)
	if n <= 0 {
		return nil
	}

	offset := n

	if uint64(cap(idRefs)) < length {
		idRefs = make([]element.IdRefs, length)
	} else {
		idRefs = idRefs[:length]
	}

	last := int64(0)
	for i := 0; uint64(i) < length; i++ {
		idRefs[i].Id, n = binary.Varint(buf[offset:])
		if n <= 0 {
			panic("no data")
		}
		offset += n
		idRefs[i].Id += last
		last = idRefs[i].Id
	}
	var numRefs uint64
	for i := 0; uint64(i) < length; i++ {
		numRefs, n = binary.Uvarint(buf[offset:])
		if n <= 0 {
			panic("no data")
		}
		offset += n
		if uint64(cap(idRefs[i].Refs)) < numRefs {
			idRefs[i].Refs = make([]int64, numRefs)
		} else {
			idRefs[i].Refs = idRefs[i].Refs[:numRefs]
		}
	}
	last = 0
	for idIdx := 0; uint64(idIdx) < length; idIdx++ {
		for refIdx := 0; refIdx < len(idRefs[idIdx].Refs); refIdx++ {
			idRefs[idIdx].Refs[refIdx], n = binary.Varint(buf[offset:])
			if n <= 0 {
				panic("no data")
			}
			offset += n
			idRefs[idIdx].Refs[refIdx] += last
			last = idRefs[idIdx].Refs[refIdx]
		}
	}
	return idRefs
}
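The decoder above implies the matching write layout: a uvarint entry count, the delta-encoded Ids as signed varints, one uvarint ref count per entry, then the delta-encoded refs with the running delta carried across all entries. A minimal encoder sketch under the same assumptions about element.IdRefs and the encoding/binary import; the function name is illustrative and not from the original project:

// marshalIdRefsBunch2 is a hypothetical inverse of UnmarshalIdRefsBunch2:
// entry count, delta-encoded Ids, per-entry ref counts, delta-encoded refs.
func marshalIdRefsBunch2(idRefs []element.IdRefs) []byte {
	tmp := make([]byte, binary.MaxVarintLen64)
	buf := make([]byte, 0, (len(idRefs)+1)*binary.MaxVarintLen64)

	buf = append(buf, tmp[:binary.PutUvarint(tmp, uint64(len(idRefs)))]...)

	last := int64(0)
	for _, ir := range idRefs {
		buf = append(buf, tmp[:binary.PutVarint(tmp, ir.Id-last)]...)
		last = ir.Id
	}
	for _, ir := range idRefs {
		buf = append(buf, tmp[:binary.PutUvarint(tmp, uint64(len(ir.Refs)))]...)
	}
	last = 0
	for _, ir := range idRefs {
		for _, ref := range ir.Refs {
			buf = append(buf, tmp[:binary.PutVarint(tmp, ref-last)]...)
			last = ref
		}
	}
	return buf
}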