Example #1
func CheckECC(data []byte) uint32 {
	if len(data) > 200 {
		// Copy the head and tail into a fresh slice: appending to data[:100]
		// directly would overwrite data[100:200] through the shared backing array.
		tmpdata := make([]byte, 0, 200)
		tmpdata = append(tmpdata, data[:100]...)
		tmpdata = append(tmpdata, data[len(data)-100:]...)
		return adler32.Checksum(tmpdata)
	}
	return adler32.Checksum(data)
}
Example #2
// FIXME: find better way of writing this
func Encode(msg *RpcMessage) ([]byte, error) {
	payload, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}

	wire := make([]byte, 12+len(payload))

	// size
	binary.BigEndian.PutUint32(wire, uint32(8+len(payload)))

	// marker
	if copy(wire[4:], "RPC0") != 4 {
		panic("What the hell")
	}

	// payload
	if copy(wire[8:], payload) != len(payload) {
		panic("What the hell")
	}

	// checksum
	checksum := adler32.Checksum(wire[4 : 8+len(payload)])
	binary.BigEndian.PutUint32(wire[8+len(payload):], checksum)

	return wire, nil
}
Example #3
func BenchmarkAdler32(b *testing.B) {
	// in and benchVal32 are package-level variables; assigning the result to
	// benchVal32 keeps the compiler from optimizing the Checksum call away.
	var bv uint32
	for i := 0; i < b.N; i++ {
		bv = adler32.Checksum(in)
	}
	benchVal32 = bv
}
Example #4
func (this *StandardPeerSelector) PickPeer(key string) (peerAddr string) {
	// adler32 is close to crc32 in distribution quality, but roughly 3 times faster
	checksum := adler32.Checksum([]byte(key))
	// do the modulus in uint32 so the index can never be negative on 32-bit platforms
	index := int(checksum % uint32(len(this.peerAddrs)))

	return this.peerAddrs[index]
}
Example #5
// performReducing runs the reducing goroutines.
func performReducing(mr MapReducer, mapEmitChan, reduceEmitChan KeyValueChan) {
	// Start a closer for the reduce emit chan.
	size := runtime.NumCPU()
	signals := newCloserChan(reduceEmitChan, size)

	// Start reduce goroutines.
	reduceChans := make([]KeyValueChan, size)
	for i := 0; i < size; i++ {
		reduceChans[i] = make(KeyValueChan)
		go func(in KeyValueChan) {
			mr.Reduce(in, reduceEmitChan)
			signals <- struct{}{}
		}(reduceChans[i])
	}

	// Read map emitted data.
	for kv := range mapEmitChan {
		hash := adler32.Checksum([]byte(kv.Key()))
		idx := hash % uint32(size)
		reduceChans[idx] <- kv
	}

	// Close reduce channels.
	for _, reduceChan := range reduceChans {
		reduceChan.Close()
	}
}
Example #6
// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, os.Error) {
	z := new(reader)
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}
	_, err := io.ReadFull(z.r, z.scratch[0:2])
	if err != nil {
		return nil, err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		return nil, HeaderError
	}
	if z.scratch[1]&0x20 != 0 {
		_, err = io.ReadFull(z.r, z.scratch[0:4])
		if err != nil {
			return nil, err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			return nil, DictionaryError
		}
		z.decompressor = flate.NewReaderDict(z.r, dict)
	} else {
		z.decompressor = flate.NewReader(z.r)
	}
	z.digest = adler32.New()
	return z, nil
}
Example #7
func Decode(r io.Reader) (msg *RpcMessage, err error) {
	header := make([]byte, 4)
	_, err = io.ReadFull(r, header)
	if err != nil {
		return
	}

	length := binary.BigEndian.Uint32(header)
	payload := make([]byte, length)
	_, err = io.ReadFull(r, payload)
	if err != nil {
		return
	}

	if string(payload[:4]) != "RPC0" {
		err = fmt.Errorf("Wrong marker")
		return
	}

	checksum := adler32.Checksum(payload[:length-4])
	if checksum != binary.BigEndian.Uint32(payload[length-4:]) {
		err = fmt.Errorf("Wrong checksum")
		return
	}

	msg = new(RpcMessage)
	err = proto.Unmarshal(payload[4:length-4], msg)
	return
}
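For reference, Encode (Example #2) and Decode above agree on this frame layout: a 4-byte big-endian length covering everything after it, the ASCII marker "RPC0", the protobuf payload, and a trailing Adler-32 over marker plus payload. The sketch below is mine, not part of either example: a protobuf-free round trip over a raw byte payload using only the standard library, with frame/unframe as hypothetical names.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/adler32"
)

// frame wraps payload as: length(4) | "RPC0" | payload | adler32(marker+payload).
func frame(payload []byte) []byte {
	wire := make([]byte, 12+len(payload))
	binary.BigEndian.PutUint32(wire, uint32(8+len(payload)))
	copy(wire[4:], "RPC0")
	copy(wire[8:], payload)
	binary.BigEndian.PutUint32(wire[8+len(payload):], adler32.Checksum(wire[4:8+len(payload)]))
	return wire
}

// unframe reverses frame, validating length, marker, and checksum.
func unframe(wire []byte) ([]byte, error) {
	if len(wire) < 4 {
		return nil, errors.New("short frame")
	}
	n := binary.BigEndian.Uint32(wire)
	if n < 8 || int(n) > len(wire)-4 {
		return nil, errors.New("bad length")
	}
	body := wire[4 : 4+n]
	if string(body[:4]) != "RPC0" {
		return nil, errors.New("bad marker")
	}
	if adler32.Checksum(body[:n-4]) != binary.BigEndian.Uint32(body[n-4:]) {
		return nil, errors.New("bad checksum")
	}
	return body[4 : n-4], nil
}

func main() {
	wire := frame([]byte("hello"))
	payload, err := unframe(wire)
	fmt.Printf("%q %v\n", payload, err) // "hello" <nil>
}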
Example #8
// Perform the reducing.
func performReducing(mapEmitChan KeyValueChan, reduceFunc ReduceFunc, reduceSize int, reduceEmitChan KeyValueChan) {
	// Start a closer for the reduce emit chan.

	sigChan := closeSignalChannel(reduceEmitChan, reduceSize)

	// Start reduce funcs.

	reduceChans := make(KeyValueChans, reduceSize)

	for i := 0; i < reduceSize; i++ {
		reduceChans[i] = make(KeyValueChan)

		go func(inChan KeyValueChan) {
			reduceFunc(inChan, reduceEmitChan)

			sigChan <- true
		}(reduceChans[i])
	}

	// Read map emitted data.

	for kv := range mapEmitChan {
		hash := adler32.Checksum([]byte(kv.Key))
		idx := hash % uint32(reduceSize)

		reduceChans[idx] <- kv
	}

	// Close reduce channels.

	for _, reduceChan := range reduceChans {
		close(reduceChan)
	}
}
Example #9
func (section_header *Section_Header) Verify(datar *bytes.Reader) bool {
	// Read the 72 bytes the checksum covers; reading into a nil slice would read nothing
	// and the subsequent slicing would panic.
	buf := make([]byte, 72)
	if _, err := io.ReadFull(datar, buf); err != nil {
		return false
	}
	fmt.Println(section_header.Checksum, len(buf))
	return section_header.Checksum == adler32.Checksum(buf)
}
Example #10
func errorClass(err error) string {
	class := reflect.TypeOf(err).String()
	if class == "" {
		return "panic"
	} else if class == "*errors.errorString" {
		checksum := adler32.Checksum([]byte(err.Error()))
		return fmt.Sprintf("{%x}", checksum)
	} else {
		return strings.TrimPrefix(class, "*")
	}
}
Example #11
// Put sets a value by key in the hash map
func (t *HashTable) Put(key, value string) {
	hash := adler32.Checksum([]byte(key)) % maxTableSize
	for {
		if t.Table[hash] != nil && t.Table[hash].Key != key {
			hash = (hash + 1) % maxTableSize
		} else {
			break
		}
	}
	t.Table[hash] = &node{key, value}
}
Example #12
func (c *Catalog) Include(content []byte) bool {
	crc := adler32.Checksum(content)
	sort.Sort(c.Files)
	i := sort.Search(len(c.Files), func(i int) bool {
		return c.Files[i] >= crc
	})
	return i < len(c.Files) && c.Files[i] == crc
}
Example #13
func BenchmarkAdler32(b *testing.B) {
	in := make([]byte, 10000)
	for i := 0; i < len(in); i++ {
		in[i] = byte(rand.Intn(255))
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		adler32.Checksum(in)
	}
	b.SetBytes(int64(len(in)))
}
Example #14
File: writer.go Project: ds2dev/gcc
// writeHeader writes the ZLIB header.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5
	}
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(z.dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}
Example #15
// StartJob launches a job on the given queue. It is not executed immediately but
// scheduled to run as a task which performs splitting of the input reader based
// on the number of shards.
func (m *mapper) startJobHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)

	values := r.URL.Query()
	name := values.Get("name")
	jobSpec, err := CreateJobInstance(name)
	if err != nil {
		return
	}

	shards, err := strconv.Atoi(values.Get("shards"))
	if shards == 0 || err != nil {
		shards = m.config.Shards
	}

	queue := values.Get("queue")
	if queue != "" {
		// override the queue for this request
		// (used by locker.Schedule later)
		c = locker.WithQueue(c, queue)
	}
	bucket := values.Get("bucket")

	query, err := jobSpec.Query(r)
	if err != nil {
		log.Errorf(c, "error creating query %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	requestHash := r.Header.Get("X-Appengine-Request-Id-Hash")
	if requestHash == "" {
		// this should only happen when testing, we just need a short hash
		requestID := appengine.RequestID(c)
		requestHash = strconv.FormatUint(uint64(adler32.Checksum([]byte(requestID))), 16)
	}

	id := fmt.Sprintf("%s/%s", name, requestHash)
	job := &job{
		JobName:   name,
		JobSpec:   jobSpec,
		Bucket:    bucket,
		Shards:    shards,
		Iterating: true,
	}
	job.common.start(query)

	key := datastore.NewKey(c, m.config.DatastorePrefix+jobKind, id, 0, nil)
	m.locker.Schedule(c, key, job, m.config.Path+jobURL, nil)
}
Example #16
// Get returns a value by string, and true/false if the key exists
func (t HashTable) Get(key string) (string, bool) {
	hash := adler32.Checksum([]byte(key)) % maxTableSize
	for {
		if t.Table[hash] != nil && t.Table[hash].Key != key {
			hash = (hash + 1) % maxTableSize
		} else {
			break
		}
	}
	if t.Table[hash] == nil {
		return "", false
	}
	return t.Table[hash].Value, true
}
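The Put (Example #11) and Get methods above reference a HashTable, a node, and a maxTableSize that are not shown. The declarations below are a guess that reconstructs only the fields the two methods actually touch; the table size is an arbitrary assumption.

// Assumed supporting declarations for the Put/Get examples above.
const maxTableSize = 1024 // assumed capacity; must exceed the number of keys stored, or Put loops forever

type node struct {
	Key   string
	Value string
}

type HashTable struct {
	Table [maxTableSize]*node
}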
Example #17
func (z *reader) Reset(r io.Reader, dict []byte) error {
	*z = reader{decompressor: z.decompressor}
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}

	// Read the header (RFC 1950 section 2.2.).
	_, z.err = io.ReadFull(z.r, z.scratch[0:2])
	if z.err != nil {
		if z.err == io.EOF {
			z.err = io.ErrUnexpectedEOF
		}
		return z.err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		z.err = ErrHeader
		return z.err
	}
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, z.err = io.ReadFull(z.r, z.scratch[0:4])
		if z.err != nil {
			if z.err == io.EOF {
				z.err = io.ErrUnexpectedEOF
			}
			return z.err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			z.err = ErrDictionary
			return z.err
		}
	}

	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}
	z.digest = adler32.New()
	return nil
}
Example #18
// NewWriterDict creates a new io.WriteCloser that satisfies writes by compressing data written to w.
// It is the caller's responsibility to call Close on the WriteCloser when done.
// level is the compression level, which can be DefaultCompression, NoCompression,
// or any integer value between BestSpeed and BestCompression (inclusive).
// dict is the preset dictionary to compress with, or nil to use no dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
	z := new(Writer)
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch level {
	case 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		return nil, os.NewError("level out of range")
	}
	if dict != nil {
		z.scratch[1] |= 1 << 5
	}
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	_, err := w.Write(z.scratch[0:2])
	if err != nil {
		return nil, err
	}
	if dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		_, err = w.Write(z.scratch[0:4])
		if err != nil {
			return nil, err
		}
	}
	z.w = w
	z.compressor = flate.NewWriterDict(w, level, dict)
	z.digest = adler32.New()
	return z, nil
}
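The FLEVEL/FDICT/FCHECK arithmetic described in the header comments of Examples #14 and #18 can be checked in isolation: with the default compression level and no preset dictionary, the two header bytes come out to 0x78 0x9c, and the 16-bit value they form is divisible by 31. A small standalone sketch, independent of the Writer type above:

package main

import "fmt"

func main() {
	b0 := byte(0x78)   // CINFO=7 (32 KiB window), CM=8 (deflate)
	b1 := byte(2 << 6) // FLEVEL=2 (default compression), FDICT=0
	// FCHECK: pad the low five bits so the 16-bit big-endian header is a multiple of 31.
	b1 += byte(31 - (uint16(b0)<<8+uint16(b1))%31)
	fmt.Printf("%#02x %#02x divisible=%v\n", b0, b1, (uint16(b0)<<8+uint16(b1))%31 == 0)
	// Output: 0x78 0x9c divisible=true
}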
Example #19
func main() {
	servers := make([]*store.StoreServer, NUM_SHARDS)
	i := 0
	for i = 0; i < NUM_SHARDS; i++ {
		servers[i] = store.NewStoreServer()
	}
	//server := store.NewStoreServer()

	var e entry
	var firstTime uint64

	scanner := bufio.NewScanner(os.Stdin)
	c := 0
	n := 0
	for scanner.Scan() {
		err := json.Unmarshal([]byte(scanner.Text()), &e)
		if err != nil {
			n += 1
			continue
		}
		if firstTime == 0 {
			firstTime = e.Time
		}
		name := e.timeSeriesName()
		shard := adler32.Checksum([]byte(name)) % NUM_SHARDS
		servers[shard].Put(e.timeSeriesName(), uint32(e.Time/1000), e.Value)
		c += 1
	}

	fmt.Printf("first_time: %d  last_time: %d  delta: %d\n", firstTime/1000, e.Time/1000,
		(e.Time-firstTime)/1000)

	total_bytes, total_series := uint64(0), 0

	for i = 0; i < NUM_SHARDS; i++ {
		bytes_consumed := servers[i].BytesConsumed()
		num_time_series := servers[i].NumTimeSeries()
		fmt.Printf("%02d: bytes=%d #series=%d\n", i, bytes_consumed, num_time_series)
		total_bytes += uint64(bytes_consumed)
		total_series += num_time_series
	}

	fmt.Printf("total: bytes: %d  series: %d   count: %d\n", total_bytes, total_series, c)
	//router := mux.NewRouter().StrictSlash(true)
	//router.HandleFunc("/query", server.Query)
	//log.Fatal(http.ListenAndServe(":8080", router))
}
Example #20
func TestOverflow(t *testing.T) {
	data := make([]byte, 65536)
	for k := range data {
		data[k] = 255
	}
	for i := 17343; i <= 17343; i++ {
		d := uint32(1)
		d = pushBack(d, data[:i])
		if d != adler32.Checksum(data[:i]) {
			t.Fatalf("pushBack seems to be wrong")
		}
		d = popFront(d, data[:i], i)
		if d != uint32(1) {
			t.Errorf("Overflow at length %d detected: d=%08x", i, d)
		}
	}
}
Example #21
func (e *partitionEmitter) Emit(reduceKey string, sortKey string, value string) {

	partition := uint32(0)

	if e.partitions > 1 {
		partition = adler32.Checksum([]byte(reduceKey)) % uint32(e.partitions)
	}

	if e.emitters[partition] == nil {
		e.FileNames[partition] = fmt.Sprintf("%s.%04d", e.fileNameTemplate, partition)
		fd, _ := os.Create(e.FileNames[partition])
		e.fds[partition] = fd
		w := bufio.NewWriter(fd)
		e.emitters[partition] = newPrintEmitter(w)
	}

	e.emitters[partition].Emit(reduceKey, sortKey, value)
}
Example #22
func TestDecodeEncode(t *testing.T) {
	buf := bytes.NewBuffer([]byte(armorExample1))
	result, err := Decode(buf)
	if err != nil {
		t.Error(err)
	}
	expectedType := "PGP SIGNATURE"
	if result.Type != expectedType {
		t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType)
	}
	if len(result.Header) != 1 {
		t.Errorf("len(result.Header): got:%d want:1", len(result.Header))
	}
	v, ok := result.Header["Version"]
	if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" {
		t.Errorf("result.Header: got:%#v", result.Header)
	}

	contents, err := ioutil.ReadAll(result.Body)
	if err != nil {
		t.Error(err)
	}

	if adler32.Checksum(contents) != 0x789d7f00 {
		t.Errorf("contents: got: %x", contents)
	}

	buf = bytes.NewBuffer(nil)
	w, err := Encode(buf, result.Type, result.Header)
	if err != nil {
		t.Error(err)
	}
	_, err = w.Write(contents)
	if err != nil {
		t.Error(err)
	}
	w.Close()

	if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) {
		t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1)
	}
}
Example #23
// Writes up to 4MB of random data into the rolling checksum
func TestEquivalenceLong(t *testing.T) {
	testarr := make([]byte, 4*1024*1024)
	sum := 0
	for sum != len(testarr) {
		num, _ := rand.Read(testarr[sum:])
		sum += num
	}

	for window := 32; window <= 40960; window *= 2 {
		var rolling = New(uint32(window))
		rolling.Write(testarr)
		start := len(testarr) - window
		expected := adler32.Checksum(testarr[start:])
		actual := rolling.Sum32()
		if expected != actual {
			t.Fatalf("%d: expected %x, got %x",
				window, expected, actual)
		}
	}
}
Example #24
func (this *mysqlStore) KafkaTopic(appid string, topic string, ver string) (r string) {
	b := mpool.BytesBufferGet()
	b.Reset()
	b.WriteString(appid)
	b.WriteByte('.')
	b.WriteString(topic)
	b.WriteByte('.')
	b.WriteString(ver)
	if len(ver) > 2 {
		// ver starts at 'v1'; from 'v10' onward (len > 2) an obfuscation suffix is appended
		b.WriteByte('.')

		// can't use app secret as part of cookie: what if user changes his secret?
		// FIXME user can guess the cookie if they know the algorithm in advance
		cookie := adler32.Checksum([]byte(appid + topic))
		b.WriteString(strconv.Itoa(int(cookie % 1000)))
	}
	r = b.String()
	mpool.BytesBufferPut(b)
	return
}
Example #25
//HTTP Request Handler
func HandleRequest(rw http.ResponseWriter, req *http.Request) {
	// Clean the request path so it resolves inside the served directory
	cleanpath := op.Path + filepath.Clean(req.URL.Path)

	//Get the checksum of the path
	n := adler32.Checksum([]byte(cleanpath))%40 + 1
	//n = 9

	strurl := "http://virtual" + fmt.Sprintf("%d", n) + ".cs.missouri.edu:8006"

	fmt.Println(strurl)

	u, err := url.Parse(strurl)
	if err != nil {
		log.Fatal(err)
	}

	//Create a reverse proxy
	reverse_proxy := httputil.NewSingleHostReverseProxy(u)
	reverse_proxy.ServeHTTP(rw, req)
}
Example #26
func checksum(r io.Reader, method string) (string, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return "", err
	}

	switch method {
	case "md5":
		return fmt.Sprintf("%x", md5.Sum(b)), nil
	case "sha1":
		return fmt.Sprintf("%x", sha1.Sum(b)), nil
	case "sha256":
		return fmt.Sprintf("%x", sha256.Sum256(b)), nil
	case "sha512":
		return fmt.Sprintf("%x", sha512.Sum512(b)), nil
	case "adler32":
		return strconv.FormatUint(uint64(adler32.Checksum(b)), 10), nil
	case "crc32":
		return strconv.FormatUint(uint64(crc32.ChecksumIEEE(b)), 10), nil
	default:
		return "", fmt.Errorf("hashing method %s is not supported", method)
	}
}
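A quick usage sketch for the dispatcher above, assuming it lives in the same package; printChecksums is a hypothetical name and the method list is illustrative only.

// Hypothetical caller of the checksum dispatcher above; it additionally
// needs "fmt", "log", and "strings" imported.
func printChecksums(data string) {
	for _, method := range []string{"md5", "sha256", "adler32", "crc32"} {
		sum, err := checksum(strings.NewReader(data), method)
		if err != nil {
			log.Println(err)
			continue
		}
		fmt.Printf("%-8s %s\n", method, sum)
	}
}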
Example #27
func (z *reader) Reset(r io.Reader, dict []byte) error {
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		z.r = bufio.NewReader(r)
	}
	_, err := io.ReadFull(z.r, z.scratch[0:2])
	if err != nil {
		return err
	}
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		return ErrHeader
	}
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, err = io.ReadFull(z.r, z.scratch[0:4])
		if err != nil {
			return err
		}
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			return ErrDictionary
		}
	}
	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}
	z.digest = adler32.New()
	return nil
}
Example #28
// Tests by writing up to 4k of data into the checksum
func TestEquivalence(t *testing.T) {
	testarr := make([]byte, 4*1024)
	sum := 0
	for sum != len(testarr) {
		num, _ := rand.Read(testarr[sum:])
		sum += num
	}
	for i := 0; i <= len(testarr); i++ {
		var rolling = New(255)
		rolling.Write(testarr[:i])
		start := 0
		if i > 255 {
			start = i - 255
		}

		expected := adler32.Checksum(testarr[start:i])
		actual := rolling.Sum32()
		if expected != actual {
			t.Fatalf("%d: expected %x, got %x, %x -> %x",
				i, expected, actual,
				testarr[start-1], testarr[i-1])
		}
	}
}
Example #29
func (s *sharder) GetBlockShard(block *pfs.Block) uint64 {
	return uint64(adler32.Checksum([]byte(block.Hash))) % s.blockModulus
}
func (s *sharder) GetShard(file *pfs.File) uint64 {
	return uint64(adler32.Checksum([]byte(path.Clean(file.Path)))) % s.fileModulus
}