Example #1
0
// Address returns ipv6 ephemeral address based on sha3 hash
func (d Destination) Address() net.IP {
	var (
		h    = sha3.NewShake128()
		data = make([]byte, net.IPv6len)
	)
	h.Write(d)
	h.Read(data)
	return net.IP(data)
}
Example #2
0
// uniform fills p's coefficients with values squeezed from a SHAKE-128
// hash of seed, keeping only candidates below 5*paramQ (rejection
// sampling).  The default path is variable-time; torSampling selects a
// constant-time sampler (p.discardTo) instead — presumably to avoid
// timing leaks in the Tor deployment (NOTE(review): confirm intent).
func (p *poly) uniform(seed *[SeedBytes]byte, torSampling bool) {
	if !torSampling {
		// Reference version, vartime.
		nBlocks := 14
		var buf [shake128Rate * 14]byte

		// h and buf are left unscrubbed because the output is public.
		h := sha3.NewShake128()
		h.Write(seed[:])
		h.Read(buf[:])

		for ctr, pos := 0, 0; ctr < paramN; {
			// Interpret the next two squeezed bytes as a little-endian
			// candidate value.
			val := binary.LittleEndian.Uint16(buf[pos:])

			// Reject candidates >= 5*paramQ; accepting them would bias
			// the distribution of the stored coefficients.
			if val < 5*paramQ {
				p.coeffs[ctr] = val
				ctr++
			}
			pos += 2
			// Buffer exhausted (fewer than 2 bytes left): squeeze one
			// more rate-sized block and restart from its beginning.
			// After the first refill only shake128Rate bytes of buf are
			// valid, hence nBlocks drops to 1.
			if pos > shake128Rate*nBlocks-2 {
				nBlocks = 1
				h.Read(buf[:shake128Rate])
				pos = 0
			}
		}
	} else {
		// `torref` version: every valid `a` is generated in constant
		// time, though the number of attempts varies.
		const nBlocks = 16
		var buf [shake128Rate * nBlocks]byte

		// h and buf are left unscrubbed because the output is public.
		h := sha3.NewShake128()
		h.Write(seed[:])

		// Keep squeezing full buffers until discardTo reports that the
		// batch was accepted (returns false).
		for {
			h.Read(buf[:])
			if !p.discardTo(buf[:]) {
				break
			}
		}

	}
}
Example #3
0
// Store should not be called concurrently, as it will result in corrupted hashes
func (ds *DiskSink) Store(loc BlockLocation, data []byte) error {
	if ds.writing {
		return fmt.Errorf("concurrent write to disksink is unsupported")
	}

	ds.writing = true
	defer func() {
		ds.writing = false
	}()

	if ds.hashBuf == nil {
		ds.hashBuf = make([]byte, 32)
	}

	if ds.shake == nil {
		ds.shake = sha3.NewShake128()
	}

	ds.shake.Reset()
	_, err := ds.shake.Write(data)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	_, err = io.ReadFull(ds.shake, ds.hashBuf)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	if ds.BlockHashes != nil {
		ds.BlockHashes.Set(loc, append([]byte{}, ds.hashBuf...))
	}

	fileSize := ds.Container.Files[int(loc.FileIndex)].Size
	blockSize := ComputeBlockSize(fileSize, loc.BlockIndex)
	addr := fmt.Sprintf("shake128-32/%x/%d", ds.hashBuf, blockSize)

	path := filepath.Join(ds.BasePath, addr)

	err = os.MkdirAll(filepath.Dir(path), 0755)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	// create file only if it doesn't exist yet
	file, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		if os.IsExist(err) {
			// block's already there!
			return nil
		}
		return errors.Wrap(err, 1)
	}

	defer file.Close()

	if ds.Compressor == nil {
		_, err = io.Copy(file, bytes.NewReader(data))
		if err != nil {
			return errors.Wrap(err, 1)
		}
	} else {
		err = ds.Compressor.Compress(file, data)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	return nil
}