// concat key derivation function
// See: 5.8.1 of NIST.800-56A
//
// concatKDF derives an encryption key and an integrity (MAC) key from
// masterKey. Returns (encKey, macKey) truncated to encKeySize/8 and
// macKeySize/8 bytes respectively.
func concatKDF(masterKey []byte, keyType string, encKeySize, macKeySize int) ([]byte, []byte) {
	// build buffer common to encryption and integrity derivation
	//
	// Single-round KDF input layout (counter is always 1):
	//   [0:4)                            counter = 1
	//   [4 : 4+len(masterKey))           Z (master key)
	//   [4+len(mk) : 8+len(mk))          key size in bits (filled in per key below)
	//   [8+len(mk) : 8+len(mk)+len(kt))  keyType label
	//   next 8 bytes                     two zero uint32 fields
	//   last 10 bytes                    usage label ("Encryption" / "Integrity")
	// Total: 16 fixed bytes + 10 label bytes = len(masterKey)+len(keyType)+26.
	buffer := make([]byte, len(masterKey)+len(keyType)+26)
	binary.BigEndian.PutUint32(buffer[:], uint32(1))
	copy(buffer[4:], masterKey)
	copy(buffer[8+len(masterKey):], keyType)
	binary.BigEndian.PutUint32(buffer[8+len(masterKey)+len(keyType):], uint32(0))
	binary.BigEndian.PutUint32(buffer[12+len(masterKey)+len(keyType):], uint32(0))

	// derive the encryption key
	binary.BigEndian.PutUint32(buffer[4+len(masterKey):], uint32(encKeySize))
	copy(buffer[16+len(masterKey)+len(keyType):], "Encryption")
	// NOTE(review): the hash for BOTH derivations is selected by macKeySize
	// only (256 -> SHA-256, 512 -> SHA-512); encKeySize never picks the hash.
	var h hash.Hash
	if macKeySize == 256 {
		h = sha256.New()
	} else if macKeySize == 512 {
		h = sha512.New()
	} else {
		panic("Unknown hash size")
	}
	h.Write(buffer)
	encKey := h.Sum(nil)

	// derive the integrity key
	binary.BigEndian.PutUint32(buffer[4+len(masterKey):], uint32(macKeySize))
	// "Integrity" (9 bytes) overwrites only the first 9 bytes of "Encryption"
	// (10 bytes); the stale trailing 'n' is excluded by hashing
	// buffer[:len(buffer)-1] below.
	copy(buffer[16+len(masterKey)+len(keyType):], "Integrity")
	h.Reset()
	h.Write(buffer[:len(buffer)-1])
	macKey := h.Sum(nil)

	return encKey[:encKeySize/8], macKey[:macKeySize/8]
}
// generate a hash for the given password and salt // intentionally slow for better security func GenPasswordHashSlow(password []byte, salt []byte) []byte { var h hash.Hash var lastHash []byte h = sha512.New() h.Write(passwordHashConstant) h.Write(password) h.Write(salt) lastHash = h.Sum(nil) for i := 0; i < _hashLoopCount; i++ { h.Reset() h.Write(salt) h.Write(lastHash) h.Write(password) h.Write(passwordHashConstant) lastHash = h.Sum(nil) } r := make([]byte, 0, 2*len(lastHash)) for i := 0; i < len(lastHash); i++ { r = append(r, (lastHash[i]&0x0f)+'a') r = append(r, ((lastHash[i]>>4)&0x0f)+'a') } return r }
func filehash(h hash.Hash, r io.Reader) ([]byte, error) { defer h.Reset() if _, err := io.Copy(h, r); err != nil { return nil, err } return h.Sum(nil), nil }
func Encode(k int, h hash.Hash, value []byte, output int) (enc []byte, s [][]byte) { s0 := make([]byte, h.Size()) n := len(value) blockcount := n / k s = make([][]byte, blockcount) for i := 0; i < blockcount; i++ { h.Reset() if i == 0 { h.Write(s0) } else { h.Write(s[i-1]) } h.Write(value[i*k : (i+1)*k]) s[i] = h.Sum(make([]byte, 0, h.Size())) } rng := make([](*RNG), len(s)) for i := 0; i < len(s); i++ { rng[i] = NewRNG(h, s[i]) } enc = make([]byte, output) for i := 0; i < output; i++ { enc[i] = rng[i%blockcount].Next() } return }
// DeriveConcatKDF implements NIST SP 800-56A Concatenation Key Derivation Function. Derives // key material of keydatalen bits size given Z (sharedSecret), OtherInfo (AlgorithmID | // PartyUInfo | PartyVInfo | SuppPubInfo | SuppPrivInfo) and hash function func DeriveConcatKDF(keydatalen int, sharedSecret, algId, partyUInfo, partyVInfo, suppPubInfo, suppPrivInfo []byte, h hash.Hash) []byte { otherInfo := arrays.Concat(algId, partyUInfo, partyVInfo, suppPubInfo, suppPrivInfo) keyLenBytes := keydatalen >> 3 reps := int(math.Ceil(float64(keyLenBytes) / float64(h.Size()))) if reps > MaxInt { panic("kdf.DeriveConcatKDF: too much iterations (more than 2^32-1).") } dk := make([]byte, 0, keyLenBytes) for counter := 1; counter <= reps; counter++ { h.Reset() counterBytes := arrays.UInt32ToBytes(uint32(counter)) h.Write(counterBytes) h.Write(sharedSecret) h.Write(otherInfo) dk = h.Sum(dk) } return dk[:keyLenBytes] }
// Perform birthday attack on 8, 16, 24, 32, 40, or 48 bit SHA-512-n digest. func birthdayAttack(h hash.Hash) (item1, item2, digestOut []byte) { // i is 64 bits. Since 2^64 > 2^48 (the largest SHA-512-n digest) // at least one collision will be found due to the pigeon hole principle. // There's a ~50% chance of finding such a collision in the first 2^(n/2) // tries. tries := make(map[string][]byte) // Do 0 out of the loop, since 0 is used as our stop point. i := uint64(0) data := uint64ToBytes(i) h.Reset() h.Write(data) digest := h.Sum(nil) tries[string(digest)] = data for i++; i != 0; i++ { data := uint64ToBytes(i) h.Reset() h.Write(data) digest := h.Sum(nil) digestStr := string(digest) if prev, ok := tries[digestStr]; ok { // found collision item1 = data item2 = prev digestOut = digest return } tries[digestStr] = data } return nil, nil, nil }
// HashPasswordWithSalt scrambles the password with the provided parameters. func HashPasswordWithSalt(password, tweak, salt []byte, g, g0 int64, H hash.Hash) ([]byte, error) { if g < g0 { return nil, ErrInvalidGarlic } x := make([]byte, len(tweak)+len(password)|len(salt)) copy(x, tweak) copy(x[len(tweak):], password) copy(x[len(tweak)+len(password):], salt) var err error for i := g0; i <= g; i++ { c := bigPadded(big.NewInt(i), cPad) twoCp1 := new(big.Int).Exp(big.NewInt(2), big.NewInt(i), nil) twoCp1 = twoCp1.Add(twoCp1, big.NewInt(1)) x, err = sbrh(c, x, H) if err != nil { H.Reset() return nil, err } H.Write(c) H.Write(bigPadded(twoCp1, cPad)) H.Write(x) x = H.Sum(nil) H.Reset() } return x, nil }
// finishedSum30 calculates the contents of the verify_data member of a SSLv3 // Finished message given the MD5 and SHA1 hashes of a set of handshake // messages. func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic []byte) []byte { md5.Write(magic) md5.Write(masterSecret) md5.Write(ssl30Pad1[:]) md5Digest := md5.Sum(nil) md5.Reset() md5.Write(masterSecret) md5.Write(ssl30Pad2[:]) md5.Write(md5Digest) md5Digest = md5.Sum(nil) sha1.Write(magic) sha1.Write(masterSecret) sha1.Write(ssl30Pad1[:40]) sha1Digest := sha1.Sum(nil) sha1.Reset() sha1.Write(masterSecret) sha1.Write(ssl30Pad2[:40]) sha1.Write(sha1Digest) sha1Digest = sha1.Sum(nil) ret := make([]byte, len(md5Digest)+len(sha1Digest)) copy(ret, md5Digest) copy(ret[len(md5Digest):], sha1Digest) return ret }
// Iterated writes to out the result of computing the Iterated and Salted S2K
// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
// salt and iteration count.
func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
	// The hashed material is salt || passphrase, repeated.
	combined := make([]byte, len(in)+len(salt))
	copy(combined, salt)
	copy(combined[len(salt):], in)

	// RFC 4880: hash at least the full salt+passphrase once, even if the
	// requested octet count is smaller.
	if count < len(combined) {
		count = len(combined)
	}

	done := 0
	var digest []byte
	// When out is longer than one digest, pass i prefixes i zero bytes so
	// that each pass produces a distinct digest (RFC 4880 "preloading").
	for i := 0; done < len(out); i++ {
		h.Reset()
		for j := 0; j < i; j++ {
			h.Write(zero[:])
		}
		// Feed exactly count bytes of the repeated salt||passphrase stream,
		// truncating the final repetition as needed.
		written := 0
		for written < count {
			if written+len(combined) > count {
				todo := count - written
				h.Write(combined[:todo])
				written = count
			} else {
				h.Write(combined)
				written += len(combined)
			}
		}
		// Reuse digest's backing storage across passes.
		digest = h.Sum(digest[:0])
		n := copy(out[done:], digest)
		done += n
	}
}
// Tweak generates a new tweak from the mode, hash, salt length (in // bytes), and any additional data. It provides additional information // that will complicate an attacker's efforts, and allows a system to // differentiate between different uses of the Catena function's output. func Tweak(mode byte, H hash.Hash, saltLen int, ad []byte) ([]byte, error) { if mode != ModePassHash && mode != ModeKeyDerivation { return nil, ErrInvalidTweakMode } hashLen := H.Size() tweakLen := 5 + hashLen var t = make([]byte, 1, tweakLen) t[0] = mode var tmp uint16 = uint16(H.Size() * 8) high := byte(tmp >> 8) low := byte(tmp << 8 >> 8) t = append(t, high) t = append(t, low) tmp = uint16(saltLen * 8) high = byte(tmp >> 8) low = byte(tmp << 8 >> 8) t = append(t, high) t = append(t, low) H.Reset() H.Write(ad) t = append(t, H.Sum(nil)...) H.Reset() return t, nil }
// EncryptOAEP encrypts the given message with RSA-OAEP.
// The message must be no longer than the length of the public modulus less
// twice the hash length plus 2.
//
// NOTE(review): this is pre-Go1 code (os.Error, argument-less hash.Sum(),
// pub.N.Len()); it will not build on a modern toolchain as written.
func EncryptOAEP(hash hash.Hash, rand io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err os.Error) {
	hash.Reset()
	// k is the modulus size in bytes.
	k := (pub.N.Len() + 7) / 8
	if len(msg) > k-2*hash.Size()-2 {
		err = MessageTooLongError{}
		return
	}

	hash.Write(label)
	lHash := hash.Sum()
	hash.Reset()

	// EM = 0x00 || seed || DB (OAEP layout; first byte stays zero).
	em := make([]byte, k)
	seed := em[1 : 1+hash.Size()]
	db := em[1+hash.Size():]

	// DB = lHash || zero padding || 0x01 || message.
	copy(db[0:hash.Size()], lHash)
	db[len(db)-len(msg)-1] = 1
	copy(db[len(db)-len(msg):], msg)

	// Random seed for the masking step.
	_, err = io.ReadFull(rand, seed)
	if err != nil {
		return
	}

	// Mask DB with MGF1(seed), then mask the seed with MGF1(masked DB).
	mgf1XOR(db, hash, seed)
	mgf1XOR(seed, hash, db)

	// RSA encryption of the padded block.
	m := new(big.Int)
	m.SetBytes(em)
	c := encrypt(new(big.Int), pub, m)
	out = c.Bytes()
	return
}
func calculateRef(h hash.Hash, data []byte) *Ref { var tmp [RefLen]byte h.Reset() h.Write(data) mac := h.Sum(tmp[:0]) return RefFromBytes(mac) }
// Send len(data) # of sequential packets, hashed with the provided hash.Hash // down the provided channel func sequentialPacketChannel(data []byte, hasher hash.Hash) chan *Packet { // FIXME: Packet size is hard-coded at 1 outchan := make(chan *Packet) go func() { for i, _ := range data { var mbuf [64]byte hasher.Write(data[i : i+1]) m := hasher.Sum(nil) for i, _ := range mbuf { mbuf[i] = m[i] } hasher.Reset() h := PacketHeader{ SequenceN: uint32(i), Mac: mbuf, Size: uint32(1), } outchan <- &Packet{h, data[i : i+1]} } close(outchan) }() return outchan }
func (otp *OTP) hashCalc(algorithm string) ([8]byte, error) { var hash_algorithm hash.Hash tmpseq := STAITC_OTP_OTP_REP_COUNT - (otp.seq % STAITC_OTP_OTP_REP_COUNT) _string_ := strconv.Itoa(otp.seed) + otp.passphrase switch otp.mAlgorithm { case "MD4": hash_algorithm = md4.New() case "MD5": hash_algorithm = md5.New() case "RIPEMD128": hash_algorithm = ripemd128.New() case "RIPEMD160": hash_algorithm = ripemd160.New() case "SHA1": hash_algorithm = sha1.New() default: return [8]byte{0, 0, 0, 0, 0, 0, 0, 0}, fmt.Errorf("NoSuchAlgorithmException: %s", otp.mAlgorithm) } hash_algorithm.Reset() hash_algorithm.Write(UCS2_to_UTF8([]byte(_string_))) otp.hash = hashValueTo8(hash_algorithm.Sum(nil)) for tmpseq > 0 { hash_algorithm.Reset() hash_algorithm.Write(otp.hash[:]) otp.hash = hashValueTo8(hash_algorithm.Sum(nil)) tmpseq-- } return otp.hash, nil }
func passwordToKey(proto AuthProtocol, password string, engineId []byte) []byte { var h hash.Hash switch proto { case Md5: h = md5.New() case Sha: h = sha1.New() } pass := []byte(password) plen := len(pass) for i := mega / plen; i > 0; i-- { h.Write(pass) } remain := mega % plen if remain > 0 { h.Write(pass[:remain]) } ku := h.Sum(nil) h.Reset() h.Write(ku) h.Write(engineId) h.Write(ku) return h.Sum(nil) }
func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte { h.Reset() h.Write([]byte{magic}) for _, mpi := range mpis { h.Write(appendMPI(nil, mpi)) } return h.Sum(nil) }
// hashSlices hashes the concatenation of slices and deposits the digest via
// hasher.Sum(out).
//
// NOTE(review): hash.Hash.Sum APPENDS the digest to its argument and returns
// the combined slice; that return value is discarded here, so the digest
// reaches the caller only through out's backing array — and only when
// cap(out) is large enough that Sum does not reallocate. Callers appear
// expected to pass a zero-length view of a preallocated buffer (e.g.
// buf[:0]) and re-slice afterwards — TODO confirm against call sites.
func hashSlices(out []byte, hasher hash.Hash, slices ...[]byte) {
	// Leave the hasher clean for its next user as well.
	defer hasher.Reset()
	hasher.Reset()
	for i := range slices {
		hasher.Write(slices[i])
	}
	hasher.Sum(out)
}
// deriveKey takes an HMAC object and a label and calculates out = HMAC(k, label). func deriveKey(out *[32]byte, label []byte, h hash.Hash) { h.Reset() h.Write(label) n := h.Sum(out[:0]) if &n[0] != &out[0] { panic("hash function too large") } }
func filehash(name string, h hash.Hash, r io.Reader) []byte { defer h.Reset() if _, err := io.Copy(h, r); err != nil { log.Print(name, ": Copy to SHA1: ", err) return nil } return h.Sum(nil) }
func strhash(data string, h hash.Hash) []byte { defer h.Reset() if _, err := io.WriteString(h, data); err != nil { log.Print("String write to SHA1: ", err) return nil } return h.Sum(nil) }
// hashBlock returns in hashed the given number of times: H(...H(in)). // If times is 0, returns a copy of input without hashing it. func hashBlock(h hash.Hash, in []byte, times int) (out []byte) { out = append(out, in...) for i := 0; i < times; i++ { h.Reset() h.Write(out) out = h.Sum(out[:0]) } return }
// WriteOutputTarStream writes assembled tar archive to a writer.
// Segment entries are passed through verbatim; file entries are fetched from
// fg, copied to w, and verified against the CRC64 carried in entry.Payload.
func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
	if fg == nil || up == nil {
		return nil
	}
	// Lazily initialized on the first FileType entry, then reused.
	var copyBuffer []byte
	var crcHash hash.Hash
	var crcSum []byte
	var multiWriter io.Writer
	for {
		entry, err := up.Next()
		if err != nil {
			// io.EOF marks a normally-terminated stream.
			if err == io.EOF {
				return nil
			}
			return err
		}
		switch entry.Type {
		case storage.SegmentType:
			// Raw tar bytes between files pass straight through.
			if _, err := w.Write(entry.Payload); err != nil {
				return err
			}
		case storage.FileType:
			if entry.Size == 0 {
				continue
			}
			fh, err := fg.Get(entry.GetName())
			if err != nil {
				return err
			}
			if crcHash == nil {
				crcHash = crc64.New(storage.CRCTable)
				crcSum = make([]byte, 8)
				// Tee file bytes into both the output and the CRC.
				multiWriter = io.MultiWriter(w, crcHash)
				copyBuffer = byteBufferPool.Get().([]byte)
				// This branch runs at most once, so the defer fires exactly
				// once at function return — not per iteration.
				defer byteBufferPool.Put(copyBuffer)
			} else {
				crcHash.Reset()
			}

			if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
				fh.Close()
				return err
			}

			// entry.Payload holds the expected CRC64 for this file.
			if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
				// I would rather this be a comparable ErrInvalidChecksum or such,
				// but since it's coming through the PipeReader, the context of
				// _which_ file would be lost...
				fh.Close()
				return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
			}
			fh.Close()
		}
	}
}
// DeepHashObject writes specified object to hash using the spew library // which follows pointers and prints actual values of the nested objects // ensuring the hash does not change when a pointer changes. func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { hasher.Reset() printer := spew.ConfigState{ Indent: " ", SortKeys: true, DisableMethods: true, SpewKeys: true, } printer.Fprintf(hasher, "%#v", objectToWrite) }
// benchmarkHash measures h's write throughput on 4 KiB messages.
func benchmarkHash(b *testing.B, h hash.Hash) {
	msg := make([]byte, 4096)
	h.Reset()
	b.SetBytes(int64(len(msg)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		h.Write(msg)
	}
}
func wrap(hasher hash.Hash) func(*testing.B) { return func(b *testing.B) { b.SetBytes(blocksize) for i := 0; i < b.N; i++ { hasher.Write(buf) hashvalue = hasher.Sum(nil) hasher.Reset() } } }
// Creates a node given a hash function and data to hash
//
// NOTE(review): when h or block is nil this returns a zero-valued Node with
// a NIL error, so callers cannot distinguish invalid input from success —
// confirm whether an error (or a sentinel) was intended here.
func NewNode(h hash.Hash, block []byte) (Node, error) {
	if h == nil || block == nil {
		return Node{}, nil
	}
	// Leave the hash clean for reuse.
	defer h.Reset()
	_, err := h.Write(block[:])
	if err != nil {
		return Node{}, err
	}
	return Node{Hash: h.Sum(nil)}, nil
}
// NewHash returns a new UUID dervied from the hash of space concatenated with // data generated by h. The hash should be at least 16 byte in length. The // first 16 bytes of the hash are used to form the UUID. The version of the // UUID will be the lower 4 bits of version. NewHash is used to implement // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() h.Write(space[:]) h.Write([]byte(data)) s := h.Sum(nil) uuid := UUID{} copy(uuid[:], s) uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant return uuid }
// newHash returns a new UUID dervied from the hash of space concatenated with // data generated by h. The hash should be at least 16 byte in length. The // first 16 bytes of the hash are used to form the UUID. The version of the // UUID will be the lower 4 bits of version. NewHash is used to implement // NewV3 and NewV5. func (_ UUIDCtrl) newHash(h hash.Hash, space []byte, data []byte, version int) []byte { h.Reset() h.Write(space) h.Write([]byte(data)) s := h.Sum(nil) bytes := make([]byte, 16) copy(bytes, s) bytes[6] = (bytes[6] & 0x0f) | uint8((version&0xf)<<4) bytes[8] = (bytes[8] & 0x3f) | 0x80 // RFC 4122 variant return bytes }
func testHash(name string, h hash.Hash) { now := time.Now() for j := 0; j < 16; j++ { for slice[0] = 0; slice[0] < 255; slice[0]++ { h.Reset() h.Write(blob) h.Write(slice) h.Sum(nil) } } fmt.Printf("%s: %v\n", name, time.Now().Sub(now)) }
func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) { h.Reset() var p [1]byte p[0] = prefix h.Write(p[:]) h.Write(in) if len(out) == h.Size() { h.Sum(out[:0]) } else { digest := h.Sum(nil) copy(out, digest) } }