func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
	var tag [poly1305.TagSize]byte
	copy(tag[:], ciphertext[len(ciphertext)-16:])
	ciphertext = ciphertext[:len(ciphertext)-16]

	var counter [16]byte
	copy(counter[4:], nonce)

	var polyKey [32]byte
	chacha20.XORKeyStream(polyKey[:], polyKey[:], &counter, &c.key)

	polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(ciphertext))+8+8)
	copy(polyInput, additionalData)
	copy(polyInput[roundTo16(len(additionalData)):], ciphertext)
	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData)))
	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(ciphertext)))

	ret, out := sliceForAppend(dst, len(ciphertext))
	if !poly1305.Verify(&tag, polyInput, &polyKey) {
		for i := range out {
			out[i] = 0
		}
		return nil, errOpen
	}

	counter[0] = 1
	chacha20.XORKeyStream(out, ciphertext, &counter, &c.key)
	return ret, nil
}
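The function above is the generic (non-assembly) open path of ChaCha20-Poly1305: it derives the Poly1305 key from the first ChaCha20 block, verifies the tag over the padded additional data, the ciphertext and their lengths, and only then decrypts. For comparison, here is a minimal sketch of the same operation driven through the public golang.org/x/crypto/chacha20poly1305 API (this snippet is illustrative and not part of the example itself):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, chacha20poly1305.NonceSize) // 12 bytes
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	plaintext := []byte("hello")
	ad := []byte("header")

	// Seal appends the ciphertext followed by the 16-byte Poly1305 tag.
	box := aead.Seal(nil, nonce, plaintext, ad)

	// Open verifies the tag before decrypting, as openGeneric does above.
	opened, err := aead.Open(nil, nonce, box, ad)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", opened)
}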
Example #2
func (sec *AccountSecurity) unlock(clientKey *security.ManagedKey) (
	*security.ManagedKey, *security.ManagedKeyPair, error) {

	if clientKey.Encrypted() {
		return nil, nil, security.ErrKeyMustBeDecrypted
	}

	var (
		mac [16]byte
		key [32]byte
	)
	copy(mac[:], sec.MAC)
	copy(key[:], clientKey.Plaintext)
	if !poly1305.Verify(&mac, sec.Nonce, &key) {
		return nil, nil, ErrAccessDenied
	}

	kek := sec.UserKey.Clone()
	if err := kek.Decrypt(clientKey); err != nil {
		return nil, nil, err
	}

	kp := sec.KeyPair.Clone()
	if err := kp.Decrypt(&kek); err != nil {
		return nil, nil, err
	}

	return &kek, &kp, nil
}
Example #3
func poly1305Verify(msg []byte, nonce []byte, key *MACKey, mac []byte) bool {
	k := poly1305PrepareKey(nonce, key)

	var m [16]byte
	copy(m[:], mac)

	return poly1305.Verify(&m, msg, &k)
}
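For reference, poly1305.Verify is the counterpart of poly1305.Sum. Below is a minimal, self-contained round trip that is not taken from any project above; the key is random here, whereas the examples derive it from a stream-cipher keystream, and a Poly1305 key must never be reused across messages:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// One-time key; in the examples above it comes from a cipher keystream.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("attested message")

	var mac [16]byte
	poly1305.Sum(&mac, msg, &key)

	fmt.Println(poly1305.Verify(&mac, msg, &key)) // true
	msg[0] ^= 1
	fmt.Println(poly1305.Verify(&mac, msg, &key)) // false: message was altered
}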
Example #4
func (a *Agent) verify(accessKey *security.ManagedKey) bool {
	var (
		mac [16]byte
		key [32]byte
	)
	copy(mac[:], a.MAC)
	copy(key[:], accessKey.Plaintext)
	return poly1305.Verify(&mac, a.IV, &key)
}
Example #5
// PktProcess processes an incoming UDP packet.
// ConnListen's synchronization channel is used to tell it that it is
// free to receive new packets. Authenticated and decrypted packets
// are written to the interface immediately (except heartbeat ones).
func (p *Peer) PktProcess(data []byte, tap io.Writer, ready chan struct{}) bool {
	p.size = len(data)
	copy(p.buf, Emptiness)
	copy(p.tag[:], data[p.size-poly1305.TagSize:])
	copy(p.buf[S20BS:], data[NonceSize:p.size-poly1305.TagSize])
	salsa20.XORKeyStream(
		p.buf[:S20BS+p.size-poly1305.TagSize],
		p.buf[:S20BS+p.size-poly1305.TagSize],
		data[:NonceSize],
		p.Key,
	)
	copy(p.keyAuth[:], p.buf[:SSize])
	if !poly1305.Verify(p.tag, data[:p.size-poly1305.TagSize], p.keyAuth) {
		ready <- struct{}{}
		p.FramesUnauth++
		return false
	}

	// Check if received nonce is known to us in either of two buckets.
	// If yes, then this is ignored duplicate.
	// Check from the oldest bucket, as in most cases this will result
	// in constant time check.
	// If Bucket0 is filled, then it becomes Bucket1.
	p.NonceCipher.Decrypt(p.buf, data[:NonceSize])
	ready <- struct{}{}
	p.nonceRecv, _ = binary.Uvarint(p.buf[:NonceSize])
	if _, p.nonceFound = p.nonceBucket1[p.nonceRecv]; p.nonceFound {
		p.FramesDup++
		return false
	}
	if _, p.nonceFound = p.nonceBucket0[p.nonceRecv]; p.nonceFound {
		p.FramesDup++
		return false
	}
	p.nonceBucket0[p.nonceRecv] = struct{}{}
	p.nonceBucketN++
	if p.nonceBucketN == NonceBucketSize {
		p.nonceBucket1 = p.nonceBucket0
		p.nonceBucket0 = make(map[uint64]struct{}, NonceBucketSize)
		p.nonceBucketN = 0
	}

	p.FramesIn++
	p.BytesIn += int64(p.size)
	p.LastPing = time.Now()
	p.NonceRecv = p.nonceRecv
	p.pktSize, _ = binary.Uvarint(p.buf[S20BS : S20BS+PktSizeSize])
	if p.pktSize == 0 {
		p.HeartbeatRecv++
		return true
	}
	p.frame = p.buf[S20BS+PktSizeSize : S20BS+PktSizeSize+p.pktSize]
	p.BytesPayloadIn += int64(p.pktSize)
	tap.Write(p.frame)
	return true
}
Example #6
File: pm.go Project: logan/heim
func (pm *PM) verifyKey(pmKey *security.ManagedKey) error {
	var (
		mac [16]byte
		key [32]byte
	)
	copy(mac[:], pm.ReceiverMAC)
	copy(key[:], pmKey.Plaintext)
	if !poly1305.Verify(&mac, []byte(pm.Receiver), &key) {
		return ErrAccessDenied
	}
	return nil
}
Example #7
// Open authenticates and decrypts a box produced by Seal and appends the
// message to out, which must not overlap box. The output will be Overhead
// bytes smaller than box.
func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
	if len(box) < Overhead {
		return nil, false
	}

	var subKey [32]byte
	var counter [16]byte
	setup(&subKey, &counter, nonce, key)

	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
	// keystream as a side effect.
	var firstBlock [64]byte
	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)

	var poly1305Key [32]byte
	copy(poly1305Key[:], firstBlock[:])
	var tag [poly1305.TagSize]byte
	copy(tag[:], box)

	if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
		return nil, false
	}

	ret, out := sliceForAppend(out, len(box)-Overhead)

	// We XOR up to 32 bytes of box with the keystream generated from
	// the first block.
	box = box[Overhead:]
	firstMessageBlock := box
	if len(firstMessageBlock) > 32 {
		firstMessageBlock = firstMessageBlock[:32]
	}
	for i, x := range firstMessageBlock {
		out[i] = firstBlock[32+i] ^ x
	}

	box = box[len(firstMessageBlock):]
	out = out[len(firstMessageBlock):]

	// Now decrypt the rest.
	counter[8] = 1
	salsa.XORKeyStream(out, box, &counter, &subKey)

	return ret, true
}
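This Open mirrors golang.org/x/crypto/nacl/secretbox. A short usage sketch of the public Seal/Open pair follows, assuming that package rather than the local implementation above:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal returns a box that is Overhead (16) bytes longer than the message;
	// the Poly1305 tag sits at the front, which is the layout Open expects.
	box := secretbox.Seal(nil, []byte("top secret"), &nonce, &key)

	// Open verifies the tag before decrypting, just as the Open above does.
	msg, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(msg))
}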
Example #8
// DecryptAndVerify returns the chacha20-decrypted message.
// An error is returned when the poly1305 message authenticator (seal) cannot be verified.
// The nonce must be 8 bytes long.
func DecryptAndVerify(key, nonce, message []byte, mac [16]byte, add []byte) ([]byte, error) {

	chacha20, err := chacha20.New(key, nonce)
	if err != nil {
		panic(err)
	}

	// poly1305 key is chacha20 over 32 zeros
	var poly1305Key [32]byte
	var chacha20KeyOut = make([]byte, 64)
	var zeros = make([]byte, 64)
	chacha20.XORKeyStream(chacha20KeyOut, zeros)
	copy(poly1305Key[:], chacha20KeyOut)

	var chacha20Out = make([]byte, len(message))
	var poly1305Out [16]byte

	// Poly1305 input layout:
	// - additional data, zero-padded to a multiple of 16 bytes (if present)
	// - message, zero-padded to a multiple of 16 bytes
	// - length of the additional data as an 8-byte little-endian integer
	// - length of the message as an 8-byte little-endian integer
	var poly1305In []byte
	if len(add) > 0 {
		poly1305In = AddBytes(poly1305In, add, 16)
	}

	poly1305In = AddBytes(poly1305In, message, 16)
	addLength := make([]byte, 8)
	msgLength := make([]byte, 8)
	binary.LittleEndian.PutUint64(addLength, uint64(len(add)))
	binary.LittleEndian.PutUint64(msgLength, uint64(len(message)))

	poly1305In = AddBytes(poly1305In, addLength, 8)
	poly1305In = AddBytes(poly1305In, msgLength, 8)

	poly1305.Sum(&poly1305Out, poly1305In, &poly1305Key)

	if !poly1305.Verify(&mac, poly1305In, &poly1305Key) {
		return nil, errors.New("MAC not equal: " + hex.EncodeToString(poly1305Out[:]) + " != " + hex.EncodeToString(mac[:]))
	}

	chacha20.XORKeyStream(chacha20Out, message)
	return chacha20Out, nil
}
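The layout comment above matches the standard ChaCha20-Poly1305 authenticator input. The AddBytes helper is not shown in the example; below is a sketch of how such a padded buffer could be assembled, with a hypothetical pad16 helper standing in for AddBytes (assumed here to append data zero-padded to the given boundary):

package main

import (
	"encoding/binary"
	"fmt"
)

// pad16 appends src to dst, followed by zero bytes up to the next
// 16-byte boundary. It is an illustrative stand-in for AddBytes.
func pad16(dst, src []byte) []byte {
	dst = append(dst, src...)
	if rem := len(src) % 16; rem != 0 {
		dst = append(dst, make([]byte, 16-rem)...)
	}
	return dst
}

// buildPolyInput assembles the Poly1305 input in the layout described above:
// padded additional data (if any), padded message, then both lengths as
// little-endian uint64 values.
func buildPolyInput(additionalData, message []byte) []byte {
	var in []byte
	if len(additionalData) > 0 {
		in = pad16(in, additionalData)
	}
	in = pad16(in, message)

	var lengths [16]byte
	binary.LittleEndian.PutUint64(lengths[0:8], uint64(len(additionalData)))
	binary.LittleEndian.PutUint64(lengths[8:16], uint64(len(message)))
	return append(in, lengths[:]...)
}

func main() {
	fmt.Printf("% x\n", buildPolyInput([]byte("ad"), []byte("ciphertext")))
}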
Example #9
func (sec *RoomSecurity) Unlock(managerKey *security.ManagedKey) (*security.ManagedKeyPair, error) {
	if managerKey.Encrypted() {
		return nil, security.ErrKeyMustBeDecrypted
	}

	var (
		mac [16]byte
		key [32]byte
	)
	copy(mac[:], sec.MAC)
	copy(key[:], managerKey.Plaintext)
	if !poly1305.Verify(&mac, sec.KeyPair.IV, &key) {
		return nil, ErrAccessDenied
	}

	kp := sec.KeyPair.Clone()
	if err := kp.Decrypt(managerKey); err != nil {
		return nil, err
	}

	return &kp, nil
}
Example #10
func proxy(conn net.Conn, encode, decode cipher.Stream, randomDataLen int, key *[32]byte) {
	cconn := cipherConn.NewCipherConn(decode, encode, conn)

	var ri = 0
	var randomdata = make([]byte, randomDataLen+poly1305.TagSize)
	for ri < (randomDataLen + poly1305.TagSize) {
		r, err := cconn.Read(randomdata[ri:])
		if err != nil {
			return
		}
		ri += r
	}

	var mac [16]byte
	copy(mac[:], randomdata[randomDataLen:])
	if !poly1305.Verify(&mac, randomdata[:randomDataLen], key) {
		log.Println("poly1305 mac verify error")
		return
	}

	simpleSocks5.Socks5Handle(cconn)
	time.Sleep(time.Second)
}
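The proxy above expects its peer to send randomDataLen random bytes followed by their Poly1305 tag under the shared key. Below is a sketch of what that sending side could look like; sendAuthPreamble is an illustrative name, and the cipher.Stream wrapping of the connection is omitted:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/poly1305"
)

// sendAuthPreamble writes randomDataLen random bytes followed by their
// Poly1305 tag under key, matching what proxy reads and verifies above.
func sendAuthPreamble(w io.Writer, randomDataLen int, key *[32]byte) error {
	randomdata := make([]byte, randomDataLen)
	if _, err := rand.Read(randomdata); err != nil {
		return err
	}
	var mac [16]byte
	poly1305.Sum(&mac, randomdata, key)
	if _, err := w.Write(randomdata); err != nil {
		return err
	}
	_, err := w.Write(mac[:])
	return err
}

func main() {
	var key [32]byte
	var buf bytes.Buffer
	if err := sendAuthPreamble(&buf, 64, &key); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // randomDataLen + poly1305.TagSize = 80
}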
Example #11
func (p *Peer) PktProcess(data []byte, tap io.Writer, reorderable bool) bool {
	if len(data) < MinPktLength {
		return false
	}
	p.BusyR.Lock()
	for i := 0; i < SSize; i++ {
		p.bufR[i] = byte(0)
	}
	copy(p.bufR[S20BS:], data[TagSize:])
	salsa20.XORKeyStream(
		p.bufR[:S20BS+len(data)-TagSize-NonceSize],
		p.bufR[:S20BS+len(data)-TagSize-NonceSize],
		data[len(data)-NonceSize:],
		p.Key,
	)

	copy(p.keyAuthR[:], p.bufR[:SSize])
	copy(p.tagR[:], data[:TagSize])
	if !poly1305.Verify(p.tagR, data[TagSize:], p.keyAuthR) {
		p.FramesUnauth++
		p.BusyR.Unlock()
		return false
	}

	// Check if received nonce is known to us in either of two buckets.
	// If yes, then this is ignored duplicate.
	// Check from the oldest bucket, as in most cases this will result
	// in constant time check.
	// If Bucket0 is filled, then it becomes Bucket1.
	p.NonceCipher.Decrypt(
		data[len(data)-NonceSize:],
		data[len(data)-NonceSize:],
	)
	p.nonceRecv = binary.BigEndian.Uint64(data[len(data)-NonceSize:])
	if reorderable {
		_, p.nonceFound0 = p.nonceBucket0[p.nonceRecv]
		_, p.nonceFound1 = p.nonceBucket1[p.nonceRecv]
		if p.nonceFound0 || p.nonceFound1 || p.nonceRecv+2*NonceBucketSize < p.nonceLatest {
			p.FramesDup++
			p.BusyR.Unlock()
			return false
		}
		p.nonceBucket0[p.nonceRecv] = struct{}{}
		p.nonceBucketN++
		if p.nonceBucketN == NonceBucketSize {
			p.nonceBucket1 = p.nonceBucket0
			p.nonceBucket0 = make(map[uint64]struct{}, NonceBucketSize)
			p.nonceBucketN = 0
		}
	} else {
		if p.nonceRecv != p.NonceExpect {
			p.FramesDup++
			p.BusyR.Unlock()
			return false
		}
		p.NonceExpect += 2
	}
	if p.nonceRecv > p.nonceLatest {
		p.nonceLatest = p.nonceRecv
	}

	p.FramesIn++
	atomic.AddInt64(&p.BytesIn, int64(len(data)))
	p.LastPing = time.Now()
	p.pktSizeR = binary.BigEndian.Uint16(p.bufR[S20BS : S20BS+PktSizeSize])

	if p.pktSizeR == 0 {
		p.HeartbeatRecv++
		p.BusyR.Unlock()
		return true
	}
	if int(p.pktSizeR) > len(data)-MinPktLength {
		p.BusyR.Unlock()
		return false
	}
	p.BytesPayloadIn += int64(p.pktSizeR)
	tap.Write(p.bufR[S20BS+PktSizeSize : S20BS+PktSizeSize+p.pktSizeR])
	p.BusyR.Unlock()
	return true
}
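The two-bucket duplicate check above can be read as a small replay filter on its own. Here is an illustrative, self-contained sketch of that idea; the names and types are not from the project:

package main

import "fmt"

// nonceFilter is a two-bucket replay filter in the spirit of the duplicate
// check above: accepted nonces go into bucket0, and once bucket0 holds
// bucketSize entries it is rotated into bucket1, so up to 2*bucketSize of
// the most recently seen nonces stay remembered.
type nonceFilter struct {
	bucket0, bucket1 map[uint64]struct{}
	n                int
	bucketSize       int
}

func newNonceFilter(size int) *nonceFilter {
	return &nonceFilter{
		bucket0:    make(map[uint64]struct{}, size),
		bucket1:    make(map[uint64]struct{}, size),
		bucketSize: size,
	}
}

// Seen reports whether nonce was already accepted, and records it otherwise.
func (f *nonceFilter) Seen(nonce uint64) bool {
	if _, ok := f.bucket0[nonce]; ok {
		return true
	}
	if _, ok := f.bucket1[nonce]; ok {
		return true
	}
	f.bucket0[nonce] = struct{}{}
	f.n++
	if f.n == f.bucketSize {
		f.bucket1 = f.bucket0
		f.bucket0 = make(map[uint64]struct{}, f.bucketSize)
		f.n = 0
	}
	return false
}

func main() {
	f := newNonceFilter(4)
	fmt.Println(f.Seen(1)) // false: first time
	fmt.Println(f.Seen(1)) // true: duplicate
}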
Example #12
func progd_reverse(ar cmdoptS) {

	if ar.parrate != 0 { // we do not care about the actual value
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to whereis par2, metadata reconstruction was ignored:" + err.Error())
		}

		cmd := exec.Command("par2", "r", "mdpp.par2", "-v", "--", "md")
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, metadata reconstruction data compute was ignored:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 was finished unsuccessfully, metadata reconstruction data compute was ignored(or failed):" + err.Error())
		}
	}

	// Open the metadata bolt database
	dbi, err := bolt.Open(ar.in_dir+"/md", 0600, nil)

	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	tx, err := dbi.Begin(false)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	defer tx.Rollback()
	db := tx.Bucket([]byte("Ketv1"))

	if db == nil {
		fmt.Println("bucket Ketv1 not found")
		os.Exit(-1)
	}

	ndb := db.Get([]byte("packagesum"))

	if ndb == nil {
		fmt.Println("packagesum not found in metadata")
		os.Exit(-1)
	}

	var nd int64

	missing_file := make([]string, 0, 25)
	all_file := make([]string, 0, 25)

	binary.Read(bytes.NewBuffer(ndb), binary.LittleEndian, &nd)
	var cfn int64
	for cfn <= nd {
		cnnn := fmt.Sprintf(ar.in_dir+"/df%X", cfn)
		all_file = append(all_file, fmt.Sprintf("df%X", cfn))

		if _, err := os.Stat(cnnn); err != nil {
			if ar.parrate == 0 {
				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))
			} else {

				//touch the missing file so that par2 will try to recover this
				cfnd, err := os.Create(cnnn)

				if err != nil {
					fmt.Println(err.Error())
					os.Exit(-1)
				}

				cfnd.Close()

				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))

			}
		}
		cfn++
	}

	if len(missing_file) != 0 {
		if ar.parrate == 0 {
			fmt.Printf("%d files missing\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}

			fmt.Println("Failed to reverse operate as there are files missing.")
			os.Exit(-1)

		} else {
			fmt.Printf("%d files missing, but reconstruction by par2 is underway.\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}
		}
	}

	data_reconstruction_unsuccessful := true

	if ar.parrate != 0 { // we do not care about the actual value
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to whereis par2, data reconstruction was ignored:" + err.Error())
		}

		cmdargs := []string{"r", "mdpp.par2", "-v", "--"}

		cmdargs = append(cmdargs, all_file...)

		cmd := exec.Command("par2", cmdargs...)
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, metadata reconstruction was ignored:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 was finished unsuccessfully, data reconstruction was ignored(or failed):" + err.Error())
		} else {
			data_reconstruction_unsuccessful = false
		}
	}

	if ar.parrate != 0 && data_reconstruction_unsuccessful {
		fmt.Println("operation failed: unable to reconstruct.")
		fmt.Println("If data were correct, remove parrate might do.")

		for _, cf := range missing_file {
			os.Remove(fmt.Sprintf("%s/%s", ar.in_dir, cf))
		}

		os.Exit(-1)
	}

	//now we do the actual job

	nonce := db.Get([]byte("nonce"))
	if nonce == nil {
		fmt.Println("nonce not found in metadata")
		os.Exit(-1)
	}

	//calc key

	keyhasher := sha3.NewShake256()

	keyhasher.Write(nonce)
	keyhasher.Write([]byte(ar.secret_key))

	xchachakey := make([]byte, 32)
	keyhasher.Read(xchachakey)

	poly1305key := make([]byte, 32)
	keyhasher.Read(poly1305key)

	//set up stream

	var LimitedSizeReadFromi LimitedSizeReadFrom

	LimitedSizeReadFromi.InitNow()

	LimitedSizeReadFromi.TargetPatten = ar.in_dir + "/df%X"

	cryptos, err := chacha20.NewXChaCha(xchachakey, nonce)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	HashWriter := sha3.NewShake256()

	Tread := io.TeeReader(LimitedSizeReadFromi, HashWriter)

	DataReader := NewDecryptedReader(Tread, cryptos)

	DeCompressedStream := snappy.NewReader(DataReader)

	TarStream := tar.NewReader(DeCompressedStream)

	for {
		hdr, err := TarStream.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		filenamex := hdr.Name
		if !IsPathAllowed(hdr.Name) {
			filenamex = url.QueryEscape(hdr.Name)
		}

		dirc := filepath.Dir(ar.out_dir + "/" + filenamex)
		os.MkdirAll(dirc, 0700)

		cfhd, err := os.Create(ar.out_dir + "/" + filenamex)

		if err != nil {
			log.Fatalln(err)
		}

		_, err = io.Copy(cfhd, TarStream)

		if err != nil {
			log.Fatalln(err)
		}

		cfhd.Close()

	}

	LimitedSizeReadFromi.Finialize()

	FileHash := make([]byte, 64)
	HashWriter.Read(FileHash)
	fmt.Printf("Hash: %x\n", FileHash)

	var poly1305sum [16]byte
	var poly1305sum_key [32]byte
	poly1305sums := db.Get([]byte("poly1305sum"))

	copy(poly1305sum[:], poly1305sums)
	copy(poly1305sum_key[:], poly1305key)

	iscorrect := poly1305.Verify(&poly1305sum, FileHash, &poly1305sum_key)

	dbi.Close()

	if iscorrect {
		fmt.Println("Correct file data")
		os.Exit(0)
	} else {
		fmt.Println("File data doesn't match!")
		os.Exit(-2)
	}

}
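The final verification above expects the packing side to have stored poly1305.Sum over the 64-byte SHAKE-256 hash of the data stream, keyed with the second 32 bytes read from the same SHAKE-256 keystream of nonce and secret. Here is a sketch of that forward computation under those assumptions; sumFileHash is an illustrative name, not part of the project:

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
	"golang.org/x/crypto/sha3"
)

// sumFileHash mirrors the key derivation in progd_reverse: SHAKE-256 over
// nonce||secret yields 32 bytes of XChaCha20 key (discarded here) followed
// by 32 bytes of Poly1305 key, which authenticates the 64-byte file hash.
func sumFileHash(nonce []byte, secret string, fileHash []byte) [16]byte {
	keyhasher := sha3.NewShake256()
	keyhasher.Write(nonce)
	keyhasher.Write([]byte(secret))

	skip := make([]byte, 32) // the XChaCha20 key, not needed for the MAC
	keyhasher.Read(skip)

	var key [32]byte
	keyhasher.Read(key[:])

	var sum [16]byte
	poly1305.Sum(&sum, fileHash, &key)
	return sum
}

func main() {
	hash := make([]byte, 64) // stands in for the SHAKE-256 hash of the data stream
	sum := sumFileHash([]byte("nonce"), "secret", hash)
	fmt.Printf("poly1305sum: %x\n", sum)
}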