Beispiel #1
0
// main re-executes itself with -test.* benchmark flags when started
// without arguments, then (in the child) registers and runs one
// benchmark per hasher via testing.Main.
func main() {
	// No extra args: spawn a child process carrying the benchmark
	// flags, since only the testing package parses -test.* flags.
	if len(os.Args) == 1 {
		cmd := exec.Command(os.Args[0], "-test.bench=.*", "-test.benchmem=true", "-test.benchtime=2s")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		// FIX: the child's exit status was silently discarded before.
		if err := cmd.Run(); err != nil {
			fmt.Fprintln(os.Stderr, "benchmark run:", err)
			os.Exit(1)
		}
		return
	}

	fmt.Printf("Build: %s %s-%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)

	// Hashers under test; SHAKE256 is adapted to hash.Hash with a
	// fixed 64-byte output size.
	hashers := []struct {
		name string
		hash hash.Hash
	}{
		{"SHA256", sha256.New()},
		{"SHAKE256", shakeAdapter{sha3.NewShake256(), 64}},
	}

	benchmarks := make([]testing.InternalBenchmark, 0, len(hashers))
	for _, h := range hashers {
		benchmarks = append(benchmarks, testing.InternalBenchmark{
			Name: h.name,
			F:    wrap(h.hash),
		})
	}
	// Match-all matcher: every registered benchmark runs.
	testing.Main(func(pat, str string) (bool, error) {
		return true, nil
	}, nil, benchmarks, nil)
}
// main writes, for every configured curve, a JSON array of 1000
// deterministic pseudo-random field elements smaller than the curve
// prime P. The SHAKE256 stream is seeded with the curve name, so the
// output is reproducible.
func main() {
	for _, c := range curves {
		f, err := os.Create(c.params.Name)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error creating file %s: %s", c.params.Name, err)
			os.Exit(-1)
		}
		// Deterministic XOF seeded from the curve name.
		h := sha3.NewShake256()
		h.Write([]byte(c.params.Name))
		h.Write([]byte(": doubly prime"))
		buf := make([]byte, c.bits)
		v := new(big.Int)
		fmt.Fprintf(f, "[")
		for i := 0; i < 1000; i++ {
			if i != 0 {
				fmt.Fprintf(f, ",")
			}
			// Rejection-sample candidates until one is < P.
			// FIX: dropped the never-read `trial` counter variable.
			for {
				h.Read(buf)
				v.SetBytes(buf)
				if v.Cmp(c.params.P) == -1 {
					break
				}
			}
			fmt.Fprintf(f, "%d", v)
		}
		fmt.Fprintf(f, "]\n")
		f.Close()
	}
}
Beispiel #3
0
// main hashes "<value>|<nonce>" with SHAKE256 and prints a short,
// dash-separated uppercase-hex token derived from the first 16 output
// bytes.
func main() {
	flag.Parse()

	value := flag.Arg(0)
	nonce := flag.Arg(1)
	if len(value) == 0 || len(nonce) == 0 {
		fmt.Printf("Usage: %s <value> <nonce>\n", os.Args[0])
		os.Exit(1)
	}

	// Absorb value, separator, and nonce into the XOF.
	shake := sha3.NewShake256()
	for _, part := range []string{value, "|", nonce} {
		shake.Write([]byte(part))
	}

	digest := make([]byte, 16)
	if _, err := shake.Read(digest); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// NOTE(review): bytes 3, 7, 11 and 15 never appear in the output;
	// confirm the 3-byte groups are intentional and not an off-by-one.
	fmt.Printf("%X-%X-%X-%X\n", digest[0:3], digest[4:7], digest[8:11], digest[12:15])
}
Beispiel #4
0
// tokenHash calculates a SHA3 token hash.
// The token is a SHAKE256 digest of the configured salt followed by b,
// squeezed to the configured token length.
func tokenHash(b []byte, c *conf.Config) []byte {
	shake := sha3.NewShake256()
	shake.Write([]byte(c.Listener.Security.Salt))
	shake.Write(b)
	token := make([]byte, c.Listener.Security.TokenLen)
	shake.Read(token)
	return token
}
Beispiel #5
0
// expandSecret derives the scalar x and the hashing secret skhr from
// the first 32 bytes of the secret key using SHAKE256, clamping x in
// the usual Curve25519 fashion.
func expandSecret(sk *[SecretKeySize]byte) (x, skhr *[32]byte) {
	x = new([32]byte)
	skhr = new([32]byte)

	xof := sha3.NewShake256()
	xof.Write(sk[:32])
	xof.Read(x[:])
	xof.Read(skhr[:])

	// Scalar clamping: clear the low 3 bits and the top bit, set bit 254.
	x[0] &= 248
	x[31] &= 127
	x[31] |= 64
	return x, skhr
}
Beispiel #6
0
// HashEmptyBranch hashes an empty branch as in the CONIKS paper:
// H(k_empty || nonce || prefix || depth), where depth is the prefix
// length encoded as a little-endian uint32.
func HashEmptyBranch(treeNonce []byte, prefixBits []bool) []byte {
	var depth [4]byte
	binary.LittleEndian.PutUint32(depth[:], uint32(len(prefixBits)))

	xof := sha3.NewShake256()
	xof.Write([]byte{EmptyBranchIdentifier})
	xof.Write(treeNonce)
	xof.Write(ToBytes(prefixBits))
	xof.Write(depth[:])

	digest := make([]byte, HashBytes)
	xof.Read(digest)
	return digest
}
Beispiel #7
0
// HashInternalNode hashes an internal tree node as
// H(k_internal || h_child0 || h_child1 || prefix || depth).
// Differences from the CONIKS paper:
// * An identifier byte at the beginning makes it impossible for this to
//   collide with leaves or empty branches.
// * The prefix of the index is included, to protect against limited
//   hash collisions or bugs.
func HashInternalNode(prefixBits []bool, childHashes *[2][HashBytes]byte) []byte {
	var depth [4]byte
	binary.LittleEndian.PutUint32(depth[:], uint32(len(prefixBits)))

	xof := sha3.NewShake256()
	xof.Write([]byte{InternalNodeIdentifier})
	xof.Write(childHashes[0][:])
	xof.Write(childHashes[1][:])
	xof.Write(ToBytes(prefixBits))
	xof.Write(depth[:])

	digest := make([]byte, HashBytes)
	xof.Read(digest)
	return digest
}
Beispiel #8
0
// HashLeaf hashes a leaf node as in the CONIKS paper:
// H(k_leaf || nonce || index || depth || value), where depth is encoded
// as a little-endian uint32.
func HashLeaf(treeNonce []byte, indexBytes []byte, depth int, value []byte) []byte {
	var depthBuf [4]byte
	binary.LittleEndian.PutUint32(depthBuf[:], uint32(depth))

	xof := sha3.NewShake256()
	xof.Write([]byte{LeafIdentifier})
	xof.Write(treeNonce)
	xof.Write(indexBytes)
	xof.Write(depthBuf[:])
	xof.Write(value)

	digest := make([]byte, HashBytes)
	xof.Read(digest)
	return digest
}
Beispiel #9
0
// Compute returns the VRF output for message m under secret key sk:
// SHAKE256(x*H(m) || m), where x is the expanded secret scalar.
func Compute(m []byte, sk *[SecretKeySize]byte) []byte {
	x, _ := expandSecret(sk)

	// point = x * H(m), the unique per-message group element.
	var point edwards25519.ExtendedGroupElement
	edwards25519.GeScalarMult(&point, x, hashToCurve(m))
	var pointBytes [32]byte
	point.ToBytes(&pointBytes)

	xof := sha3.NewShake256()
	xof.Write(pointBytes[:]) // const length: Size
	xof.Write(m)
	out := make([]byte, Size)
	xof.Read(out)
	return out
}
Beispiel #10
0
// Verify returns true iff vrf=Compute(m, sk) for the sk that corresponds to pk.
// proof is expected to carry c || t || ii (32 bytes each), where c is the
// challenge, t the response, and ii the encoded point x*H(m).
func Verify(pkBytes, m, vrfBytes, proof []byte) bool {
	// Reject inputs of the wrong length before any parsing.
	if len(proof) != ProofSize || len(vrfBytes) != Size || len(pkBytes) != PublicKeySize {
		return false
	}
	var pk, c, cRef, t, vrf, iiB, ABytes, BBytes [32]byte
	copy(vrf[:], vrfBytes)
	copy(pk[:], pkBytes)
	// Split the proof into challenge, response, and point.
	copy(c[:32], proof[:32])
	copy(t[:32], proof[32:64])
	copy(iiB[:], proof[64:96])

	// First check that the claimed VRF value equals SHAKE256(ii || m),
	// mirroring Compute.
	hash := sha3.NewShake256()
	hash.Write(iiB[:]) // const length
	hash.Write(m)
	var hCheck [Size]byte
	hash.Read(hCheck[:])
	if !bytes.Equal(hCheck[:], vrf[:]) {
		return false
	}
	// Reuse the same XOF instance for the challenge recomputation below.
	hash.Reset()

	var P, B, ii, iic edwards25519.ExtendedGroupElement
	var A, hmtP, iicP edwards25519.ProjectiveGroupElement
	// Both the public key and the proof point must decode to valid
	// group elements.
	if !P.FromBytesBaseGroup(&pk) {
		return false
	}
	if !ii.FromBytesBaseGroup(&iiB) {
		return false
	}
	// Reconstruct the prover's first commitment from c, t and the
	// public key (Schnorr-style verification equation).
	edwards25519.GeDoubleScalarMultVartime(&A, &c, &P, &t)
	A.ToBytes(&ABytes)

	hm := hashToCurve(m)
	// Reconstruct the prover's second commitment over H(m):
	// B = t*H(m) + c*ii.
	edwards25519.GeDoubleScalarMultVartime(&hmtP, &t, hm, &[32]byte{})
	edwards25519.GeDoubleScalarMultVartime(&iicP, &c, &ii, &[32]byte{})
	iicP.ToExtended(&iic)
	hmtP.ToExtended(&B)
	edwards25519.GeAdd(&B, &B, &iic)
	B.ToBytes(&BBytes)

	// Recompute the Fiat-Shamir challenge from the reconstructed
	// commitments and the message, and compare with the proof's c.
	var cH [64]byte
	hash.Write(ABytes[:]) // const length
	hash.Write(BBytes[:]) // const length
	hash.Write(m)
	hash.Read(cH[:])
	edwards25519.ScReduce(&cRef, &cH)
	return cRef == c
}
Beispiel #11
0
// main demonstrates SHAKE256 as an extendable-output function: it
// absorbs a fixed message and squeezes 64 bytes (512 bits) of output.
func main() {
	msg := []byte("Taichi Nakashima")

	// The SHAKE functions are recommended for most new uses and can
	// produce output of arbitrary length. SHAKE256 with an output of
	// at least 64 bytes provides 256-bit security against all attacks.
	out := make([]byte, 64)

	shake := sha3.NewShake256()
	shake.Write(msg)
	shake.Read(out)

	fmt.Printf("Length: %d byte (%d bits)\n", len(out), len(out)*8)
	fmt.Printf("Output: %x\n", out)
}
Beispiel #12
0
// Prove returns the vrf value and a proof such that Verify(pk, m, vrf, proof)
// == true. The vrf value is the same as returned by Compute(m, sk).
// The proof layout is c || t || ii (32 bytes each).
func Prove(m []byte, sk *[SecretKeySize]byte) (vrf, proof []byte) {
	x, skhr := expandSecret(sk)
	var cH, rH [64]byte
	var r, c, minusC, t, grB, hrB, iiB [32]byte
	var ii, gr, hr edwards25519.ExtendedGroupElement

	// ii = x*H(m): the unique per-message VRF point.
	hm := hashToCurve(m)
	edwards25519.GeScalarMult(&ii, x, hm)
	ii.ToBytes(&iiB)

	// Derive the nonce r deterministically from the hashing secret,
	// the public half of sk, and the message (as in Ed25519), then
	// reduce it to a valid scalar.
	hash := sha3.NewShake256()
	hash.Write(skhr[:])
	hash.Write(sk[32:]) // public key, as in ed25519
	hash.Write(m)
	hash.Read(rH[:])
	hash.Reset()
	edwards25519.ScReduce(&r, &rH)

	// Commitments: gr = r*G and hr = r*H(m).
	edwards25519.GeScalarMultBase(&gr, &r)
	edwards25519.GeScalarMult(&hr, &r, hm)
	gr.ToBytes(&grB)
	hr.ToBytes(&hrB)

	// Fiat-Shamir challenge c from the commitments and the message.
	hash.Write(grB[:])
	hash.Write(hrB[:])
	hash.Write(m)
	hash.Read(cH[:])
	hash.Reset()
	edwards25519.ScReduce(&c, &cH)

	// Response t = x*(-c) + r, i.e. r - c*x mod the group order.
	edwards25519.ScNeg(&minusC, &c)
	edwards25519.ScMulAdd(&t, x, &minusC, &r)

	// Assemble the proof: c || t || ii.
	proof = make([]byte, ProofSize)
	copy(proof[:32], c[:])
	copy(proof[32:64], t[:])
	copy(proof[64:96], iiB[:])

	// vrf = SHAKE256(ii || m), matching Compute.
	hash.Write(iiB[:]) // const length: Size
	hash.Write(m)
	vrf = make([]byte, Size)
	hash.Read(vrf[:])
	return
}
Beispiel #13
0
// progd_forword packs ar.in_dir into an encrypted, compressed, split
// archive under ar.out_dir and records the metadata (nonce, Poly1305
// MAC, part count) in bolt bucket "Ketv1" inside <out_dir>/md.
// Pipeline: tar -> snappy -> XChaCha20 -> (split files + SHAKE256 hash).
func progd_forword(ar cmdoptS) {

	// Create the metadata database and the bucket that holds it all.
	dbi, err := bolt.Open(ar.out_dir+"/md", 0600, nil)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	tx, err := dbi.Begin(true)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	defer tx.Rollback()
	db, err := tx.CreateBucket([]byte("Ketv1"))
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Generate the crypto nonce and store it for the reverse path.
	nonce, _ := GenerateRandomBytes(24)
	err = db.Put([]byte("nonce"), nonce)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Derive the XChaCha20 key and the Poly1305 key from
	// nonce || secret via a single SHAKE256 stream.
	keyhasher := sha3.NewShake256()
	keyhasher.Write(nonce)
	keyhasher.Write([]byte(ar.secret_key))

	xchachakey := make([]byte, 32)
	keyhasher.Read(xchachakey)

	poly1305key := make([]byte, 32)
	keyhasher.Read(poly1305key)

	// Initialize the size-limited splitter that produces df0, df1, ...
	var LimitedSizeWriteToFilei LimitedSizeWriteToFile
	LimitedSizeWriteToFilei.InitNow()
	LimitedSizeWriteToFilei.TargetPatten = ar.out_dir + "/df%X"
	if !ar.div_unitk {
		LimitedSizeWriteToFilei.BytesPerFile = int64(ar.div_at) * const_Mbyte
	} else {
		LimitedSizeWriteToFilei.BytesPerFile = int64(ar.div_at) * const_Kbyte
	}

	cryptos, err := chacha20.NewXChaCha(xchachakey, nonce)
	// FIX: this constructor error was previously ignored; a nil cipher
	// would have crashed the writer below.
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	HashWriter := sha3.NewShake256()
	CyDWriter := io.MultiWriter(LimitedSizeWriteToFilei, HashWriter)
	Data_writer := NewEncryptedWriter(cryptos, CyDWriter)
	CompressedStream := snappy.NewWriter(Data_writer)
	TarStream := tar.NewWriter(CompressedStream)

	GenFileList(ar.in_dir)

	for id := range rfi {
		filedes, err := os.Open(ar.in_dir + "/" + rfi[id])
		if err != nil {
			fmt.Println("Failed to open file " + rfi[id] + ":" + err.Error())
			// FIX: skip unreadable files; the original fell through and
			// dereferenced a nil *os.File at Stat below.
			continue
		}
		filein, _ := filedes.Stat()
		hdr := &tar.Header{
			Name: rfi[id],
			Mode: 0600,
			Size: filein.Size(),
		}

		if err := TarStream.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}

		if _, err = io.Copy(TarStream, filedes); err != nil {
			fmt.Println("Failed to Write file " + rfi[id] + ":" + err.Error())
		}

		filedes.Close()
	}

	if err := TarStream.Close(); err != nil {
		log.Fatalln(err)
	}

	_, _, nd := LimitedSizeWriteToFilei.Finialize()

	// MAC the 64-byte SHAKE digest of the written stream.
	FileHash := make([]byte, 64)
	HashWriter.Read(FileHash)

	var poly1305sum [16]byte
	var poly1305sum_key [32]byte
	copy(poly1305sum_key[:], poly1305key)
	poly1305.Sum(&poly1305sum, FileHash, &poly1305sum_key)

	err = db.Put([]byte("poly1305sum"), poly1305sum[:])
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Record the number of produced data files for the reverse path.
	bb := new(bytes.Buffer)
	binary.Write(bb, binary.LittleEndian, nd)

	err = db.Put([]byte("packagesum"), bb.Bytes())
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// We won't use the transaction anymore.
	if err := tx.Commit(); err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	dbi.Close()

	// Finally call par2 to compute reconstruction data.
	if ar.parrate != 0 {
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to whereis par2, reconstruction data compute was ignored:" + err.Error())
		}

		DirIf, _ := os.Open(ar.out_dir)
		DirIfs, _ := DirIf.Readdirnames(-1)

		cmdargs := []string{"c", "-a", "mdpp", "-r" + strconv.Itoa(ar.parrate), "-v", "--"}
		cmdargs = append(cmdargs, DirIfs...)

		cmd := exec.Command("par2", cmdargs...)
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.out_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, reconstruction data compute was ignored:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 was finished unsuccessfully, reconstruction data compute was ignored(or failed):" + err.Error())
		}
	}

	fmt.Printf("Hash: %x\n", FileHash)
	fmt.Printf("Key: %s\n", ar.secret_key)

}
Beispiel #14
0
// progd_reverse reverses progd_forword: optionally repairs metadata and
// data files with par2, rebuilds the keys from the stored nonce and the
// user secret, then unpacks tar <- snappy <- XChaCha20 from the split
// files and verifies the Poly1305 MAC over the stream hash.
func progd_reverse(ar cmdoptS) {

	if ar.parrate != 0 { // we do not care about the actual number
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to whereis par2, metadata reconstruction was ignored:" + err.Error())
		}

		cmd := exec.Command("par2", "r", "mdpp.par2", "-v", "--", "md")
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, metadata reconstruction data compute was ignored:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 was finished unsuccessfully, metadata reconstruction data compute was ignored(or failed):" + err.Error())
		}
	}

	// Open the metadata database in a read-only transaction.
	dbi, err := bolt.Open(ar.in_dir+"/md", 0600, nil)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	tx, err := dbi.Begin(false)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	defer tx.Rollback()
	db := tx.Bucket([]byte("Ketv1"))
	// FIX: the original re-checked a stale err here; check the bucket
	// itself, which is nil when missing.
	if db == nil {
		fmt.Println("metadata bucket Ketv1 not found")
		os.Exit(-1)
	}

	ndb := db.Get([]byte("packagesum"))

	var nd int64

	missing_file := make([]string, 0, 25)
	all_file := make([]string, 0, 25)

	// FIX: binary.Read requires a pointer; passing nd by value always
	// failed and left nd at zero.
	if err := binary.Read(bytes.NewBuffer(ndb), binary.LittleEndian, &nd); err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Enumerate expected part files df0..df<nd>, collecting missing ones.
	var cfn int64
	for cfn <= nd {
		cnnn := fmt.Sprintf(ar.in_dir+"/df%X", cfn)
		all_file = append(all_file, fmt.Sprintf("df%X", cfn))

		if _, err := os.Stat(cnnn); err != nil {
			if ar.parrate == 0 {
				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))
			} else {
				// Touch the missing file so that par2 will try to
				// recover it.
				cfnd, err := os.Create(cnnn)
				if err != nil {
					fmt.Println(err.Error())
					os.Exit(-1)
				}
				cfnd.Close()

				missing_file = append(missing_file, fmt.Sprintf("df%X", cfn))
			}
		}
		cfn++
	}

	if len(missing_file) != 0 {
		if ar.parrate == 0 {
			// FIX: Println does not interpret verbs; use Printf, and
			// iterate over values rather than printing slice indices.
			fmt.Printf("%d file missing\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}

			fmt.Println("Failed to reverse operate as there is file missing.")
			os.Exit(-1)

		} else {
			// FIX: the original also dropped the count argument here.
			fmt.Printf("%d file missing, but reconstruction by par2 underway.\n", len(missing_file))

			for _, cf := range missing_file {
				fmt.Println(cf)
			}
		}
	}

	data_reconstruction_unsuccessful := true

	if ar.parrate != 0 { // we do not care about the actual number
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to whereis par2, data reconstruction was ignored:" + err.Error())
		}

		cmdargs := []string{"r", "mdpp.par2", "-v", "--"}
		cmdargs = append(cmdargs, all_file...)

		cmd := exec.Command("par2", cmdargs...)
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.in_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, metadata reconstruction was ignored:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 was finished unsuccessfully, data reconstruction was ignored(or failed):" + err.Error())
		} else {
			data_reconstruction_unsuccessful = false
		}
	}

	if ar.parrate != 0 && data_reconstruction_unsuccessful {
		fmt.Println("operation failed: unable to reconstruct.")
		fmt.Println("If data were correct, remove parrate might do.")

		// FIX: Sprint does not interpret verbs — use Sprintf — and
		// range over the names, not the indices.
		for _, cf := range missing_file {
			os.Remove(fmt.Sprintf("%s/%s", ar.in_dir, cf))
		}

		os.Exit(-1)
	}

	// Now we do the actual job.

	nonce := db.Get([]byte("nonce"))

	// Derive the XChaCha20 key and the Poly1305 key exactly as the
	// forward path did.
	keyhasher := sha3.NewShake256()
	keyhasher.Write(nonce)
	keyhasher.Write([]byte(ar.secret_key))

	xchachakey := make([]byte, 32)
	keyhasher.Read(xchachakey)

	poly1305key := make([]byte, 32)
	keyhasher.Read(poly1305key)

	// Set up the input pipeline: split files -> (tee into hash) ->
	// XChaCha20 decrypt -> snappy -> tar.
	var LimitedSizeReadFromi LimitedSizeReadFrom
	LimitedSizeReadFromi.InitNow()
	LimitedSizeReadFromi.TargetPatten = ar.in_dir + "/df%X"

	cryptos, err := chacha20.NewXChaCha(xchachakey, nonce)
	// FIX: this constructor error was previously ignored.
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	HashWriter := sha3.NewShake256()
	Tread := io.TeeReader(LimitedSizeReadFromi, HashWriter)
	DataReader := NewDecryptedReader(Tread, cryptos)
	DeCompressedStream := snappy.NewReader(DataReader)
	TarStream := tar.NewReader(DeCompressedStream)

	for {
		hdr, err := TarStream.Next()
		if err == io.EOF {
			// End of tar archive.
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		// Escape entry names that would land outside the output dir.
		filenamex := hdr.Name
		if !IsPathAllowed(hdr.Name) {
			filenamex = url.QueryEscape(hdr.Name)
		}

		dirc := filepath.Dir(ar.out_dir + "/" + filenamex)
		os.MkdirAll(dirc, 0700)

		cfhd, err := os.Create(ar.out_dir + "/" + filenamex)
		if err != nil {
			log.Fatalln(err)
		}

		if _, err = io.Copy(cfhd, TarStream); err != nil {
			log.Fatalln(err)
		}

		cfhd.Close()
	}

	LimitedSizeReadFromi.Finialize()

	FileHash := make([]byte, 64)
	HashWriter.Read(FileHash)
	fmt.Printf("Hash: %x\n", FileHash)

	// Verify the Poly1305 MAC over the stream hash.
	var poly1305sum [16]byte
	var poly1305sum_key [32]byte
	poly1305sums := db.Get([]byte("poly1305sum"))

	copy(poly1305sum[:], poly1305sums)
	copy(poly1305sum_key[:], poly1305key)

	iscorrect := poly1305.Verify(&poly1305sum, FileHash, &poly1305sum_key)

	dbi.Close()

	if iscorrect {
		fmt.Println("Correct File data")
		os.Exit(0)
	}
	fmt.Println("File data doesn't match!")
	os.Exit(-2)

}