Example 1
func main() {
	core.RegisterTypes()

	flag.Parse()

	db := core.ContinueDB(filepath.Join(*BaseDbPath, "balances"), *treapToken)

	// Reconstruct the log treap root as an opaque node identified by its stored token.
	logtreap := new(verified.LogTreap)
	logtreap.MakeOpaque()
	ads.GetInfo(logtreap).Token = *treapToken

	pagingC := core.NewPagingC(db)
	pagingC.Load(ads.GetInfo(logtreap))

	c := pagingC

	c.Use(logtreap)
	length := logtreap.Count(c)
	hash := logtreap.Slice(0, length, c)

	// This hash is what we commit to.
	c.Use(hash)
	tree := hash.Finish(c).(verified.LogTree)

	c.Use(tree)
	lastReturn := tree.Index(length-1, c)

	c.Use(lastReturn)
	balances := lastReturn.ArgsOrResults[0].(bitrie.Bitrie)

	c.Use(balances)
	// core.Dump(bitrie.Bits{}, balances, c)

	fmt.Println(logtreap.Count(c))
	fmt.Println(commitmentToBalances(hash, c))
	n := 1000
	sizes := make([]int, 0)
	for i := 0; i < n; i++ {
		sizes = append(sizes, randomBalance(balances, c))
	}
	sort.Ints(sizes)
	fmt.Println(sizes)

	sizes = make([]int, 0)
	for i := 0; i < n; i++ {
		j := rand.Intn(int(logtreap.Count(c) - 1))
		hash := logtreap.Slice(0, int32(j+1), c)
		sizes = append(sizes, nextstep(hash, c))
	}

	for i := 0; i < 20; i++ {
		j := rand.Intn(int(logtreap.Count(c) - 5000 - 1))
		for k := j; k < j+5000; k++ {
			hash := logtreap.Slice(0, int32(k+1), c)
			sizes = append(sizes, nextstep(hash, c))
		}
	}
	sort.Ints(sizes)
	fmt.Println(sizes)
}
Example 2
func (c *PagingC) Store(info *ads.Info) int64 {
	if info.Token != 0 {
		return info.Token
	}

	buffer := ads.GetFromPool()
	defer ads.ReturnToPool(buffer)

	ads.Hash(info.Value)

	buffer.Reset()
	e := ads.Encoder{
		Writer:      buffer,
		Transparent: map[ads.ADS]bool{info.Value: true},
	}
	e.Encode(&info.Value)

	// Store each child first, then append its token after the encoded value.
	for _, root := range ads.CollectChildren(info.Value) {
		childInfo := ads.GetInfo(root)
		c.Store(childInfo)

		var tokenBytes [8]byte
		binary.LittleEndian.PutUint64(tokenBytes[:], uint64(childInfo.Token))
		if n, err := e.Write(tokenBytes[:]); n != 8 || err != nil {
			log.Panic(err)
		}
	}

	info.Token = c.DB.Write(buffer.Bytes())
	return info.Token
}
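Example 6 below uses Store to checkpoint the log treap root; here is a minimal sketch of that pattern, assuming db, pagingC, and logtreap are set up as in the other examples:

	// Checkpoint sketch (assumes db, pagingC, and logtreap as in the other examples).
	token := pagingC.Store(ads.GetInfo(logtreap))
	log.Printf("checkpoint token: %d\n", token) // presumably the value handed back later via *treapToken
	if err := db.BufferedWriter.Flush(); err != nil {
		log.Panic(err)
	}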
Example 3
func (c *PagingC) MarkUsed(value ads.ADS, include bool) {
	if value.IsOpaque() {
		return
	}

	info := ads.GetInfo(value)

	if info.Next != nil {
		// Already in the list: unlink it from its current position.
		a, b := info.Prev, info.Next
		a.Next = b
		b.Prev = a
	} else {
		if !include {
			return
		}
		// Newly tracked value: mark its children as well, then count it.
		for _, child := range ads.CollectChildren(value) {
			c.MarkUsed(child, true)
		}
		c.Count++
	}

	// Link info immediately before the tail sentinel.
	a, b := c.Tail.Prev, c.Tail
	a.Next = info
	info.Prev = a
	info.Next = b
	b.Prev = info
}
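MarkUsed is driven in two ways in these examples: Use (Example 5) passes include=false to refresh values already in the list, while the build loop (Example 6) passes include=true, which also walks the children via CollectChildren and increments Count. A minimal sketch of both call sites:

	// Call patterns seen in Examples 5 and 6.
	c.MarkUsed(value, false)         // from Use: reorder an already tracked value; untracked values are ignored
	pagingC.MarkUsed(logtreap, true) // from the build loop: also admit untracked values and their children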
Example 4
func main() {
	core.RegisterTypes()
	transactions.RegisterTypes()

	flag.Parse()

	db := core.ContinueDB(filepath.Join(*BaseDbPath, "transactions"), *treapToken)

	logtreap := new(verified.LogTreap)
	logtreap.MakeOpaque()
	ads.GetInfo(logtreap).Token = *treapToken

	pagingC := core.NewPagingC(db)
	pagingC.Load(ads.GetInfo(logtreap))

	c := pagingC

	c.Use(logtreap)
	length := logtreap.Count(c)
	hash := logtreap.Slice(0, length, c)

	// This hash is what we commit to.
	c.Use(hash)
	tree := hash.Finish(c).(verified.LogTree)

	c.Use(tree)
	lastReturn := tree.Index(length-1, c)

	c.Use(lastReturn)
	txns := lastReturn.ArgsOrResults[0].(bitrie.Bitrie)

	c.Use(txns)
	fmt.Println(BitrieSize(txns, c))

	for i := 0; i < 100; i++ {
		fmt.Println(core.RandomKey(bitrie.Bits{}, txns, c))
	}
}
Example 5
func (c *PagingC) Use(values ...ads.ADS) {
	for _, value := range values {
		comp.Uses++

		if value.IsOpaque() {
			c.Loads++

			info := ads.GetInfo(value)
			c.Load(info)
		}

		c.MarkUsed(value, false)
	}
}
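The callers in Examples 1 and 4 follow the same pattern: Use a value before operating on it, so an opaque node gets loaded and its position in the list refreshed. A minimal sketch, reusing the names from Example 1:

	// Use a value before calling into it (names as in Example 1).
	c.Use(logtreap)                      // loads the root if it is still opaque
	length := logtreap.Count(c)
	hash := logtreap.Slice(0, length, c)
	c.Use(hash)                          // returned values are Use'd in turn before the next call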
Example 6
func main() {
	flag.Parse()

	mode := flag.Arg(0)

	var f func(b *core.Block, c comp.C) bitrie.Bitrie

	if mode == "balances" {
		f = core.CalculateBalancesImpl
	} else if mode == "transactions" {
		f = transactions.CalculateTxnsImpl
	} else {
		log.Fatalf("Usage: buildbalances [balances|transactions]")
	}

	core.RegisterTypes()
	transactions.RegisterTypes()

	db := core.CreateDB(filepath.Join(*BaseDbPath, mode))
	pagingC := core.NewPagingC(db)

	file, err := os.Open(*BootstrapPath)
	if err != nil {
		log.Fatalf("couldn't open %v: %v\n", *BootstrapPath, err)
	}
	loader := NewLoader(file)

	processed := int64(0)
	processedNow := int64(0)
	startNow := time.Now()

	start := time.Now()
	last := time.Now()

	log.Println(ads.GetFuncId(f))

	c := &verified.ProofC{
		Outer:   pagingC,
		Stack:   []*verified.LogTreap{nil},
		ToCache: ads.GetFuncId(f),
	}

	var lastBlock *core.Block

	c.Stack[0] = nil

	c.Call(f, (*core.Block)(nil))

	for i := 0; ; i++ {
		b, err := loader.readBlock()
		if err != nil {
			log.Fatalf("couldn't read block: %v\n", err)
		}
		if b == nil {
			break
		}

		block := core.MakeBlock(b, lastBlock)
		lastBlock = block

		// balances = ProcessBlock(block, balances, c)
		c.Stack[0] = nil
		c.Call(f, block)

		//fmt.Printf("block: %v\n", i)
		//dump(bitrie.Bits{}, balances)

		// logtreap = verified.CombineTreap(logtreap, c.Stack[0], c)
		logtreap := c.Stack[0]
		// markUsed(&c.Log[0])

		pagingC.Unload()

		if i%100 == 0 {
			// before we page logtreap, we must compute all seqhashes, or they'll be stored empty...
			logtreap.SeqHash(c)
			pagingC.MarkUsed(logtreap, true)

			// dump(bitrie.Bits{}, balances)
			//fmt.Printf("%v: %v\n", i, doCount(balances, c))
		}

		if i%1000 == 0 {
			token := pagingC.Store(ads.GetInfo(logtreap))
			log.Printf("after %d: %d\n", i, token)
			if err := db.BufferedWriter.Flush(); err != nil {
				log.Panic(err)
			}
		}

		bytes, _ := b.Bytes()
		processed += int64(len(bytes))
		processedNow += int64(len(bytes))

		now := time.Now()

		if now.Sub(last) > time.Second {
			last = now

			log.Printf("block: %d\n", i)

			nowSecs := int64(now.Sub(startNow) / time.Second)
			secs := int64(now.Sub(start) / time.Second)

			ops := int64(logtreap.Count(c)) / 2

			log.Printf("count: %d\n", pagingC.Count)
			log.Printf("processed % 8.2f MB, % 5.2f MB/sec\n", float64(processed)/1000/1000, float64(processed/secs)/1000/1000)
			log.Printf("procesnow % 8.2f MB, % 5.2f MB/sec\n", float64(processedNow)/1000/1000, float64(processedNow/nowSecs)/1000/1000)
			log.Printf("merges    % 8.3fe6, % 5.3fe6 per sec\n", float64(seqhash.Calls)/1000/1000, float64(seqhash.Calls/secs)/1000/1000)
			log.Printf("ops       % 8.3fe6, % 5.3fe6 per sec\n", float64(ops)/1000/1000, float64(ops/secs)/1000/1000)
			log.Printf("uses      % 8.3fe6, % 5.3fe6 per sec\n", float64(comp.Uses)/1000/1000, float64(comp.Uses/secs)/1000/1000)
			log.Printf("loads     % 8.3fe6, % 5.3fe6 per sec\n", float64(pagingC.Loads)/1000/1000, float64(pagingC.Loads/secs)/1000/1000)
			log.Printf("unloads   % 8.3fe6, % 5.3fe6 per sec\n", float64(pagingC.Unloads)/1000/1000, float64(pagingC.Unloads/secs)/1000/1000)
			log.Printf("loadtime %d unloadtime %d loaddisktime %d total %d", pagingC.LoadTime/time.Second, pagingC.UnloadTime/time.Second, pagingC.LoadDiskTime/time.Second, secs)

			if nowSecs > 5 {
				startNow = now
				processedNow = 0
			}
		}
	}

	logtreap := c.Stack[0]
	token := pagingC.Store(ads.GetInfo(logtreap))
	log.Printf("final: %d\n", token)
	if err := db.BufferedWriter.Flush(); err != nil {
		log.Panic(err)
	}
}