Example 1
func NewLevelDbShardDatastore(config *configuration.Configuration) (*LevelDbShardDatastore, error) {
	baseDbDir := filepath.Join(config.DataDir, SHARD_DATABASE_DIR)
	err := os.MkdirAll(baseDbDir, 0744)
	if err != nil {
		return nil, err
	}
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(config.LevelDbLruCacheSize))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(64 * ONE_KILOBYTE)
	filter := levigo.NewBloomFilter(SHARD_BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	opts.SetMaxOpenFiles(config.LevelDbMaxOpenFiles)

	return &LevelDbShardDatastore{
		baseDbDir:      baseDbDir,
		config:         config,
		shards:         make(map[uint32]*LevelDbShard),
		levelDbOptions: opts,
		maxOpenShards:  config.LevelDbMaxOpenShards,
		lastAccess:     make(map[uint32]int64),
		shardRefCounts: make(map[uint32]int),
		shardsToClose:  make(map[uint32]bool),
		pointBatchSize: config.LevelDbPointBatchSize,
	}, nil
}
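The constructor above only prepares the shared options and bookkeeping maps; the shard databases themselves are evidently opened on demand. A minimal sketch of how one shard might be opened with the stored options (the openShard helper and the directory naming are assumptions, not part of the original; it assumes imports of fmt, path/filepath, and levigo):

// Hypothetical helper, reusing the fields prepared by the constructor
// above: each shard lives in its own subdirectory under baseDbDir and
// shares the one levigo.Options instance.
func openShard(baseDbDir string, id uint32, opts *levigo.Options) (*levigo.DB, error) {
	shardDir := fmt.Sprintf("%05d", id) // assumed naming scheme
	return levigo.Open(filepath.Join(baseDbDir, shardDir), opts)
}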
Example 2
func NewLeveldbCache(dbname string, cacheM int) (*LeveldbCache, error) {
	opts := levigo.NewOptions()
	filter := levigo.NewBloomFilter(10)
	cache := levigo.NewLRUCache(1024 * 1024 * cacheM)
	opts.SetFilterPolicy(filter)
	opts.SetCache(cache)
	opts.SetCreateIfMissing(true)
	opts.SetWriteBufferSize(8 * 1024 * 1024) // 8MB
	opts.SetCompression(levigo.SnappyCompression)

	ldb, err := levigo.Open(dbname, opts)
	if err != nil {
		return nil, err
	}

	so := levigo.NewReadOptions()
	so.SetFillCache(false)
	return &LeveldbCache{
		db:    ldb,
		fp:    filter,
		cache: cache,
		Ro:    levigo.NewReadOptions(),
		Wo:    levigo.NewWriteOptions(),
		So:    so,
	}, nil
}
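Note the two read-option handles: Ro is a default reader that populates the block cache, while So has SetFillCache(false) and suits one-off scans. A hedged usage sketch (the forEach method is an assumption, not part of the original):

// Hypothetical method on LeveldbCache: iterate with So so a full scan
// does not evict the hot blocks that point reads through Ro populate.
func (c *LeveldbCache) forEach(fn func(key, value []byte)) {
	it := c.db.NewIterator(c.So)
	defer it.Close()
	for it.SeekToFirst(); it.Valid(); it.Next() {
		fn(it.Key(), it.Value())
	}
}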
Example 3
File: db.go Project: kingpro/golib
func (db *DB) initOptions(cfg *Config) *levigo.Options {
	opts := levigo.NewOptions()

	opts.SetCreateIfMissing(true)

	if cfg.CacheSize > 0 {
		db.cache = levigo.NewLRUCache(cfg.CacheSize)
		opts.SetCache(db.cache)
	}

	// The bloom filter is always set, unlike the optional cache above.
	db.filter = levigo.NewBloomFilter(defaultFilterBits)
	opts.SetFilterPolicy(db.filter)

	if !cfg.Compression {
		opts.SetCompression(levigo.NoCompression)
	}

	if cfg.BlockSize > 0 {
		opts.SetBlockSize(cfg.BlockSize)
	}

	if cfg.WriteBufferSize > 0 {
		opts.SetWriteBufferSize(cfg.WriteBufferSize)
	}

	return opts
}
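initOptions only builds the options; opening and closing presumably live elsewhere in db.go. A sketch of the likely pairing, assuming DB also holds a *levigo.DB next to the cache and filter handles stored above (the ldb field and both method names are hypothetical):

// Hypothetical counterpart to initOptions: the cache and filter handles
// saved on db must outlive the open database, so release them in Close.
func (db *DB) open(path string, cfg *Config) error {
	opts := db.initOptions(cfg)
	defer opts.Close() // options struct can go once Open has copied its values
	ldb, err := levigo.Open(path, opts)
	if err != nil {
		return err
	}
	db.ldb = ldb // assumed field holding the *levigo.DB
	return nil
}

func (db *DB) Close() {
	db.ldb.Close()
	if db.cache != nil {
		db.cache.Close()
	}
	db.filter.Close()
}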
Example 4
func NewLevelDbDatastore(dbDir string) (Datastore, error) {
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES)
	filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	db, err := levigo.Open(dbDir, opts)
	if err != nil {
		return nil, err
	}

	ro := levigo.NewReadOptions()

	lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY)
	if err2 != nil {
		return nil, err2
	}

	lastId := uint64(0)
	if lastIdBytes != nil {
		lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes))
		if err2 != nil {
			return nil, err2
		}
	}

	wo := levigo.NewWriteOptions()

	return &LevelDbDatastore{db: db, lastIdUsed: lastId, readOptions: ro, writeOptions: wo}, nil
}
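The id counter is decoded with binary.ReadUvarint; the write side is not shown in this excerpt, but it would presumably be the PutUvarint counterpart under the same NEXT_ID_KEY. A sketch (the saveLastId helper is an assumption):

// Hypothetical write-side counterpart: persist the id counter in the
// same uvarint encoding that NewLevelDbDatastore decodes on startup.
func saveLastId(db *levigo.DB, wo *levigo.WriteOptions, lastId uint64) error {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, lastId)
	return db.Put(wo, NEXT_ID_KEY, buf[:n])
}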
Example 5
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) {
	options := levigo.NewOptions()
	options.SetCreateIfMissing(true)
	options.SetParanoidChecks(true)

	cache := levigo.NewLRUCache(cacheCapacity)
	options.SetCache(cache)

	filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded)
	options.SetFilterPolicy(filterPolicy)

	storage, err := levigo.Open(storageRoot, options)
	if err != nil {
		return
	}

	readOptions := levigo.NewReadOptions()
	writeOptions := levigo.NewWriteOptions()
	writeOptions.SetSync(true)

	p = &LevelDBPersistence{
		cache:        cache,
		filterPolicy: filterPolicy,
		options:      options,
		readOptions:  readOptions,
		storage:      storage,
		writeOptions: writeOptions,
	}

	return
}
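writeOptions.SetSync(true) makes every mutation wait for the flush to stable storage, trading write throughput for durability. One common way to amortize that cost is to group mutations into a batch, as in the sketch below (the putMany helper is an assumption, not part of the original):

// Hypothetical helper: with SetSync(true) each write pays a flush, so
// batching lets several entries share a single synchronous commit.
func (p *LevelDBPersistence) putMany(pairs map[string][]byte) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for k, v := range pairs {
		batch.Put([]byte(k), v)
	}
	return p.storage.Write(p.writeOptions, batch)
}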
Example 6
func (engine *LevelDbEngine) Init(config *proto.DBConfigs) error {
	if config == nil {
		return proto.ErrNoEngineConfig
	}
	if config.LevelDbConfigs == nil {
		config.LevelDbConfigs = DefaultLevelDbConf
	}
	options := levigo.NewOptions()
	// options.SetCreateIfMissing(config.CreateIfMissing)
	options.SetCreateIfMissing(true)
	options.SetParanoidChecks(config.LevelDbConfigs.ParanoidCheck)
	if config.LevelDbConfigs.LRUCacheSize > 0 {
		options.SetCache(levigo.NewLRUCache(config.LevelDbConfigs.LRUCacheSize))
	}
	if config.LevelDbConfigs.BloomFilterLength > 0 {
		options.SetFilterPolicy(levigo.NewBloomFilter(config.LevelDbConfigs.BloomFilterLength))
	}
	engine.config = config
	engine.dbOptions = options
	db, err := levigo.Open(engine.config.DataPath, engine.dbOptions)
	if err != nil {
		return err
	}
	engine.db = db
	return nil
}
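DefaultLevelDbConf is referenced but not defined in this excerpt. Below is a shape consistent with the three fields Init reads; the type name proto.LevelDbConfigs is inferred from the field it populates, and the values are illustrative only:

// Assumed definition of DefaultLevelDbConf, inferred purely from the
// fields Init touches (ParanoidCheck, LRUCacheSize, BloomFilterLength).
var DefaultLevelDbConf = &proto.LevelDbConfigs{
	ParanoidCheck:     false,
	LRUCacheSize:      16 * 1024 * 1024, // 16MB, illustrative only
	BloomFilterLength: 10,               // bits per key, LevelDB's usual default
}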
Example 7
func TestLevigo(t *testing.T) {
	path := "/tmp/levigo_test_10101"
	os.RemoveAll(path)

	opts := levigo.NewOptions()
	filter := levigo.NewBloomFilter(10)
	opts.SetFilterPolicy(filter)
	opts.SetCache(levigo.NewLRUCache(1024 << 20)) // 1G
	opts.SetCreateIfMissing(true)
	if ldb, err := levigo.Open(path, opts); err == nil {
		key := []byte("test-test hwl0dsfds")
		val := []byte("value")

		if err = ldb.Put(levigo.NewWriteOptions(), key, val); err != nil {
			t.Fail()
		} else {
			ro := levigo.NewReadOptions()
			if data, err := ldb.Get(ro, key); err == nil && reflect.DeepEqual(data, val) {
				ro.SetFillCache(false)
				it := ldb.NewIterator(ro)
				it.Seek([]byte{0})
				for ; it.Valid(); it.Next() {
					log.Printf("%s => %s", it.Key(), it.Value())
				}
				it.Close() // release the iterator before the test returns
			} else {
				t.Fail()
			}
		}
	} else {
		t.Fail()
	}
}
Example 8
func NewLevelDbDatastore(dbDir string) (Datastore, error) {
	mainDbDir := filepath.Join(dbDir, DATABASE_DIR)
	requestLogDir := filepath.Join(dbDir, REQUEST_LOG_BASE_DIR)

	err := os.MkdirAll(mainDbDir, 0744)
	if err != nil {
		return nil, err
	}
	previousLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now().Add(-time.Hour*24)))
	if err != nil {
		return nil, err
	}
	currentLog, err := NewRequestLogDb(getRequestLogDirForDate(requestLogDir, time.Now()))
	if err != nil {
		return nil, err
	}

	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(ONE_GIGABYTE))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES)
	filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	db, err := levigo.Open(mainDbDir, opts) // open under the dedicated subdirectory created above
	if err != nil {
		return nil, err
	}

	ro := levigo.NewReadOptions()

	lastIdBytes, err2 := db.Get(ro, NEXT_ID_KEY)
	if err2 != nil {
		return nil, err2
	}

	lastId := uint64(0)
	if lastIdBytes != nil {
		lastId, err2 = binary.ReadUvarint(bytes.NewBuffer(lastIdBytes))
		if err2 != nil {
			return nil, err2
		}
	}

	wo := levigo.NewWriteOptions()

	leveldbStore := &LevelDbDatastore{
		db:                 db,
		lastIdUsed:         lastId,
		readOptions:        ro,
		writeOptions:       wo,
		requestLogDir:      requestLogDir,
		currentRequestLog:  currentLog,
		previousRequestLog: previousLog}

	go leveldbStore.periodicallyRotateRequestLog()

	return leveldbStore, nil
}
Example 9
File: db.go Project: scozss/setdb
func openDB() {
	opts := levigo.NewOptions()
	cache := levigo.NewLRUCache(128 * 1024 * 1024) // 128MB cache
	opts.SetCache(cache)
	filter := levigo.NewBloomFilter(10)
	opts.SetFilterPolicy(filter)
	opts.SetCreateIfMissing(true)

	var err error
	DB, err = levigo.Open("db", opts)
	maybeFatal(err)
}
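maybeFatal is not shown in this excerpt; a minimal sketch consistent with how openDB uses it:

// Hypothetical definition of maybeFatal, matching its call site above:
// abort the process on any error from levigo.Open.
func maybeFatal(err error) {
	if err != nil {
		log.Fatalln(err)
	}
}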
Example 10
func NewTorrentDB(dir string) (*TorrentDB, error) {
	opts := levigo.NewOptions()
	filter := levigo.NewBloomFilter(10)
	opts.SetFilterPolicy(filter)
	opts.SetCreateIfMissing(true)
	defer opts.Close()
	db, err := levigo.Open(dir, opts)
	if err != nil {
		return nil, err
	}
	return &TorrentDB{db, nil, &sync.RWMutex{}}, nil
}
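The return statement builds a TorrentDB with positional fields, so its definition is implied rather than shown. One plausible shape, stated as an assumption (the middle field's role is not visible in this excerpt):

// Assumed definition of TorrentDB matching the positional literal above.
type TorrentDB struct {
	db    *levigo.DB
	extra interface{}   // nil in the constructor; role unknown from this excerpt
	mu    *sync.RWMutex // guards concurrent access to db
}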
Example 11
func openDb(path string) (*levigo.DB, error) {
	opts := levigo.NewOptions()
	opts.SetCreateIfMissing(true)
	opts.SetFilterPolicy(levigo.NewBloomFilter(16))
	opts.SetCache(levigo.NewLRUCache(10490000))
	opts.SetMaxOpenFiles(500)
	opts.SetWriteBufferSize(62914560)
	opts.SetEnv(levigo.NewDefaultEnv())
	dbn, err := levigo.Open(path, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to open db at %s: %v", path, err)
	}
	return dbn, nil
}
Example 12
func NewRequestLogDb(dir string) (*requestLogDb, error) {
	err := os.MkdirAll(dir, 0744)
	if err != nil {
		return nil, err
	}
	opts := levigo.NewOptions()
	opts.SetCache(levigo.NewLRUCache(ONE_MEGABYTE))
	opts.SetCreateIfMissing(true)
	opts.SetBlockSize(TWO_FIFTY_SIX_KILOBYTES)
	filter := levigo.NewBloomFilter(BLOOM_FILTER_BITS_PER_KEY)
	opts.SetFilterPolicy(filter)
	db, err := levigo.Open(dir, opts)
	if err != nil {
		return nil, err
	}
	return &requestLogDb{dir: dir, db: db}, nil
}
Example 13
// NewLevelDBPersistence returns an initialized LevelDBPersistence object,
// created with the given options.
func NewLevelDBPersistence(o LevelDBOptions) (*LevelDBPersistence, error) {
	options := levigo.NewOptions()
	options.SetCreateIfMissing(true)
	options.SetParanoidChecks(o.UseParanoidChecks)

	compression := levigo.SnappyCompression
	if o.Compression == Uncompressed {
		compression = levigo.NoCompression
	}
	options.SetCompression(compression)

	cache := levigo.NewLRUCache(o.CacheSizeBytes)
	options.SetCache(cache)

	filterPolicy := levigo.NewBloomFilter(10)
	options.SetFilterPolicy(filterPolicy)

	options.SetMaxOpenFiles(o.OpenFileAllowance)

	storage, err := levigo.Open(o.Path, options)
	if err != nil {
		return nil, err
	}

	readOptions := levigo.NewReadOptions()

	writeOptions := levigo.NewWriteOptions()
	writeOptions.SetSync(o.FlushOnMutate)

	return &LevelDBPersistence{
		path:    o.Path,
		name:    o.Name,
		purpose: o.Purpose,

		cache:        cache,
		filterPolicy: filterPolicy,

		options:      options,
		readOptions:  readOptions,
		writeOptions: writeOptions,

		storage: storage,
	}, nil
}
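LevelDBOptions is consumed here but not defined. A shape inferred purely from the fields this constructor reads (the CompressionType type and its constants are assumptions; the real definition may carry more):

// Assumed shape of LevelDBOptions, listing exactly the fields the
// constructor above reads.
type CompressionType int

const (
	Snappy CompressionType = iota
	Uncompressed
)

type LevelDBOptions struct {
	Path              string
	Name              string
	Purpose           string
	UseParanoidChecks bool
	Compression       CompressionType
	CacheSizeBytes    int
	OpenFileAllowance int
	FlushOnMutate     bool
}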
Example 14
func applyConfig(o *levigo.Options, config map[string]interface{}) (
	*levigo.Options, error) {

	cim, ok := config["create_if_missing"].(bool)
	if ok {
		o.SetCreateIfMissing(cim)
	}

	eie, ok := config["error_if_exists"].(bool)
	if ok {
		o.SetErrorIfExists(eie)
	}

	wbs, ok := config["write_buffer_size"].(float64)
	if ok {
		o.SetWriteBufferSize(int(wbs))
	}

	bs, ok := config["block_size"].(float64)
	if ok {
		o.SetBlockSize(int(bs))
	}

	bri, ok := config["block_restart_interval"].(float64)
	if ok {
		o.SetBlockRestartInterval(int(bri))
	}

	lcc, ok := config["lru_cache_capacity"].(float64)
	if ok {
		lruCache := levigo.NewLRUCache(int(lcc))
		o.SetCache(lruCache)
	}

	bfbpk, ok := config["bloom_filter_bits_per_key"].(float64)
	if ok {
		bf := levigo.NewBloomFilter(int(bfbpk))
		o.SetFilterPolicy(bf)
	}

	return o, nil
}
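The float64 type assertions are the giveaway that config is expected to come from encoding/json, which decodes every JSON number into a float64. A hedged caller sketch (the optionsFromJSON helper is an assumption, not part of the original):

// Hypothetical caller: unmarshal a JSON blob into the generic map that
// applyConfig expects; json.Unmarshal yields float64 for all numbers,
// which matches the assertions above.
func optionsFromJSON(raw []byte) (*levigo.Options, error) {
	var config map[string]interface{}
	if err := json.Unmarshal(raw, &config); err != nil {
		return nil, err
	}
	return applyConfig(levigo.NewOptions(), config)
}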
Example 15
func NewLevelDBPersistence(storageRoot string, cacheCapacity, bitsPerBloomFilterEncoded int) (p *LevelDBPersistence, err error) {
	options := levigo.NewOptions()
	options.SetCreateIfMissing(true)
	options.SetParanoidChecks(*leveldbUseParanoidChecks)
	compression := levigo.NoCompression
	if *leveldbUseSnappy {
		compression = levigo.SnappyCompression
	}
	options.SetCompression(compression)

	cache := levigo.NewLRUCache(cacheCapacity)
	options.SetCache(cache)

	filterPolicy := levigo.NewBloomFilter(bitsPerBloomFilterEncoded)
	options.SetFilterPolicy(filterPolicy)

	storage, err := levigo.Open(storageRoot, options)
	if err != nil {
		return
	}

	var (
		readOptions  = levigo.NewReadOptions()
		writeOptions = levigo.NewWriteOptions()
	)

	writeOptions.SetSync(*leveldbFlushOnMutate)
	p = &LevelDBPersistence{
		cache:        cache,
		filterPolicy: filterPolicy,
		options:      options,
		readOptions:  readOptions,
		storage:      storage,
		writeOptions: writeOptions,
	}

	return
}
Example 16
func main() {
	fmt.Printf("GOMAXPROCS is %d\n", getGOMAXPROCS())
	confFile := "config.json"
	conf, err := btcplex.LoadConfig(confFile)
	if err != nil {
		log.Fatalf("Can't load config file: %v", err)
	}
	pool, err := btcplex.GetSSDB(conf)
	if err != nil {
		log.Fatalf("Can't connect to SSDB: %v", err)
	}

	opts := levigo.NewOptions()
	opts.SetCreateIfMissing(true)
	filter := levigo.NewBloomFilter(10)
	opts.SetFilterPolicy(filter)
	ldb, err := levigo.Open(conf.LevelDbPath, opts) //alpha
	if err != nil {
		log.Fatalf("failed to load db: %s\n", err)
	}
	defer ldb.Close() // defer only after the error check; ldb is nil on failure

	wo := levigo.NewWriteOptions()
	//wo.SetSync(true)
	defer wo.Close()

	ro := levigo.NewReadOptions()
	defer ro.Close()

	wb := levigo.NewWriteBatch()
	defer wb.Close()

	conn := pool.Get()
	defer conn.Close()

	log.Println("Waiting 3 seconds before starting...")
	time.Sleep(3 * time.Second)

	latestheight := 0
	log.Printf("Latest height: %v\n", latestheight)

	running = true
	cs := make(chan os.Signal, 1)
	signal.Notify(cs, os.Interrupt)
	go func() {
		for sig := range cs {
			running = false
			log.Printf("Captured %v, waiting for everything to finish...\n", sig)
			wg.Wait()
			os.Exit(1) // a defer here would never fire inside the loop; exit directly
		}
	}()

	concurrency := 250
	sem := make(chan bool, concurrency)

	// Real network magic byte
	blockchain, blockchainerr := blkparser.NewBlockchain(conf.BitcoindBlocksPath, [4]byte{0xF8, 0xB5, 0x03, 0xDF})
	if blockchainerr != nil {
		log.Fatalf("Error loading block file: ", blockchainerr)
	}

	block_height := uint(0)
	for {
		if !running {
			break
		}

		wg.Add(1)

		bl, er := blockchain.NextBlock()
		if er != nil {
			wg.Done() // balance the wg.Add above before leaving the loop
			log.Println("Initial import done.")
			break
		}

		bl.Raw = nil

		if bl.Parent == "" {
			block_height = uint(0)
			conn.Do("HSET", fmt.Sprintf("block:%v:h", bl.Hash), "main", true)
			conn.Do("HSET", fmt.Sprintf("block:%v:h", bl.Hash), "height", 0)

		} else {
			parentheight, _ := redis.Int(conn.Do("HGET", fmt.Sprintf("block:%v:h", bl.Parent), "height"))
			block_height = uint(parentheight + 1)
			conn.Do("HSET", fmt.Sprintf("block:%v:h", bl.Hash), "height", block_height)
			prevheight := block_height - 1
			prevhashtest := bl.Parent
			prevnext := bl.Hash
			for {
				prevkey := fmt.Sprintf("height:%v", prevheight)
				prevcnt, _ := redis.Int(conn.Do("ZCARD", prevkey))
				// SSDB doesn't support negative slice yet
				prevs, _ := redis.Strings(conn.Do("ZRANGE", prevkey, 0, prevcnt-1))
				for _, cprevhash := range prevs {
					if cprevhash == prevhashtest {
						// current block parent
						prevhashtest, _ = redis.String(conn.Do("HGET", fmt.Sprintf("block:%v:h", cprevhash), "parent"))
						// Set main to 1 and the next => prevnext
						conn.Do("HMSET", fmt.Sprintf("block:%v:h", cprevhash), "main", true, "next", prevnext)
						conn.Do("SET", fmt.Sprintf("block:height:%v", prevheight), cprevhash)
						prevnext = cprevhash
					} else {
						// Set main to 0
						conn.Do("HSET", fmt.Sprintf("block:%v:h", cprevhash), "main", false)
						oblock, _ := btcplex.GetBlockCachedByHash(pool, cprevhash)
						for _, otx := range oblock.Txs {
							otx.Revert(pool)
						}
					}
				}
				if len(prevs) == 1 {
					break
				}
				prevheight--
			}

		}

		// Orphans blocks handling
		conn.Do("ZADD", fmt.Sprintf("height:%v", block_height), bl.BlockTime, bl.Hash)
		conn.Do("HSET", fmt.Sprintf("block:%v:h", bl.Hash), "parent", bl.Parent)

		if latestheight != 0 && latestheight+1 > int(block_height) {
			log.Printf("Skipping block #%v\n", block_height)
			wg.Done() // balance the wg.Add above before skipping this block
			continue
		}

		log.Printf("Current block: %v (%v)\n", block_height, bl.Hash)

		block := new(Block)
		block.Hash = bl.Hash
		block.Height = block_height
		block.Version = bl.Version
		block.MerkleRoot = bl.MerkleRoot
		block.BlockTime = bl.BlockTime
		block.Bits = bl.Bits
		block.Nonce = bl.Nonce
		block.Size = bl.Size
		block.Parent = bl.Parent

		txs := []*Tx{}

		total_bl_out := uint64(0)
		for tx_index, tx := range bl.Txs {
			//log.Printf("Tx #%v: %v\n", tx_index, tx.Hash)

			total_tx_out := uint64(0)
			total_tx_in := uint64(0)

			//conn.Send("MULTI")
			txos := []*btcplex.TxOut{}
			txis := []*btcplex.TxIn{}

			for txo_index, txo := range tx.TxOuts {
				txwg.Add(1)
				sem <- true
				go func(bl *blkparser.Block, tx *blkparser.Tx, pool *redis.Pool, total_tx_out *uint64, txo *blkparser.TxOut, txo_index int) {
					conn := pool.Get()
					defer conn.Close()
					defer func() {
						<-sem
					}()
					defer txwg.Done()
					atomic.AddUint64(total_tx_out, uint64(txo.Value))
					//atomic.AddUint32(txos_cnt, 1)

					ntxo := new(btcplex.TxOut)
					ntxo.TxHash = tx.Hash
					ntxo.BlockHash = bl.Hash
					ntxo.BlockTime = bl.BlockTime
					ntxo.Addr = txo.Addr
					ntxo.Value = txo.Value
					ntxo.Index = uint32(txo_index)
					txospent := new(btcplex.TxoSpent)
					ntxo.Spent = txospent
					ntxocached := new(TxOutCached)
					ntxocached.Addr = txo.Addr
					ntxocached.Value = txo.Value

					ntxocachedjson, _ := json.Marshal(ntxocached)
					ldb.Put(wo, []byte(fmt.Sprintf("txo:%v:%v", tx.Hash, txo_index)), ntxocachedjson)

					ntxojson, _ := json.Marshal(ntxo)
					ntxokey := fmt.Sprintf("txo:%v:%v", tx.Hash, txo_index)
					conn.Do("SET", ntxokey, ntxojson)

					//conn.Send("ZADD", fmt.Sprintf("txo:%v", tx.Hash), txo_index, ntxokey)
					conn.Do("ZADD", fmt.Sprintf("addr:%v", ntxo.Addr), bl.BlockTime, tx.Hash)
					conn.Do("ZADD", fmt.Sprintf("addr:%v:received", ntxo.Addr), bl.BlockTime, tx.Hash)

					conn.Do("HINCRBY", fmt.Sprintf("addr:%v:h", ntxo.Addr), "tr", ntxo.Value)

					txomut.Lock()
					txos = append(txos, ntxo)
					txomut.Unlock()

				}(bl, tx, pool, &total_tx_out, txo, txo_index)
			}

			//txis_cnt := uint32(0)
			// Skip the ins if it's a CoinBase Tx (1 TxIn for newly generated coins)
			if !(len(tx.TxIns) == 1 && tx.TxIns[0].InputVout == 0xffffffff) {

				for txi_index, txi := range tx.TxIns {
					txwg.Add(1)
					sem <- true
					go func(txi *blkparser.TxIn, bl *blkparser.Block, tx *blkparser.Tx, pool *redis.Pool, total_tx_in *uint64, txi_index int) {
						conn := pool.Get()
						defer conn.Close()
						defer func() {
							<-sem
						}()
						defer txwg.Done()

						ntxi := new(btcplex.TxIn)
						ntxi.TxHash = tx.Hash
						ntxi.BlockHash = bl.Hash
						ntxi.BlockTime = bl.BlockTime
						ntxi.Index = uint32(txi_index)
						nprevout := new(btcplex.PrevOut)
						nprevout.Vout = txi.InputVout
						nprevout.Hash = txi.InputHash
						ntxi.PrevOut = nprevout
						prevtxo := new(TxOutCached)

						prevtxocachedraw, err := ldb.Get(ro, []byte(fmt.Sprintf("txo:%v:%v", txi.InputHash, txi.InputVout)))
						if err != nil {
							log.Printf("Err getting prevtxocached: %v", err)
						}

						if len(prevtxocachedraw) > 0 {
							if err := json.Unmarshal(prevtxocachedraw, prevtxo); err != nil {
								panic(err)
							}
						} else {
							// Shouldn't happen!
							//log.Println("Fallback to SSDB")
							prevtxoredisjson, err := redis.String(conn.Do("GET", fmt.Sprintf("txo:%v:%v", txi.InputHash, txi.InputVout)))
							if err != nil {
								log.Printf("KEY:%v\n", fmt.Sprintf("txo:%v:%v", txi.InputHash, txi.InputVout))
								panic(err)
							}
							prevtxoredis := new(btcplex.TxOut)
							json.Unmarshal([]byte(prevtxoredisjson), prevtxoredis)

							prevtxo.Addr = prevtxoredis.Addr
							prevtxo.Value = prevtxoredis.Value
							//prevtxo.Id = prevtxomongo.Id.Hex()
						}

						ldb.Delete(wo, []byte(fmt.Sprintf("txo:%v:%v", txi.InputHash, txi.InputVout)))

						nprevout.Address = prevtxo.Addr
						nprevout.Value = prevtxo.Value

						txospent := new(btcplex.TxoSpent)
						txospent.Spent = true
						txospent.BlockHeight = uint32(block_height)
						txospent.InputHash = tx.Hash
						txospent.InputIndex = uint32(txi_index)

						//total_tx_in+= uint(nprevout.Value)
						atomic.AddUint64(total_tx_in, nprevout.Value)

						tximut.Lock()
						txis = append(txis, ntxi)
						tximut.Unlock()
						//atomic.AddUint32(txis_cnt, 1)

						//log.Println("Starting update prev txo")
						ntxijson, _ := json.Marshal(ntxi)
						ntxikey := fmt.Sprintf("txi:%v:%v", tx.Hash, txi_index)

						txospentjson, _ := json.Marshal(txospent)

						conn.Do("SET", ntxikey, ntxijson)
						//conn.Send("ZADD", fmt.Sprintf("txi:%v", tx.Hash), txi_index, ntxikey)

						conn.Do("SET", fmt.Sprintf("txo:%v:%v:spent", txi.InputHash, txi.InputVout), txospentjson)

						conn.Do("ZADD", fmt.Sprintf("addr:%v", nprevout.Address), bl.BlockTime, tx.Hash)
						conn.Do("ZADD", fmt.Sprintf("addr:%v:sent", nprevout.Address), bl.BlockTime, tx.Hash)
						conn.Do("HINCRBY", fmt.Sprintf("addr:%v:h", nprevout.Address), "ts", nprevout.Value)
					}(txi, bl, tx, pool, &total_tx_in, txi_index)

				}
			}

			err := ldb.Write(wo, wb)
			if err != nil {
				log.Fatalf("Err write batch: %v", err)
			}
			wb.Clear()

			txwg.Wait()

			total_bl_out += total_tx_out

			ntx := new(Tx)
			ntx.Index = uint32(tx_index)
			ntx.Hash = tx.Hash
			ntx.Size = tx.Size
			ntx.LockTime = tx.LockTime
			ntx.Version = tx.Version
			ntx.TxInCnt = uint32(len(txis))
			ntx.TxOutCnt = uint32(len(txos))
			ntx.TotalOut = uint64(total_tx_out)
			ntx.TotalIn = uint64(total_tx_in)
			ntx.BlockHash = bl.Hash
			ntx.BlockHeight = block_height
			ntx.BlockTime = bl.BlockTime

			ntxjson, _ := json.Marshal(ntx)
			ntxjsonkey := fmt.Sprintf("tx:%v", ntx.Hash)
			conn.Do("SET", ntxjsonkey, ntxjson)
			conn.Do("ZADD", fmt.Sprintf("tx:%v:blocks", tx.Hash), bl.BlockTime, bl.Hash)
			conn.Do("ZADD", fmt.Sprintf("block:%v:txs", block.Hash), tx_index, ntxjsonkey)

			ntx.TxIns = txis
			ntx.TxOuts = txos
			txs = append(txs, ntx)
		}

		block.TotalBTC = uint64(total_bl_out)
		block.TxCnt = uint32(len(txs))

		blockjson, _ := json.Marshal(block)
		conn.Do("ZADD", "blocks", block.BlockTime, block.Hash)
		conn.Do("MSET", fmt.Sprintf("block:%v", block.Hash), blockjson, "height:latest", int(block_height), fmt.Sprintf("block:height:%v", block.Height), block.Hash)
		block.Txs = txs
		blockjsoncache, _ := json.Marshal(block)
		conn.Do("SET", fmt.Sprintf("block:%v:cached", block.Hash), blockjsoncache)

		if !running {
			log.Printf("Done. Stopped at height: %v.", block_height)
		}

		wg.Done()
	}
	wg.Wait()
}