Example no. 1
// SaveJSON writes an indented JSON dump of the block to
// DataStorePath/<hex chainID>/storeJSON.<height>.block.
func SaveJSON(block interfaces.DatabaseBatchable) error {
	data, err := block.(interfaces.Printable).JSONByte()
	if err != nil {
		return err
	}

	var out bytes.Buffer
	if err := json.Indent(&out, data, "", "\t"); err != nil {
		return err
	}
	data = out.Bytes()

	strChainID := fmt.Sprintf("%x", block.GetChainID())
	dir := DataStorePath + strChainID
	if FileNotExists(dir) {
		if err := os.MkdirAll(dir, 0777); err != nil {
			return err
		}
		fmt.Println("Created directory " + dir)
	}

	err = ioutil.WriteFile(fmt.Sprintf(dir+"/storeJSON.%09d.block", block.GetDatabaseHeight()), data, 0777)
	if err != nil {
		return err
	}
	return nil
}
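For reference, the indent-then-write pattern used by SaveJSON can be reproduced with the standard library alone. The sketch below is illustrative only: the payload and file name are made up, and os.WriteFile stands in for the older ioutil.WriteFile.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// hypothetical payload standing in for block.JSONByte()
	data, err := json.Marshal(map[string]uint32{"dbheight": 42})
	if err != nil {
		panic(err)
	}

	// re-indent the already-marshalled JSON, as SaveJSON does
	var out bytes.Buffer
	if err := json.Indent(&out, data, "", "\t"); err != nil {
		panic(err)
	}

	// zero-padded, height-numbered file name mirroring "storeJSON.%09d.block"
	name := fmt.Sprintf("storeJSON.%09d.block", 42)
	if err := os.WriteFile(name, out.Bytes(), 0666); err != nil {
		panic(err)
	}
}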
Example no. 2
// SaveEntryJSON writes an indented JSON dump of the entry to
// DataStorePath/<hex chainID>/entries/storeJSON.<blockHeight>.<primary index>.entry.
func SaveEntryJSON(entry interfaces.DatabaseBatchable, blockHeight uint32) error {
	data, err := entry.(interfaces.Printable).JSONByte()
	if err != nil {
		return err
	}

	var out bytes.Buffer
	if err := json.Indent(&out, data, "", "\t"); err != nil {
		return err
	}
	data = out.Bytes()

	strChainID := fmt.Sprintf("%x", entry.GetChainID())
	dir := DataStorePath + strChainID + "/entries"
	if FileNotExists(dir) {
		if err := os.MkdirAll(dir, 0777); err != nil {
			return err
		}
		fmt.Println("Created directory " + dir)
	}

	err = ioutil.WriteFile(fmt.Sprintf(dir+"/storeJSON.%09d.%v.entry", blockHeight, entry.DatabasePrimaryIndex().String()), data, 0777)
	if err != nil {
		return err
	}
	return nil
}
Example no. 3
// Insert stores the entry in the given bucket, keyed by its primary index.
func (db *Overlay) Insert(bucket []byte, entry interfaces.DatabaseBatchable) error {
	err := db.DB.Put(bucket, entry.DatabasePrimaryIndex().Bytes(), entry)
	if err != nil {
		return err
	}
	return nil
}
Example no. 4
// ProcessBlockBatchWithoutHead writes the block and its optional number and
// secondary indexes in a single batch, without touching the chain-head record.
func (db *Overlay) ProcessBlockBatchWithoutHead(blockBucket, numberBucket, secondaryIndexBucket []byte, block interfaces.DatabaseBatchable) error {
	if block == nil {
		return nil
	}

	batch := []interfaces.Record{}

	batch = append(batch, interfaces.Record{blockBucket, block.DatabasePrimaryIndex().Bytes(), block})

	if numberBucket != nil {
		bytes := make([]byte, 4)
		binary.BigEndian.PutUint32(bytes, block.GetDatabaseHeight())
		batch = append(batch, interfaces.Record{numberBucket, bytes, block.DatabasePrimaryIndex()})
	}

	if secondaryIndexBucket != nil {
		batch = append(batch, interfaces.Record{secondaryIndexBucket, block.DatabaseSecondaryIndex().Bytes(), block.DatabasePrimaryIndex()})
	}

	err := db.DB.PutInBatch(batch)
	if err != nil {
		return err
	}

	return nil
}
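The number bucket keys each block by its height as a fixed-width, big-endian value, so that lexicographic byte order matches numeric height order in a byte-sorted store. A self-contained illustration of that encoding (the height value is arbitrary):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// the same 4-byte big-endian encoding used for the number-bucket key
	key := make([]byte, 4)
	binary.BigEndian.PutUint32(key, 42)
	fmt.Printf("% x\n", key) // prints: 00 00 00 2a
}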
Example no. 5
// SaveEntryBinary writes the binary marshalling of the entry to
// DataStorePath/<hex chainID>/entries/store.<blockHeight>.<primary index>.entry.
func SaveEntryBinary(entry interfaces.DatabaseBatchable, blockHeight uint32) error {
	data, err := entry.MarshalBinary()
	if err != nil {
		return err
	}

	strChainID := fmt.Sprintf("%x", entry.GetChainID())
	dir := DataStorePath + strChainID + "/entries"
	if FileNotExists(dir) {
		if err := os.MkdirAll(dir, 0777); err != nil {
			return err
		}
		fmt.Println("Created directory " + dir)
	}

	err = ioutil.WriteFile(fmt.Sprintf(dir+"/store.%09d.%v.entry", blockHeight, entry.DatabasePrimaryIndex().String()), data, 0777)
	if err != nil {
		return err
	}
	return nil
}
Example no. 6
// SaveBinary writes the binary marshalling of the block to
// DataStorePath/<hex chainID>/store.<height>.block.
func SaveBinary(block interfaces.DatabaseBatchable) error {
	data, err := block.MarshalBinary()
	if err != nil {
		return err
	}

	strChainID := fmt.Sprintf("%x", block.GetChainID())
	dir := DataStorePath + strChainID
	if FileNotExists(dir) {
		if err := os.MkdirAll(dir, 0777); err != nil {
			return err
		}
		fmt.Println("Created directory " + dir)
	}

	err = ioutil.WriteFile(fmt.Sprintf(dir+"/store.%09d.block", block.GetDatabaseHeight()), data, 0777)
	if err != nil {
		return err
	}
	return nil
}
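SaveJSON, SaveEntryJSON, SaveEntryBinary, and SaveBinary all repeat the same ensure-directory step. Since os.MkdirAll already succeeds silently when the directory exists, that step could be folded into a small helper; ensureDir below is a hypothetical name and drops the "Created directory" log, so it is a sketch rather than a drop-in replacement.

package main

import (
	"fmt"
	"os"
)

// ensureDir is a hypothetical helper for the directory-creation step repeated
// in the Save* functions above; os.MkdirAll is a no-op for directories that
// already exist, so no separate existence check is needed.
func ensureDir(dir string) error {
	return os.MkdirAll(dir, 0777)
}

func main() {
	// made-up path following the DataStorePath/<hex chainID>/entries layout
	if err := ensureDir("data/0123abcd/entries"); err != nil {
		fmt.Println(err)
	}
}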
Example no. 7
// ProcessBlockBatch writes the block, its indexes, and the chain-head record
// in a single batch, then exports the block when ExportData is enabled.
func (db *Overlay) ProcessBlockBatch(blockBucket, numberBucket, secondaryIndexBucket []byte, block interfaces.DatabaseBatchable) error {
	if block == nil {
		return nil
	}

	batch := []interfaces.Record{}

	batch = append(batch, interfaces.Record{blockBucket, block.DatabasePrimaryIndex().Bytes(), block})

	if numberBucket != nil {
		bytes := make([]byte, 4)
		binary.BigEndian.PutUint32(bytes, block.GetDatabaseHeight())
		batch = append(batch, interfaces.Record{numberBucket, bytes, block.DatabasePrimaryIndex()})
	}

	if secondaryIndexBucket != nil {
		batch = append(batch, interfaces.Record{secondaryIndexBucket, block.DatabaseSecondaryIndex().Bytes(), block.DatabasePrimaryIndex()})
	}

	batch = append(batch, interfaces.Record{[]byte{CHAIN_HEAD}, block.GetChainID(), block.DatabasePrimaryIndex()})

	err := db.DB.PutInBatch(batch)
	if err != nil {
		return err
	}

	if db.ExportData {
		err = blockExtractor.ExportBlock(block)
		if err != nil {
			return err
		}
	}

	return nil
}
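The exact field names of interfaces.Record are not visible in these examples, so the sketch below uses an illustrative struct of the same shape (bucket, key, value) purely to spell out the four rows that ProcessBlockBatch assembles for one block; the strings are descriptive placeholders, not real constants.

package main

import "fmt"

// record is an illustrative stand-in for interfaces.Record; the real value
// field holds the object itself, which a string label replaces here.
type record struct {
	bucket string
	key    string
	value  string
}

func main() {
	// one row per append in ProcessBlockBatch
	batch := []record{
		{"blockBucket", "primary index", "block"},
		{"numberBucket", "big-endian height", "primary index"},
		{"secondaryIndexBucket", "secondary index", "primary index"},
		{"CHAIN_HEAD", "chain ID", "primary index"},
	}
	for _, r := range batch {
		fmt.Printf("%-22s %-18s -> %s\n", r.bucket, r.key, r.value)
	}
}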
Example no. 8
// ProcessEBlockBatch inserts the EBlock and updates all of its ebentries in the DB.
func (db *Overlay) ProcessEBlockBatch(eblock interfaces.DatabaseBatchable) error {
	//Each chain has its own number bucket, otherwise we would have conflicts
	numberBucket := append([]byte{byte(ENTRYBLOCK_CHAIN_NUMBER)}, eblock.GetChainID()...)
	return db.ProcessBlockBatch([]byte{byte(ENTRYBLOCK)}, numberBucket, []byte{byte(ENTRYBLOCK_KEYMR)}, eblock)
}
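The per-chain number bucket is built by prefixing the chain ID with the ENTRYBLOCK_CHAIN_NUMBER byte. A self-contained illustration of that prefixing, with made-up values standing in for both the prefix and the chain ID:

package main

import "fmt"

func main() {
	// made-up stand-ins for ENTRYBLOCK_CHAIN_NUMBER and a chain ID
	const chainNumberPrefix = byte(0x11)
	chainID := []byte{0xde, 0xad, 0xbe, 0xef}

	// same construction as in ProcessEBlockBatch: a fresh slice is built per
	// call, so buckets for different chains never collide or share memory
	numberBucket := append([]byte{chainNumberPrefix}, chainID...)
	fmt.Printf("% x\n", numberBucket) // prints: 11 de ad be ef
}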