func mainCleanUp() { log.Infof("Cleaning up...") err := node.Destroy() if err != nil { log.Infof("Error destroying node: %s", err.Error()) } }
func (u *outputDB) Commit(tag uint32, force bool) error { if force || u.db.WAValueLen() > kaiju.GetConfig().MaxKdbWAValueLen { log.Infof("Committing blocks up to number %d ...", tag) err := u.db.Commit(tag) log.Infof("Committed blocks up to number %d", tag) return err } return nil }
// Returns *Stats, tag, cursor func readHeader(s Storage) (*Stats, uint32, int64, error) { errInvalid := errors.New("Invalid KDB header") p := make([]byte, HeaderSize) if _, err := s.Read(p); err != nil { return nil, 0, 0, err } if p[0] != 'K' || p[1] != 'D' || p[2] != 'B' || Version != p[3] { return nil, 0, 0, errInvalid } if SlotSize != p[4] || ValLenUnit != p[5] || HeaderSize != p[6] { return nil, 0, 0, errInvalid } buf := bytes.NewBuffer(p[8:]) stats := new(Stats) var tag uint32 var cursor int64 binary.Read(buf, binary.LittleEndian, &stats.capacity) binary.Read(buf, binary.LittleEndian, &stats.records) binary.Read(buf, binary.LittleEndian, &stats.deadSlots) binary.Read(buf, binary.LittleEndian, &stats.deadValues) for i := 0; i < 6; i++ { //reserved space var n int32 binary.Read(buf, binary.LittleEndian, &n) } binary.Read(buf, binary.LittleEndian, &tag) binary.Read(buf, binary.LittleEndian, &cursor) if stats.capacity <= 0 { return nil, 0, 0, errInvalid } else if cursor < HeaderSize+int64(stats.capacity)*2*SlotSize { return nil, 0, 0, errInvalid } log.Infof("kdb readHeader: capacity %d records %d deadSlots %d deadValues %d tag %d cursor %d", stats.capacity, stats.records, stats.deadSlots, stats.deadValues, tag, cursor) return stats, tag, cursor, nil }
func newSwdl(begin int, end int, paral int, load int) *swdl { // Open a window that wider than paral * load maxSlots := (end - begin) / load slots := paral * 4 if slots > maxSlots { slots = maxSlots } s := slots * load if slots == 0 { s = end - begin } log.Infof("newSwdl begin %d end %d winsize %d", begin, end, s) return &swdl{ begin: begin, end: end, paral: paral, load: load, cursor: begin, size: s, window: make([]interface{}, 0), chout: make(chan map[int]*blockchain.InvElement), chin: make(chan map[int]interface{}), chblock: make(chan struct { btcmsg.Message I int }), done: make(chan struct{}), ca: newSwdlca(), } }
// Rebuild copies every record enumerated from db into a brand-new KDB
// of the given capacity backed by s (data storage) and was (write-ahead
// storage), and returns the new KDB. NOTE(review): presumably this
// compacts away dead slots/values recorded in Stats — confirm with the
// enumerate implementation.
func (db *KDB) Rebuild(capacity uint32, s Storage, was Storage) (*KDB, error) {
	newdb, err := New(capacity, s, was)
	if err != nil {
		return nil, err
	}
	// f re-inserts one record into newdb: locate a slot for the
	// internal key, rewrite the key's data-location field to point at
	// newdb's current value cursor, then write key and value.
	f := func(i uint32, sd []byte, val []byte, mv bool) error {
		n, err := newdb.slotScan(sd[:InternalKeySize], nil, nil)
		if err != nil {
			return err
		}
		binary.LittleEndian.PutUint32(sd[InternalKeySize:], newdb.dataLoc())
		newdb.writeKey(sd, n)
		newdb.writeValue(val, keyData(sd).unitValLen(), mv)
		// Commit periodically so a large rebuild makes durable
		// progress and we get progress logs.
		if i%100000 == 0 {
			newdb.commit(i)
			log.Infof("KDB.Rebuild: current key count:%d", i)
		}
		return nil
	}
	// Hold the slot lock for reading while enumerating the source DB
	// so the slot region is not mutated under us.
	db.smutex.RLock()
	defer db.smutex.RUnlock()
	if _, _, err = db.enumerate(f); err != nil {
		return nil, err
	}
	// Final commit carries the source DB's tag over to the new DB.
	if tag, err := db.tag(); err != nil {
		return nil, err
	} else {
		newdb.commit(tag)
	}
	return newdb, nil
}
// Append downloaded headers. // TODO: more robust way of getting old blocks func (h *headers) Append(hs []*catma.Header) error { if len(hs) == 0 { return nil } oldL := len(h.data) - 1 // excluding genesis for _, header := range hs { err := h.appendHeader(header) if err != nil { return err } } // Write to file as well f := h.file // Caclulate the offset offset := int64(binary.Size(hs[0]) * oldL) _, err := f.Seek(offset, 0) if err != nil { return err } for _, header := range hs { err := binary.Write(f, binary.LittleEndian, header) if err != nil { return err } } log.Infof("Headers total: %v", len(h.data)) return f.Sync() }
// Load block headers saved in file. // Errors are not returned to caller, simply print a log func (h *headers) loadHeaders() { r := h.file for { ch := new(catma.Header) if err := binary.Read(r, binary.LittleEndian, ch); err == nil { if err = h.appendHeader(ch); err != nil { log.Infof("Error loading block header: %s", err) break } } else { if err != io.EOF { log.Infof("Error reading blcok header file: %s", err) } break } } log.Infof("Loaded header count: %v", len(h.data)) }
func (p *addrPool) dump() { stats := make(map[int32]int32) for e := p.addrStatusQueue.Front(); e != nil; e = e.Next() { t := e.Value.(*addrStatus).timesFailed if c, ok := stats[t]; ok { stats[t] = c + 1 } else { stats[t] = 1 } } log.Infof("AddrPool Dump %v", stats) }
func (sw *swdl) start() { sw.wg.Add(1) // For doSaveBlocks for i := 0; i < sw.paral; i++ { go sw.doDownload() } go sw.doSaveBlock() go sw.doSchedule() sw.chin <- nil // Trigger downloading sw.wg.Wait() db := cold.Get().OutputDB() if err := db.Commit(uint32(sw.end-1), true); err != nil { log.Panicf("db commit error: %s", err) } log.Infof("Finished downloading from %d to %d", sw.begin, sw.end) }
func moreHeaders() { headers := cold.Get().Headers() l := headers.GetLocator() msg := btcmsg.NewGetHeadersMsg() mg := msg.(*btcmsg.Message_getheaders) mg.BlockLocators = l mg.HashStop = new(klib.Hash256) f := func(m btcmsg.Message) (bool, bool) { _, ok := m.(*btcmsg.Message_headers) return ok, true } mh := knet.ParalMsgForMsg(mg, f, 3) if mh != nil { h, _ := mh.(*btcmsg.Message_headers) err := headers.Append(h.Headers) if err != nil { log.Infof("Error appending headers: %s", err) } } }
func mainFunc() { c := make(chan struct{}) log.Infof("Starting KNet...") ch, err := knet.Start(10) if err != nil { log.Infof("Error starting knet: %s", err) } <-ch log.Infof("KNet initialized.") log.Infof("Initializing Node...") err = node.Init() if err != nil { log.Infof("Error initializing Node: %s", err.Error()) return } log.Infof("Starting Node...") node.Start() log.Infof("Node started.") // Don't quit _ = <-c }
// schedule runs one scheduling round: merge newly downloaded blocks
// into the window, apply congestion control, slide the window past
// contiguous finished blocks (handing them to the saver), refill it
// with fresh inventory elements, and dispatch remaining work to the
// downloaders.
func (sw *swdl) schedule(msgs map[int]interface{}) {
	// 1. Fill blanks with downloaded blocks
	got := 0
	for k, v := range msgs {
		// msgs keys are absolute block heights; convert to window offsets.
		i := k - sw.cursor
		if _, ok := v.(*btcmsg.Message_block); ok {
			got++
		}
		sw.window[i] = v
	}
	// 2. Congestion control
	if len(msgs) > 0 && sw.ca.update(got, len(msgs)) {
		// Send nil for a "NOP" download
		sw.sendWork(nil)
		return
	}
	// 3. Slide window and process blocks
	dist := len(sw.window) // Slide distance
	for i, elem := range sw.window {
		if bm, ok := elem.(*btcmsg.Message_block); ok {
			//log.Infoln("save block", i + sw.cursor, i, sw.cursor, bm.Header.Hash())
			sw.chblock <- struct {
				btcmsg.Message
				I int
			}{bm, i + sw.cursor}
		} else {
			// First entry that is not yet a downloaded block stops
			// the slide.
			dist = i
			break
		}
	}
	if dist > 0 {
		log.Infof("swdl: window slided %d", dist)
	}
	sw.window = sw.window[dist:]
	sw.cursor += dist
	// Refill the window up to sw.size entries, but never past the end
	// of the requested range.
	l := len(sw.window)
	for i := l; i < sw.size; i++ {
		p := sw.cursor + i
		if p >= sw.end {
			break
		}
		ie := blockchain.GetInvElem(p)
		sw.window = append(sw.window, ie)
	}
	if len(sw.window) == 0 {
		close(sw.done) // All blocks downloaded
	}
	// 4. Handle unfinished work
	// Entries still holding an *InvElement have not been downloaded;
	// collect them keyed by absolute height.
	unfinished := make(map[int]*blockchain.InvElement)
	for i, elem := range sw.window {
		if ie, ok := elem.(*blockchain.InvElement); ok {
			unfinished[sw.cursor+i] = ie
		}
	}
	l = len(unfinished)
	if len(msgs) > 0 {
		log.Debugf("winsize %d left %d got %d cursor %d health %f",
			len(sw.window), l, got, sw.cursor, sw.ca.health)
	}
	if l == 0 {
		// No blocks left, do nothing
	} else if l <= sw.load {
		// Small enough for a single batch.
		sw.sendWork(unfinished)
	} else {
		// Split into load-sized batches; stop dispatching early once
		// the downloaders' channel is full.
		work := make(map[int]*blockchain.InvElement)
		for k, v := range unfinished {
			work[k] = v
			if len(work) >= sw.load {
				if !sw.sendWork(work) { // channel is full
					break
				}
				work = make(map[int]*blockchain.InvElement)
			}
		}
	}
}