func (w *writecache) Put(b *blocks.Block) error { if _, ok := w.cache.Get(b.Key()); ok { return nil } w.cache.Add(b.Key(), struct{}{}) return w.blockstore.Put(b) }
// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func (bs *Bitswap) HasBlock(blk blocks.Block) error {
	// Non-blocking check: refuse new blocks once shutdown has begun.
	select {
	case <-bs.process.Closing():
		return errors.New("bitswap is closed")
	default:
	}

	err := bs.blockstore.Put(blk)
	if err != nil {
		log.Errorf("Error writing block to datastore: %s", err)
		return err
	}

	// NOTE: There exists the possibility for a race condition here. If a user
	// creates a node, then adds it to the dagservice while another goroutine
	// is waiting on a GetBlock for that object, they will receive a reference
	// to the same node. We should address this soon, but I'm not going to do
	// it now as it requires more thought and isn't causing immediate problems.
	bs.notifications.Publish(blk)

	bs.engine.AddBlock(blk)

	// Hand the CID off for reproviding; if the service starts closing before
	// the send succeeds, finish shutdown instead of blocking forever.
	select {
	case bs.newBlocks <- blk.Cid():
		// send block off to be reprovided
	case <-bs.process.Closing():
		return bs.process.Close()
	}
	return nil
}
func assertBlocksEqual(t *testing.T, a, b *blocks.Block) { if !bytes.Equal(a.Data, b.Data) { t.Fatal("blocks aren't equal") } if a.Key() != b.Key() { t.Fatal("block keys aren't equal") } }
func getOrFail(bitswap Instance, b *blocks.Block, t *testing.T, wg *sync.WaitGroup) { if _, err := bitswap.Blockstore().Get(b.Key()); err != nil { _, err := bitswap.Exchange.GetBlock(context.Background(), b.Key()) if err != nil { t.Fatal(err) } } wg.Done() }
func (bs *blockstore) Put(block blocks.Block) error { k := dshelp.CidToDsKey(block.Cid()) // Has is cheaper than Put, so see if we already have it exists, err := bs.datastore.Has(k) if err == nil && exists { return nil // already stored. } return bs.datastore.Put(k, block.RawData()) }
func (w *writecache) Put(b blocks.Block) error { k := b.Key() if _, ok := w.cache.Get(k); ok { return nil } defer log.EventBegin(context.TODO(), "writecache.BlockAdded", &k).Done() w.cache.Add(b.Key(), struct{}{}) return w.blockstore.Put(b) }
func (s *BlockList) Push(b *blocks.Block) { if s.uniques == nil { s.uniques = make(map[key.Key]*list.Element) } _, ok := s.uniques[b.Key()] if !ok { e := s.list.PushBack(b) s.uniques[b.Key()] = e } }
func (bs *blockstore) Put(block *blocks.Block) error { k := block.Key().DsKey() // Has is cheaper than Put, so see if we already have it exists, err := bs.datastore.Has(k) if err == nil && exists { return nil // already stored. } return bs.datastore.Put(k, block.Data) }
func (b *bloomcache) Put(bl blocks.Block) error { if has, ok := b.hasCached(bl.Cid()); ok && has { return nil } err := b.blockstore.Put(bl) if err == nil { b.bloom.AddTS(bl.Cid().Bytes()) } return err }
func (b *arccache) Put(bl blocks.Block) error { if has, ok := b.hasCached(bl.Cid()); ok && has { return nil } err := b.blockstore.Put(bl) if err == nil { b.addCache(bl.Cid(), true) } return err }
// AddBlock adds a particular block to the service, Putting it into the datastore. // TODO pass a context into this if the remote.HasBlock is going to remain here. func (s *BlockService) AddBlock(b *blocks.Block) (key.Key, error) { k := b.Key() err := s.Blockstore.Put(b) if err != nil { return k, err } if err := s.worker.HasBlock(b); err != nil { return "", errors.New("blockservice is closed") } return k, nil }
func (e *Engine) addBlock(block blocks.Block) { work := false for _, l := range e.ledgerMap { if entry, ok := l.WantListContains(block.Key()); ok { e.peerRequestQueue.Push(entry, l.Partner) work = true } } if work { e.signalNewWork() } }
func assertBlocksEqual(t *testing.T, a, b blocks.Block) { if !bytes.Equal(a.RawData(), b.RawData()) { t.Fatal("blocks aren't equal") } if a.Cid() != b.Cid() { t.Fatal("block keys aren't equal") } }
func getCmd(nodes []int, block *blocks.Block) error { var wg sync.WaitGroup for _, node := range nodes { wg.Add(1) go func(i int) { ctx, cancel := context.WithTimeout(context.Background(), deadline) defer cancel() peers[i].Exchange.GetBlock(ctx, block.Key()) fmt.Printf("Gotem from node %d.\n", i) peers[i].Exchange.Close() wg.Done() }(node) } wg.Wait() return nil }
func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { bs.counterLk.Lock() defer bs.counterLk.Unlock() bs.blocksRecvd++ has, err := bs.blockstore.Has(b.Cid()) if err != nil { log.Infof("blockstore.Has error: %s", err) return err } if err == nil && has { bs.dupBlocksRecvd++ bs.dupDataRecvd += uint64(len(b.RawData())) } if has { return ErrAlreadyHaveBlock } return nil }
// Add adds a node to the dagService, storing the block in the BlockService func (n *dagService) Add(nd *Node) (key.Key, error) { if n == nil { // FIXME remove this assertion. protect with constructor invariant return "", fmt.Errorf("dagService is nil") } d, err := nd.Encoded(false) if err != nil { return "", err } b := new(blocks.Block) b.Data = d b.Multihash, err = nd.Multihash() if err != nil { return "", err } return n.Blocks.AddBlock(b) }
func (t *Batch) Add(nd *Node) (key.Key, error) { d, err := nd.Encoded(false) if err != nil { return "", err } b := new(blocks.Block) b.Data = d b.Multihash, err = nd.Multihash() if err != nil { return "", err } k := key.Key(b.Multihash) t.blocks = append(t.blocks, b) t.size += len(b.Data) if t.size > t.MaxSize { return k, t.Commit() } return k, nil }
func decodeBlock(b blocks.Block) (node.Node, error) { c := b.Cid() switch c.Type() { case cid.Protobuf: decnd, err := DecodeProtobuf(b.RawData()) if err != nil { if strings.Contains(err.Error(), "Unmarshal failed") { return nil, fmt.Errorf("The block referred to by '%s' was not a valid merkledag node", c) } return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err) } decnd.cached = b.Cid() return decnd, nil case cid.Raw: return NewRawNode(b.RawData()), nil case cid.CBOR: return ipldcbor.Decode(b.RawData()) default: return nil, fmt.Errorf("unrecognized object type: %s", c.Type()) } }
// AddBlock adds a particular block to the service, Putting it into the datastore. // TODO pass a context into this if the remote.HasBlock is going to remain here. func (s *blockService) AddBlock(o blocks.Block) (*cid.Cid, error) { c := o.Cid() if s.checkFirst { has, err := s.blockstore.Has(c) if err != nil { return nil, err } if has { return c, nil } } err := s.blockstore.Put(o) if err != nil { return nil, err } if err := s.exchange.HasBlock(o); err != nil { return nil, errors.New("blockservice is closed") } return c, nil }
func (b *bloomcache) Put(bl blocks.Block) error { if has, ok := b.hasCached(bl.Key()); ok && has { return nil } err := b.blockstore.Put(bl) if err == nil { b.bloom.AddTS([]byte(bl.Key())) b.arc.Add(bl.Key(), true) } return err }
// AddBlock records b in the in-memory block map, keyed by its CID string.
func (m *impl) AddBlock(b blocks.Block) {
	k := b.Cid().KeyString()
	m.blocks[k] = b
}
// AddBlock records b in the in-memory block map, keyed by its key.
func (m *impl) AddBlock(b blocks.Block) {
	k := b.Key()
	m.blocks[k] = b
}
// DeleteBlock deletes a block in the blockservice from the datastore func (s *blockService) DeleteBlock(o blocks.Block) error { return s.blockstore.DeleteBlock(o.Cid()) }
func (ps *impl) Publish(block *blocks.Block) { topic := string(block.Key()) ps.wrapped.Pub(block, topic) }
// Publish broadcasts block to subscribers of its CID's key-string topic.
func (ps *impl) Publish(block blocks.Block) {
	topic := block.Cid().KeyString()
	ps.wrapped.Pub(block, topic)
}