func (m *mfileBlock) WriteBuf(_ context.Context, s torus.BlockRef) ([]byte, error) {
	m.mut.Lock()
	defer m.mut.Unlock()
	if m.closed {
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return nil, torus.ErrClosed
	}
	index := m.findEmpty()
	if index == -1 {
		clog.Error("mfile: out of space")
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return nil, torus.ErrOutOfSpace
	}
	clog.Tracef("mfile: writing block at index %d", index)
	buf := m.dataFile.GetBlock(uint64(index))
	err := m.refFile.WriteBlock(uint64(index), s.ToBytes())
	if err != nil {
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return nil, err
	}
	if v := m.findIndex(s); v != -1 {
		// We already have this block at another index; return ErrExists so the
		// caller knows no new write buffer was handed out.
		clog.Debug("mfile: block already exists", s)
		return nil, torus.ErrExists
	}
	promBlocks.WithLabelValues(m.name).Inc()
	m.refIndex[s] = index
	promBlocksWritten.WithLabelValues(m.name).Inc()
	return buf, nil
}
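// GetPeers returns a deterministic permutation of peers for the given block
// ref using the ketama hash ring. Any known peers the ring did not return are
// appended, and Replication is capped at the number of known peers.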
func (k *ketama) GetPeers(key torus.BlockRef) (torus.PeerPermutation, error) {
	s, ok := k.ring.GetNodes(string(key.ToBytes()), len(k.peers))
	if !ok {
		if len(s) == 0 {
			return torus.PeerPermutation{}, errors.New("couldn't get any nodes")
		}
		for _, x := range k.peers {
			has := false
			for _, y := range s {
				if y == x.UUID {
					has = true
					break
				}
			}
			if !has {
				s = append(s, x.UUID)
			}
		}
	}

	if len(s) != len(k.peers) {
		return torus.PeerPermutation{}, errors.New("couldn't get sufficient nodes")
	}

	rep := k.rep
	if len(k.peers) < k.rep {
		rep = len(k.peers)
	}

	return torus.PeerPermutation{
		Peers:       s,
		Replication: rep,
	}, nil
}
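// Block requests a single block from the remote peer this client is connected
// to and returns its raw data.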
func (c *client) Block(ctx context.Context, ref torus.BlockRef) ([]byte, error) {
	resp, err := c.handler.Block(ctx, &models.BlockRequest{
		BlockRef: ref.ToProto(),
	})
	if err != nil {
		return nil, err
	}
	return resp.Data, nil
}
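// readFromPeer fetches a block from one specific peer and, on success, stores
// it in the local read cache before returning it.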
func (d *Distributor) readFromPeer(ctx context.Context, i torus.BlockRef, peer string) ([]byte, error) {
	blk, err := d.client.GetBlock(ctx, peer, i)
	// If we're successful, store that.
	if err == nil {
		d.readCache.Put(string(i.ToBytes()), blk)
		promDistBlockPeerHits.WithLabelValues(peer).Inc()
		return blk, nil
	}
	return nil, err
}
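// PutBlock sends a single ref/data pair to the remote peer this client is
// connected to.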
func (c *client) PutBlock(ctx context.Context, ref torus.BlockRef, data []byte) error {
	_, err := c.handler.PutBlock(ctx, &models.PutBlockRequest{
		Refs: []*models.BlockRef{
			ref.ToProto(),
		},
		Blocks: [][]byte{
			data,
		},
	})
	return err
}
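// GetPeers rotates the stored peer list by the CRC32 of the block ref,
// producing a simple modulo-based permutation with a fixed replication count.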
func (m *mod) GetPeers(key torus.BlockRef) (torus.PeerPermutation, error) {
	permute := make([]string, len(m.peerlist))
	crc := crc32.ChecksumIEEE(key.ToBytes())
	sum := int(crc) % len(m.peers)
	copy(permute, m.peerlist[sum:])
	copy(permute[len(m.peerlist)-sum:], m.peerlist[:sum])
	return torus.PeerPermutation{
		Peers:       permute,
		Replication: m.rep,
	}, nil
}
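// GetBlock reads a block, trying the read cache first, then local storage
// (when this node holds a replica or writes are local-only), and finally the
// remote peers selected by the ring, using the configured read level.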
func (d *Distributor) GetBlock(ctx context.Context, i torus.BlockRef) ([]byte, error) {
	d.mut.RLock()
	defer d.mut.RUnlock()
	promDistBlockRequests.Inc()
	bcache, ok := d.readCache.Get(string(i.ToBytes()))
	if ok {
		promDistBlockCacheHits.Inc()
		return bcache.([]byte), nil
	}
	peers, err := d.ring.GetPeers(i)
	if err != nil {
		promDistBlockFailures.Inc()
		return nil, err
	}
	if len(peers.Peers) == 0 {
		promDistBlockFailures.Inc()
		return nil, ErrNoPeersBlock
	}
	writeLevel := d.getWriteFromServer()
	for _, p := range peers.Peers[:peers.Replication] {
		if p == d.UUID() || writeLevel == torus.WriteLocal {
			b, err := d.blocks.GetBlock(ctx, i)
			if err == nil {
				promDistBlockLocalHits.Inc()
				return b, nil
			}
			promDistBlockLocalFailures.Inc()
			break
		}
	}
	var blk []byte
	readLevel := d.getReadFromServer()
	switch readLevel {
	case torus.ReadBlock:
		blk, err = d.readWithBackoff(ctx, i, peers)
	case torus.ReadSequential:
		blk, err = d.readSequential(ctx, i, peers, clientTimeout)
	case torus.ReadSpread:
		blk, err = d.readSpread(ctx, i, peers)
	default:
		panic("unhandled read level")
	}
	if err != nil {
		// We completely failed!
		promDistBlockFailures.Inc()
		clog.Errorf("no peers for block %s", i)
	}
	return blk, err
}
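// WriteBlock writes a block and its ref to the next empty index in the
// backing data and ref files. Rewriting an existing block with identical data
// is treated as a no-op; conflicting data for an existing ref is an error.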
func (m *mfileBlock) WriteBlock(_ context.Context, s torus.BlockRef, data []byte) error {
	m.mut.Lock()
	defer m.mut.Unlock()
	if m.closed {
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return torus.ErrClosed
	}
	index := m.findEmpty()
	if index == -1 {
		clog.Error("mfile: out of space")
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return torus.ErrOutOfSpace
	}
	clog.Tracef("mfile: writing block at index %d", index)
	err := m.dataFile.WriteBlock(uint64(index), data)
	if err != nil {
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return err
	}
	err = m.refFile.WriteBlock(uint64(index), s.ToBytes())
	if err != nil {
		promBlockWritesFailed.WithLabelValues(m.name).Inc()
		return err
	}
	if v := m.findIndex(s); v != -1 {
		// we already have it
		clog.Debug("mfile: block already exists", s)
		olddata := m.dataFile.GetBlock(uint64(v))
		if !bytes.Equal(olddata, data) {
			clog.Error("getting wrong data for block", s)
			clog.Errorf("%s, %s", olddata[:10], data[:10])
			return torus.ErrExists
		}
		// Not an error, if we already have it
		return nil
	}
	promBlocks.WithLabelValues(m.name).Inc()
	m.refIndex[s] = index
	promBlocksWritten.WithLabelValues(m.name).Inc()
	return nil
}
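// GetPeers (mod.go, coreos/torus): another modulo-ring variant that rebuilds
// the sorted peer list on each call, validates it against the known peers,
// and caps Replication at the number of available peers.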
func (m *mod) GetPeers(key torus.BlockRef) (torus.PeerPermutation, error) {
	peerlist := sort.StringSlice([]string(m.peers.PeerList()))
	if len(peerlist) == 0 {
		return torus.PeerPermutation{}, fmt.Errorf("couldn't get any nodes")
	}
	if len(peerlist) != len(m.peers) {
		return torus.PeerPermutation{}, fmt.Errorf("couldn't get sufficient nodes")
	}
	permute := make([]string, len(peerlist))
	crc := crc32.ChecksumIEEE(key.ToBytes())
	sum := int(crc) % len(m.peers)
	copy(permute, peerlist[sum:])
	copy(permute[len(peerlist)-sum:], peerlist[:sum])
	rep := m.rep
	if len(m.peers) < m.rep {
		rep = len(m.peers)
	}
	return torus.PeerPermutation{
		Peers:       permute,
		Replication: rep,
	}, nil
}
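// IsDead reports whether a block ref is garbage-collectable: its volume no
// longer exists, it is an INode block for an INode that is no longer live, or
// it is a data block that is not in the current live set. Blocks at or above
// the volume's highwater INode are always kept.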
func (b *blockvolGC) IsDead(ref torus.BlockRef) bool {
	v, ok := b.highwaters[ref.Volume()]
	if !ok {
		if clog.LevelAt(capnslog.TRACE) {
			clog.Tracef("%s doesn't exist anymore", ref)
		}
		// Volume doesn't exist anymore
		return true
	}
	// If it's a new block or INode, let it be.
	if ref.INode >= v {
		if clog.LevelAt(capnslog.TRACE) {
			clog.Tracef("%s is new compared to %d", ref, v)
		}
		return false
	}
	// If it's an INode block, it's live only if it belongs to one of the current INodes.
	if ref.BlockType() == torus.TypeINode {
		for _, x := range b.curINodes {
			if ref.HasINode(x, torus.TypeINode) {
				if clog.LevelAt(capnslog.TRACE) {
					clog.Tracef("%s is in %s", ref, x)
				}
				return false
			}
		}
		if clog.LevelAt(capnslog.TRACE) {
			clog.Tracef("%s is a dead INode", ref)
		}
		return true
	}
	// If it's a data block, it's live only while it's in the current live set.
	if v := b.set[ref]; v {
		return false
	}
	if clog.LevelAt(capnslog.TRACE) {
		clog.Tracef("%s is dead", ref)
	}
	return true
}
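// WriteBlock writes a block to the cluster according to the configured write
// level: local storage only (falling back to the cluster on failure), at
// least one replica, or every replica, warning when only some replicas
// succeed.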
func (d *Distributor) WriteBlock(ctx context.Context, i torus.BlockRef, data []byte) error {
	d.mut.RLock()
	defer d.mut.RUnlock()
	peers, err := d.ring.GetPeers(i)
	if err != nil {
		return err
	}
	if len(peers.Peers) == 0 {
		return torus.ErrOutOfSpace
	}
	d.readCache.Put(string(i.ToBytes()), data)
	switch d.getWriteFromServer() {
	case torus.WriteLocal:
		err = d.blocks.WriteBlock(ctx, i, data)
		if err == nil {
			return nil
		}
		clog.Tracef("Couldn't write locally; writing to cluster")
		fallthrough
	case torus.WriteOne:
		for _, p := range peers.Peers[:peers.Replication] {
			// If we're one of the desired peers, write locally first.
			if p == d.UUID() {
				err = d.blocks.WriteBlock(ctx, i, data)
				if err != nil {
					clog.Noticef("WriteOne error, local: %s", err)
				} else {
					return nil
				}
			}
		}
		for _, p := range peers.Peers {
			err = d.client.PutBlock(ctx, p, i, data)
			if err == nil {
				return nil
			}
			clog.Noticef("WriteOne error, remote: %s", err)
		}
		return torus.ErrNoPeer
	case torus.WriteAll:
		toWrite := peers.Replication
		for _, p := range peers.Peers {
			var err error
			if p == d.UUID() {
				err = d.blocks.WriteBlock(ctx, i, data)
			} else {
				err = d.client.PutBlock(ctx, p, i, data)
			}
			if err != nil {
				clog.Noticef("error WriteAll to peer %s: %s", p, err)
			} else {
				toWrite--
			}
			if toWrite == 0 {
				return nil
			}
		}
		if toWrite == peers.Replication {
			clog.Noticef("error WriteAll to all peers")
			return torus.ErrNoPeer
		}
		clog.Warningf("only wrote block to %d/%d peers", toWrite, peers.Replication)
	}
	return nil
}