Ejemplo n.º 1
0
// pieceLength returns the length in bytes of the given piece. Every
// piece has the nominal length from the info dict except possibly the
// final one, which holds whatever remains of the torrent data.
func (t *torrent) pieceLength(piece int) (len_ pp.Integer) {
	if piece == t.numPieces()-1 {
		// The final piece is the remainder of the total length. A zero
		// remainder means the data divides evenly into full pieces.
		if rem := t.Length() % t.Info.PieceLength; rem != 0 {
			return pp.Integer(rem)
		}
	}
	return pp.Integer(t.Info.PieceLength)
}
Ejemplo n.º 2
0
// pieceLength returns the length in bytes of the given piece, or 0 for
// an out-of-range index. Only the final piece may be shorter than the
// nominal piece length.
func (t *Torrent) pieceLength(piece int) (len_ pp.Integer) {
	if piece < 0 || piece >= t.info.NumPieces() {
		// Invalid index: report zero length.
		return 0
	}
	if piece == t.numPieces()-1 {
		// Final piece holds the remainder; a zero remainder means it is
		// exactly full, handled by the fallthrough below.
		if rem := t.length % t.info.PieceLength; rem != 0 {
			return pp.Integer(rem)
		}
	}
	return pp.Integer(t.info.PieceLength)
}
Ejemplo n.º 3
0
// chunkIndexSpec returns the byte range within a piece occupied by the
// chunk at the given index, truncating the final chunk so it does not
// extend past the end of the piece.
func chunkIndexSpec(index int, pieceLength, chunkSize pp.Integer) chunkSpec {
	begin := pp.Integer(index) * chunkSize
	length := chunkSize
	if begin+length > pieceLength {
		// The last chunk in a piece may be shorter than chunkSize.
		length = pieceLength - begin
	}
	return chunkSpec{begin, length}
}
Ejemplo n.º 4
0
// urgentChunkInPiece reports whether any urgent request targets the
// given piece.
func (t *torrent) urgentChunkInPiece(piece int) bool {
	want := pp.Integer(piece)
	for r := range t.urgent {
		if r.Index == want {
			return true
		}
	}
	return false
}
Ejemplo n.º 5
0
Archivo: misc.go Proyecto: gbjk/torrent
// torrentOffsetRequest returns the request that would include the given
// offset into the torrent data, and ok=false when the offset lies
// outside the torrent.
func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (
	r request, ok bool) {
	if offset < 0 || offset >= torrentLength {
		return
	}
	r.Index = pp.Integer(offset / pieceSize)
	// Round the intra-piece offset down to a chunk boundary.
	r.Begin = pp.Integer(offset % pieceSize / chunkSize * chunkSize)
	// Start from the nominal chunk size, then clip to the piece end and
	// to the torrent end (the arithmetic widths match the original:
	// piece clipping in pp.Integer, torrent clipping in int64).
	r.Length = pp.Integer(chunkSize)
	if pieceLeft := pp.Integer(pieceSize - int64(r.Begin)); pieceLeft < r.Length {
		r.Length = pieceLeft
	}
	if torrentLeft := torrentLength - int64(r.Index)*pieceSize - int64(r.Begin); torrentLeft < int64(r.Length) {
		r.Length = pp.Integer(torrentLeft)
	}
	ok = true
	return
}
Ejemplo n.º 6
0
// connRequestPiecePendingChunks requests the piece's undirtied chunks
// from c in a random order, stopping when the connection won't take
// more requests. Returns whether more requests may be issued.
func (t *Torrent) connRequestPiecePendingChunks(c *connection, piece int) (more bool) {
	if !c.PeerHasPiece(piece) {
		// Nothing to request from this peer for this piece.
		return true
	}
	chunks := t.pieces[piece].undirtiedChunkIndices().ToSortedSlice()
	// ForPerm visits the chunk indices in a random permutation.
	return itertools.ForPerm(len(chunks), func(i int) bool {
		return c.Request(request{pp.Integer(piece), t.chunkIndexSpec(chunks[i], piece)})
	})
}
Ejemplo n.º 7
0
// connRequestPiecePendingChunks requests the piece's pending chunks
// from c in shuffled order, stopping when the connection won't take
// more requests. Returns whether more requests may be issued.
func (t *torrent) connRequestPiecePendingChunks(c *connection, piece int) (more bool) {
	if !c.PeerHasPiece(piece) {
		return true
	}
	index := pp.Integer(piece)
	for _, spec := range t.Pieces[piece].shuffledPendingChunkSpecs(t, piece) {
		if !c.Request(request{index, spec}) {
			return false
		}
	}
	return true
}
Ejemplo n.º 8
0
// Have posts a Have message for the given piece, unless one was already
// sent on this connection.
func (cn *connection) Have(piece int) {
	// Grow the sent-haves list until it covers the piece index.
	for len(cn.sentHaves) <= piece {
		cn.sentHaves = append(cn.sentHaves, false)
	}
	if cn.sentHaves[piece] {
		// Already announced to this peer.
		return
	}
	cn.Post(pp.Message{
		Type:  pp.Have,
		Index: pp.Integer(piece),
	})
	cn.sentHaves[piece] = true
}
Ejemplo n.º 9
0
// numDirtyBytes returns the number of bytes in the piece covered by
// dirty chunks.
func (p *piece) numDirtyBytes() (ret pp.Integer) {
	// Sanity check: the dirty byte count can never exceed the piece
	// length; anything else indicates corrupted accounting.
	defer func() {
		if ret > p.length() {
			panic("too many dirty bytes")
		}
	}()
	dirty := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		// The final chunk may be shorter than the regular chunk size,
		// so account for its actual length separately.
		dirty--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(dirty) * p.chunkSize()
	return
}
Ejemplo n.º 10
0
// validOutgoingRequest reports whether r is a request we could
// legitimately send: piece index in range, chunk-aligned begin, no
// longer than a chunk, and not extending past the end of its piece.
func (t *torrent) validOutgoingRequest(r request) bool {
	switch {
	case r.Index >= pp.Integer(t.Info.NumPieces()):
		return false
	case r.Begin%t.chunkSize != 0:
		return false
	case r.Length > t.chunkSize:
		return false
	}
	pieceLength := t.pieceLength(int(r.Index))
	if r.Begin+r.Length > pieceLength {
		return false
	}
	// A valid request is either a full chunk or the short tail that
	// ends exactly at the piece boundary.
	return r.Length == t.chunkSize || r.Begin+r.Length == pieceLength
}
Ejemplo n.º 11
0
// pieceNumPendingBytes returns how many bytes of the piece at index are
// still outstanding (not yet received as dirty chunks).
func (t *torrent) pieceNumPendingBytes(index int) (count pp.Integer) {
	if t.pieceComplete(index) {
		return 0
	}
	p := &t.Pieces[index]
	count = t.pieceLength(index)
	if !p.EverHashed {
		// Nothing verified yet: the whole piece is pending.
		return
	}
	dirty := p.numDirtyChunks()
	last := t.pieceNumChunks(index) - 1
	// NOTE(review): this tests pendingChunkIndex but adjusts the dirty
	// count — looks asymmetric with numDirtyBytes; confirm intended.
	if p.pendingChunkIndex(last) {
		dirty--
		count -= t.chunkIndexSpec(last, index).Length
	}
	count -= pp.Integer(dirty) * t.chunkSize
	return
}
Ejemplo n.º 12
0
// AddTorrentSpec adds or merges a torrent spec. If the torrent is
// already present, the trackers will be merged with the existing ones.
// If the Info isn't yet known, it will be set. The display name is
// replaced if the new spec provides one. Returns new=true if the
// torrent wasn't already in the client.
func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (t *Torrent, new bool, err error) {
	t, new = cl.AddTorrentInfoHash(spec.InfoHash)
	if spec.DisplayName != "" {
		t.SetDisplayName(spec.DisplayName)
	}
	if spec.Info != nil {
		// Setting the info bytes can fail; bail out before touching
		// trackers or connections.
		err = t.SetInfoBytes(spec.Info.Bytes)
		if err != nil {
			return
		}
	}
	// NOTE(review): the calls above run without cl.mu held — presumably
	// they synchronize internally; confirm before relying on that.
	cl.mu.Lock()
	defer cl.mu.Unlock()
	if spec.ChunkSize != 0 {
		t.chunkSize = pp.Integer(spec.ChunkSize)
	}
	t.addTrackers(spec.Trackers)
	t.maybeNewConns()
	return
}
Ejemplo n.º 13
0
// downloadedChunk handles a chunk (Piece message) received from a peer:
// it books the chunk against any outstanding request, records it as
// dirty, cancels duplicate requests on other connections, writes it to
// storage (dropping cl.mu around the write), and queues a hash check
// when the piece becomes fully dirty.
//
// Fixes over the previous version: the cancel loop no longer shadows
// the parameter c with its own loop variable, and the redundant
// trailing bare return is removed (staticcheck S1023).
func (cl *Client) downloadedChunk(t *Torrent, c *connection, msg *pp.Message) {
	chunksReceived.Add(1)

	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))

	// Request has been satisfied.
	if cl.connDeleteRequest(t, c, req) {
		defer c.updateRequests()
	} else {
		unexpectedChunksReceived.Add(1)
	}

	index := int(req.Index)
	piece := &t.pieces[index]

	// Do we actually want this chunk?
	if !t.wantPiece(req) {
		unwantedChunksReceived.Add(1)
		c.UnwantedChunksReceived++
		return
	}

	c.UsefulChunksReceived++
	c.lastUsefulChunkReceived = time.Now()

	cl.upload(t, c)

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk.
	piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk on every connection. A
	// distinct loop variable avoids shadowing the parameter c.
	for _, conn := range t.conns {
		if cl.connCancel(t, conn, req) {
			conn.updateRequests()
		}
	}

	cl.mu.Unlock()
	// Write the chunk out. Note that the upper bound on chunk writing
	// concurrency will be the number of connections.
	err := t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	cl.mu.Lock()

	piece.decrementPendingWrites()

	if err != nil {
		log.Printf("%s: error writing chunk %v: %s", t, req, err)
		// Re-pend the chunk so it gets requested again.
		t.pendRequest(req)
		t.updatePieceCompletion(int(msg.Index))
		return
	}

	// It's important that the piece is potentially queued before we check if
	// the piece is still wanted, because if it is queued, it won't be wanted.
	if t.pieceAllDirty(index) {
		cl.queuePieceCheck(t, int(req.Index))
	}

	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[int]struct{})
	}
	c.peerTouchedPieces[index] = struct{}{}

	cl.event.Broadcast()
	t.publishPieceChange(int(req.Index))
}