// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
	// Iterate over the expired requests and return each to the queue
	expiries := make(map[string]int)
	for id, request := range pendPool {
		if time.Since(request.Time) > timeout {
			// Update the metrics with the timeout
			timeoutMeter.Mark(1)

			// Return any non-satisfied requests to the pool
			for hash, index := range request.Hashes {
				taskQueue.Push(hash, float32(index))
			}
			for _, header := range request.Headers {
				taskQueue.Push(header, -float32(header.Number.Uint64()))
			}
			// Add the peer to the expiry report along with the number of failed requests
			expirations := len(request.Hashes)
			if expirations < len(request.Headers) {
				expirations = len(request.Headers)
			}
			expiries[id] = expirations
		}
	}
	// Remove the expired requests from the pending pool
	for id := range expiries {
		delete(pendPool, id)
	}
	return expiries
}
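// Illustrative sketch (not part of the original file): a typed wrapper is
// expected to take the write lock before delegating to expire. The field and
// meter names used here (blockPendPool, blockTaskQueue, bodyTimeoutMeter) are
// assumptions for illustration only.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}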
// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
	q.lock.Lock()
	defer q.lock.Unlock()

	for hash, index := range request.Hashes {
		taskQueue.Push(hash, float32(index))
	}
	for _, header := range request.Headers {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	delete(pendPool, request.Peer.id)
}
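// Illustrative sketch (not part of the original file): since cancel acquires
// the queue lock itself, a typed wrapper can simply delegate. The
// blockTaskQueue and blockPendPool field names are assumed for illustration.
func (q *queue) CancelBodies(request *fetchRequest) {
	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}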
// reserveHashes reserves a set of hashes for the given peer, skipping previously
// failed ones.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest {
	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := pendPool[p.id]; ok {
		return nil
	}
	// Calculate an upper limit on the hashes we might fetch (i.e. throttling)
	allowance := maxPending
	if allowance > 0 {
		for _, request := range pendPool {
			allowance -= len(request.Hashes)
		}
	}
	// If there's a task generator, ask it to fill our task queue
	if taskGen != nil && taskQueue.Size() < allowance {
		taskGen(allowance - taskQueue.Size())
	}
	if taskQueue.Empty() {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send := make(map[common.Hash]int)
	skip := make(map[common.Hash]int)

	for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ {
		hash, priority := taskQueue.Pop()
		if p.Lacks(hash.(common.Hash)) {
			skip[hash.(common.Hash)] = int(priority)
		} else {
			send[hash.(common.Hash)] = int(priority)
		}
	}
	// Merge all the skipped hashes back
	for hash, index := range skip {
		taskQueue.Push(hash, float32(index))
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer:   p,
		Hashes: send,
		Time:   time.Now(),
	}
	pendPool[p.id] = request

	return request
}
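// Illustrative sketch (not part of the original file): a hash-based reservation
// wrapper might take the write lock and cap pending work by the free result
// cache space. The hashQueue, blockPendPool and blockDonePool field names are
// assumed for illustration.
func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool))
}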
// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
	results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {

	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		useful   bool
	)
	for i, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Reconstruct the next result if contents match up
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
			failure = errInvalidChain
			break
		}
		if err := reconstruct(header, i, q.resultCache[index]); err != nil {
			failure = err
			break
		}
		donePool[header.Hash()] = struct{}{}
		q.resultCache[index].Pending--
		useful = true
		accepted++

		// Clean up a successful fetch
		request.Headers[i] = nil
		delete(taskPool, header.Hash())
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers {
		if header != nil {
			taskQueue.Push(header, -float32(header.Number.Uint64()))
		}
	}
	// Wake up WaitResults
	if accepted > 0 {
		q.active.Signal()
	}
	// If none of the data was good, it's a stale delivery
	switch {
	case failure == nil || failure == errInvalidChain:
		return accepted, failure
	case useful:
		return accepted, fmt.Errorf("partial failure: %v", failure)
	default:
		return accepted, errStaleDelivery
	}
}
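// Illustrative sketch (not part of the original file): a body delivery wrapper
// would validate the retrieved transactions and uncles against the header
// commitments inside the reconstruct callback. The field, timer and error
// names (blockTaskPool, bodyReqTimer, errInvalidBody) are assumed for
// illustration.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	reconstruct := func(header *types.Header, index int, result *fetchResult) error {
		// Reject the body if its transaction or uncle hash mismatches the header
		if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		return nil
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}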
// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {

	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, nil
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, nil
	}
	// Calculate an upper limit on the items we might fetch (i.e. throttling)
	space := len(q.resultCache) - len(donePool)
	for _, request := range pendPool {
		space -= len(request.Headers)
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)

	progress := false
	for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
		header := taskQueue.PopItem().(*types.Header)

		// If we're the first to request this task, initialise the result container
		index := int(header.Number.Int64() - int64(q.resultOffset))
		if index >= len(q.resultCache) || index < 0 {
			common.Report("index allocation went beyond available resultCache space")
			return nil, false, errInvalidChain
		}
		if q.resultCache[index] == nil {
			components := 1
			if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot {
				components = 2
			}
			q.resultCache[index] = &fetchResult{
				Pending: components,
				Header:  header,
			}
		}
		// If this fetch task is a noop, skip this fetch operation
		if isNoop(header) {
			donePool[header.Hash()] = struct{}{}
			delete(taskPool, header.Hash())

			space, proc = space-1, proc-1
			q.resultCache[index].Pending--
			progress = true
			continue
		}
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -float32(header.Number.Uint64()))
	}
	if progress {
		// Wake WaitResults, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, nil
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request

	return request, progress, nil
}
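// Illustrative sketch (not part of the original file): a body reservation
// wrapper would flag empty blocks (no transactions, no uncles) as noops so
// they complete without a network round trip. The blockTaskPool and related
// field names are assumed for illustration.
func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
	// An empty body is one whose header commits to no transactions and no uncles
	isNoop := func(header *types.Header) bool {
		return header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{})
	}
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}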