// Returns true if the connection is added. func (cl *Client) addConnection(t *Torrent, c *connection) bool { if cl.closed.IsSet() { return false } if !cl.wantConns(t) { return false } for _, c0 := range t.conns { if c.PeerID == c0.PeerID { // Already connected to a client with that ID. duplicateClientConns.Add(1) return false } } if len(t.conns) >= socketsPerTorrent { c := t.worstBadConn(cl) if c == nil { return false } if cl.config.Debug && missinggo.CryHeard() { log.Printf("%s: dropping connection to make room for new one:\n %s", t, c) } c.Close() t.deleteConnection(c) } if len(t.conns) >= socketsPerTorrent { panic(len(t.conns)) } t.conns = append(t.conns, c) c.t = t return true }
// In a get_peers response, the addresses of torrent clients involved with the // queried info-hash. func (m Msg) Values() (vs []Peer) { v := func() interface{} { defer func() { recover() }() return m["r"].(map[string]interface{})["values"] }() if v == nil { return } vl, ok := v.([]interface{}) if !ok { if missinggo.CryHeard() { log.Printf(`unexpected krpc "values" field: %#v`, v) } return } vs = make([]Peer, 0, len(vl)) for _, i := range vl { s, ok := i.(string) if !ok { panic(i) } // Because it's a list of strings, we can let the length of the string // determine the IP version of the compact peer. var cp util.CompactPeer err := cp.UnmarshalBinary([]byte(s)) if err != nil { log.Printf("error decoding values list element: %s", err) continue } vs = append(vs, Peer{cp.IP[:], int(cp.Port)}) } return }
// processPacket handles a single raw KRPC datagram received from addr:
// queries are dispatched to handleQuery, responses are matched to their
// pending transaction.
func (s *Server) processPacket(b []byte, addr dHTAddr) {
	if len(b) < 2 || b[0] != 'd' || b[len(b)-1] != 'e' {
		// KRPC messages are bencoded dicts.
		readNotKRPCDict.Add(1)
		return
	}
	var d Msg
	err := bencode.Unmarshal(b, &d)
	if err != nil {
		readUnmarshalError.Add(1)
		// The closure below decides whether this decode failure is worth
		// logging; several known-benign corruption patterns are ignored.
		func() {
			if se, ok := err.(*bencode.SyntaxError); ok {
				// The message was truncated.
				if int(se.Offset) == len(b) {
					return
				}
				// Some messages seem to drop to nul chars abruptly.
				if int(se.Offset) < len(b) && b[se.Offset] == 0 {
					return
				}
				// The message isn't bencode from the first.
				if se.Offset == 0 {
					return
				}
			}
			if missinggo.CryHeard() {
				log.Printf("%s: received bad krpc message from %s: %s: %+q", s, addr, err, b)
			}
		}()
		return
	}
	// Everything below touches server state: transactions and the node table.
	s.mu.Lock()
	defer s.mu.Unlock()
	if d.Y == "q" {
		readQuery.Add(1)
		s.handleQuery(addr, d)
		return
	}
	// Not a query: treat it as a response and find who was waiting for it.
	t := s.findResponseTransaction(d.T, addr)
	if t == nil {
		//log.Printf("unexpected message: %#v", d)
		return
	}
	node := s.getNode(addr, d.SenderID())
	node.lastGotResponse = time.Now()
	// TODO: Update node ID as this is an authoritative packet.
	// Deliver on a separate goroutine so the handler can't block while we
	// hold s.mu; the transaction is removed immediately after dispatch.
	go t.handleResponse(d)
	s.deleteTransaction(t)
}
func (cl *Client) ipBlockRange(ip net.IP) (r iplist.Range, blocked bool) { if cl.ipBlockList == nil { return } ip4 := ip.To4() // If blocklists are enabled, then block non-IPv4 addresses, because // blocklists do not yet support IPv6. if ip4 == nil { if missinggo.CryHeard() { log.Printf("blocking non-IPv4 address: %s", ip) } r = ipv6BlockRange blocked = true return } return cl.ipBlockList.Lookup(ip4) }
// Returns true if the connection is added. func (t *Torrent) addConnection(c *connection) bool { if t.cl.closed.IsSet() { return false } if !t.wantConns() { return false } for _, c0 := range t.conns { if c.PeerID == c0.PeerID { // Already connected to a client with that ID. duplicateClientConns.Add(1) return false } } if len(t.conns) >= t.maxEstablishedConns { c := t.worstBadConn() if c == nil { return false } if t.cl.config.Debug && missinggo.CryHeard() { log.Printf("%s: dropping connection to make room for new one:\n %s", t, c) } c.Close() t.deleteConnection(c) } if len(t.conns) >= t.maxEstablishedConns { panic(len(t.conns)) } t.conns = append(t.conns, c) if c.t != nil { panic("connection already associated with a torrent") } // Reconcile bytes transferred before connection was associated with a // torrent. t.stats.wroteBytes(c.stats.BytesWritten) t.stats.readBytes(c.stats.BytesRead) c.t = t return true }
// processDelivery handles one inbound uTP packet for this connection:
// applies acks and window updates, advances the connection state machine,
// and buffers out-of-order payloads for in-order consumption.
func (c *Conn) processDelivery(h header, payload []byte) {
	deliveriesProcessed.Add(1)
	c.mu.Lock()
	defer c.mu.Unlock()
	// Wake anyone waiting on connection state, whatever happens below.
	defer c.event.Broadcast()
	c.assertHeader(h)
	c.peerWndSize = h.WndSize
	c.applyAcks(h)
	if h.Timestamp == 0 {
		c.lastTimeDiff = 0
	} else {
		c.lastTimeDiff = c.timestamp() - h.Timestamp
	}
	// We want this connection destroyed, and our peer has acked everything.
	if c.sentFin && len(c.unackedSends) == 0 {
		// log.Print("gracefully completed")
		c.destroy(nil)
		return
	}
	if h.Type == stReset {
		c.destroy(errors.New("peer reset"))
		return
	}
	// Handshake in progress: only an ST_STATE completes it.
	if c.cs == csSynSent {
		if h.Type != stState {
			return
		}
		c.changeState(csConnected)
		// The peer's first data packet will carry SeqNr, so start acking
		// just behind it.
		c.ack_nr = h.SeqNr - 1
		return
	}
	// Pure acks carry no payload; everything relevant was handled above.
	if h.Type == stState {
		return
	}
	// A data-bearing packet arrived; schedule a state (ack) packet in reply.
	c.pendSendState()
	if !seqLess(c.ack_nr, h.SeqNr) {
		// Already received this packet.
		return
	}
	// Offset of this packet past the next expected sequence number.
	inboundIndex := int(h.SeqNr - c.ack_nr - 1)
	if inboundIndex < len(c.inbound) && c.inbound[inboundIndex].seen {
		// Already received this packet.
		return
	}
	// Derived from running in production:
	// grep -oP '(?<=packet out of order, index=)\d+' log | sort -n | uniq -c
	// 64 should correspond to 8 bytes of selective ack.
	if inboundIndex >= maxUnackedInbound {
		// Discard packet too far ahead.
		if missinggo.CryHeard() {
			// I can't tell if this occurs due to bad peers, or something
			// missing in the implementation.
			log.Printf("received packet from %s %d ahead of next seqnr (%x > %x)", c.remoteAddr, inboundIndex, h.SeqNr, c.ack_nr+1)
		}
		return
	}
	// Extend inbound so the new packet has a place.
	for inboundIndex >= len(c.inbound) {
		c.inbound = append(c.inbound, recv{})
	}
	c.inbound[inboundIndex] = recv{true, payload, h.Type}
	c.processInbound()
}
// deliver handles one inbound uTP packet for this connection: validates the
// connection ID, applies (selective) acks and window updates, advances the
// connection state machine, and buffers payloads for in-order consumption
// into the read buffer.
func (c *Conn) deliver(h header, payload []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Wake anyone waiting on connection state, whatever happens below.
	defer c.event.Broadcast()
	// A SYN addresses our send ID; everything else must address our
	// receive ID. The demuxer should guarantee this, hence panics.
	if h.Type == stSyn {
		if h.ConnID != c.send_id {
			panic(fmt.Sprintf("%d != %d", h.ConnID, c.send_id))
		}
	} else {
		if h.ConnID != c.recv_id {
			panic("erroneous delivery")
		}
	}
	c.peerWndSize = h.WndSize
	c.ackTo(h.AckNr)
	for _, ext := range h.Extensions {
		switch ext.Type {
		case extensionTypeSelectiveAck:
			// The selective-ack bitmask starts at AckNr+2; AckNr+1 is
			// implicitly not acked, or the extension wouldn't be present.
			c.ackSkipped(h.AckNr + 1)
			bitmask := selectiveAckBitmask(ext.Bytes)
			for i := 0; i < bitmask.NumBits(); i++ {
				if bitmask.BitIsSet(i) {
					nr := h.AckNr + 2 + uint16(i)
					// log.Printf("selectively acked %d", nr)
					c.ack(nr)
				} else {
					c.ackSkipped(h.AckNr + 2 + uint16(i))
				}
			}
		}
	}
	if h.Timestamp == 0 {
		c.lastTimeDiff = 0
	} else {
		c.lastTimeDiff = c.timestamp() - h.Timestamp
	}
	// log.Printf("now micros: %d, header timestamp: %d, header diff: %d", c.timestamp(), h.Timestamp, h.TimestampDiff)
	if h.Type == stReset {
		c.destroy(errors.New("peer reset"))
		return
	}
	// Handshake in progress: only an ST_STATE completes it.
	if c.cs == csSynSent {
		if h.Type != stState {
			return
		}
		c.cs = csConnected
		// The peer's first data packet will carry SeqNr, so start acking
		// just behind it.
		c.ack_nr = h.SeqNr - 1
		return
	}
	// Pure acks carry no payload; everything relevant was handled above.
	if h.Type == stState {
		return
	}
	if !seqLess(c.ack_nr, h.SeqNr) {
		if h.Type == stSyn {
			// Re-received the SYN: our state reply was presumably lost, so
			// resend it.
			c.sendState()
		}
		// Already received this packet.
		return
	}
	// Offset of this packet past the next expected sequence number.
	inboundIndex := int(h.SeqNr - c.ack_nr - 1)
	if inboundIndex < len(c.inbound) && c.inbound[inboundIndex].seen {
		// Already received this packet.
		return
	}
	// Derived from running in production:
	// grep -oP '(?<=packet out of order, index=)\d+' log | sort -n | uniq -c
	// 64 should correspond to 8 bytes of selective ack.
	if inboundIndex >= maxUnackedInbound {
		// Discard packet too far ahead.
		if missinggo.CryHeard() {
			// I can't tell if this occurs due to bad peers, or something
			// missing in the implementation.
			log.Printf("received packet from %s %d ahead of next seqnr (%x > %x)", c.remoteAddr, inboundIndex, h.SeqNr, c.ack_nr+1)
		}
		return
	}
	// Extend inbound so the new packet has a place.
	for inboundIndex >= len(c.inbound) {
		c.inbound = append(c.inbound, recv{})
	}
	if inboundIndex != 0 {
		// log.Printf("packet out of order, index=%d", inboundIndex)
	}
	c.inbound[inboundIndex] = recv{true, payload}
	// Consume consecutive next packets.
	for len(c.inbound) > 0 && c.inbound[0].seen {
		c.ack_nr++
		c.readBuf = append(c.readBuf, c.inbound[0].data...)
		c.inbound = c.inbound[1:]
	}
	// Ack what we've consumed so far.
	c.sendState()
	// After sending our FIN, tear down once the peer has acked everything
	// up to it.
	if c.cs == csSentFin {
		if !seqLess(h.AckNr, c.seq_nr-1) {
			c.destroy(nil)
		}
	}
	if h.Type == stFin {
		c.destroy(nil)
	}
}