// TestHTTPTrackerScrape verifies that the HTTP tracker scrape format is correct
func TestHTTPTrackerScrape(t *testing.T) {
	log.Println("TestHTTPTrackerScrape()")

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566",
		Verified: true,
	}

	// Save mock file
	if !file.Save() {
		t.Fatalf("Failed to save mock file")
	}

	// Store file in slice
	files := make([]data.FileRecord, 0)
	files = append(files, file)

	// Create a HTTP tracker, trigger a scrape
	tracker := HTTPTracker{}
	res := tracker.Scrape(files)
	log.Println(string(res))

	// Unmarshal response
	scrape := scrapeResponse{}
	if err := bencode.Unmarshal(bytes.NewReader(res), &scrape); err != nil {
		t.Fatalf("Failed to unmarshal bencode scrape response")
	}

	// Delete mock file
	if !file.Delete() {
		t.Fatalf("Failed to delete mock file")
	}
}

func getSectionList(input io.Reader) (list SectionList, err error) {
	list = make(SectionList, 0, 5)
	err = bencode.Unmarshal(input, &list)
	return
}

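// The snippets collected here all lean on the same two entry points:
// bencode.Marshal(io.Writer, value) and bencode.Unmarshal(io.Reader, &value).
// The round trip below is a minimal sketch, not taken from any of the original
// projects; the handshake struct and its fields are made up for illustration
// and assume the library maps struct fields to dictionary keys.
func exampleRoundTrip() error {
	type handshake struct {
		Name string
		Port int64
	}

	var buf bytes.Buffer
	in := handshake{Name: "node-1", Port: 6881}
	if err := bencode.Marshal(&buf, in); err != nil {
		return err
	}

	var out handshake
	if err := bencode.Unmarshal(&buf, &out); err != nil {
		return err
	}
	fmt.Printf("decoded: %+v\n", out)
	return nil
}
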
// processRequest answers an incoming Request: the sentinel values
// Index == Begin == Length == -1 ask for the file's metadata, anything else
// asks for a block read straight from disk.
func (s *share) processRequest(msg []byte) ([]byte, error) {
	fmt.Println("process request")
	var r Request
	err := bencode.Unmarshal(bytes.NewBuffer(msg), &r)
	if err != nil {
		return []byte{}, err
	}

	mdata, err := s.getFileMeta(r.File)
	check(err)

	if r.Index == -1 && r.Begin == -1 && r.Length == -1 {
		fmt.Println("want some meta")
		var data bytes.Buffer
		err = bencode.Marshal(&data, *mdata)
		fmt.Println(mdata)
		check(err)
		return s.createPiece(r.File, -1, -1, data.Bytes()), nil
	}

	buf := make([]byte, r.Length)
	f, err := os.Open(s.Path + "/" + r.File)
	check(err)
	_, err = f.ReadAt(buf, int64(r.Index*mdata.Piece_length+r.Begin))
	f.Close()
	check(err)
	fmt.Println("sending piece for", r.File, r.Index, r.Begin)
	return s.createPiece(r.File, r.Index, r.Begin, buf), nil
}

func (t *TorrentSession) reload(metadata string) {
	var info InfoDict
	err := bencode.Unmarshal(bytes.NewReader([]byte(metadata)), &info)
	if err != nil {
		log.Println("Error when reloading torrent: ", err)
		return
	}
	t.M.Info = info
	t.load()
}

//p == nil if not a ping
func parseHeader(b []byte) (Header, error) {
	var h Header
	if string(b[:4]) == "DBIT" {
		err := bencode.Unmarshal(bytes.NewBuffer(b[4:]), &h)
		if err != nil {
			return h, fmt.Errorf("Invalid Bencoding")
		}
		return h, nil
	}
	return h, fmt.Errorf("Not a Dropbit message")
}

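// A hedged sketch of the framing parseHeader expects: a 4-byte "DBIT" magic
// followed by a bencoded body. frameMessage is not part of the original code,
// and the payload type is left to the caller since Header's fields are not
// shown here.
func frameMessage(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteString("DBIT")
	if err := bencode.Marshal(&buf, v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
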
// getFileMeta loads the stored bt_file metadata for path from the database.
func (s *share) getFileMeta(path string) (btf *bt_file, err error) {
	var data []byte
	err = s.Db.QueryRow("SELECT data FROM files WHERE path = ?", path).Scan(&data)
	if err != nil {
		return btf, err
	}
	//TODO THIS IS WEIRD, WE NEED TO GO DEEPER
	var bt bt_file
	err = bencode.Unmarshal(bytes.NewBuffer(data), &bt)
	return &bt, err
}

// TestHTTPTrackerError verifies that the HTTP tracker error format is correct
func TestHTTPTrackerError(t *testing.T) {
	log.Println("TestHTTPTrackerError()")

	// Create a HTTP tracker, trigger an error
	tracker := HTTPTracker{}
	res := tracker.Error("Testing")
	log.Println(string(res))

	// Unmarshal response
	errRes := errorResponse{}
	if err := bencode.Unmarshal(bytes.NewReader(res), &errRes); err != nil {
		t.Fatalf("Failed to unmarshal bencode error response")
	}
}

// Read responses from bencode-speaking nodes. Return the appropriate data structure.
func readResponse(p packetType) (response responseType, err error) {
	// The calls to bencode.Unmarshal() can be fragile.
	defer func() {
		if x := recover(); x != nil {
			// debug.Printf("DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v", string(p.b), x)
		}
	}()
	if e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {
		err = nil
		return
	} else {
		// debug.Printf("DHT: unmarshal error, odd or partial data during UDP read? %v, err=%s", string(p.b), e2)
		return response, e2
	}
	return
}

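// A small sketch (not from the original DHT code) of the same defensive idea
// used in readResponse: keep a possible panic inside bencode.Unmarshal from
// taking down the UDP read loop by converting it into an ordinary error.
func safeUnmarshal(b []byte, v interface{}) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("bencode.Unmarshal panicked: %v", x)
		}
	}()
	return bencode.Unmarshal(bytes.NewBuffer(b), v)
}
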
// TestHTTPAnnounce verifies that the HTTP tracker announce output format is correct
func TestHTTPAnnounce(t *testing.T) {
	log.Println("TestHTTPAnnounce()")

	// Load config
	config, err := common.LoadConfig()
	if err != nil {
		t.Fatalf("Could not load configuration: %s", err.Error())
	}
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566303030303030303030303030",
		Verified: true,
	}

	// Save mock file
	if err := file.Save(); err != nil {
		t.Fatalf("Failed to save mock file: %s", err.Error())
	}

	// Generate fake announce query
	query := url.Values{}
	query.Set("info_hash", "deadbeef")
	query.Set("ip", "127.0.0.1")
	query.Set("port", "5000")
	query.Set("uploaded", "0")
	query.Set("downloaded", "0")
	query.Set("left", "0")

	// Create a HTTP tracker, trigger an announce
	tracker := HTTPTracker{}
	res := tracker.Announce(query, file)
	log.Println(string(res))

	// Unmarshal response
	announce := AnnounceResponse{}
	if err := bencode.Unmarshal(bytes.NewReader(res), &announce); err != nil {
		t.Fatalf("Failed to unmarshal bencode announce response")
	}

	// Delete mock file
	if err := file.Delete(); err != nil {
		t.Fatalf("Failed to delete mock file: %s", err.Error())
	}
}

// saveMetaInfo writes the raw metadata blob to "<name>.torrent" in the
// current directory, where name comes from the decoded info dictionary.
func saveMetaInfo(metadata string) (err error) {
	var info InfoDict
	err = bencode.Unmarshal(bytes.NewReader([]byte(metadata)), &info)
	if err != nil {
		return
	}

	f, err := os.Create(info.Name + ".torrent")
	if err != nil {
		log.Println("Error when opening file for creation: ", err)
		return
	}
	defer f.Close()

	_, err = f.WriteString(metadata)
	return
}

// DoExtension handles BEP 10 extension messages: the extension handshake
// (which may kick off a ut_metadata transfer) and any extension payloads the
// peer sends afterwards.
func (t *TorrentSession) DoExtension(msg []byte, p *peerState) (err error) {
	var h ExtensionHandshake
	if msg[0] == EXTENSION_HANDSHAKE {
		err = bencode.Unmarshal(bytes.NewReader(msg[1:]), &h)
		if err != nil {
			log.Println("Error when unmarshaling extension handshake")
			return err
		}

		p.theirExtensions = make(map[string]int)
		for name, code := range h.M {
			p.theirExtensions[name] = code
		}

		if t.si.HaveTorrent || t.si.ME != nil && t.si.ME.Transferring {
			return
		}

		// Fill metadata info
		if h.MetadataSize != uint(0) {
			nPieces := uint(math.Ceil(float64(h.MetadataSize) / float64(16*1024)))
			t.si.ME.Pieces = make([][]byte, nPieces)
		}

		if _, ok := p.theirExtensions["ut_metadata"]; ok {
			t.si.ME.Transferring = true
			p.sendMetadataRequest(0)
		}
	} else if ext, ok := t.si.OurExtensions[int(msg[0])]; ok {
		switch ext {
		case "ut_metadata":
			t.DoMetadata(msg[1:], p)
		default:
			log.Println("Unknown extension: ", ext)
		}
	} else {
		log.Println("Unknown extension: ", int(msg[0]))
	}

	return nil
}

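// ut_metadata transfers the info dictionary in 16 KiB pieces, which is where
// the 16*1024 above comes from: a 45 000-byte metadata blob needs
// ceil(45000/16384) = 3 pieces. This helper is only an illustrative sketch,
// not part of the original session code.
func metadataPieceCount(metadataSize uint) uint {
	const metadataPieceSize = 16 * 1024
	return uint(math.Ceil(float64(metadataSize) / float64(metadataPieceSize)))
}
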
// processMeta compares the peer's file hashes against ours and sends a
// metadata request for every file that is missing locally or whose hash
// differs.
func (s *share) processMeta(u *UDPMessage, out chan *UDPMessage) {
	msg, sender := u.data, u.addr
	fmt.Println("process meta")

	mfiles := s.getFileHashes()
	var shake Shake
	err := bencode.Unmarshal(bytes.NewBuffer(msg), &shake)
	check(err)

	yfiles := shake.Files
	for yf, yhash := range yfiles {
		mhash, ok := mfiles[yf]
		if !ok || mhash != yhash {
			b := s.createRequest(yf, -1, -1, -1)
			out <- &UDPMessage{sender, b}
		}
	}
}

// getTrackerInfo performs a tracker GET request and decodes the bencoded
// response body into a TrackerResponse.
func getTrackerInfo(url string) (tr *TrackerResponse, err error) {
	r, err := proxyHttpGet(url)
	if err != nil {
		return
	}
	defer r.Body.Close()

	if r.StatusCode >= 400 {
		data, _ := ioutil.ReadAll(r.Body)
		reason := "Bad Request " + string(data)
		log.Println(reason)
		err = errors.New(reason)
		return
	}

	var tr2 TrackerResponse
	err = bencode.Unmarshal(r.Body, &tr2)
	r.Body.Close()
	if err != nil {
		return
	}
	tr = &tr2
	return
}

func getMetaInfo(torrent string) (metaInfo *MetaInfo, err error) {
	var input io.ReadCloser
	if strings.HasPrefix(torrent, "http:") {
		r, err := proxyHttpGet(torrent)
		if err != nil {
			return nil, err
		}
		input = r.Body
	} else if strings.HasPrefix(torrent, "magnet:") {
		magnet, err := parseMagnet(torrent)
		if err != nil {
			log.Println("Couldn't parse magnet: ", err)
			return nil, err
		}
		ih, err := dht.DecodeInfoHash(magnet.InfoHashes[0])
		if err != nil {
			return nil, err
		}
		metaInfo = &MetaInfo{InfoHash: string(ih)}
		return metaInfo, err
	} else {
		if input, err = os.Open(torrent); err != nil {
			return
		}
	}

	// We need to calculate the sha1 of the Info map, including every value in
	// the map. The easiest way to do this is to read the data using the Decode
	// API, and then pick through it manually.
	var m interface{}
	m, err = bencode.Decode(input)
	input.Close()
	if err != nil {
		err = errors.New("Couldn't parse torrent file phase 1: " + err.Error())
		return
	}

	topMap, ok := m.(map[string]interface{})
	if !ok {
		err = errors.New("Couldn't parse torrent file phase 2.")
		return
	}

	infoMap, ok := topMap["info"]
	if !ok {
		err = errors.New("Couldn't parse torrent file. info")
		return
	}

	var b bytes.Buffer
	if err = bencode.Marshal(&b, infoMap); err != nil {
		return
	}
	hash := sha1.New()
	hash.Write(b.Bytes())

	var m2 MetaInfo
	err = bencode.Unmarshal(&b, &m2.Info)
	if err != nil {
		return
	}

	m2.InfoHash = string(hash.Sum(nil))
	m2.Announce = getString(topMap, "announce")
	m2.AnnounceList = getSliceSliceString(topMap, "announce-list")
	m2.CreationDate = getString(topMap, "creation date")
	m2.Comment = getString(topMap, "comment")
	m2.CreatedBy = getString(topMap, "created by")
	m2.Encoding = getString(topMap, "encoding")

	metaInfo = &m2
	return
}

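// A condensed, hedged sketch of the info-hash computation getMetaInfo performs
// above: decode the whole torrent generically, re-encode only the "info"
// value, and SHA-1 those bytes. infoHash is not part of the original code.
func infoHash(torrent io.Reader) (string, error) {
	m, err := bencode.Decode(torrent)
	if err != nil {
		return "", err
	}
	top, ok := m.(map[string]interface{})
	if !ok {
		return "", errors.New("torrent root is not a dictionary")
	}
	info, ok := top["info"]
	if !ok {
		return "", errors.New("torrent has no info dictionary")
	}
	var b bytes.Buffer
	if err := bencode.Marshal(&b, info); err != nil {
		return "", err
	}
	sum := sha1.Sum(b.Bytes())
	return string(sum[:]), nil
}
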
// DoMetadata handles ut_metadata messages from a peer: metadata pieces are
// stored, missing pieces re-requested, and the assembled info dictionary is
// verified against the torrent's info-hash. Incoming metadata requests are
// not answered yet (see the TODO below).
func (t *TorrentSession) DoMetadata(msg []byte, p *peerState) {
	// We need a buffered reader because the raw data is put directly
	// after the bencoded data, and a simple reader will get all its bytes
	// eaten. A buffered reader will keep a reference to where the
	// bdecoding ended.
	br := bufio.NewReader(bytes.NewReader(msg))
	var message MetadataMessage
	err := bencode.Unmarshal(br, &message)
	if err != nil {
		log.Println("Error when parsing metadata: ", err)
		return
	}

	mt := message.MsgType
	switch mt {
	case METADATA_REQUEST:
		//TODO: Answer to metadata request
	case METADATA_DATA:
		var piece bytes.Buffer
		_, err := io.Copy(&piece, br)
		if err != nil {
			log.Println("Error when getting metadata piece: ", err)
			return
		}
		t.si.ME.Pieces[message.Piece] = piece.Bytes()

		finished := true
		for idx, data := range t.si.ME.Pieces {
			if len(data) == 0 {
				p.sendMetadataRequest(idx)
				finished = false
			}
		}
		if !finished {
			break
		}

		log.Println("Finished downloading metadata!")
		var full bytes.Buffer
		for _, piece := range t.si.ME.Pieces {
			full.Write(piece)
		}
		b := full.Bytes()

		// Verify sha
		sha := sha1.New()
		sha.Write(b)
		actual := string(sha.Sum(nil))
		if actual != t.M.InfoHash {
			log.Println("Invalid metadata")
			log.Printf("Expected %s, got %s\n", t.M.InfoHash, actual)
		}

		metadata := string(b)
		err = saveMetaInfo(metadata)
		if err != nil {
			return
		}
		t.reload(metadata)
	case METADATA_REJECT:
		log.Printf("%s didn't want to send piece %d\n", p.address, message.Piece)
	default:
		log.Println("Didn't understand metadata extension type: ", mt)
	}
}

func (s *share) processPiece(u *UDPMessage, out chan *UDPMessage) {
	msg, sender := u.data, u.addr
	fmt.Println("Process piece")

	var p Piece
	err := bencode.Unmarshal(bytes.NewBuffer(msg), &p)
	check(err)

	var data []byte
	err = s.Db.QueryRow("SELECT data FROM files WHERE path = ?", p.File).Scan(&data)
	check(err)
	//if meta and file we're not aware of, just take their meta
	if err == sql.ErrNoRows && p.Index == -1 && p.Begin == -1 {
		fmt.Println("mine", len(p.Piece))
		_, err := s.Db.Exec("INSERT INTO files(path, data) values(?, ?)", p.File, p.Piece)
		//TODO modify bitset HAVES to all 0
		check(err)
	} else if err != nil {
		return
	}

	var mdata bt_file
	err = bencode.Unmarshal(bytes.NewBuffer(data), &mdata)
	//thanks to hashes, EOF characters in string (but don't break anything)
	if err != io.ErrUnexpectedEOF {
		check(err)
	}
	fmt.Println("time", mdata.Time)
	fmt.Println("Length", mdata.Length)
	fmt.Println("piece_len", mdata.Piece_length)
	//fmt.Println("pieces", mdata.Pieces)

	if p.Index == -1 && p.Begin == -1 {
		fmt.Println("got meta")
		var ydata bt_file
		err = bencode.Unmarshal(bytes.NewBuffer([]byte(p.Piece)), &ydata)
		check(err)
		if mdata.Time > ydata.Time {
			return //I don't need this
		}
		//process meta; at this point, I either didn't have it or theirs is newer
		rlength := int(math.Min(float64(BLOCK_SIZE), float64(ydata.Piece_length)))
		for i := 0; i < len(ydata.Pieces)/20; i++ { //256k chunks
			pb := i * 20 //piece hash actual index
			if i >= len(mdata.Pieces)/20 || mdata.Pieces[pb:pb+20] != ydata.Pieces[pb:pb+20] {
				for j := 0; j < ydata.Piece_length; j += rlength { //16K chunks
					length := int(math.Min(float64(ydata.Length-int64((i*ydata.Piece_length)+j)), float64(rlength)))
					out <- &UDPMessage{sender, s.createRequest(p.File, i, j, length)}
					if length < rlength {
						break
					}
				}
			}
		}
	} else {
		fmt.Printf("opening %s to write at %d, %d\n", p.File, p.Index, p.Begin)
		//TODO eh, flags are weird
		//TODO also need to see behavior on files where WriteAt() will be OOB
		f, err := os.OpenFile(s.Path+"/"+p.File, os.O_RDWR|os.O_CREATE, 0666)
		check(err)
		_, err = f.WriteAt([]byte(p.Piece), int64(p.Index*mdata.Piece_length+p.Begin))
		f.Close()
		check(err)
	}
}

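// A hedged sketch of the request-chunking math in processPiece above: a piece
// of pieceLength bytes is requested in blockSize chunks, and the last chunk is
// clipped so it never runs past the end of the file. chunkPiece and its
// (begin, length) pairs are illustrative only, not part of the original code.
func chunkPiece(fileLength int64, pieceIndex, pieceLength, blockSize int) [][2]int {
	var chunks [][2]int
	for begin := 0; begin < pieceLength; begin += blockSize {
		remaining := fileLength - int64(pieceIndex*pieceLength+begin)
		length := int(math.Min(float64(remaining), float64(blockSize)))
		if length <= 0 {
			break
		}
		chunks = append(chunks, [2]int{begin, length})
		if length < blockSize {
			break
		}
	}
	return chunks
}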