Example #1
// TestUDPAnnounce verifies that the UDP tracker announce output format is correct
func TestUDPAnnounce(t *testing.T) {
	log.Println("TestUDPAnnounce()")

	// Load config
	config := common.LoadConfig()
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566",
		Verified: true,
	}

	// Save mock file
	if !file.Save() {
		t.Fatalf("Failed to save mock file")
	}

	// Generate fake announce query
	query := url.Values{}
	query.Set("info_hash", "deadbeef")
	query.Set("ip", "127.0.0.1")
	query.Set("port", "5000")
	query.Set("uploaded", "0")
	query.Set("downloaded", "0")
	query.Set("left", "0")
	query.Set("numwant", "50")

	// Create a UDP tracker, trigger an announce
	tracker := UDPTracker{TransID: uint32(1234)}
	res := tracker.Announce(query, file)

	// Decode response
	announce := new(udp.AnnounceResponse)
	err := announce.UnmarshalBinary(res)
	if err != nil {
		t.Fatalf("Failed to decode UDP announce response")
	}
	log.Println(announce)

	// Verify correct action
	if announce.Action != 1 {
		t.Fatalf("Incorrect UDP action, expected 1")
	}

	// Encode response, verify same as before
	announceBuf, err := announce.MarshalBinary()
	if err != nil {
		t.Fatalf("Failed to encode UDP announce response")
	}

	if !bytes.Equal(res, announceBuf) {
		t.Fatalf("Byte slices are not identical")
	}

	// Delete mock file
	if !file.Delete() {
		t.Fatalf("Failed to delete mock file")
	}
}
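For reference, a minimal sketch of decoding the fixed announce layout that udp.AnnounceResponse presumably marshals (big-endian uint32 fields per the standard UDP tracker protocol, followed by the compact peer list); all values below are hypothetical:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical announce response: 20-byte header followed by one compact peer
	res := []byte{
		0, 0, 0, 1, // action: 1 (announce)
		0, 0, 4, 210, // transaction ID: 1234
		0, 0, 14, 16, // interval: 3600
		0, 0, 0, 0, // leechers: 0
		0, 0, 0, 1, // seeders: 1
		127, 0, 0, 1, 0x13, 0x88, // one peer: 127.0.0.1:5000
	}

	action := binary.BigEndian.Uint32(res[0:4])
	transID := binary.BigEndian.Uint32(res[4:8])
	interval := binary.BigEndian.Uint32(res[8:12])
	fmt.Println(action, transID, interval) // 1 1234 3600
}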
Example #2
// TestHTTPTrackerScrape verifies that the HTTP tracker scrape format is correct
func TestHTTPTrackerScrape(t *testing.T) {
	log.Println("TestHTTPTrackerScrape()")

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566",
		Verified: true,
	}

	// Save mock file
	if !file.Save() {
		t.Fatalf("Failed to save mock file")
	}

	// Store file in slice
	files := []data.FileRecord{file}

	// Create an HTTP tracker, trigger a scrape
	tracker := HTTPTracker{}
	res := tracker.Scrape(files)
	log.Println(string(res))

	// Unmarshal response
	scrape := scrapeResponse{}
	if err := bencode.Unmarshal(bytes.NewReader(res), &scrape); err != nil {
		t.Fatalf("Failed to unmarshal bencode scrape response")
	}

	// Delete mock file
	if !file.Delete() {
		t.Fatalf("Failed to delete mock file")
	}
}
Example #3
// TestHTTPRouter verifies that the main HTTP router is working properly
func TestHTTPRouter(t *testing.T) {
	log.Println("TestHTTPRouter()")

	// Load config
	config, err := common.LoadConfig()
	if err != nil {
		t.Fatalf("Could not load configuration: %s", err.Error())
	}
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566303030303030303030303030",
		Verified: true,
	}

	// Save mock file
	if err := file.Save(); err != nil {
		t.Fatalf("Failed to save mock file: %s", err.Error())
	}

	// Generate second mock data.FileRecord
	file2 := data.FileRecord{
		InfoHash: "6265656664656164",
		Verified: true,
	}

	// Save mock file
	if err := file2.Save(); err != nil {
		t.Fatalf("Failed to save mock file: %s", err.Error())
	}

	// Iterate all HTTP tests
	for _, test := range httpTests {
		// Generate mock HTTP request
		r, err := http.NewRequest("GET", "http://localhost:8080"+test.url, nil)
		if err != nil {
			t.Fatalf("Failed to create HTTP request: %s", err.Error())
		}
		r.Header.Set("User-Agent", "goat_test")

		// Capture HTTP writer response with recorder
		w := httptest.NewRecorder()

		// Invoke HTTP router
		parseHTTP(w, r)
		log.Println("TEST:", test.url)
		log.Println(w.Body.String())
	}

	// Delete mock files
	if err := file.Delete(); err != nil {
		t.Fatalf("Failed to delete mock file: %s", err.Error())
	}
	if err := file2.Delete(); err != nil {
		t.Fatalf("Failed to delete mock file2: %s", err.Error())
	}
}
Example #4
// TestUDPTrackerScrape verifies that the UDP tracker scrape format is correct
func TestUDPTrackerScrape(t *testing.T) {
	log.Println("TestUDPTrackerScrape()")

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566",
		Verified: true,
	}

	// Save mock file
	if !file.Save() {
		t.Fatalf("Failed to save mock file")
	}

	// Store file in slice
	files := []data.FileRecord{file}

	// Create a UDP tracker, trigger a scrape
	tracker := UDPTracker{TransID: uint32(1234)}
	res := tracker.Scrape(files)

	// Decode response
	scrape := new(udp.ScrapeResponse)
	err := scrape.UnmarshalBinary(res)
	if err != nil {
		t.Fatalf("Failed to decode UDP scrape response")
	}
	log.Println(scrape)

	// Verify correct action
	if scrape.Action != 2 {
		t.Fatalf("Incorrect UDP action, expected 2")
	}

	// Encode response, verify same as before
	scrapeBuf, err := scrape.MarshalBinary()
	if err != nil {
		t.Fatalf("Failed to encode UDP scrape response")
	}

	if !bytes.Equal(res, scrapeBuf) {
		t.Fatalf("Byte slices are not identical")
	}

	// Delete mock file
	if !file.Delete() {
		t.Fatalf("Failed to delete mock file")
	}
}
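Similarly, a minimal sketch of the scrape layout that udp.ScrapeResponse presumably follows (action and transaction ID, then one seeders/completed/leechers triplet per scraped file, all big-endian uint32); values are hypothetical:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical scrape response: 8-byte header plus one per-file stats triplet
	res := []byte{
		0, 0, 0, 2, // action: 2 (scrape)
		0, 0, 4, 210, // transaction ID: 1234
		0, 0, 0, 1, // seeders: 1
		0, 0, 0, 3, // completed: 3
		0, 0, 0, 0, // leechers: 0
	}

	fmt.Println(
		binary.BigEndian.Uint32(res[0:4]),   // 2
		binary.BigEndian.Uint32(res[4:8]),   // 1234
		binary.BigEndian.Uint32(res[8:12]),  // 1
		binary.BigEndian.Uint32(res[12:16]), // 3
		binary.BigEndian.Uint32(res[16:20]), // 0
	)
}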
Example #5
// TestHTTPAnnounce verifies that the HTTP tracker announce output format is correct
func TestHTTPAnnounce(t *testing.T) {
	log.Println("TestHTTPAnnounce()")

	// Load config
	config, err := common.LoadConfig()
	if err != nil {
		t.Fatalf("Could not load configuration: %s", err.Error())
	}
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566303030303030303030303030",
		Verified: true,
	}

	// Save mock file
	if err := file.Save(); err != nil {
		t.Fatalf("Failed to save mock file: %s", err.Error())
	}

	// Generate fake announce query
	query := url.Values{}
	query.Set("info_hash", "deadbeef")
	query.Set("ip", "127.0.0.1")
	query.Set("port", "5000")
	query.Set("uploaded", "0")
	query.Set("downloaded", "0")
	query.Set("left", "0")

	// Create an HTTP tracker, trigger an announce
	tracker := HTTPTracker{}
	res := tracker.Announce(query, file)
	log.Println(string(res))

	// Unmarshal response
	announce := AnnounceResponse{}
	if err := bencode.Unmarshal(bytes.NewReader(res), &announce); err != nil {
		t.Fatalf("Failed to unmarshal bencode announce response")
	}

	// Delete mock file
	if err := file.Delete(); err != nil {
		t.Fatalf("Failed to delete mock file: %s", err.Error())
	}
}
Example #6
// TestGetFilesJSON verifies that /api/files returns proper JSON output
func TestGetFilesJSON(t *testing.T) {
	log.Println("TestGetFilesJSON()")

	// Load config
	config := common.LoadConfig()
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "deadbeef",
		Verified: true,
	}

	// Save mock file
	if !file.Save() {
		t.Fatalf("Failed to save mock file")
	}

	// Load mock file to fetch ID
	file = file.Load(file.InfoHash, "info_hash")
	if file == (data.FileRecord{}) {
		t.Fatalf("Failed to load mock file")
	}

	// Request output JSON from API for this file
	var file2 data.FileRecord
	err := json.Unmarshal(getFilesJSON(file.ID), &file2)
	if err != nil {
		t.Fatalf("Failed to unmarshal result JSON for single file")
	}

	// Verify objects are the same
	if file.ID != file2.ID {
		t.Fatalf("ID, expected %d, got %d", file.ID, file2.ID)
	}

	// Request output JSON from API for all files
	var allFiles []data.FileRecord
	err = json.Unmarshal(getFilesJSON(-1), &allFiles)
	if err != nil {
		t.Fatalf("Failed to unmarshal result JSON for all files")
	}

	// Verify known file is in result set
	found := false
	for _, f := range allFiles {
		if f.ID == file.ID {
			found = true
			break
		}
	}

	if !found {
		t.Fatalf("Expected file not found in all files result set")
	}

	// Delete mock file
	if !file.Delete() {
		t.Fatalf("Failed to delete mock file")
	}
}
Example #7
// Scrape generates and triggers a tracker scrape request
func Scrape(tracker torrentTracker, query url.Values) []byte {
	// List of files to be scraped
	scrapeFiles := make([]data.FileRecord, 0)

	// Iterate all info_hash values in query
	for _, infoHash := range query["info_hash"] {
		// Make a copy of query, set the info hash as current in loop
		localQuery := url.Values{}
		for k, v := range query {
			localQuery[k] = v
		}
		localQuery.Set("info_hash", infoHash)

		// Store scrape information in struct
		scrape := new(data.ScrapeLog).FromValues(localQuery)
		if scrape == (data.ScrapeLog{}) {
			return tracker.Error("Malformed scrape")
		}

		// Request to store scrape
		go scrape.Save()

		log.Printf("scrape: [%s %s] %s", tracker.Protocol(), scrape.IP, scrape.InfoHash)

		// Check for a matching file via info_hash
		file := new(data.FileRecord).Load(scrape.InfoHash, "info_hash")
		if file == (data.FileRecord{}) {
			// Torrent is not currently registered
			return tracker.Error("Unregistered torrent")
		}

		// Ensure file is verified, meaning we will permit scraping of it
		if !file.Verified {
			return tracker.Error("Unverified torrent")
		}

		// Launch peer reaper to remove old peers from this file
		go file.PeerReaper()

		// File is valid, add it to list to be scraped
		scrapeFiles = append(scrapeFiles, file)
	}

	// Create scrape
	return tracker.Scrape(scrapeFiles)
}
Example #8
// Announce announces using UDP format
func (u UDPTracker) Announce(query url.Values, file data.FileRecord) []byte {
	// Create UDP announce response
	announce := udp.AnnounceResponse{
		Action:   1,
		TransID:  u.TransID,
		Interval: uint32(common.Static.Config.Interval),
		Leechers: uint32(file.Leechers()),
		Seeders:  uint32(file.Seeders()),
	}

	// Convert to UDP byte buffer
	announceBuf, err := announce.MarshalBinary()
	if err != nil {
		log.Println(err.Error())
		return u.Error("Could not create UDP announce response")
	}

	// Parse numwant from query, defaulting to 50 if absent or invalid
	numwant, err := strconv.Atoi(query.Get("numwant"))
	if err != nil {
		numwant = 50
	}

	// Add compact peer list
	res := bytes.NewBuffer(announceBuf)
	err = binary.Write(res, binary.BigEndian, file.PeerList(query.Get("ip"), numwant))
	if err != nil {
		log.Println(err.Error())
		return u.Error("Could not create UDP announce response")
	}

	return res.Bytes()
}
Example #9
// getFilesJSON returns a JSON representation of one or more data.FileRecords
func getFilesJSON(ID int) ([]byte, error) {
	// Check for a valid integer ID
	if ID > 0 {
		// Load file
		file, err := new(data.FileRecord).Load(ID, "id")
		if err != nil {
			return nil, err
		}

		// Create JSON representation
		jsonFile, err := file.ToJSON()
		if err != nil {
			return nil, err
		}

		// Marshal into JSON
		res, err := json.Marshal(jsonFile)
		if err != nil {
			return nil, err
		}

		return res, nil
	}

	// Load all files
	files, err := new(data.FileRecordRepository).All()
	if err != nil {
		return nil, err
	}

	// Marshal into JSON
	res, err := json.Marshal(files)
	if err != nil {
		return nil, err
	}

	return res, nil
}
Example #10
// Announce announces using HTTP format
func (h HTTPTracker) Announce(query url.Values, file data.FileRecord) []byte {
	// Generate response struct
	announce := AnnounceResponse{
		Interval:    common.Static.Config.Interval,
		MinInterval: common.Static.Config.Interval / 2,
	}

	// Get seeders count on file
	var err error
	announce.Complete, err = file.Seeders()
	if err != nil {
		log.Println(err.Error())
	}

	// Get leechers count on file
	announce.Incomplete, err = file.Leechers()
	if err != nil {
		log.Println(err.Error())
	}

	// Check for numwant parameter, return up to that number of peers
	// Default is 50 per protocol
	numwant := 50
	if query.Get("numwant") != "" {
		// Verify numwant is an integer
		num, err := strconv.Atoi(query.Get("numwant"))
		if err == nil {
			numwant = num
		}
	}

	// Marshal struct into bencode
	buf := bytes.NewBuffer(make([]byte, 0))
	if err := bencode.Marshal(buf, announce); err != nil {
		log.Println(err.Error())
		return h.Error(ErrAnnounceFailure.Error())
	}

	// Generate compact peer list of length numwant
	// Note: because we are HTTP, we can mark second parameter as 'true' to get a
	// more accurate peer list
	compactPeers, err := file.CompactPeerList(numwant, true)
	if err != nil {
		log.Println(err.Error())
		return h.Error(ErrPeerListFailure.Error())
	}

	// Because the bencode marshaler does not handle compact, binary peer list conversion,
	// we handle it manually here.

	// Get initial buffer, chop off 3 bytes: "0:e", append the actual list length with new colon
	out := buf.Bytes()
	out = append(out[0:len(out)-3], []byte(strconv.Itoa(len(compactPeers))+":")...)

	// Append peers list, terminate with an "e"
	return append(append(out, compactPeers...), byte('e'))
}
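The manual splice above is easier to follow against a concrete buffer. A self-contained sketch using a hypothetical announce dictionary and one fake compact peer (4-byte IPv4 plus 2-byte big-endian port):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical bencoded announce ending in an empty compact peer list: "...5:peers0:e"
	out := []byte("d8:intervali3600e5:peers0:e")

	// One fake compact peer: 127.0.0.1:5000 -> 4 IP bytes + 2 port bytes (big-endian)
	compactPeers := []byte{127, 0, 0, 1, 0x13, 0x88}

	// Chop off the trailing "0:e", splice in the real length prefix with a new colon
	out = append(out[:len(out)-3], []byte(strconv.Itoa(len(compactPeers))+":")...)

	// Append the peer bytes and re-terminate the dictionary with "e"
	out = append(append(out, compactPeers...), byte('e'))

	fmt.Printf("%q\n", out) // "d8:intervali3600e5:peers6:\x7f\x00\x00\x01\x13\x88e"
}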
Example #11
// Announce announces using UDP format
func (u UDPTracker) Announce(query url.Values, file data.FileRecord) []byte {
	// Create UDP announce response
	announce := udp.AnnounceResponse{
		Action:   1,
		TransID:  u.TransID,
		Interval: uint32(common.Static.Config.Interval),
	}

	// Calculate file seeders and leechers
	seeders, err := file.Seeders()
	if err != nil {
		log.Println(err.Error())
	}
	announce.Seeders = uint32(seeders)

	leechers, err := file.Leechers()
	if err != nil {
		log.Println(err.Error())
	}
	announce.Leechers = uint32(leechers)

	// Convert to UDP byte buffer
	announceBuf, err := announce.MarshalBinary()
	if err != nil {
		log.Println(err.Error())
		return u.Error(ErrAnnounceFailure.Error())
	}

	// Parse numwant from query, defaulting to 50 if absent or invalid
	numwant, err := strconv.Atoi(query.Get("numwant"))
	if err != nil {
		numwant = 50
	}

	// Retrieve compact peer list
	// Note: because we are UDP, we send the second parameter 'false' to get
	// a "best guess" peer list, due to anonymous announces
	peers, err := file.CompactPeerList(numwant, false)
	if err != nil {
		log.Println(err.Error())
		return u.Error(ErrPeerListFailure.Error())
	}

	// Add compact peer list
	res := bytes.NewBuffer(announceBuf)
	err = binary.Write(res, binary.BigEndian, peers)
	if err != nil {
		log.Println(err.Error())
		return u.Error(ErrPeerListFailure.Error())
	}

	return res.Bytes()
}
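The compact peer list written by binary.Write above is assumed to use the usual 6-bytes-per-peer encoding: a 4-byte IPv4 address followed by a 2-byte big-endian port. A minimal sketch of building one entry with hypothetical values:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	// One compact peer entry: 4-byte IPv4 followed by a 2-byte big-endian port
	buf := new(bytes.Buffer)
	buf.Write(net.ParseIP("127.0.0.1").To4())
	if err := binary.Write(buf, binary.BigEndian, uint16(5000)); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Printf("% x\n", buf.Bytes()) // 7f 00 00 01 13 88
}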
Example #12
// Announce announces using HTTP format
func (h HTTPTracker) Announce(query url.Values, file data.FileRecord) []byte {
	// Generate response struct
	announce := AnnounceResponse{
		Complete:    file.Seeders(),
		Incomplete:  file.Leechers(),
		Interval:    common.Static.Config.Interval,
		MinInterval: common.Static.Config.Interval / 2,
	}

	// Check for numwant parameter, return up to that number of peers
	// Default is 50 per protocol
	numwant := 50
	if query.Get("numwant") != "" {
		// Verify numwant is an integer
		num, err := strconv.Atoi(query.Get("numwant"))
		if err == nil {
			numwant = num
		}
	}

	// Marshal struct into bencode
	buf := bytes.NewBuffer(make([]byte, 0))
	if err := bencode.Marshal(buf, announce); err != nil {
		log.Println(err.Error())
		return h.Error("Tracker error: failed to create announce response")
	}

	// Generate compact peer list of length numwant, exclude this user
	peers := file.PeerList(query.Get("ip"), numwant)

	// Because the bencode marshaler does not handle compact, binary peer list conversion,
	// we handle it manually here.

	// Get initial buffer, chop off 3 bytes: "0:e", append the actual list length with new colon
	out := buf.Bytes()
	out = append(out[0:len(out)-3], []byte(strconv.Itoa(len(peers))+":")...)

	// Append peers list, terminate with an "e"
	out = append(append(out, peers...), byte('e'))

	// Return final announce message
	return out
}
Example #13
// Announce generates and triggers a tracker announce request
func Announce(tracker TorrentTracker, user data.UserRecord, query url.Values) []byte {
	// Store announce information in struct
	announce := new(data.AnnounceLog)
	err := announce.FromValues(query)
	if err != nil {
		return tracker.Error("Malformed announce")
	}

	// Request to store announce
	go func(announce *data.AnnounceLog) {
		if err := announce.Save(); err != nil {
			log.Println(err.Error())
		}
	}(announce)

	// Only report event when needed
	event := ""
	if announce.Event != "" {
		event = announce.Event + " "
	}

	log.Printf("announce: [%s %s:%d] %s%s", tracker.Protocol(), announce.IP, announce.Port, event, announce.InfoHash)

	// Check for a matching file via info_hash
	file, err := new(data.FileRecord).Load(announce.InfoHash, "info_hash")
	if err != nil {
		log.Println(err.Error())
		return tracker.Error(ErrAnnounceFailure.Error())
	}

	// Torrent is currently unregistered
	if file == (data.FileRecord{}) {
		log.Printf("tracker: detected new file, awaiting manual approval [hash: %s]", announce.InfoHash)

		// Create an entry in file table for this hash, but mark it as unverified
		file.InfoHash = announce.InfoHash
		file.Verified = false

		// Save file asynchronously
		go func(file data.FileRecord) {
			if err := file.Save(); err != nil {
				log.Println(err.Error())
			}
		}(file)

		// Report error
		return tracker.Error("Unregistered torrent")
	}

	// Ensure file is verified, meaning we will permit tracking of it
	if !file.Verified {
		return tracker.Error("Unverified torrent")
	}

	// Launch peer reaper asynchronously to remove old peers from this file
	go func(file data.FileRecord) {
		// Start peer reaper
		count, err := file.PeerReaper()
		if err != nil {
			log.Println(err.Error())
		}

		// Report peers reaped
		if count > 0 {
			log.Println("peerReaper: reaped %d peers on file ID: %d", count, file.ID)
		}
	}(file)

	// If UDP tracker, we cannot reliably detect user, so we announce anonymously
	if _, ok := tracker.(UDPTracker); ok {
		return tracker.Announce(query, file)
	}

	// Check existing record for this user with this file and this IP
	fileUser, err := new(data.FileUserRecord).Load(file.ID, user.ID, query.Get("ip"))
	if err != nil {
		log.Println(err.Error())
		return tracker.Error(ErrAnnounceFailure.Error())
	}

	// New user, starting torrent
	if fileUser == (data.FileUserRecord{}) {
		// Create new relationship
		fileUser.FileID = file.ID
		fileUser.UserID = user.ID
		fileUser.IP = query.Get("ip")
		fileUser.Active = true
		fileUser.Announced = 1

		// If announce reports 0 left, but no existing record, user is probably the initial seeder
		if announce.Left == 0 {
			fileUser.Completed = true
		} else {
			fileUser.Completed = false
		}

		// Track the initial uploaded, downloaded, and left values
		// NOTE: clients report absolute values, so delta should NEVER be calculated for these
		fileUser.Uploaded = announce.Uploaded
		fileUser.Downloaded = announce.Downloaded
		fileUser.Left = announce.Left
	} else {
		// Else, pre-existing record, so update
		// Event "stopped", mark as inactive
		// NOTE: likely only reported by clients which are actively seeding, NOT when stopped during leeching
		if announce.Event == "stopped" {
			fileUser.Active = false
		} else {
			// Else, "started", "completed", or no status, mark as active
			fileUser.Active = true
		}

		// Check for completion
		// Could be from a peer stating completed, or a seed reporting 0 left
		if announce.Event == "completed" || announce.Left == 0 {
			fileUser.Completed = true
		} else {
			fileUser.Completed = false
		}

		// Add an announce
		fileUser.Announced = fileUser.Announced + 1

		// Store latest statistics, but do so in a sane way (no removing upload/download, no adding left)
		// NOTE: clients report absolute values, so delta should NEVER be calculated for these
		// NOTE: if a client re-downloads a file they have previously downloaded, but the
		// data.FileUserRecord relationship is not cleared, they will essentially get a "free" download,
		// with no extra download penalty to their share ratio
		// For the time being, this behavior is expected and acceptable
		if announce.Uploaded > fileUser.Uploaded {
			fileUser.Uploaded = announce.Uploaded
		}
		if announce.Downloaded > fileUser.Downloaded {
			fileUser.Downloaded = announce.Downloaded
		}
		if announce.Left < fileUser.Left {
			fileUser.Left = announce.Left
		}
	}

	// Update file/user relationship record asynchronously
	go func(fileUser data.FileUserRecord) {
		if err := fileUser.Save(); err != nil {
			log.Println(err.Error())
		}
	}(fileUser)

	// Create announce
	return tracker.Announce(query, file)
}
Example #14
// Scrape generates and triggers a tracker scrape request
func Scrape(tracker TorrentTracker, query url.Values) []byte {
	// List of files to be scraped
	scrapeFiles := make([]data.FileRecord, 0)

	// Iterate all info_hash values in query
	for _, infoHash := range query["info_hash"] {
		// Make a copy of query, set the info hash as current in loop
		localQuery := url.Values{}
		for k, v := range query {
			localQuery[k] = v
		}
		localQuery.Set("info_hash", infoHash)

		// Store scrape information in struct
		scrape := new(data.ScrapeLog)
		err := scrape.FromValues(localQuery)
		if err != nil {
			return tracker.Error("Malformed scrape")
		}

		// Request to store scrape
		go func(scrape *data.ScrapeLog) {
			if err := scrape.Save(); err != nil {
				log.Println(err.Error())
			}
		}(scrape)

		log.Printf("scrape: [%s %s] %s", tracker.Protocol(), scrape.IP, scrape.InfoHash)

		// Check for a matching file via info_hash
		file, err := new(data.FileRecord).Load(scrape.InfoHash, "info_hash")
		if err != nil {
			log.Println(err.Error())
			return tracker.Error(ErrScrapeFailure.Error())
		}

		// Torrent is not currently registered
		if file == (data.FileRecord{}) {
			return tracker.Error("Unregistered torrent")
		}

		// Ensure file is verified, meaning we will permit scraping of it
		if !file.Verified {
			return tracker.Error("Unverified torrent")
		}

		// Launch peer reaper asynchronously to remove old peers from this file
		go func(file data.FileRecord) {
			// Start peer reaper
			count, err := file.PeerReaper()
			if err != nil {
				log.Println(err.Error())
			}

			// Report peers reaped
			if count > 0 {
				log.Println("peerReaper: reaped %d peers on file ID: %d", count, file.ID)
			}
		}(file)

		// File is valid, add it to list to be scraped
		scrapeFiles = append(scrapeFiles, file)
	}

	// Create scrape
	return tracker.Scrape(scrapeFiles)
}
Example #15
// TestUDPRouter verifies that the main UDP router is working properly
func TestUDPRouter(t *testing.T) {
	log.Println("TestUDPRouter()")

	// Load config
	config, err := common.LoadConfig()
	if err != nil {
		t.Fatalf("Could not load configuration: %s", err.Error())
	}
	common.Static.Config = config

	// Generate mock data.FileRecord
	file := data.FileRecord{
		InfoHash: "6465616462656566303030303030303030303030",
		Verified: true,
	}

	// Save mock file
	if err := file.Save(); err != nil {
		t.Fatalf("Failed to save mock file: %s", err.Error())
	}

	// Fake UDP address
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("Failed to create fake UDP address")
	}

	// Connect packet with handshake
	connect := udp.Packet{udpInitID, 0, 1234}
	connectBuf, err := connect.MarshalBinary()
	if err != nil {
		t.Fatalf("Failed to create UDP connect packet")
	}

	// Perform connection handshake
	res, err := parseUDP(connectBuf, addr)
	if err != nil {
		errRes := new(udp.ErrorResponse)
		err2 := errRes.UnmarshalBinary(res)
		if err2 != nil {
			t.Fatalf(err.Error())
		}

		log.Println("ERROR:", errRes.Error)
		t.Fatalf(err.Error())
	}

	// Retrieve response, get new connection ID, which will be expected by router
	connRes := new(udp.ConnectResponse)
	err = connRes.UnmarshalBinary(res)
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Create announce request
	announce := udp.AnnounceRequest{
		ConnID:     connRes.ConnID,
		Action:     1,
		TransID:    connRes.TransID,
		InfoHash:   []byte("deadbeef000000000000"),
		PeerID:     []byte("00001111222233334444"),
		Downloaded: 0,
		Left:       0,
		Uploaded:   0,
		IP:         0,
		Key:        1234,
		Port:       5000,
	}

	// Get announce bytes
	announceBuf, err := announce.MarshalBinary()
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Send announce to UDP router
	res, err = parseUDP(announceBuf, addr)
	if err != nil {
		errRes := new(udp.ErrorResponse)
		err2 := errRes.UnmarshalBinary(res)
		if err2 != nil {
			t.Fatalf(err.Error())
		}

		log.Println("ERROR:", errRes.Error)
		t.Fatalf(err.Error())
	}

	// Get UDP announce response
	announceRes := new(udp.AnnounceResponse)
	err = announceRes.UnmarshalBinary(res)
	if err != nil {
		errRes := new(udp.ErrorResponse)
		err2 := errRes.UnmarshalBinary(res)
		if err2 != nil {
			t.Fatalf(err.Error())
		}

		log.Println("ERROR:", errRes.Error)
		t.Fatalf(err.Error())
	}
	log.Println(announceRes)

	// Create scrape request
	scrape := udp.ScrapeRequest{
		ConnID:     connRes.ConnID,
		Action:     2,
		TransID:    connRes.TransID,
		InfoHashes: [][]byte{[]byte("deadbeef000000000000")},
	}

	// Get scrape bytes
	scrapeBuf, err := scrape.MarshalBinary()
	if err != nil {
		t.Fatalf(err.Error())
	}

	// Send scrape to UDP router
	res, err = parseUDP(scrapeBuf, addr)
	if err != nil {
		errRes := new(udp.ErrorResponse)
		err2 := errRes.UnmarshalBinary(res)
		if err2 != nil {
			t.Fatalf(err.Error())
		}

		log.Println("ERROR:", errRes.Error)
		t.Fatalf(err.Error())
	}

	// Get UDP scrape response
	scrapeRes := new(udp.ScrapeResponse)
	err = scrapeRes.UnmarshalBinary(res)
	if err != nil {
		errRes := new(udp.ErrorResponse)
		err2 := errRes.UnmarshalBinary(res)
		if err2 != nil {
			t.Fatalf(err.Error())
		}

		log.Println("ERROR:", errRes.Error)
		t.Fatalf(err.Error())
	}
	log.Println(scrapeRes)

	// Delete mock file
	if err := file.Delete(); err != nil {
		t.Fatalf("Failed to delete mock file: %s", err.Error())
	}
}