Example #1
// Scrape generates and triggers a tracker scrape request
func Scrape(tracker torrentTracker, query url.Values) []byte {
	// List of files to be scraped
	scrapeFiles := make([]data.FileRecord, 0)

	// Iterate all info_hash values in query
	for _, infoHash := range query["info_hash"] {
		// Alias the query values (url.Values is a map) and set the current info hash
		localQuery := query
		localQuery.Set("info_hash", infoHash)

		// Store scrape information in struct
		scrape := new(data.ScrapeLog).FromValues(localQuery)
		if scrape == (data.ScrapeLog{}) {
			return tracker.Error("Malformed scrape")
		}

		// Request to store scrape
		go scrape.Save()

		log.Printf("scrape: [%s %s] %s", tracker.Protocol(), scrape.IP, scrape.InfoHash)

		// Check for a matching file via info_hash
		file := new(data.FileRecord).Load(scrape.InfoHash, "info_hash")
		if file == (data.FileRecord{}) {
			// Torrent is not currently registered
			return tracker.Error("Unregistered torrent")
		}

		// Ensure file is verified, meaning we will permit scraping of it
		if !file.Verified {
			return tracker.Error("Unverified torrent")
		}

		// Launch peer reaper to remove old peers from this file
		go file.PeerReaper()

		// File is valid, add it to list to be scraped
		scrapeFiles = append(scrapeFiles, file)
	}

	// Create scrape
	return tracker.Scrape(scrapeFiles)
}
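
For reference, here is a minimal sketch of the torrentTracker interface implied by the calls in this example; the method set and signatures are inferred assumptions based only on what the function above uses, and the real package may declare more.

// torrentTracker as inferred from the calls above; an assumed sketch, not the
// package's actual definition
type torrentTracker interface {
	// Protocol returns a short protocol label used in log output
	Protocol() string
	// Error encodes a tracker error message for the client
	Error(msg string) []byte
	// Scrape encodes a scrape response for the given files
	Scrape(files []data.FileRecord) []byte
}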
Example #2
// Announce generates and triggers a tracker announce request
func Announce(tracker TorrentTracker, user data.UserRecord, query url.Values) []byte {
	// Store announce information in struct
	announce := new(data.AnnounceLog)
	err := announce.FromValues(query)
	if err != nil {
		return tracker.Error("Malformed announce")
	}

	// Request to store announce
	go func(announce *data.AnnounceLog) {
		if err := announce.Save(); err != nil {
			log.Println(err.Error())
		}
	}(announce)

	// Only report event when needed
	event := ""
	if announce.Event != "" {
		event = announce.Event + " "
	}

	log.Printf("announce: [%s %s:%d] %s%s", tracker.Protocol(), announce.IP, announce.Port, event, announce.InfoHash)

	// Check for a matching file via info_hash
	file, err := new(data.FileRecord).Load(announce.InfoHash, "info_hash")
	if err != nil {
		log.Println(err.Error())
		return tracker.Error(ErrAnnounceFailure.Error())
	}

	// Torrent is currently unregistered
	if file == (data.FileRecord{}) {
		log.Printf("tracker: detected new file, awaiting manual approval [hash: %s]", announce.InfoHash)

		// Create an entry in file table for this hash, but mark it as unverified
		file.InfoHash = announce.InfoHash
		file.Verified = false

		// Save file asynchronously
		go func(file data.FileRecord) {
			if err := file.Save(); err != nil {
				log.Println(err.Error())
			}
		}(file)

		// Report error
		return tracker.Error("Unregistered torrent")
	}

	// Ensure file is verified, meaning we will permit tracking of it
	if !file.Verified {
		return tracker.Error("Unverified torrent")
	}

	// Launch peer reaper asynchronously to remove old peers from this file
	go func(file data.FileRecord) {
		// Start peer reaper
		count, err := file.PeerReaper()
		if err != nil {
			log.Println(err.Error())
		}

		// Report peers reaped
		if count > 0 {
			log.Printf("peerReaper: reaped %d peers on file ID: %d", count, file.ID)
		}
	}(file)

	// If UDP tracker, we cannot reliably detect user, so we announce anonymously
	if _, ok := tracker.(UDPTracker); ok {
		return tracker.Announce(query, file)
	}

	// Check existing record for this user with this file and this IP
	fileUser, err := new(data.FileUserRecord).Load(file.ID, user.ID, query.Get("ip"))
	if err != nil {
		log.Println(err.Error())
		return tracker.Error(ErrAnnounceFailure.Error())
	}

	// New user, starting torrent
	if fileUser == (data.FileUserRecord{}) {
		// Create new relationship
		fileUser.FileID = file.ID
		fileUser.UserID = user.ID
		fileUser.IP = query.Get("ip")
		fileUser.Active = true
		fileUser.Announced = 1

		// If announce reports 0 left, but no existing record, user is probably the initial seeder
		if announce.Left == 0 {
			fileUser.Completed = true
		} else {
			fileUser.Completed = false
		}

		// Track the initial uploaded, downloaded, and left values
		// NOTE: clients report absolute values, so delta should NEVER be calculated for these
		fileUser.Uploaded = announce.Uploaded
		fileUser.Downloaded = announce.Downloaded
		fileUser.Left = announce.Left
	} else {
		// Else, pre-existing record, so update
		// Event "stopped", mark as inactive
		// NOTE: likely only reported by clients which are actively seeding, NOT when stopped during leeching
		if announce.Event == "stopped" {
			fileUser.Active = false
		} else {
			// Else, "started", "completed", or no status, mark as active
			fileUser.Active = true
		}

		// Check for completion
		// Could be from a peer stating completed, or a seed reporting 0 left
		if announce.Event == "completed" || announce.Left == 0 {
			fileUser.Completed = true
		} else {
			fileUser.Completed = false
		}

		// Add an announce
		fileUser.Announced = fileUser.Announced + 1

		// Store latest statistics, but do so in a sane way (no removing upload/download, no adding left)
		// NOTE: clients report absolute values, so delta should NEVER be calculated for these
		// NOTE: if a client re-downloads a file they have previously downloaded, but the
		// data.FileUserRecord relationship is not cleared, they will essentially get a "free"
		// download, with no extra download penalty to their share ratio
		// For the time being, this behavior is expected and acceptable
		if announce.Uploaded > fileUser.Uploaded {
			fileUser.Uploaded = announce.Uploaded
		}
		if announce.Downloaded > fileUser.Downloaded {
			fileUser.Downloaded = announce.Downloaded
		}
		if announce.Left < fileUser.Left {
			fileUser.Left = announce.Left
		}
	}

	// Update file/user relationship record asynchronously
	go func(fileUser data.FileUserRecord) {
		if err := fileUser.Save(); err != nil {
			log.Println(err.Error())
		}
	}(fileUser)

	// Create announce
	return tracker.Announce(query, file)
}
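
As a usage illustration, here is a hedged sketch of how Announce might be wired to an HTTP endpoint; the announceHandler function, the httpTracker value, and the way the data.UserRecord is obtained are assumptions for illustration, not part of the example above. Note that the example reads query.Get("ip"), so the caller is expected to provide that value when the client does not.

// Hypothetical HTTP wiring for the Announce example above; httpTracker is an
// assumed TorrentTracker implementation and the user lookup is omitted.
func announceHandler(w http.ResponseWriter, r *http.Request) {
	// Parse the announce query string
	query := r.URL.Query()

	// The example reads query.Get("ip"), so fall back to the remote address
	// when the client did not supply one (simplified; ignores proxies)
	if query.Get("ip") == "" {
		if host, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
			query.Set("ip", host)
		}
	}

	// A real caller would load the requesting user; left zero-valued here as
	// an assumption outside this sketch
	var user data.UserRecord

	// Write the tracker's announce response back to the client
	if _, err := w.Write(Announce(httpTracker{}, user, query)); err != nil {
		log.Println(err.Error())
	}
}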
Example #3
// Scrape generates and triggers a tracker scrape request
func Scrape(tracker TorrentTracker, query url.Values) []byte {
	// List of files to be scraped
	scrapeFiles := make([]data.FileRecord, 0)

	// Iterate all info_hash values in query
	for _, infoHash := range query["info_hash"] {
		// Alias the query values (url.Values is a map) and set the current info hash
		localQuery := query
		localQuery.Set("info_hash", infoHash)

		// Store scrape information in struct
		scrape := new(data.ScrapeLog)
		err := scrape.FromValues(localQuery)
		if err != nil {
			return tracker.Error("Malformed scrape")
		}

		// Request to store scrape
		go func(scrape *data.ScrapeLog) {
			if err := scrape.Save(); err != nil {
				log.Println(err.Error())
			}
		}(scrape)

		log.Printf("scrape: [%s %s] %s", tracker.Protocol(), scrape.IP, scrape.InfoHash)

		// Check for a matching file via info_hash
		file, err := new(data.FileRecord).Load(scrape.InfoHash, "info_hash")
		if err != nil {
			log.Println(err.Error())
			return tracker.Error(ErrScrapeFailure.Error())
		}

		// Torrent is not currently registered
		if file == (data.FileRecord{}) {
			return tracker.Error("Unregistered torrent")
		}

		// Ensure file is verified, meaning we will permit scraping of it
		if !file.Verified {
			return tracker.Error("Unverified torrent")
		}

		// Launch peer reaper asynchronously to remove old peers from this file
		go func(file data.FileRecord) {
			// Start peer reaper
			count, err := file.PeerReaper()
			if err != nil {
				log.Println(err.Error())
			}

			// Report peers reaped
			if count > 0 {
				log.Printf("peerReaper: reaped %d peers on file ID: %d", count, file.ID)
			}
		}(file)

		// File is valid, add it to list to be scraped
		scrapeFiles = append(scrapeFiles, file)
	}

	// Create scrape
	return tracker.Scrape(scrapeFiles)
}
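
Finally, a small hedged sketch of a caller for this Scrape variant; the scrapeBoth helper and the udpTracker value are hypothetical, and the sketch only illustrates that the info_hash loop above iterates repeated keys, which url.Values.Add produces.

// Hypothetical caller for the Scrape example above; udpTracker is an assumed
// TorrentTracker implementation and the hash arguments are placeholders.
func scrapeBoth(hashOne string, hashTwo string) []byte {
	// url.Values keeps repeated keys, which is what the info_hash loop iterates
	query := url.Values{}
	query.Add("info_hash", hashOne)
	query.Add("info_hash", hashTwo)

	// Generate the scrape response covering both files
	return Scrape(udpTracker{}, query)
}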