// Starts the process of building the torrent file. The function does all of
// its work in a separate goroutine and uses up to 'nworkers' goroutines to
// perform SHA1 hashing; therefore it returns almost immediately. It returns
// two channels: the first is for awaiting completion, the second delivers
// status reports. A status report is the number of bytes hashed so far; the
// total number of bytes is available from the Batch.TotalSize method.
func (b *Batch) Start(w io.Writer, nworkers int) (<-chan error, <-chan int64) {
	if nworkers <= 0 {
		nworkers = 1
	}

	completion := make(chan error)
	status := make(chan int64)

	go func() {
		// prepare workers
		workers := make([]*worker, nworkers)
		free_workers := make(chan *worker, nworkers)
		for i := 0; i < nworkers; i++ {
			workers[i] = new_worker(free_workers)
		}
		stop_workers := func() {
			for _, wk := range workers {
				wk.stop()
			}
			for _, wk := range workers {
				wk.wait_for_stop()
			}
		}

		// prepare files for reading
		fr := files_reader{files: b.files}

		// number of pieces, rounded up so a trailing partial piece is
		// counted exactly once
		npieces := (b.total_size + b.piece_length - 1) / b.piece_length
		b.pieces = make([]byte, 20*npieces)
		hashed := int64(0)

		// read all the pieces, passing them to workers for hashing
		var data []byte
		for i := int64(0); i < npieces; i++ {
			if data == nil {
				data = make([]byte, b.piece_length)
			}

			nr, err := fr.Read(data)
			if err != nil {
				// EOF is not an error if it was the last piece
				if err == io.EOF {
					if i != npieces-1 {
						stop_workers()
						completion <- err
						return
					}
				} else {
					stop_workers()
					completion <- err
					return
				}
			}

			// trim the data slice to the amount of data actually read
			data = data[:nr]

			// hand the piece to a free worker; the worker returns a
			// buffer it has finished with (or nil), which is reused
			// for the next read
			wk := <-free_workers
			data = wk.queue(data, b.pieces[20*i:20*i+20])

			// update the byte count and try to send a status report
			// without blocking
			if data != nil {
				hashed += int64(len(data))
				data = data[:cap(data)]
				select {
				case status <- hashed:
				default:
				}
			}
		}
		stop_workers()

		// at this point all the hashes have been calculated and we're
		// ready to write the torrent file
		err := b.write_torrent(w)
		if err != nil {
			completion <- err
			return
		}
		completion <- nil
	}()

	return completion, status
}
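
// A minimal usage sketch (illustrative only; the function name, output
// writer, and worker count below are assumptions for the example, while
// Start and TotalSize come from this package): select on both channels,
// since status reports are sent non-blockingly and exactly one value
// arrives on the completion channel.
//
//	func buildTorrent(b *Batch, out io.Writer) error {
//		completion, status := b.Start(out, runtime.NumCPU())
//		for {
//			select {
//			case n := <-status:
//				fmt.Printf("hashed %d of %d bytes\n", n, b.TotalSize())
//			case err := <-completion:
//				return err
//			}
//		}
//	}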