// Build fetches the transaction's TxIns and TxOuts from Redis.
func (tx *Tx) Build(rpool *redis.Pool) (err error) {
	c := rpool.Get()
	defer c.Close()
	tx.TxIns = []*TxIn{}
	tx.TxOuts = []*TxOut{}
	txinskeys := []interface{}{}
	for i := range iter.N(int(tx.TxInCnt)) {
		txinskeys = append(txinskeys, fmt.Sprintf("txi:%v:%v", tx.Hash, i))
	}
	txinsjson, _ := redis.Strings(c.Do("MGET", txinskeys...))
	for _, txinjson := range txinsjson {
		ctxi := new(TxIn)
		if err = json.Unmarshal([]byte(txinjson), ctxi); err != nil {
			return
		}
		tx.TxIns = append(tx.TxIns, ctxi)
	}
	txoutskeys := []interface{}{}
	txoutsspentkeys := []interface{}{}
	for i := range iter.N(int(tx.TxOutCnt)) {
		txoutskeys = append(txoutskeys, fmt.Sprintf("txo:%v:%v", tx.Hash, i))
		txoutsspentkeys = append(txoutsspentkeys, fmt.Sprintf("txo:%v:%v:spent", tx.Hash, i))
	}
	txoutsjson, _ := redis.Strings(c.Do("MGET", txoutskeys...))
	txoutsspentjson, _ := redis.Strings(c.Do("MGET", txoutsspentkeys...))
	for txoindex, txoutjson := range txoutsjson {
		ctxo := new(TxOut)
		if err = json.Unmarshal([]byte(txoutjson), ctxo); err != nil {
			return
		}
		if txoutsspentjson[txoindex] != "" {
			cspent := new(TxoSpent)
			if err = json.Unmarshal([]byte(txoutsspentjson[txoindex]), cspent); err != nil {
				return
			}
			ctxo.Spent = cspent
		}
		tx.TxOuts = append(tx.TxOuts, ctxo)
	}
	return
}
func Benchmark(b *testing.B) {
	db, _ := sql.Open("sqlrpc", serverAddr)
	defer db.Close()
	db.Exec("drop table if exists a")
	db.Exec("create table a(b)")
	for range iter.N(b.N) {
		for i := range iter.N(10) {
			db.Exec("insert into a values (?)", i)
		}
		rows, _ := db.Query("select * from a where b < ?", 3)
		var count int
		for rows.Next() {
			var b int
			rows.Scan(&b)
			if b < 3 {
				count++
			}
		}
		assert.Nil(b, rows.Err())
		assert.EqualValues(b, 3, count)
		rows.Close()
		db.Exec("delete from a")
	}
	assert.Equal(b, 0, len(server.refs))
}
func TestRejectDialBacklogFilled(t *testing.T) {
	s, err := NewSocket("udp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	errChan := make(chan error)
	dial := func() {
		_, err := s.Dial(s.Addr().String())
		require.Error(t, err)
		errChan <- err
	}
	// Fill the backlog.
	for range iter.N(backlog) {
		go dial()
	}
	sleepWhile(&s.mu, func() bool { return len(s.backlog) < backlog })
	select {
	case err := <-errChan:
		t.Fatalf("got premature error: %s", err)
	default:
	}
	// One more connection should cause a dial attempt to get reset.
	go dial()
	err = <-errChan
	assert.EqualError(t, err, "peer reset")
	s.Close()
	for range iter.N(backlog) {
		<-errChan
	}
}
// Add changes the number of workers in the pool: a positive n starts that
// many new workers, a negative n stops that many, and zero is an error.
func (p *Pool) Add(n int) error {
	if n >= 1 {
		for range iter.N(n) {
			stopC := make(chan struct{})
			p.wg.Add(1)
			go NewWorker(p.read, p.write, p.fn, stopC).Work(p.wg)
			p.Lock()
			p.workers = append(p.workers, stopC)
			p.Unlock()
		}
	} else if n <= -1 {
		for range iter.N(-1 * n) {
			p.Lock()
			// Close the channel to stop the worker.
			close(p.workers[len(p.workers)-1])
			// From github.com/golang/go/wiki/SliceTricks:
			// delete without preserving order.
			p.workers[len(p.workers)-1] = nil
			p.workers = p.workers[:len(p.workers)-1]
			p.Unlock()
		}
	} else {
		return fmt.Errorf("%d is not a valid number of workers to add", n)
	}
	return nil
}
func connectSelfLots(n int, t testing.TB) {
	s, err := NewSocket("127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	go func() {
		for range iter.N(n) {
			c, err := s.Accept()
			if err != nil {
				log.Fatal(err)
			}
			defer c.Close()
		}
	}()
	dialErr := make(chan error)
	connCh := make(chan net.Conn)
	for range iter.N(n) {
		go func() {
			c, err := s.Dial(s.Addr().String())
			if err != nil {
				dialErr <- err
				return
			}
			connCh <- c
		}()
	}
	conns := make([]net.Conn, 0, n)
	for range iter.N(n) {
		select {
		case c := <-connCh:
			conns = append(conns, c)
		case err := <-dialErr:
			t.Fatal(err)
		}
		if testing.Verbose() {
			log.Printf("%x", len(conns))
		}
	}
	for _, c := range conns {
		if c != nil {
			c.Close()
		}
	}
	s.mu.Lock()
	for len(s.conns) != 0 {
		if testing.Verbose() {
			log.Printf("socket conns: %d", len(s.conns))
			if len(s.conns) < 10 {
				for _, c := range s.conns {
					log.Printf("%#v", c)
				}
			}
		}
		s.event.Wait()
	}
	s.mu.Unlock()
	s.Close()
}
func main() {
	// Prints 12 on the playground: a slice header (pointer, len, cap) on amd64p32.
	fmt.Println(unsafe.Sizeof(iter.N(10)))
	// Print 0 through 9, inclusive, without causing any allocations.
	for i := range iter.N(10) {
		fmt.Println(i)
	}
}
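For reference, the helper every snippet in this listing uses is tiny. This is a sketch of the published github.com/bradfitz/iter source (modulo its doc comments):

package iter

// N returns a slice of n zero-sized values. Ranging over it yields the
// indexes 0 through n-1, and because the element type struct{} occupies
// no memory, the make call performs no allocation.
func N(n int) []struct{} {
	return make([]struct{}, n)
}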
// calc evaluates the function f for each cell in the field.
func calc(c *Field, andMask uint32, f ceval) *Field {
	out := NewField(c.Width, c.Height)
	for y := range iter.N(c.Height) {
		for x := range iter.N(c.Width) {
			out.State[c.Width*y+x] = f(c, x, y, andMask)
		}
	}
	return out
}
func BenchmarkIteration(b *testing.B) {
	for range iter.N(b.N) {
		i := New()
		for p := range iter.N(500) {
			i.SetPiece(p, p)
		}
		for e := i.First(); e != nil; e = e.Next() {
		}
	}
}
func BenchmarkInsert(b *testing.B) {
	for range iter.N(b.N) {
		li := newLRUItems()
		for range iter.N(10000) {
			r := rand.Int63()
			t := time.Unix(r/1e9, r%1e9)
			li.Insert(ItemInfo{
				Accessed: t,
			})
		}
	}
}
func connectSelfLots(n int, t testing.TB) {
	defer goroutineLeakCheck(t)()
	s, err := NewSocket("udp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	go func() {
		for range iter.N(n) {
			c, err := s.Accept()
			if err != nil {
				log.Fatal(err)
			}
			defer c.Close()
		}
	}()
	dialErr := make(chan error)
	connCh := make(chan net.Conn)
	dialSema := make(chan struct{}, backlog)
	for range iter.N(n) {
		go func() {
			dialSema <- struct{}{}
			c, err := s.Dial(s.Addr().String())
			<-dialSema
			if err != nil {
				dialErr <- err
				return
			}
			connCh <- c
		}()
	}
	conns := make([]net.Conn, 0, n)
	for range iter.N(n) {
		select {
		case c := <-connCh:
			conns = append(conns, c)
		case err := <-dialErr:
			t.Fatal(err)
		}
	}
	for _, c := range conns {
		if c != nil {
			c.Close()
		}
	}
	s.mu.Lock()
	for len(s.conns) != 0 {
		// log.Print(len(s.conns))
		s.event.Wait()
	}
	s.mu.Unlock()
	s.Close()
}
func nextState(c, is2, is3 *Field) *Field {
	out := NewField(c.Width, c.Height)
	for y := range iter.N(c.Height) {
		for x := range iter.N(c.Width) {
			i := y*c.Width + x
			// Live cells with 2 or 3 neighbors live.
			out.State[i] = ((is2.State[i] | is3.State[i]) & c.State[i]) |
				// Dead cells with exactly 3 neighbors live.
				(is3.State[i] &^ c.State[i])
		}
	}
	return out
}
func NewWorkspace(shouldShow RepoFilter, presenter RepoPresenter) *workspace {
	w := &workspace{
		ImportPaths:       make(chan string, 64),
		unique:            make(chan *Repo, 64),
		processedFiltered: make(chan *Repo, 64),
		Statuses:          make(chan string, 64),
		Errors:            make(chan error, 64),

		shouldShow: shouldShow,
		presenter:  presenter,

		repos: make(map[string]*Repo),
	}

	{
		var wg sync.WaitGroup
		for range iter.N(parallelism) {
			wg.Add(1)
			go w.uniqueWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(w.unique)
		}()
	}
	{
		var wg sync.WaitGroup
		for range iter.N(parallelism) {
			wg.Add(1)
			go w.processFilterWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(w.processedFiltered)
		}()
	}
	{
		var wg sync.WaitGroup
		for range iter.N(parallelism) {
			wg.Add(1)
			go w.presenterWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(w.Statuses)
			close(w.Errors)
		}()
	}

	return w
}
func main() {
	tagflag.Parse(&flags)
	for _, filename := range flags.TorrentFiles {
		metainfo, err := metainfo.LoadFromFile(filename)
		if err != nil {
			log.Print(err)
			continue
		}
		info := &metainfo.Info.Info
		if flags.JustName {
			fmt.Printf("%s\n", metainfo.Info.Name)
			continue
		}
		d := map[string]interface{}{
			"Name":        info.Name,
			"NumPieces":   info.NumPieces(),
			"PieceLength": info.PieceLength,
		}
		if flags.PieceHashes {
			d["PieceHashes"] = func() (ret []string) {
				for i := range iter.N(info.NumPieces()) {
					ret = append(ret, hex.EncodeToString(info.Pieces[i*20:(i+1)*20]))
				}
				return
			}()
		}
		b, _ := json.MarshalIndent(d, "", " ")
		os.Stdout.Write(b)
	}
	if !flags.JustName {
		os.Stdout.WriteString("\n")
	}
}
// Tests the request ordering based on a connection's priorities.
func TestPieceRequestOrder(t *testing.T) {
	c := connection{
		pieceRequestOrder: pieceordering.New(),
		piecePriorities:   []int{1, 4, 0, 3, 2},
	}
	testRequestOrder(nil, c.pieceRequestOrder, t)
	c.pendPiece(2, PiecePriorityNone, nil)
	testRequestOrder(nil, c.pieceRequestOrder, t)
	c.pendPiece(1, PiecePriorityNormal, nil)
	c.pendPiece(2, PiecePriorityNormal, nil)
	testRequestOrder([]int{2, 1}, c.pieceRequestOrder, t)
	c.pendPiece(0, PiecePriorityNormal, nil)
	testRequestOrder([]int{2, 0, 1}, c.pieceRequestOrder, t)
	c.pendPiece(1, PiecePriorityReadahead, nil)
	testRequestOrder([]int{1, 2, 0}, c.pieceRequestOrder, t)
	c.pendPiece(4, PiecePriorityNow, nil)
	// now(4), readahead(1), normal(0, 2)
	testRequestOrder([]int{4, 1, 2, 0}, c.pieceRequestOrder, t)
	c.pendPiece(2, PiecePriorityReadahead, nil)
	// now(4), readahead(1, 2), normal(0)
	testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
	c.pendPiece(1, PiecePriorityNow, nil)
	// now(4, 1), readahead(2), normal(0)
	// In the same order, the keys will be: -15+6, -15+12, -5, 1. So we test
	// that a very low priority (for this connection) "now" piece has been
	// placed after a readahead piece.
	testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
	// Note this intentionally sets to None a piece that's not in the order.
	for i := range iter.N(5) {
		c.pendPiece(i, PiecePriorityNone, nil)
	}
	testRequestOrder(nil, c.pieceRequestOrder, t)
}
func CreateRandomTree(t *testing.T, path string, rec, maxrec int) (string, int) {
	p := NewRandomDir(path)
	if rec == 0 {
		t.Logf("Creating a new random tree at %v", p)
	}
	//NewRandomDir(p)
	//NewEmptyFile(p)
	nfiles := 0
	for {
		nfiles = mrand.Intn(10)
		if nfiles >= 3 {
			break
		}
	}
	cnt := 0
	var wg sync.WaitGroup
	for range iter.N(nfiles) {
		wg.Add(1)
		go NewRandomFileWg(p, &wg)
		cnt++
		if rec < maxrec && mrand.Intn(10) < 5 {
			_, ncnt := CreateRandomTree(t, p, rec+1, maxrec)
			cnt += ncnt
		}
		// Break at 30 to spend less time.
		if cnt > 30 {
			return p, cnt
		}
	}
	wg.Wait()
	if rec == 0 {
		t.Log("Random tree created")
	}
	return p, cnt
}
func TestRejectDialBacklogFilled(t *testing.T) {
	s, err := NewSocket("udp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	errChan := make(chan error, 1)
	dial := func() {
		_, err := s.Dial(s.Addr().String())
		if err != nil {
			errChan <- err
		}
	}
	// Fill the backlog.
	for range iter.N(backlog + 1) {
		go dial()
	}
	s.mu.Lock()
	for len(s.backlog) < backlog {
		s.event.Wait()
	}
	s.mu.Unlock()
	select {
	case <-errChan:
		t.FailNow()
	default:
	}
	// One more connection should cause a dial attempt to get reset.
	go dial()
	err = <-errChan
	if err.Error() != "peer reset" {
		t.FailNow()
	}
	s.Close()
}
func main() {
	log.SetFlags(log.Flags() | log.Lshortfile)
	flag.Parse()
	metaInfo, err := metainfo.LoadFromFile(*torrentPath)
	if err != nil {
		log.Fatal(err)
	}
	mMapSpan := &mmap_span.MMapSpan{}
	if len(metaInfo.Info.Files) > 0 {
		for _, file := range metaInfo.Info.Files {
			filename := filepath.Join(append([]string{*dataPath, metaInfo.Info.Name}, file.Path...)...)
			goMMap := fileToMmap(filename, file.Length)
			mMapSpan.Append(goMMap)
		}
		log.Println(len(metaInfo.Info.Files))
	} else {
		goMMap := fileToMmap(*dataPath, metaInfo.Info.Length)
		mMapSpan.Append(goMMap)
	}
	log.Println(mMapSpan.Size())
	log.Println(len(metaInfo.Info.Pieces))
	info := metaInfo.Info
	for i := range iter.N(metaInfo.Info.NumPieces()) {
		p := info.Piece(i)
		hash := sha1.New()
		_, err := io.Copy(hash, io.NewSectionReader(mMapSpan, p.Offset(), p.Length()))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d: %x: %v\n", i, p.Hash(), bytes.Equal(hash.Sum(nil), p.Hash().Bytes()))
	}
}
func (c *connection) peerPiecesChanged() {
	if c.t.haveInfo() {
		for i := range iter.N(c.t.numPieces()) {
			c.peerHasPieceChanged(i)
		}
	}
}
func randomMap(size int) map[string]string {
	out := make(map[string]string, size)
	for range iter.N(size) {
		out[randomString(40)] = randomString(40)
	}
	return out
}
// CastSlice sets the slice pointed to by slicePtr to a copy of fromSlice,
// converting each element to the destination slice's element type.
func CastSlice(slicePtr interface{}, fromSlice interface{}) {
	fromSliceValue := reflect.ValueOf(fromSlice)
	fromLen := fromSliceValue.Len()
	if fromLen == 0 {
		return
	}
	// Deref the pointer to slice.
	slicePtrValue := reflect.ValueOf(slicePtr)
	if slicePtrValue.Kind() != reflect.Ptr {
		panic("destination is not a pointer")
	}
	destSliceValue := slicePtrValue.Elem()
	// The type of the elements of the destination slice.
	destSliceElemType := destSliceValue.Type().Elem()
	destSliceValue.Set(reflect.MakeSlice(destSliceValue.Type(), fromLen, fromLen))
	for i := range iter.N(fromLen) {
		// The value inside the interface in the slice element.
		itemValue := fromSliceValue.Index(i)
		if itemValue.Kind() == reflect.Interface {
			itemValue = itemValue.Elem()
		}
		convertedItem := itemValue.Convert(destSliceElemType)
		destSliceValue.Index(i).Set(convertedItem)
	}
}
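A quick usage sketch of CastSlice (illustrative only; dst and src are my names, not from the original source):

func main() {
	// Convert a []interface{} holding ints into a typed []int.
	var dst []int
	src := []interface{}{1, 2, 3}
	CastSlice(&dst, src)
	fmt.Println(dst) // prints [1 2 3]
}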
func (me *store) OpenTorrentData(info *metainfo.Info) (ret *data) {
	ret = &data{info, me}
	for i := range iter.N(info.NumPieces()) {
		go ret.PieceComplete(i)
	}
	return
}
func TestTimerDoesNotFireAfterStop(t *testing.T) {
	t.Skip("the standard library implementation is broken")
	fail := make(chan struct{})
	done := make(chan struct{})
	defer close(done)
	for range iter.N(1000) {
		tr := time.NewTimer(0)
		tr.Stop()
		// There may or may not be a value in the channel now. But definitely
		// one should not be added after we receive it.
		select {
		case <-tr.C:
		default:
		}
		// Now set the timer to trigger in an hour. It definitely shouldn't
		// be receivable for an hour.
		tr.Reset(time.Hour)
		go func() {
			select {
			case <-tr.C:
				// As soon as the channel receives, notify failure.
				fail <- struct{}{}
			case <-done:
			}
		}()
	}
	select {
	case <-fail:
		t.FailNow()
	case <-time.After(100 * time.Millisecond):
	}
}
func (t *torrent) numPiecesCompleted() (num int) {
	for i := range iter.N(t.Info.NumPieces()) {
		if t.pieceComplete(i) {
			num++
		}
	}
	return
}
func randomString(size int) string {
	buf := bytes.NewBuffer(make([]byte, 0, size))
	for range iter.N(size) {
		buf.WriteRune(alphabet[rand.Intn(len(alphabet))])
	}
	return buf.String()
}
func TestWorkerRetries(t *testing.T) {
	t.Parallel()
	var broker thermocline.Broker
	broker = mem.NewBroker()
	reader, err := broker.Read("test", thermocline.NoVersion)
	if err != nil {
		t.Errorf("could not open queue '%s'", err)
	}
	writer, err := broker.Write("test", thermocline.NoVersion)
	if err != nil {
		t.Errorf("could not open queue '%s'", err)
	}
	tn := rand.Intn(256)
	for i := range iter.N(tn) {
		task, err := thermocline.NewTask(fmt.Sprintf("test %d", i))
		if err != nil {
			t.Error("could not create test task", err)
		}
		writer <- task
	}
	stopper := make(chan struct{})
	var worked int64
	wg := &sync.WaitGroup{}
	for range iter.N(rand.Intn(256)) {
		wg.Add(1)
		go thermocline.NewWorker(reader, writer, func(task *thermocline.Task) ([]*thermocline.Task, error) {
			atomic.AddInt64(&worked, 1)
			return nil, errors.New("cannot process task, herp derup")
		}, stopper).Work(wg)
	}
	time.Sleep(500 * time.Millisecond)
	close(stopper)
	wg.Wait()
	if atomic.LoadInt64(&worked) != int64(tn*3) {
		t.Errorf("%d tasks not worked in retry test after 500ms, actually %d",
			tn*3, atomic.LoadInt64(&worked))
	}
}
func (t *torrent) getCompletedPieces() (ret bitmap.Bitmap) {
	for i := range iter.N(t.numPieces()) {
		if t.pieceComplete(i) {
			ret.Add(i)
		}
	}
	return
}
func ConvertToSliceOfEmptyInterface(slice interface{}) (ret []interface{}) {
	v := reflect.ValueOf(slice)
	l := v.Len()
	ret = make([]interface{}, 0, l)
	for i := range iter.N(l) {
		ret = append(ret, v.Index(i).Interface())
	}
	return
}
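An illustrative usage sketch (args is my name, not from the original source): widening a concrete slice this way is what lets it feed a variadic ...interface{} parameter, like the Redis MGET arguments built in the Build example above.

func main() {
	args := ConvertToSliceOfEmptyInterface([]string{"a", "b", "c"})
	fmt.Println(args...) // prints: a b c
}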
func TestAllocs(t *testing.T) {
	var x []struct{}
	allocs := testing.AllocsPerRun(500, func() {
		// Even a billion-element slice allocates nothing, since the
		// element type struct{} is zero-sized.
		x = iter.N(1e9)
	})
	if allocs > 0.1 {
		t.Errorf("allocs = %v", allocs)
	}
}
func ExampleN() {
	for i := range iter.N(4) {
		fmt.Println(i)
	}
	// Output:
	// 0
	// 1
	// 2
	// 3
}
// Check that closing, and the resulting detach of, a Conn doesn't close the
// parent Socket. We Accept, then close the connection and ensure it's
// detached. Then Accept again to check the Socket is still functional and
// unclosed.
func TestConnCloseUnclosedSocket(t *testing.T) {
	t.Parallel()
	s, err := NewSocket("udp", "localhost:0")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, s.Close())
	}()
	// Prevents the dialing goroutine from closing its end of the Conn before
	// we can check that it has been registered in the listener.
	dialerSync := make(chan struct{})
	go func() {
		for range iter.N(2) {
			c, err := Dial(s.Addr().String())
			require.NoError(t, err)
			<-dialerSync
			err = c.Close()
			require.NoError(t, err)
		}
	}()
	for range iter.N(2) {
		a, err := s.Accept()
		require.NoError(t, err)
		// We do this in a closure because we need to unlock Server.mu if the
		// test failure exception is thrown. "Do as we say, not as we do" -Go
		// team.
		func() {
			s.mu.Lock()
			defer s.mu.Unlock()
			require.Len(t, s.conns, 1)
		}()
		dialerSync <- struct{}{}
		require.NoError(t, a.Close())
		func() {
			s.mu.Lock()
			defer s.mu.Unlock()
			for len(s.conns) != 0 {
				s.event.Wait()
			}
		}()
	}
}