func ExampleContextRelationShip() {
	ctx1, cancel1 := context.WithCancel(context.Background())
	ctx2, cancel2 := context.WithCancel(ctx1)

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		log.Printf("waiting for ctx1...")
		<-ctx1.Done()
		log.Printf("ctx1.Done() returns")
	}()

	go func() {
		defer wg.Done()
		log.Printf("waiting for ctx2....")
		<-ctx2.Done()
		log.Printf("ctx2.Done() returns")
	}()

	time.AfterFunc(time.Second*5, cancel2)
	time.AfterFunc(time.Second*1, cancel1) // canceling ctx1 also cancels its child ctx2, so both finish together

	wg.Wait()
}
func TestDoubleGet(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)

	ctx1, cancel1 := context.WithCancel(context.Background())
	blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	ctx2, cancel2 := context.WithCancel(context.Background())
	defer cancel2()
	blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()})
	if err != nil {
		t.Fatal(err)
	}

	// ensure both requests make it into the wantlist at the same time
	time.Sleep(time.Millisecond * 100)
	cancel1()

	_, ok := <-blkch1
	if ok {
		t.Fatal("expected channel to be closed")
	}

	err = instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	select {
	case blk, ok := <-blkch2:
		if !ok {
			t.Fatal("expected to get the block here")
		}
		t.Log(blk)
	case <-time.After(time.Second * 5):
		t.Fatal("timed out waiting on block")
	}

	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
// NewPool creates a Pool
func NewPool(parentCtx context.Context) *Pool {
	baseCtx, baseCancel := context.WithCancel(parentCtx)
	ctx, cancel := context.WithCancel(baseCtx)
	return &Pool{
		baseCtx:    baseCtx,
		baseCancel: baseCancel,
		ctx:        ctx,
		cancel:     cancel,
	}
}
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
	emptyDir := ft.EmptyDirNode()
	nodek, err := n.DAG.Add(emptyDir)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(n.Context())
	defer cancel()

	err = n.Pinning.Pin(ctx, emptyDir, false)
	if err != nil {
		return err
	}

	err = n.Pinning.Flush()
	if err != nil {
		return err
	}

	pub := nsys.NewRoutingPublisher(n.Routing, n.Repo.Datastore())
	if err := pub.Publish(ctx, key, path.FromCid(nodek)); err != nil {
		return err
	}

	return nil
}
func (rp *Republisher) republishEntries(p goprocess.Process) error {
	ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p))
	defer cancel()

	for id := range rp.entries {
		log.Debugf("republishing ipns entry for %s", id)
		priv := rp.ps.PrivKey(id)

		// Look for it locally only
		_, ipnskey := namesys.IpnsKeysForID(id)
		p, seq, err := rp.getLastVal(ipnskey)
		if err != nil {
			if err == errNoEntry {
				continue
			}
			return err
		}

		// update record with same sequence number
		eol := time.Now().Add(rp.RecordLifetime)
		err = namesys.PutRecordToRouting(ctx, priv, p, seq, eol, rp.r, id)
		if err != nil {
			return err
		}
	}

	return nil
}
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
	g := blocksutil.NewBlockGenerator()
	ctx, cancel := context.WithCancel(context.Background())
	n := New()
	defer n.Shutdown()

	t.Log("generate a large number of blocks. exceed default buffer")
	bs := g.Blocks(1000)
	ks := func() []*cid.Cid {
		var keys []*cid.Cid
		for _, b := range bs {
			keys = append(keys, b.Cid())
		}
		return keys
	}()

	_ = n.Subscribe(ctx, ks...) // ignore received channel

	t.Log("cancel context before any blocks published")
	cancel()
	for _, b := range bs {
		n.Publish(b)
	}

	t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
// GoWithContext is similar to Go, but uses ctx as the parent of
// the context that will be used for the task's cancelation.
func GoWithContext(ctx context.Context, fnStart StartFuncCtx) *Single {
	ctx, cancel := context.WithCancel(ctx)
	fnS := func() error {
		return fnStart(ctx)
	}
	return goNoCtx(fnS, cancel)
}
func TestDirBuilder(t *testing.T) {
	dserv := testu.GetDAGServ()
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
	key := node.Cid()

	b := NewDirectory(dserv)
	b.AddChild(ctx, "random", key)

	dir := b.GetNode()

	outn, err := dir.GetLinkedProtoNode(ctx, dserv, "random")
	if err != nil {
		t.Fatal(err)
	}

	reader, err := NewDagReader(ctx, outn, dserv)
	if err != nil {
		t.Fatal(err)
	}

	outbuf, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	err = testu.ArrComp(inbuf, outbuf)
	if err != nil {
		t.Fatal(err)
	}
}
func TestEndSeek(t *testing.T) {
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(t, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	_, err = dagmod.Write(make([]byte, 100))
	if err != nil {
		t.Fatal(err)
	}

	offset, err := dagmod.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 100 {
		t.Fatal("expected the relative seek 0 to return current location")
	}

	offset, err = dagmod.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 0 {
		t.Fatal("expected the absolute seek to set offset at 0")
	}

	offset, err = dagmod.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 100 {
		t.Fatal("expected the end seek to set offset at end")
	}
}
func TestRelativeSeek(t *testing.T) {
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(t, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 64; i++ {
		dagmod.Write([]byte{byte(i)})
		if _, err := dagmod.Seek(1, os.SEEK_CUR); err != nil {
			t.Fatal(err)
		}
	}

	out, err := ioutil.ReadAll(dagmod)
	if err != nil {
		t.Fatal(err)
	}

	for i, v := range out {
		if v != 0 && i/2 != int(v) {
			t.Errorf("expected %d, at index %d, got %d", i/2, i, v)
		}
	}
}
func main() {
	var rcpAddr, port string
	flag.StringVar(&rcpAddr, "rcp", ":3323", "rcp addr (default: ':3323')")
	flag.StringVar(&port, "http", ":3343", "http port (default: '3343')")
	flag.Parse()

	//START2 OMIT
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	m := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	err := idl.RegisterUserServiceHandlerFromEndpoint(ctx, m, rcpAddr, opts)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot register service handler: %v\n", err)
		os.Exit(1)
	}

	// custom routes first, and cors handling on all requests
	h := cors(preMuxRouter(m))
	if err = http.ListenAndServe(port, h); err != nil {
		fmt.Fprintf(os.Stderr, "http server error: %v\n", err)
		os.Exit(1)
	}
	//END2 OMIT
}
func _() {
	ctx, cancel := context.WithCancel() // ERROR "not used on all paths"
	if condition {
		cancel()
	}
	return // ERROR "this return statement may be reached without using the cancel var"
}
func (i *cmdInvocation) SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
	intrh := NewIntrHandler()
	ctx, cancelFunc := context.WithCancel(ctx)

	handlerFunc := func(count int, ih *IntrHandler) {
		switch count {
		case 1:
			fmt.Println() // Prevent un-terminated ^C character in terminal

			ih.wg.Add(1)
			go func() {
				defer ih.wg.Done()
				cancelFunc()
			}()

		default:
			fmt.Println("Received another interrupt before graceful shutdown, terminating...")
			os.Exit(-1)
		}
	}

	intrh.Handle(handlerFunc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	return intrh, ctx
}
func NewRegistry(cfg Config, appname, ip string, port int) (*Registry, error) {
	backendSpec, err := newBackendSpec(appname, ip, port)
	if err != nil {
		return nil, fmt.Errorf("Failed to create backend: err=(%s)", err)
	}
	if cfg.TTL <= 0 {
		cfg.TTL = defaultRegistrationTTL
	}

	etcdCfg := etcd.Config{Endpoints: []string{localEtcdProxy}}
	etcdClt, err := etcd.New(etcdCfg)
	if err != nil {
		return nil, err
	}

	ctx, cancelFunc := context.WithCancel(context.Background())
	go func() {
		for {
			err := etcdClt.AutoSync(ctx, 10*time.Second)
			if err == context.DeadlineExceeded || err == context.Canceled {
				break
			}
			fmt.Print(err)
		}
	}()

	etcdKeysAPI := etcd.NewKeysAPI(etcdClt)
	c := Registry{
		cfg:         cfg,
		backendSpec: backendSpec,
		etcdKeysAPI: etcdKeysAPI,
		ctx:         ctx,
		cancelFunc:  cancelFunc,
	}
	return &c, nil
}
func TestDoContext(t *testing.T) {
	receivedCh := make(chan struct{})
	block := make(chan struct{})
	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		close(receivedCh)
		<-block
		w.WriteHeader(http.StatusOK)
	}))
	defer testServer.Close()
	defer close(block)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		<-receivedCh
		cancel()
	}()

	c := testRESTClient(t, testServer)
	_, err := c.Verb("GET").
		Context(ctx).
		Prefix("foo").
		DoRaw()
	if err == nil {
		t.Fatal("Expected context cancellation error")
	}
}
func (dm *DagModifier) readPrep() error {
	err := dm.Sync()
	if err != nil {
		return err
	}

	if dm.read == nil {
		ctx, cancel := context.WithCancel(dm.ctx)
		dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv)
		if err != nil {
			return err
		}

		i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET)
		if err != nil {
			return err
		}

		if i != int64(dm.curWrOff) {
			return ErrSeekFail
		}

		dm.readCancel = cancel
		dm.read = dr
	}

	return nil
}
// This example demonstrates the use of a cancelable context to prevent a
// goroutine leak. By the end of the example function's execution, the "count"
// goroutine is canceled.
func ExampleWithCancel() {
	count := func(ctx context.Context, dst chan<- int) {
		n := 1
		for {
			select {
			case dst <- n:
				n++
			case <-ctx.Done():
				return
			}
		}
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ints := make(chan int)
	go count(ctx, ints)
	for n := range ints {
		fmt.Println(n)
		if n == 5 {
			return
		}
	}
	// Output:
	// 1
	// 2
	// 3
	// 4
	// 5
}
func TestCtxRead(t *testing.T) {
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(t, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	_, err = dagmod.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7})
	if err != nil {
		t.Fatal(err)
	}
	dagmod.Seek(0, os.SEEK_SET)

	readBuf := make([]byte, 4)
	_, err = dagmod.CtxReadFull(ctx, readBuf)
	if err != nil {
		t.Fatal(err)
	}
	err = testu.ArrComp(readBuf, []byte{0, 1, 2, 3})
	if err != nil {
		t.Fatal(err)
	}
	// TODO(Kubuxu): context cancel case, I will do it after I figure out dagreader tests,
	// because this is exactly the same.
}
func TestBasic(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ds, rt := setupRoot(ctx, t)

	rootdir := rt.GetValue().(*Directory)

	// test making a basic dir
	_, err := rootdir.Mkdir("a")
	if err != nil {
		t.Fatal(err)
	}

	path := "a/b/c/d/e/f/g"
	d := mkdirP(t, rootdir, path)

	fi := getRandFile(t, ds, 1000)

	// test inserting that file
	err = d.AddChild("afile", fi)
	if err != nil {
		t.Fatal(err)
	}

	err = assertFileAtPath(ds, rootdir, fi, "a/b/c/d/e/f/g/afile")
	if err != nil {
		t.Fatal(err)
	}
}
func BenchmarkDagmodWrite(b *testing.B) {
	b.StopTimer()
	dserv := testu.GetDAGServ()
	n := testu.GetEmptyNode(b, dserv)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wrsize := 4096

	dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512))
	if err != nil {
		b.Fatal(err)
	}

	buf := make([]byte, b.N*wrsize)
	u.NewTimeSeededRand().Read(buf)
	b.StartTimer()
	b.SetBytes(int64(wrsize))
	for i := 0; i < b.N; i++ {
		n, err := dagmod.Write(buf[i*wrsize : (i+1)*wrsize])
		if err != nil {
			b.Fatal(err)
		}
		if n != wrsize {
			b.Fatal("Wrote bad size")
		}
	}
}
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}

	nd, err := core.NewNode(ctx, &core.BuildCfg{Repo: r})
	if err != nil {
		return err
	}
	defer nd.Close()

	dkey, err := assets.SeedInitDocs(nd)
	if err != nil {
		return fmt.Errorf("init: seeding init docs failed: %s", err)
	}
	log.Debugf("init: seeded init docs %s", dkey)

	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}

	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
// CloseNotify is a middleware that cancels ctx when the underlying
// connection has gone away. It can be used to cancel long operations
// on the server when the client disconnects before the response is ready.
func CloseNotify(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		cn, ok := w.(http.CloseNotifier)
		if !ok {
			panic("chi/middleware: CloseNotify expects http.ResponseWriter to implement http.CloseNotifier interface")
		}
		closeNotifyCh := cn.CloseNotify()

		ctx, cancel := context.WithCancel(r.Context())
		defer cancel()

		go func() {
			select {
			case <-ctx.Done():
				return
			case <-closeNotifyCh:
				cancel()
				return
			}
		}()

		r = r.WithContext(ctx)
		next.ServeHTTP(w, r)
	}

	return http.HandlerFunc(fn)
}
func TestSeekAndRead(t *testing.T) {
	dserv := testu.GetDAGServ()
	inbuf := make([]byte, 256)
	for i := 0; i <= 255; i++ {
		inbuf[i] = byte(i)
	}

	node := testu.GetNode(t, dserv, inbuf)
	ctx, closer := context.WithCancel(context.Background())
	defer closer()

	reader, err := NewDagReader(ctx, node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	for i := 255; i >= 0; i-- {
		reader.Seek(int64(i), os.SEEK_SET)

		if reader.Offset() != int64(i) {
			t.Fatal("expected offset to match the seek position")
		}

		out := readByte(t, reader)

		if int(out) != i {
			t.Fatalf("read %d at index %d, expected %d", out, i, i)
		}

		if reader.Offset() != int64(i+1) {
			t.Fatal("expected offset to be increased by one after read")
		}
	}
}
func (w *bufferedworker) AddWorker() {
	tlog("%s AddingWorker", w.name)
	w.workerMx.Lock()

	ctx, cancel := context.WithCancel(context.Background())
	workerName := fmt.Sprintf("%s#%d", w.name, len(w.workers))

	wk := new(taskworker)
	wk.name = workerName
	wk.doNotCloseChannels = true // the input and output channels are shared

	// replace the worker's own task channels with our own buffered channels
	wk.tasks = w.buftasks
	//wk.done = w.bufdone
	wk.Pipeline(w.bufdone)

	w.workers = append(w.workers, cancel)
	go wk.Start(ctx)

	w.workerMx.Unlock()
}
func TestConsistentAccounting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sender := newEngine(ctx, "Ernie")
	receiver := newEngine(ctx, "Bert")

	// Send messages from Ernie to Bert
	for i := 0; i < 1000; i++ {
		m := message.New(false)
		content := []string{"this", "is", "message", "i"}
		m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))

		sender.Engine.MessageSent(receiver.Peer, m)
		receiver.Engine.MessageReceived(sender.Peer, m)
	}

	// Ensure sender records the change
	if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
		t.Fatal("Sent bytes were not recorded")
	}

	// Ensure sender and receiver have the same values
	if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
		t.Fatal("Inconsistent book-keeping. Strategies don't agree")
	}

	// Ensure sender didn't record receiving anything, and that the receiver
	// didn't record sending anything
	if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
		t.Fatal("Bert didn't send bytes to Ernie")
	}
}
func RunSupernodePutRecordGetRecord(conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}

	putter := clients[0]
	getter := clients[1]

	k := "key"
	note := []byte("a note from putter")

	if err := putter.Routing.PutValue(ctx, k, note); err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}

	received, err := getter.Routing.GetValue(ctx, k)
	if err != nil {
		return fmt.Errorf("failed to get value: %s", err)
	}

	if 0 != bytes.Compare(note, received) {
		return errors.New("record doesn't match")
	}
	cancel()
	return nil
}
func TestValidAfter(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	pi := testutil.RandIdentityOrFatal(t)
	key := cid.NewCidV0(u.Hash([]byte("mock key")))
	conf := DelayConfig{
		ValueVisibility: delay.Fixed(1 * time.Hour),
		Query:           delay.Fixed(0),
	}

	rs := NewServerWithDelay(conf)

	rs.Client(pi).Provide(ctx, key)

	var providers []pstore.PeerInfo
	providers, err := rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	if len(providers) > 0 {
		t.Fail()
	}

	conf.ValueVisibility.Set(0)
	providers, err = rs.Client(pi).FindProviders(ctx, key)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("providers", providers)
	if len(providers) != 1 {
		t.Fail()
	}
}
func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) {
	dagnodes := make([]node.Node, 0)
	for _, fpath := range paths {
		p, err := path.ParsePath(fpath)
		if err != nil {
			return nil, err
		}

		dagnode, err := core.Resolve(ctx, n.Namesys, n.Resolver, p)
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var out []*cid.Cid
	for _, dagnode := range dagnodes {
		c := dagnode.Cid()

		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		err := n.Pinning.Pin(ctx, dagnode, recursive)
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		out = append(out, c)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return out, nil
}
func TestClientRedirectContext(t *testing.T) {
	defer afterTest(t)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		Redirect(w, r, "/", StatusFound)
	}))
	defer ts.Close()

	ctx, cancel := context.WithCancel(context.Background())
	c := &Client{CheckRedirect: func(req *Request, via []*Request) error {
		cancel()
		if len(via) > 2 {
			return errors.New("too many redirects")
		}
		return nil
	}}
	req, _ := NewRequest("GET", ts.URL, nil)
	req = req.WithContext(ctx)
	_, err := c.Do(req)
	ue, ok := err.(*url.Error)
	if !ok {
		t.Fatalf("got error %T; want *url.Error", err)
	}
	if ue.Err != ExportErrRequestCanceled && ue.Err != ExportErrRequestCanceledConn {
		t.Errorf("url.Error.Err = %v; want errRequestCanceled or errRequestCanceledConn", ue.Err)
	}
}
func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) {
	var unpinned []*cid.Cid
	for _, p := range paths {
		p, err := path.ParsePath(p)
		if err != nil {
			return nil, err
		}

		k, err := core.ResolveToCid(ctx, n, p)
		if err != nil {
			return nil, err
		}

		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		err = n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}