Example #1
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {

	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.RawData(), received.RawData()) {
		t.Fatal("Data doesn't match")
	}
}
Example #2
func TestChain(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := &transport.Request{
		Caller:    "somecaller",
		Service:   "someservice",
		Encoding:  transport.Encoding("raw"),
		Procedure: "hello",
		Body:      bytes.NewReader([]byte{1, 2, 3}),
	}
	res := &transport.Response{
		Body: ioutil.NopCloser(bytes.NewReader([]byte{4, 5, 6})),
	}

	o := transporttest.NewMockUnaryOutbound(mockCtrl)
	o.EXPECT().Call(ctx, req).After(
		o.EXPECT().Call(ctx, req).Return(nil, errors.New("great sadness")),
	).Return(res, nil)

	before := &countFilter{}
	after := &countFilter{}
	gotRes, err := transport.ApplyFilter(
		o, Chain(before, retryFilter, after)).Call(ctx, req)

	assert.NoError(t, err, "expected success")
	assert.Equal(t, 1, before.Count, "expected outer filter to be called once")
	assert.Equal(t, 2, after.Count, "expected inner filter to be called twice")
	assert.Equal(t, res, gotRes, "expected response to match")
}
Example #3
func TestBasicBitswap(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test a one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)
	err := instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
	if err != nil {
		t.Fatal(err)
	}

	t.Log(blk)
	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
Example #4
func TestChain(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := &transport.Request{
		Caller:    "somecaller",
		Service:   "someservice",
		Encoding:  transport.Encoding("raw"),
		Procedure: "hello",
		Body:      bytes.NewReader([]byte{1, 2, 3}),
	}
	resw := new(transporttest.FakeResponseWriter)

	h := transporttest.NewMockUnaryHandler(mockCtrl)
	h.EXPECT().Handle(ctx, req, resw).After(
		h.EXPECT().Handle(ctx, req, resw).Return(errors.New("great sadness")),
	).Return(nil)

	before := &countInterceptor{}
	after := &countInterceptor{}
	err := transport.ApplyInterceptor(
		h, Chain(before, retryInterceptor, after),
	).Handle(ctx, req, resw)

	assert.NoError(t, err, "expected success")
	assert.Equal(t, 1, before.Count, "expected outer interceptor to be called once")
	assert.Equal(t, 2, after.Count, "expected inner interceptor to be called twice")
}
Example #5
File: reg.go Project: google/acme
func runReg(args []string) {
	key, err := anyKey(filepath.Join(configDir, accountKey), regGen)
	if err != nil {
		fatalf("account key: %v", err)
	}
	uc := &userConfig{
		Account: acme.Account{Contact: args},
		key:     key,
	}

	prompt := ttyPrompt
	if regAccept {
		prompt = acme.AcceptTOS
	}
	client := &acme.Client{
		Key:          uc.key,
		DirectoryURL: string(regDisco),
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	a, err := client.Register(ctx, &uc.Account, prompt)
	if err != nil {
		fatalf("%v", err)
	}
	uc.Account = *a
	if err := writeConfig(uc); err != nil {
		errorf("write config: %v", err)
	}
}
Example #6
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
	log.Debug("getting diagnostic")
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	diagID := newID()
	d.diagLock.Lock()
	d.diagMap[diagID] = time.Now()
	d.diagLock.Unlock()

	log.Debug("begin diagnostic")

	peers := d.getPeers()
	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

	pmes := newMessage(diagID)

	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
	dpeers, err := d.getDiagnosticFromPeers(ctx, peers, pmes)
	if err != nil {
		return nil, fmt.Errorf("diagnostic from peers err: %s", err)
	}

	di := d.getDiagInfo()
	out := []*DiagInfo{di}
	for dpi := range dpeers {
		out = append(out, dpi)
	}
	return out, nil
}
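The snippet above forwards `timeout - HopTimeoutDecrement` so each hop works with a slightly smaller budget. A minimal sketch of an alternative, not taken from the project: deriving the per-hop budget from the context's own deadline, so a nearly expired request never advertises more time than it actually has left.

// hopTimeout is a hypothetical helper: it caps a per-hop timeout at whatever
// time remains before the context's deadline, falling back to the given
// default when no deadline is set.
func hopTimeout(ctx context.Context, fallback time.Duration) time.Duration {
	if deadline, ok := ctx.Deadline(); ok {
		if remaining := time.Until(deadline); remaining < fallback {
			return remaining
		}
	}
	return fallback
}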
Example #7
File: base.go Project: vmware/vic
func (c *containerBase) start(ctx context.Context) error {
	// make sure we have vm
	if c.vm == nil {
		return NotYetExistError{c.ExecConfig.ID}
	}

	// Power on
	_, err := c.vm.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) {
		return c.vm.PowerOn(ctx)
	})
	if err != nil {
		return err
	}

	// guestinfo key that we want to wait for
	key := fmt.Sprintf("guestinfo.vice..sessions|%s.started", c.ExecConfig.ID)
	var detail string

	// Wait some before giving up...
	ctx, cancel := context.WithTimeout(ctx, propertyCollectorTimeout)
	defer cancel()

	detail, err = c.vm.WaitForKeyInExtraConfig(ctx, key)
	if err != nil {
		return fmt.Errorf("unable to wait for process launch status: %s", err.Error())
	}

	if detail != "true" {
		return errors.New(detail)
	}

	return nil
}
Example #8
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {

	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()

	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()

	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to query influx db: %v", err)
		}
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)

	if err != nil {
		return nil, err
	}
	return &response, nil
}
Example #9
func (s *Supervised) call(f func(Client) error) error {
	c, err := s.dcf()
	if err != nil {
		return err
	}

	ctx := c.Context()
	if err = f(c); err != ErrDisconnected {
		return err
	}

	// Wait for new client.
	ctx, cancel := context.WithTimeout(ctx, s.timeout)
	defer cancel()

	if <-ctx.Done(); ctx.Err() == context.DeadlineExceeded {
		// Client is still disconnected. Return it as is.
		return ErrDisconnected
	}

	// Previous context was canceled. This means that the client changed.
	c, err = s.dcf()
	if err != nil {
		return err
	}

	return f(c)
}
Example #10
func TestRecording(t *testing.T) {
	tMock := testingTMock{t, 0}

	dir, err := ioutil.TempDir("", "yarpcgorecorder")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir) // clean up

	recorder := NewRecorder(&tMock, RecordMode(Append), RecordsPath(dir))

	withConnectedClient(t, recorder, func(client raw.Client) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()

		rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
		require.NoError(t, err)
		assert.Equal(t, []byte("Hello, World"), rbody)
	})

	recordPath := path.Join(dir, refRecordFilename)
	_, err = os.Stat(recordPath)
	require.NoError(t, err)

	recordContent, err := ioutil.ReadFile(recordPath)
	require.NoError(t, err)
	assert.Equal(t, refRecordContent, string(recordContent))
}
Example #11
func runUpdate(args []string) {
	uc, err := readConfig()
	if err != nil {
		fatalf("read config: %v", err)
	}
	if uc.key == nil {
		fatalf("no key found for %s", uc.URI)
	}

	client := acme.Client{Key: uc.key}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if updateAccept {
		a, err := client.GetReg(ctx, uc.URI)
		if err != nil {
			fatalf(err.Error())
		}
		uc.Account = *a
		uc.AgreedTerms = a.CurrentTerms
	}
	if len(args) != 0 {
		uc.Contact = args
	}

	a, err := client.UpdateReg(ctx, &uc.Account)
	if err != nil {
		fatalf(err.Error())
	}
	uc.Account = *a
	if err := writeConfig(uc); err != nil {
		fatalf("write config: %v", err)
	}
	printAccount(os.Stdout, &uc.Account, filepath.Join(configDir, accountKey))
}
Example #12
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the list of nodes forming the path, starting with ndd. This list is
// guaranteed never to be empty.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd node.Node, names []string) ([]node.Node, error) {

	result := make([]node.Node, 0, len(names)+1)
	result = append(result, ndd)
	nd := ndd // dup arg workaround

	// for each of the path components
	for len(names) > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Minute)
		defer cancel()

		lnk, rest, err := nd.ResolveLink(names)
		if err == dag.ErrLinkNotFound {
			return result, ErrNoLink{Name: names[0], Node: nd.Cid()}
		} else if err != nil {
			return result, err
		}

		nextnode, err := lnk.GetNode(ctx, s.DAG)
		if err != nil {
			return result, err
		}

		nd = nextnode
		result = append(result, nextnode)
		names = rest
	}
	return result, nil
}
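Note that the loop above calls defer cancel() on every iteration, so all of those cancel functions (and their timers) stay alive until ResolveLinks returns. A minimal sketch, assuming the same node and DAG-service types the snippet already uses, of scoping the per-step timeout in a helper so each cancel runs as soon as its step finishes:

// resolveStep is a hypothetical helper: it resolves one path component under
// its own one-minute timeout and releases that timeout immediately on return.
func resolveStep(ctx context.Context, nd node.Node, names []string, dserv dag.DAGService) (node.Node, []string, error) {
	stepCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel() // released at the end of this step, not at the end of the walk

	lnk, rest, err := nd.ResolveLink(names)
	if err != nil {
		return nil, nil, err
	}
	next, err := lnk.GetNode(stepCtx, dserv)
	return next, rest, err
}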
Example #13
// handleError makes the request to the StackDriver Error Reporting API
func handleError(errorsClient *errors.Client, d device) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	log.Printf("Sending report for %s (%s)", d.Name, d.Id)
	errorsClient.Reportf(ctx, nil, "Device is offline: %s (%s)", d.Name, d.Id)
}
Example #14
// ParseTTL takes a context and parses the given TTL, clamping the context to
// that TTL; as a side effect, it tracks any errors encountered while
// attempting to parse and validate that TTL. It should only be used for
// unary requests.
func (v *Validator) ParseTTL(ctx context.Context, ttl string) (context.Context, func()) {
	if ttl == "" {
		// The TTL is missing so set it to 0 and let Validate() fail with the
		// correct error message.
		return ctx, func() {}
	}

	ttlms, err := strconv.Atoi(ttl)
	if err != nil {
		v.errTTL = invalidTTLError{
			Service:   v.Request.Service,
			Procedure: v.Request.Procedure,
			TTL:       ttl,
		}
		return ctx, func() {}
	}
	// negative TTLs are invalid
	if ttlms < 0 {
		v.errTTL = invalidTTLError{
			Service:   v.Request.Service,
			Procedure: v.Request.Procedure,
			TTL:       fmt.Sprint(ttlms),
		}
		return ctx, func() {}
	}

	return context.WithTimeout(ctx, time.Duration(ttlms)*time.Millisecond)
}
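Because ParseTTL returns a no-op cancel function on every error path, a caller can defer the cancel unconditionally and rely on the later Validate() call to report the bad TTL. A hypothetical caller sketch, not part of the original package:

// handleWithTTL is a hypothetical caller: it clamps ctx with the request's
// TTL header, always releases the timer, and hands the clamped context to an
// arbitrary ctx-aware dispatch function.
func handleWithTTL(ctx context.Context, v *Validator, ttlHeader string, dispatch func(context.Context) error) error {
	ctx, cancel := v.ParseTTL(ctx, ttlHeader)
	defer cancel()
	return dispatch(ctx)
}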
Example #15
func main() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.

	ctx, cancel := context.WithTimeout(context.Background(), 3000*time.Millisecond)

	ctx = context.WithValue(ctx, "a", 42)

	go t(ctx)
	// time.Sleep(time.Millisecond * 200)
	// cancel()
	select {
	case <-time.After(4 * time.Second):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}

	// Even though ctx should have expired already, it is good
	// practice to call its cancelation function in any case.
	// Failure to do so may keep the context and its parent alive
	// longer than necessary.
	fmt.Println(ctx.Value("a"))
	cancel()

}
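The example stores its value under the plain string key "a"; the context package documentation recommends an unexported key type instead, so keys from different packages cannot collide. A minimal sketch of that convention:

// ctxKey is an unexported key type: values stored with it can only be read
// back through this package's helpers, so other packages cannot clash with it.
type ctxKey string

const answerKey ctxKey = "a"

func withAnswer(ctx context.Context, v int) context.Context {
	return context.WithValue(ctx, answerKey, v)
}

func answerFrom(ctx context.Context) (int, bool) {
	v, ok := ctx.Value(answerKey).(int)
	return v, ok
}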
Example #16
func TestNopFilter(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	o := transporttest.NewMockUnaryOutbound(mockCtrl)
	wrappedO := transport.ApplyFilter(o, transport.NopFilter)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := &transport.Request{
		Caller:    "somecaller",
		Service:   "someservice",
		Encoding:  raw.Encoding,
		Procedure: "hello",
		Body:      bytes.NewReader([]byte{1, 2, 3}),
	}

	res := &transport.Response{Body: ioutil.NopCloser(bytes.NewReader([]byte{4, 5, 6}))}
	o.EXPECT().Call(ctx, req).Return(res, nil)

	got, err := wrappedO.Call(ctx, req)
	if assert.NoError(t, err) {
		assert.Equal(t, res, got)
	}
}
Example #17
func main() {
	configFile := flag.String("config-file", "", "Config file for RETS connection")
	metadataFile := flag.String("metadata-options", "", "Config file for metadata options")
	output := flag.String("output", "", "Directory for file output")

	config := common.Config{}
	config.SetFlags()

	metadataOpts := MetadataOptions{}
	metadataOpts.SetFlags()

	flag.Parse()

	if *configFile != "" {
		err := config.LoadFrom(*configFile)
		if err != nil {
			panic(err)
		}
	}
	fmt.Printf("Connection Settings: %v\n", config)
	if *metadataFile != "" {
		err := metadataOpts.LoadFrom(*metadataFile)
		if err != nil {
			panic(err)
		}
	}
	fmt.Printf("Search Options: %v\n", metadataOpts)

	// should we throw an err here too?
	session, err := config.Initialize()
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	capability, err := rets.Login(session, ctx, rets.LoginRequest{URL: config.URL})
	if err != nil {
		panic(err)
	}
	defer rets.Logout(session, ctx, rets.LogoutRequest{URL: capability.Logout})

	reader, err := rets.MetadataStream(session, ctx, rets.MetadataRequest{
		URL:    capability.GetMetadata,
		Format: metadataOpts.Format,
		MType:  metadataOpts.MType,
		ID:     metadataOpts.ID,
	})
	if err != nil {
		panic(err)
	}
	defer reader.Close()
	out := os.Stdout
	if *output != "" {
		out, err = os.Create(*output + "/metadata.xml")
		if err != nil {
			panic(err)
		}
		defer out.Close()
	}
	if _, err := io.Copy(out, reader); err != nil {
		panic(err)
	}
}
Example #18
// pipeStream relays over a stream to a remote peer. It's like `cat`
func (rs *RelayService) pipeStream(src, dst peer.ID, s inet.Stream) error {
	// TODO: find a good way to pass contexts into here
	nsctx, cancel := context.WithTimeout(context.TODO(), time.Second*30)
	defer cancel()

	s2, err := rs.openStreamToPeer(nsctx, dst)
	if err != nil {
		return fmt.Errorf("failed to open stream to peer: %s -- %s", dst, err)
	}
	cancel() // cancel here because this function might last a while

	if err := WriteHeader(s2, src, dst); err != nil {
		return err
	}

	// connect the series of tubes.
	done := make(chan retio, 2)
	go func() {
		n, err := io.Copy(s2, s)
		done <- retio{n, err}
	}()
	go func() {
		n, err := io.Copy(s, s2)
		done <- retio{n, err}
	}()

	r1 := <-done
	r2 := <-done
	log.Infof("%s relayed %d/%d bytes between %s and %s", rs.host.ID(), r1.n, r2.n, src, dst)

	if r1.err != nil {
		return r1.err
	}
	return r2.err
}
Example #19
func TestPutManyAddsToBloom(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	cachedbs, err := testBloomCached(bs, ctx)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout wating for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	block1 := blocks.NewBlock([]byte("foo"))
	block2 := blocks.NewBlock([]byte("bar"))

	if err := cachedbs.PutMany([]blocks.Block{block1}); err != nil {
		t.Fatal(err)
	}
	has, err := cachedbs.Has(block1.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if has == false {
		t.Fatal("added block is reported missing")
	}

	has, err = cachedbs.Has(block2.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if has == true {
		t.Fatal("not added block is reported to be in blockstore")
	}
}
Example #20
func (client *Client) ping(ws *websocket.Conn, addr string) {
	log.WithField("port", client.Port).Infoln("DailTLS ok: " + addr)
	info, err := client.getServerInfo()
	if err != nil {
		log.WithField("port", client.Port).Errorln("getServerInfo", err)
		return
	}

	ticker := time.NewTicker(info.PingSecond * time.Second)
	defer func() {
		ticker.Stop()
		ws.Close()
		log.WithField("port", client.Port).Infoln("Ws closed")
	}()
	log.WithField("port", client.Port).Infoln("Ws started")

	req := client.innerRequest("HEAD", HOST_OK)

	for {
		select {
		case <-ticker.C:
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			res, err := client.h2Transport.RoundTrip(req.WithContext(ctx))
			if err != nil || res.StatusCode != http.StatusOK {
				cancel()
				return
			}
			cancel()
		}
	}
}
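Each tick above has to remember to call cancel on both the error and the success path. A small sketch, using a hypothetical pingOnce helper, of scoping the per-tick timeout in its own function so a single defer covers every return:

// pingOnce is a hypothetical helper: one round trip under a 30-second timeout,
// with cancel (and the response body) released by defer on every path.
func (client *Client) pingOnce(req *http.Request) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	res, err := client.h2Transport.RoundTrip(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", res.Status)
	}
	return nil
}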
Example #21
// readTransactions reads the number of transactions from the k8petstore web server endpoint.
// For more details, see the source of the k8petstore web server.
func readTransactions(c clientset.Interface, ns string) (error, int) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
	if errProxy != nil {
		return errProxy, -1
	}

	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()

	body, err := proxyRequest.Namespace(ns).
		Context(ctx).
		Name("frontend").
		Suffix("llen").
		DoRaw()
	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to read petstore transactions: %v", err)
		}
		return err, -1
	}

	totalTrans, err := strconv.Atoi(string(body))
	return err, totalTrans

}
Example #22
func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(4)
	for _, s := range servs {
		defer s.Close()
	}
	objs := makeObjects(50)

	var cids []*cid.Cid
	for _, o := range objs {
		cids = append(cids, o.Cid())
		servs[0].AddBlock(o)
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, cids)
		gotten := make(map[string]blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Cid().KeyString()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Cid().KeyString()] = blk
		}
		if len(gotten) != len(objs) {
			t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(objs))
		}
	}
}
Example #23
File: cal2.go Project: elos/elos
func (c *Cal2Command) runGoogle(args []string) int {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	config, err := google.ConfigFromJSON([]byte(clientSecret), calendar.CalendarScope)
	if err != nil {
		c.UI.Error(fmt.Sprintf("unable to parse client secrete file to config: %v", err))
		return failure
	}

	u, err := stringInput(c.UI, "Username:")
	// NOTE: the source listing redacts the code between the username prompt and
	// this error check; the elided portion consumes u, prompts for credentials,
	// and builds srv, the authenticated calendar service used below.
	if err != nil {
		c.UI.Error(fmt.Sprintf("unable to retrieve calendar client %v", err))
		return failure
	}
	events, err := srv.Events.List("primary").
		ShowDeleted(false).
		SingleEvents(true).
		TimeMin(time.Now().AddDate(0, -1, 0).Format(time.RFC3339)).
		OrderBy("startTime").Do()
	if err != nil {
		c.UI.Error(fmt.Sprintf("unable to retrieve user events: $v", err))
		return failure
	}

	n := 0
	recurring := map[string]bool{}
	for _, e := range events.Items {
		if e.RecurringEventId != "" {
			recurring[e.RecurringEventId] = true
			continue // don't ingest recurring instances
		}
		c.UI.Output(fmt.Sprintf("Processing: %v", e.Summary))
		_, err := ingestEvent(ctx, c.DBClient, c.UserID, e)
		if err != nil {
			c.UI.Error(err.Error())
			return failure
		}
		n++
	}
	for id := range recurring {
		e, err := srv.Events.Get("primary", id).Do()
		if err != nil {
			c.UI.Error(err.Error())
			return failure
		}
		_, err = ingestEvent(ctx, c.DBClient, c.UserID, e)
		if err != nil {
			c.UI.Error(err.Error())
			return failure
		}
	}
	return success
}
Example #24
func TestOutboundHeaders(t *testing.T) {
	tests := []struct {
		desc    string
		context context.Context
		headers transport.Headers

		wantHeaders map[string]string
	}{
		{
			desc:    "application headers",
			headers: transport.NewHeaders().With("foo", "bar").With("baz", "Qux"),
			wantHeaders: map[string]string{
				"Rpc-Header-Foo": "bar",
				"Rpc-Header-Baz": "Qux",
			},
		},
	}

	for _, tt := range tests {
		server := httptest.NewServer(http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				defer r.Body.Close()
				for k, v := range tt.wantHeaders {
					assert.Equal(
						t, v, r.Header.Get(k), "%v: header %v did not match", tt.desc, k)
				}
			},
		))
		defer server.Close()

		ctx := tt.context
		if ctx == nil {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(context.Background(), time.Second)
			defer cancel()
		}

		out := NewOutbound(server.URL)
		require.NoError(t, out.Start(transport.NoDeps), "failed to start outbound")
		defer out.Stop()

		res, err := out.Call(ctx, &transport.Request{
			Caller:    "caller",
			Service:   "service",
			Encoding:  raw.Encoding,
			Headers:   tt.headers,
			Procedure: "hello",
			Body:      bytes.NewReader([]byte("world")),
		})

		if !assert.NoError(t, err, "%v: call failed", tt.desc) {
			continue
		}

		if !assert.NoError(t, res.Body.Close(), "%v: failed to close response body", tt.desc) {
			continue
		}
	}
}
Example #25
// LoadPinner loads a pinner and its keysets from the given datastore
func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) {
	p := new(pinner)

	rootKeyI, err := d.Get(pinDatastoreKey)
	if err != nil {
		return nil, fmt.Errorf("cannot load pin state: %v", err)
	}
	rootKeyBytes, ok := rootKeyI.([]byte)
	if !ok {
		return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey)
	}

	rootCid, err := cid.Cast(rootKeyBytes)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	root, err := internal.Get(ctx, rootCid)
	if err != nil {
		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
	}

	rootpb, ok := root.(*mdag.ProtoNode)
	if !ok {
		return nil, mdag.ErrNotProtobuf
	}

	internalset := cid.NewSet()
	internalset.Add(rootCid)
	recordInternal := internalset.Add

	{ // load recursive set
		recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
		}
		p.recursePin = cidSetWithValues(recurseKeys)
	}

	{ // load direct set
		directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load direct pins: %v", err)
		}
		p.directPin = cidSetWithValues(directKeys)
	}

	p.internalPin = internalset

	// assign services
	p.dserv = dserv
	p.dstore = d
	p.internal = internal

	return p, nil
}
Example #26
// ClusterLevelLoggingWithKibana is an end to end test that checks to see if Kibana is alive.
func ClusterLevelLoggingWithKibana(f *framework.Framework) {
	// graceTime is how long to keep retrying requests for status information.
	const graceTime = 20 * time.Minute

	// Check for the existence of the Kibana service.
	By("Checking the Kibana service exists.")
	s := f.ClientSet.Core().Services(api.NamespaceSystem)
	// Make a few attempts to connect. This makes the test robust against
	// being run as the first e2e test just after the e2e cluster has been created.
	var err error
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		if _, err = s.Get("kibana-logging", metav1.GetOptions{}); err == nil {
			break
		}
		framework.Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
	}
	Expect(err).NotTo(HaveOccurred())

	// Wait for the Kibana pod(s) to enter the running state.
	By("Checking to make sure the Kibana pods are running")
	label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
	options := v1.ListOptions{LabelSelector: label.String()}
	pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
	Expect(err).NotTo(HaveOccurred())
	for _, pod := range pods.Items {
		err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Checking to make sure we get a response from the Kibana UI.")
	err = nil
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
		if errProxy != nil {
			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
			err = errProxy
			continue
		}

		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()

		// Query against the root URL for Kibana.
		_, err = proxyRequest.Namespace(api.NamespaceSystem).
			Context(ctx).
			Name("kibana-logging").
			DoRaw()
		if err != nil {
			if ctx.Err() != nil {
				framework.Failf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
				break
			}
			framework.Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
			continue
		}
		break
	}
	Expect(err).NotTo(HaveOccurred())
}
Example #27
// TODO(btc): break this apart into separate handlers using a more expressive muxer
func (i *gatewayHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(i.node.Context(), time.Hour)
	// the hour is a hard fallback, we don't expect it to happen, but just in case
	defer cancel()

	if cn, ok := w.(http.CloseNotifier); ok {
		clientGone := cn.CloseNotify()
		go func() {
			select {
			case <-clientGone:
			case <-ctx.Done():
			}
			cancel()
		}()
	}

	defer func() {
		if r := recover(); r != nil {
			log.Error("A panic occurred in the gateway handler!")
			log.Error(r)
			debug.PrintStack()
		}
	}()

	if i.config.Writable {
		switch r.Method {
		case "POST":
			i.postHandler(ctx, w, r)
			return
		case "PUT":
			i.putHandler(w, r)
			return
		case "DELETE":
			i.deleteHandler(w, r)
			return
		}
	}

	if r.Method == "GET" || r.Method == "HEAD" {
		i.getOrHeadHandler(ctx, w, r)
		return
	}

	if r.Method == "OPTIONS" {
		i.optionsHandler(w, r)
		return
	}

	errmsg := "Method " + r.Method + " not allowed: "
	if !i.config.Writable {
		w.WriteHeader(http.StatusMethodNotAllowed)
		errmsg = errmsg + "read only access"
	} else {
		w.WriteHeader(http.StatusBadRequest)
		errmsg = errmsg + "bad request for " + r.URL.Path
	}
	fmt.Fprint(w, errmsg)
	log.Error(errmsg) // TODO(cryptix): log errors until we have a better way to expose these (counter metrics maybe)
}
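The CloseNotifier goroutine above exists to cancel the context when the client goes away. Since Go 1.8, r.Context() is already canceled by the server when the connection closes, so a handler written today could derive its timeout directly from it. A minimal standalone sketch (hypothetical handler, not the gateway's API):

// handle is a hypothetical handler: its context expires after an hour or as
// soon as the client disconnects, with no CloseNotifier plumbing.
func handle(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(r.Context(), time.Hour)
	defer cancel()

	result, err := doWork(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusGatewayTimeout)
		return
	}
	fmt.Fprint(w, result)
}

// doWork stands in for the real request handling; it only watches ctx.
func doWork(ctx context.Context) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond):
		return "ok", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}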
Example #28
func runYARPCClient(b *testing.B, c raw.Client) {
	for i := 0; i < b.N; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()
		_, _, err := c.Call(ctx, yarpc.NewReqMeta().Procedure("echo"), _reqBody)
		require.NoError(b, err, "request %d failed", i+1)
	}
}
Example #29
File: core.go Project: qnib/go-ipfs
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
Example #30
func TestEndToEnd(t *testing.T) {
	tMock := testingTMock{t, 0}

	dir, err := ioutil.TempDir("", "yarpcgorecorder")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir) // clean up

	// First we double check that our cache is empty.
	recorder := NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))

	withDisconnectedClient(t, recorder, func(client raw.Client) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()

		require.Panics(t, func() {
			client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
		})
		assert.Equal(t, tMock.fatalCount, 1)
	})

	// Now let's record our call.
	recorder = NewRecorder(&tMock, RecordMode(Overwrite), RecordsPath(dir))

	withConnectedClient(t, recorder, func(client raw.Client) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()

		rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
		require.NoError(t, err)
		assert.Equal(t, rbody, []byte("Hello, World"))
	})

	// Now replay the call.
	recorder = NewRecorder(&tMock, RecordMode(Replay), RecordsPath(dir))

	withDisconnectedClient(t, recorder, func(client raw.Client) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()

		rbody, _, err := client.Call(ctx, yarpc.NewReqMeta().Procedure("hello"), []byte("Hello"))
		require.NoError(t, err)
		assert.Equal(t, rbody, []byte("Hello, World"))
	})
}