Example #1
// WithDeadline is a clock library implementation of context.WithDeadline that
// uses the clock library's time features instead of the Go time library.
//
// For more information, see context.WithDeadline.
func WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
		// The current deadline is already sooner than the new one.
		return context.WithCancel(parent)
	}

	parent, cancelFunc := context.WithCancel(parent)
	c := &clockContext{
		Context:  parent,
		deadline: deadline,
	}

	d := deadline.Sub(Now(c))
	if d <= 0 {
		// Deadline has already passed.
		c.setError(context.DeadlineExceeded)
		cancelFunc()
		return c, cancelFunc
	}

	// Invoke our cancelFunc after the specified time.
	go func() {
		select {
		case <-c.Done():
			break

		case <-After(c, d):
			c.setError(context.DeadlineExceeded)
			cancelFunc()
		}
	}()
	return c, cancelFunc
}
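This mirrors the contract of the standard library's context.WithDeadline: the returned context is cancelled either explicitly or once the deadline passes, and ctx.Err() then reports context.DeadlineExceeded. A minimal, self-contained sketch of that contract using only the standard context package (the clock library's import path is not shown above, so plain context.WithDeadline stands in for it here):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Deadline 50ms from now; the duration is an arbitrary illustration.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond))
	defer cancel() // release the deadline timer even if we return early

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context.DeadlineExceeded
}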
Example #2
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
			}
		}()
	}
	wg.Wait()
	return loopErr
}
Example #3
func (r *runner) run() {
	ctx, cancel := context.WithCancel(context.Background())
STOP:
	for {
		select {
		case <-r.done:
			cancel()
			break STOP
		case serv := <-r.add:
			go func(c context.Context, s core.Service) {
				serviceUUID := util.GenUUID()

				ctxWith, servCancel := context.WithCancel(c)

				err := s.Init(ctxWith, serviceUUID)
				if err != nil {
					//TODO handle?
				}
				info := s.Info()
				serviceName := info.Name() + "." + fmt.Sprint(info.Status()) + "." + info.ID()
				go r.monitorService(ctxWith, servCancel, s)
				r.running.add(s.Info())
				registry.Register(serviceName, s)
			}(ctx, serv)
		}
	}
}
Example #4
func main() {
	flag.Parse()
	c := func() {}

	// read keywords from stdin
	ctx, cancel := context.WithCancel(gx)
	keyc, errc := stdinStage(ctx)
	c = errHanler(c, cancel, errc, "read keywords")

	// obtain links from Google, just one thread
	ctx, cancel = context.WithCancel(gx)
	serpc, errc := serpStage(ctx, keyc)
	c = errHanler(c, cancel, errc, "get links")

	// create shop
	shopc, c := create(serpc, c)

	// check whether each grabbed link is really a shop
	checkedc, c := stage(checkStage, shopc, c, "check shop")

	// gather emails from site
	gatheredc, _ := stage(emailStage, checkedc, c, "gather email")
	//stage(emailStage, checkedc, c, "gather email")

	for range gatheredc {
	}
}
Example #5
func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) {
	// ch1 is buffered so server can send error on close
	ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
	headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)

	cctx, ccancel := context.WithCancel(ctx)
	cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
	wclient := &ws2wcClientStream{chanClientStream{headerc, trailerc, cli}}

	sctx, scancel := context.WithCancel(ctx)
	srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
	wserver := &ws2wcServerStream{chanServerStream{headerc, trailerc, srv, nil}}
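	// Run the wrapped Watch server in the background. Any error it returns is
	// relayed to the client stream (srv.sendc feeds cli.recvc), and both halves
	// are shut down by cancelling their contexts.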
	go func() {
		if err := s.wserv.Watch(wserver); err != nil {
			select {
			case srv.sendc <- err:
			case <-sctx.Done():
			case <-cctx.Done():
			}
		}
		scancel()
		ccancel()
	}()
	return wclient, nil
}
Example #6
func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	p, q := pipetransport.New()
	if *logMessages {
		p = logtransport.New(nil, p)
	}
	c := rpc.NewConn(p)
	notify := make(chan struct{})
	hanger := testcapnp.Hanger_ServerToClient(Hanger{notify: notify})
	d := rpc.NewConn(q, rpc.MainInterface(hanger.Client))
	defer d.Wait()
	defer c.Close()
	client := testcapnp.Hanger{Client: c.Bootstrap(ctx)}

	subctx, subcancel := context.WithCancel(ctx)
	promise := client.Hang(subctx, func(r testcapnp.Hanger_hang_Params) error { return nil })
	<-notify
	subcancel()
	_, err := promise.Struct()
	<-notify // test will deadlock if cancel not delivered

	if err != context.Canceled {
		t.Errorf("promise.Struct() error: %v; want %v", err, context.Canceled)
	}
}
Example #7
// Start starts the application
func (sys *System) Start() {

	sys.parentContext = context.Background()
	sys.parentContext, sys.parentCancel = context.WithCancel(sys.parentContext)

	sys.ctx, sys.cancelFn = context.WithCancel(sys.parentContext)

	log.Log(sys.ctx).Debug("Starting system", "name", sys.name)
}
Example #8
// StartWithParent starts the application with the parent sys as the context
func (sys *System) StartWithParent(parent *System) {

	sys.parentContext = context.Background()
	sys.parentContext, sys.parentCancel = context.WithCancel(sys.parentContext)

	sys.ctx, sys.cancelFn = context.WithCancel(parent.ctx)

	log.Log(sys.ctx).Debug("Starting system with parent system", "name", sys.name, "parent", parent.name)
}
Example #9
// StartWithContext starts the application
func (sys *System) StartWithContext(ctx context.Context) {

	sys.parentContext = ctx
	sys.parentContext, sys.parentCancel = context.WithCancel(sys.parentContext)

	sys.ctx, sys.cancelFn = context.WithCancel(sys.parentContext)

	log.Log(sys.ctx).Debug("Starting system", "system", sys.name)
}
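All three Start variants derive sys.ctx from a parent context, so cancelling the parent (via sys.parentCancel or an externally supplied parent) also cancels sys.ctx. A minimal, self-contained sketch of that propagation, using only the standard context package rather than the System type above:

package main

import (
	"context"
	"fmt"
)

func main() {
	parent, parentCancel := context.WithCancel(context.Background())
	child, childCancel := context.WithCancel(parent)
	defer childCancel()

	parentCancel() // cancelling the parent also cancels every derived child
	<-child.Done()
	fmt.Println(child.Err()) // context.Canceled
}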
Example #10
func (ls *leaseStresser) keepLeaseAlive(leaseID int64) {
	defer ls.aliveWg.Done()
	ctx, cancel := context.WithCancel(ls.ctx)
	stream, err := ls.lc.LeaseKeepAlive(ctx)
	defer func() { cancel() }()
	for {
		select {
		case <-time.After(500 * time.Millisecond):
		case <-ls.ctx.Done():
			plog.Debugf("keepLeaseAlive lease %v context canceled ", leaseID)
			// it is possible that the lease expires at the invariant-checking phase but not at the keepLeaseAlive() phase.
			// this scenario happens when an alive lease is just about to expire as keepLeaseAlive() exits and then expires during invariant checking.
			// to circumvent that scenario, we check each lease before the keepalive loop exits to see if it has been renewed in the last TTL/2 duration.
			// if it has been renewed, invariant checking has at least TTL/2 before the lease expires, which is long enough for the checking to finish.
			// if it has not been renewed, we remove the lease from the alive map so that the lease doesn't expire during invariant checking.
			renewTime, ok := ls.aliveLeases.read(leaseID)
			if ok && renewTime.Add(TTL/2*time.Second).Before(time.Now()) {
				ls.aliveLeases.remove(leaseID)
				plog.Debugf("keepLeaseAlive lease %v has not been renewed. drop it.", leaseID)
			}
			return
		}

		if err != nil {
			plog.Debugf("keepLeaseAlive lease %v creates stream error: (%v)", leaseID, err)
			cancel()
			ctx, cancel = context.WithCancel(ls.ctx)
			stream, err = ls.lc.LeaseKeepAlive(ctx)
			continue
		}
		err = stream.Send(&pb.LeaseKeepAliveRequest{ID: leaseID})
		plog.Debugf("keepLeaseAlive stream sends lease %v keepalive request", leaseID)
		if err != nil {
			plog.Debugf("keepLeaseAlive stream sends lease %v error (%v)", leaseID, err)
			continue
		}
		leaseRenewTime := time.Now()
		plog.Debugf("keepLeaseAlive stream sends lease %v keepalive request succeed", leaseID)
		respRC, err := stream.Recv()
		if err != nil {
			plog.Debugf("keepLeaseAlive stream receives lease %v stream error (%v)", leaseID, err)
			continue
		}
		// lease expires after TTL become 0
		// don't send keepalive if the lease has expired
		if respRC.TTL <= 0 {
			plog.Debugf("keepLeaseAlive stream receives lease %v has TTL <= 0", leaseID)
			ls.aliveLeases.remove(leaseID)
			return
		}
		// renew lease timestamp only if lease is present
		plog.Debugf("keepLeaseAlive renew lease %v", leaseID)
		ls.aliveLeases.update(leaseID, leaseRenewTime)
	}
}
Example #11
func TestInterrupt(t *testing.T) {
	os, err := NewOutputStream("")
	if err != nil {
		t.Fatal(err)
	}

	go func() {
		for range time.NewTicker(1 * time.Millisecond).C {
			os.InterruptGetNext()
		}
	}()

	addEmptyMsg(os, 1, 1)

	msgs := os.GetNext(context.TODO(), types.RobustId{})
	if want := (types.RobustId{Id: 1, Reply: 1}); msgs[0].Id != want {
		t.Fatalf("got %v, want %v", msgs[0].Id, want)
	}

	ctx1, cancel1 := context.WithCancel(context.Background())
	ctx2, _ := context.WithCancel(context.Background())

	unblocked1 := make(chan bool)
	unblocked2 := make(chan bool)

	go func() {
		msgs = os.GetNext(ctx1, msgs[0].Id)
		unblocked1 <- true
	}()

	go func() {
		msgs = os.GetNext(ctx2, msgs[0].Id)
		unblocked2 <- true
	}()

	time.Sleep(1 * time.Millisecond)
	select {
	case <-unblocked1:
		t.Fatalf("GetNext() returned before cancelled is true")
	default:
	}
	cancel1()
	select {
	case <-unblocked1:
	case <-time.After(1 * time.Second):
		t.Fatalf("GetNext() did not return after setting cancelled to true")
	}

	select {
	case <-unblocked2:
		t.Fatalf("Second GetNext() returned before cancelled is true")
	default:
	}
}
Example #12
// parent, sn can be nil.
// If sn is not nil, it will be called when the working loop exits.
func (p *Pumper) Start(parent context.Context, sn StopNotifier) {
	if parent == nil {
		parent = context.Background()
	}

	var ctx context.Context
	ctx, p.quitF = context.WithCancel(parent)

	rwctx, rwqF := context.WithCancel(context.Background())
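	// rwctx is not derived from parent; it is cancelled only via rwqF,
	// which is handed to p.work below.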
	go p.reading(rwctx)
	go p.writing(rwctx)
	go p.work(ctx, rwqF, sn)
}
Example #13
func Server(urlServer string, urlPubSub string, opt Options) (*DiscoveryServer, error) {
	var sock mangos.Socket
	var err error
	var publisher *Publisher

	ctx, cancel := context.WithCancel(context.Background())

	sock, err = surveyor.NewSocket()
	if err != nil {
		cancel()
		return nil, err
	}

	sock.AddTransport(ipc.NewTransport())
	sock.AddTransport(tcp.NewTransport())

	err = sock.Listen(urlServer)
	if err != nil {
		cancel()
		return nil, err
	}
	err = sock.SetOption(mangos.OptionSurveyTime, opt.SurveyTime)
	if err != nil {
		cancel()
		return nil, err
	}
	err = sock.SetOption(mangos.OptionRecvDeadline, opt.RecvDeadline)
	if err != nil {
		cancel()
		return nil, err
	}

	pubCtx, pubCancel := context.WithCancel(ctx)
	publisher, err = NewPublisher(pubCtx, urlPubSub)
	if err != nil {
		pubCancel()
		cancel()
		return nil, err
	}
	services := NewServices(publisher)

	server := &DiscoveryServer{
		services: services,

		urlServer: urlServer,
		urlPubSub: urlPubSub,
		opt:       opt,

		ctx:    ctx,
		cancel: cancel,
		sock:   sock,
	}

	go server.run()
	return server, nil
}
Example #14
// RegisterEchoServiceHandler registers the http handlers for service EchoService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterEchoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	client := NewEchoServiceClient(conn)

	mux.Handle("POST", pattern_EchoService_Echo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		resp, md, err := request_EchoService_Echo_0(runtime.AnnotateContext(ctx, req), client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, w, req, err)
			return
		}

		forward_EchoService_Echo_0(ctx, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_EchoService_EchoBody_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		if cn, ok := w.(http.CloseNotifier); ok {
			go func(done <-chan struct{}, closed <-chan bool) {
				select {
				case <-done:
				case <-closed:
					cancel()
				}
			}(ctx.Done(), cn.CloseNotify())
		}
		resp, md, err := request_EchoService_EchoBody_0(runtime.AnnotateContext(ctx, req), client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, w, req, err)
			return
		}

		forward_EchoService_EchoBody_0(ctx, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	return nil
}
Example #15
// parent, sn can be nil.
// If sn is not nil, it will be called when the working loop exits.
func (p *Pumper) Start(parent context.Context, sn StopNotifier) {
	if parent == nil {
		parent = context.Background()
	}

	var ctx context.Context
	ctx, p.quitF = context.WithCancel(parent)

	var rwctx context.Context
	var rwqF context.CancelFunc
	rwctx, rwqF = context.WithCancel(context.Background())
	go p.work(ctx, rwqF, sn)
	go p.bgRead(rwctx)
	go p.bgWrite(rwctx)
}
Example #16
func TestCancelledUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := uploadDescriptors(nil)
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}

	close(progressChan)
	<-progressDone
}
Example #17
// DeleteVolume deletes a volume given volume name.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	// Fetch and set data center
	dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
	if err != nil {
		return err
	}
	f.SetDatacenter(dc)

	// Create a virtual disk manager
	virtualDiskManager := object.NewVirtualDiskManager(c.Client)

	// Delete virtual disk
	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
	if err != nil {
		return err
	}

	return task.Wait(ctx)
}
Example #18
func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	donechan := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.Do(ctx, &fakeAction{})
		close(donechan)
	}()

	// This should call CancelRequest and begin the cancellation process
	cancel()

	select {
	case <-donechan:
		t.Fatalf("simpleHTTPClient.Do should not have exited yet")
	default:
	}

	tr.finishCancel <- struct{}{}

	select {
	case <-donechan:
		//expected behavior
		return
	case <-time.After(time.Second):
		t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
	}
}
Example #19
func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient knows the context is already canceled
		// and calls CancelRequest
		testutil.WaitSchedule()

		// response is returned before cancel effects
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
Example #20
// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName
// will be read from the Context passed to Build().
func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) {
	if config == nil {
		config = new(types.ImageBuildOptions)
	}
	if config.BuildArgs == nil {
		config.BuildArgs = make(map[string]string)
	}
	ctx, cancel := context.WithCancel(clientCtx)
	b = &Builder{
		clientCtx:        ctx,
		cancel:           cancel,
		options:          config,
		Stdout:           os.Stdout,
		Stderr:           os.Stderr,
		docker:           backend,
		context:          buildContext,
		runConfig:        new(container.Config),
		tmpContainers:    map[string]struct{}{},
		id:               stringid.GenerateNonCryptoID(),
		allowedBuildArgs: make(map[string]bool),
	}
	if dockerfile != nil {
		b.dockerfile, err = parser.Parse(dockerfile)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}
Example #21
// gossip loops, sending deltas of the infostore and receiving deltas
// in turn. If an alternate is proposed in the response, the client addr
// is modified and the method returns for forwarding by the caller.
func (c *client) gossip(g *Gossip, gossipClient GossipClient, stopper *stop.Stopper) error {
	// For an un-bootstrapped node, g.is.NodeID is 0 when the client starts gossiping,
	// so it's better to get the nodeID from g.is every time.
	g.mu.Lock()
	addr := g.is.NodeAddr
	g.mu.Unlock()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := gossipClient.Gossip(ctx)
	if err != nil {
		return err
	}

	if err := c.requestGossip(g, addr, stream); err != nil {
		return err
	}

	sendGossipChan := make(chan struct{}, 1)

	// Register a callback for gossip updates.
	updateCallback := func(_ string, _ roachpb.Value) {
		select {
		case sendGossipChan <- struct{}{}:
		default:
		}
	}
	// Defer calling "undoer" callback returned from registration.
	defer g.RegisterCallback(".*", updateCallback)()

	errCh := make(chan error, 1)
	stopper.RunWorker(func() {
		errCh <- func() error {
			for {
				reply, err := stream.Recv()
				if err != nil {
					return err
				}
				if err := c.handleResponse(g, reply); err != nil {
					return err
				}
			}
		}()
	})

	for {
		select {
		case <-c.closer:
			return nil
		case <-stopper.ShouldStop():
			return nil
		case err := <-errCh:
			return err
		case <-sendGossipChan:
			if err := c.sendGossip(g, addr, stream); err != nil {
				return err
			}
		}
	}
}
Example #22
func (m *MesosMessenger) sendLoop() {
	for {
		select {
		case <-m.stop:
			return
		case msg := <-m.sendingQueue:
			e := func() error {
				//TODO(jdef) implement timeout for context
				ctx, cancel := context.WithCancel(context.TODO())
				defer cancel()

				c := make(chan error, 1)
				go func() { c <- m.tr.Send(ctx, msg) }()

				select {
				case <-ctx.Done():
					// Transport layer must use the context to detect cancelled requests.
					<-c // wait for Send to return
					return ctx.Err()
				case err := <-c:
					return err
				}
			}()
			if e != nil {
				m.reportError(fmt.Errorf("Failed to send message %v: %v", msg.Name, e))
			}
		}
	}
}
Example #23
func (m *MesosMessenger) encodeLoop() {
	for {
		select {
		case <-m.stop:
			return
		case msg := <-m.encodingQueue:
			e := func() error {
				//TODO(jdef) implement timeout for context
				ctx, cancel := context.WithCancel(context.TODO())
				defer cancel()

				b, err := proto.Marshal(msg.ProtoMessage)
				if err != nil {
					return err
				}
				msg.Bytes = b
				select {
				case <-ctx.Done():
					return ctx.Err()
				case m.sendingQueue <- msg:
					return nil
				}
			}()
			if e != nil {
				m.reportError(fmt.Errorf("Failed to enqueue message %v: %v", msg, e))
			}
		}
	}
}
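Both TODO comments above note that the per-message context carries no timeout. A minimal, self-contained sketch of what that could look like with context.WithTimeout in place of context.WithCancel (slowSend and the 50ms value are illustrative stand-ins, not part of the Mesos code):

package main

import (
	"context"
	"fmt"
	"time"
)

// slowSend stands in for m.tr.Send; it is deliberately slower than the timeout below.
func slowSend(ctx context.Context, msg string) error {
	time.Sleep(100 * time.Millisecond)
	return nil
}

func main() {
	// A context that is cancelled explicitly or when the timeout elapses.
	ctx, cancel := context.WithTimeout(context.TODO(), 50*time.Millisecond)
	defer cancel()

	c := make(chan error, 1)
	go func() { c <- slowSend(ctx, "ping") }()

	select {
	case <-ctx.Done():
		<-c // wait for the send to return, as sendLoop does
		fmt.Println(ctx.Err()) // context.DeadlineExceeded
	case err := <-c:
		fmt.Println(err)
	}
}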
Example #24
func workers() context.CancelFunc {
	var (
		writerChan     = make(chan interface{})
		schedulerChan  = make(chan interface{})
		aggregatorChan = make(chan Datum)

		keeperReqChan    = make(chan chan []byte)
		keeperSubmitChan = make(chan []byte)
		httpReqChan      = make(chan []byte)
	)

	ctx, cancel := context.WithCancel(context.Background())

	// Data collection pipeline
	go writer(ctx, writerChan)
	go scheduler(ctx, schedulerChan, writerChan)
	go aggregator(ctx, aggregatorChan, schedulerChan)
	go datasources(ctx, aggregatorChan)

	// Data server pipeline
	go keeper(ctx, keeperSubmitChan, keeperReqChan)
	go watcher(ctx, keeperSubmitChan)

	http.HandleFunc("/meta", func(w http.ResponseWriter, r *http.Request) {
		keeperReqChan <- httpReqChan
		data := <-httpReqChan
		w.Header().Set("Content-Type", "application/json")
		w.Write(data)
	})

	return cancel
}
Example #25
// List lists instances that match 'filter', a regular expression that must
// match the entire instance name (fqdn).
func (c *MesosCloud) List(filter string) ([]string, error) {
	//TODO(jdef) use a timeout here? 15s?
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	nodes, err := c.client.listSlaves(ctx)
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		log.V(2).Info("no slaves found, are any running?")
		return nil, nil
	}
	filterRegex, err := regexp.Compile(filter)
	if err != nil {
		return nil, err
	}
	addr := []string{}
	for _, node := range nodes {
		if filterRegex.MatchString(node.hostname) {
			addr = append(addr, node.hostname)
		}
	}
	return addr, err
}
Example #26
// ListClusters lists the names of the available Mesos clusters.
func (c *MesosCloud) ListClusters() ([]string, error) {
	// Always returns a single cluster (this one!)
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	name, err := c.client.clusterName(ctx)
	return []string{name}, err
}
Example #27
func (ts *targetSet) runScraping(ctx context.Context) {
	ctx, ts.cancelScraping = context.WithCancel(ctx)

	ts.scrapePool.init(ctx)

Loop:
	for {
		// Throttle syncing to once per five seconds.
		select {
		case <-ctx.Done():
			break Loop
		case <-time.After(5 * time.Second):
		}

		select {
		case <-ctx.Done():
			break Loop
		case <-ts.syncCh:
			ts.mtx.RLock()
			ts.sync()
			ts.mtx.RUnlock()
		}
	}

	// Wait for all pending target scrapes to complete to ensure there will
	// be no more storage writes after this point.
	ts.scrapePool.stop()
}
Example #28
func TestPushLogChunkCancelledContext(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	ctx, cancel := context.WithCancel(context.Background())
	client := NewArtifactStoreClientWithContext(ts.URL, 100*time.Millisecond, ctx)

	ts.ExpectAndRespond("POST", "/buckets/", http.StatusOK, `{"Id": "foo"}`)
	ts.ExpectAndRespond("POST", "/buckets/foo/artifacts", http.StatusOK, `{"Name": "artifact"}`)

	b, _ := client.NewBucket("foo", "bar", 32)
	sa, err := b.NewChunkedArtifact("artifact")
	require.NotNil(t, sa)
	require.NoError(t, err)

	// Cancel the context to prevent any further requests
	cancel()

	{
		err := sa.AppendLog("console contents")
		require.NoError(t, err)
		err = sa.Close()
		require.Error(t, err)
		require.False(t, err.IsRetriable())
	}
}
Example #29
// Endpoint returns a usable endpoint that will invoke the gRPC specified by the
// client.
func (c Client) Endpoint() endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		req, err := c.enc(ctx, request)
		if err != nil {
			return nil, fmt.Errorf("Encode: %v", err)
		}

		md := &metadata.MD{}
		for _, f := range c.before {
			ctx = f(ctx, md)
		}
		ctx = metadata.NewContext(ctx, *md)

		if err = grpc.Invoke(ctx, c.method, req, c.grpcReply, c.client); err != nil {
			return nil, fmt.Errorf("Invoke: %v", err)
		}

		response, err := c.dec(ctx, c.grpcReply)
		if err != nil {
			return nil, fmt.Errorf("Decode: %v", err)
		}
		return response, nil
	}
}
Example #30
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil watcher channel")
	}
	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
		t.Fatal(err)
	}
	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("took too long to cancel")
	case v, ok := <-wctx.ch:
		if !ok {
			// closed before getting put; OK
			break
		}
		// got the PUT; should close next
		select {
		case <-time.After(time.Second):
			t.Fatalf("took too long to close")
		case v, ok = <-wctx.ch:
			if ok {
				t.Fatalf("expected watcher channel to close, got %v", v)
			}
		}
	}
}