Example #1
// Handle is the quicklog handle method for processing a log line
func (u *Handler) Handle(ctx context.Context, prev <-chan ql.Line, next chan<- ql.Line, config map[string]interface{}) error {

	field := "uuid"
	if u.FieldName != "" {
		field = u.FieldName
	}

	ok := true

	fieldIface := config["field"]
	if fieldIface != nil {
		field, ok = fieldIface.(string)
		if !ok {
			log.Log(ctx).Warn("Could not parse UUID config, using field=uuid")
			field = "uuid"
		}
	}

	log.Log(ctx).Debug("Starting filter handler", "handler", "uuid", "field", field)

	go func() {
		for {
			select {
			case line, ok := <-prev:
				if !ok {
					return // upstream closed; stop forwarding
				}
				line.Data[field] = uuid.NewV4().String()
				next <- line
			case <-ctx.Done():
				return
			}
		}
	}()

	return nil
}
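A self-contained sketch of the same fan-through pattern, with a stand-in Line type since ql.Line's definition is not shown here (field names and values are illustrative):

package main

import (
	"context"
	"fmt"
)

// Line stands in for ql.Line: a log record carrying structured fields.
type Line struct {
	Data map[string]interface{}
}

// annotate forwards lines from prev to next, stamping each one with a
// constant field, and stops when ctx is cancelled or prev is closed.
func annotate(ctx context.Context, prev <-chan Line, next chan<- Line, field, value string) {
	go func() {
		for {
			select {
			case line, ok := <-prev:
				if !ok {
					return // upstream closed
				}
				line.Data[field] = value
				next <- line
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	prev := make(chan Line)
	next := make(chan Line)
	annotate(ctx, prev, next, "host", "example-01")

	prev <- Line{Data: map[string]interface{}{"msg": "hello"}}
	fmt.Println((<-next).Data) // map[host:example-01 msg:hello]
}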
Example #2
func WebSensorsAgent(ctx context.Context, db data.DB, u *models.User) {
	// Get the db's changes, filter them down to event records, then
	// filter by whether this user can read the record
	changes := data.Filter(data.FilterKind(db.Changes(), models.EventKind), func(c *data.Change) bool {
		ok, _ := access.CanRead(db, u, c.Record)
		return ok
	})

Run:
	for {
		select {
		case c, ok := <-*changes:
			if !ok {
				break Run
			}

			switch c.Record.(*models.Event).Name {
			case WEB_SENSOR_LOCATION:
				webSensorLocation(db, u, c.Record.(*models.Event).Data)
			}
		case <-ctx.Done():
			break Run

		}
	}
}
Example #3
// waitForStateChange blocks until the state changes to something other than the sourceState.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if sourceState != ac.state {
		return ac.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			ac.mu.Lock()
			err = ctx.Err()
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == ac.state {
		ac.stateCV.Wait()
		if err != nil {
			return ac.state, err
		}
	}
	return ac.state, nil
}
Example #4
// blockingLookup waits until a sufficient quorum is assembled.
func (ks *Keyserver) blockingLookup(ctx context.Context, req *proto.LookupRequest, epoch uint64) (*proto.LookupProof, error) {
	newSignatures := make(chan interface{}, newSignatureBufferSize)
	ks.signatureBroadcast.Subscribe(epoch, newSignatures)
	defer ks.signatureBroadcast.Unsubscribe(epoch, newSignatures)
	verifiersLeft := coname.ListQuorum(req.QuorumRequirement, nil)
	ratifications, haveVerifiers, err := ks.findRatificationsForEpoch(epoch, verifiersLeft)
	if err != nil {
		return nil, err
	}
	for v := range haveVerifiers {
		delete(verifiersLeft, v)
	}
	for !coname.CheckQuorum(req.QuorumRequirement, haveVerifiers) {
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("timed out while waiting for ratification")
		case v := <-newSignatures:
			newSig := v.(*proto.SignedEpochHead)
			for id := range newSig.Signatures {
				if _, ok := verifiersLeft[id]; ok {
					ratifications = append(ratifications, newSig)
					delete(verifiersLeft, id)
					haveVerifiers[id] = struct{}{}
				}
			}
		}
	}
	return ks.assembleLookupProof(req, epoch, ratifications)
}
Example #5
func (c *Cluster) reconnectOnFailure(ctx context.Context) {
	for {
		<-ctx.Done()
		c.Lock()
		if c.stop || c.node != nil {
			c.Unlock()
			return
		}
		c.reconnectDelay *= 2
		if c.reconnectDelay > maxReconnectDelay {
			c.reconnectDelay = maxReconnectDelay
		}
		logrus.Warnf("Restarting swarm in %.2f seconds", c.reconnectDelay.Seconds())
		delayCtx, cancel := context.WithTimeout(context.Background(), c.reconnectDelay)
		c.cancelDelay = cancel
		c.Unlock()
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		c.Lock()
		if c.node != nil {
			c.Unlock()
			return
		}
		var err error
		_, ctx, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
		if err != nil {
			c.err = err
			ctx = delayCtx
		}
		c.Unlock()
	}
}
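The <-delayCtx.Done() above doubles as an interruptible sleep: the stored cancel function can abort the wait early, and Err() tells the two outcomes apart. The idiom in isolation (the helper name is mine, not from the Docker code):

// sleepCtx pauses for d, returning early with an error if ctx ends first.
func sleepCtx(ctx context.Context, d time.Duration) error {
	sctx, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	<-sctx.Done()
	if ctx.Err() != nil {
		return ctx.Err() // parent cancelled before the delay elapsed
	}
	return nil // slept the full duration
}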
Example #6
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}
Example #7
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
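s.w is etcd's wait registry; the Register/Trigger contract this code relies on can be sketched with a map (an illustrative miniature, not etcd's actual implementation):

// waitRegistry hands out one-shot result channels keyed by request ID.
type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

// Register creates a buffered channel for id and returns it to the waiter.
func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger delivers x to the waiter for id, if any, and forgets the entry;
// the buffered channel is why Trigger(r.ID, nil) above cannot block.
func (w *waitRegistry) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- x
	}
}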
Example #8
func (md *MDServerRemote) backgroundRekeyChecker(ctx context.Context) {
	for {
		select {
		case <-md.rekeyTimer.C:
			if !md.conn.IsConnected() {
				md.rekeyTimer.Reset(MdServerBackgroundRekeyPeriod)
				continue
			}

			// Assign an ID to this rekey check so we can track it.
			logTags := make(logger.CtxLogTags)
			logTags[CtxMDSRIDKey] = CtxMDSROpID
			newCtx := logger.NewContextWithLogTags(ctx, logTags)
			id, err := MakeRandomRequestID()
			if err != nil {
				md.log.CWarningf(ctx,
					"Couldn't generate a random request ID: %v", err)
			} else {
				newCtx = context.WithValue(newCtx, CtxMDSRIDKey, id)
			}

			md.log.CDebugf(newCtx, "Checking for rekey folders")
			if err := md.getFoldersForRekey(newCtx, md.client); err != nil {
				md.log.CWarningf(newCtx, "MDServerRemote: getFoldersForRekey "+
					"failed with %v", err)
			}
			md.rekeyTimer.Reset(MdServerBackgroundRekeyPeriod)
		case <-ctx.Done():
			return
		}
	}
}
Example #9
func runExec(ctx context.Context, db *sql.DB, query string) error {
	// Buffered so a late success cannot block the goroutine after a timeout.
	done := make(chan struct{}, 1)
	var errMsg error
	go func() {
		for {
			if _, err := db.Exec(query); err != nil {
				errMsg = err
				select {
				case <-time.After(time.Second):
					continue // retry until the query succeeds
				case <-ctx.Done():
					return // the caller has already given up
				}
			}
			errMsg = nil
			done <- struct{}{}
			return
		}
	}()
	select {
	case <-done:
		return errMsg
	case <-ctx.Done():
		return fmt.Errorf("runExec %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
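A possible call site, bounding the retry loop with a deadline (the query and timeout are illustrative):

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := runExec(ctx, db, "CREATE TABLE IF NOT EXISTS t (id INT)"); err != nil {
	log.Fatal(err)
}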
Example #10
func (h *testHandler) Handle(ctx context.Context, args *raw.Args) (*raw.Res, error) {
	h.mut.Lock()
	h.format = args.Format
	h.caller = args.Caller
	h.mut.Unlock()

	assert.Equal(h.t, args.Caller, CurrentCall(ctx).CallerName())

	switch args.Operation {
	case "timeout":
		deadline, _ := ctx.Deadline()
		time.Sleep(time.Until(deadline.Add(time.Second)))
		h.t.FailNow()
	case "echo":
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	case "busy":
		return &raw.Res{
			SystemErr: ErrServerBusy,
		}, nil
	case "app-error":
		return &raw.Res{
			IsErr: true,
		}, nil
	}
	return nil, errors.New("unknown operation")
}
Example #11
// PromoteSlaveWhenCaughtUp waits for this slave to be caught up on
// replication up to the provided point, and then makes the slave the
// shard master.
func (agent *ActionAgent) PromoteSlaveWhenCaughtUp(ctx context.Context, position string) (string, error) {
	pos, err := replication.DecodePosition(position)
	if err != nil {
		return "", err
	}

	// TODO(alainjobart) change the flavor API to take the context directly
	// For now, extract the timeout from the context, or wait forever
	var waitTimeout time.Duration
	if deadline, ok := ctx.Deadline(); ok {
		waitTimeout = deadline.Sub(time.Now())
		if waitTimeout <= 0 {
			waitTimeout = time.Millisecond
		}
	}
	if err := agent.MysqlDaemon.WaitMasterPos(pos, waitTimeout); err != nil {
		return "", err
	}

	pos, err = agent.MysqlDaemon.PromoteSlave(agent.hookExtraEnv())
	if err != nil {
		return "", err
	}

	if err := agent.MysqlDaemon.SetReadOnly(false); err != nil {
		return "", err
	}

	if _, err := topotools.ChangeType(ctx, agent.TopoServer, agent.TabletAlias, topodatapb.TabletType_MASTER, topotools.ClearHealthMap); err != nil {
		return "", err
	}

	return replication.EncodePosition(pos), nil
}
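Since Go 1.8 the deadline-to-timeout conversion above can be written with time.Until; the behavior is identical:

var waitTimeout time.Duration
if deadline, ok := ctx.Deadline(); ok {
	if waitTimeout = time.Until(deadline); waitTimeout <= 0 {
		waitTimeout = time.Millisecond
	}
}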
Example #12
// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}
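Note that the returned channel is never closed: the producer goroutine simply stops sending once ctx is done. A consumer therefore has to watch the context as well, rather than ranging over the channel (adapter and handle below are illustrative):

msgs := adapter.events(ctx)
for {
	select {
	case m := <-msgs:
		handle(m) // hypothetical per-event callback
	case <-ctx.Done():
		return
	}
}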
Example #13
// Helper to reset a ping ticker.
func (md *MDServerRemote) resetPingTicker(intervalSeconds int) {
	md.tickerMu.Lock()
	defer md.tickerMu.Unlock()

	if md.tickerCancel != nil {
		md.tickerCancel()
		md.tickerCancel = nil
	}
	if intervalSeconds <= 0 {
		return
	}

	md.log.Debug("MDServerRemote: starting new ping ticker with interval %d",
		intervalSeconds)

	var ctx context.Context
	ctx, md.tickerCancel = context.WithCancel(context.Background())
	go func() {
		ticker := time.NewTicker(time.Duration(intervalSeconds) * time.Second)
		for {
			select {
			case <-ticker.C:
				err := md.client.Ping(ctx)
				if err != nil {
					md.log.Debug("MDServerRemote: ping error %s", err)
				}

			case <-ctx.Done():
				md.log.Debug("MDServerRemote: stopping ping ticker")
				ticker.Stop()
				return
			}
		}
	}()
}
Example #14
func deleteNote(ctx context.Context, req interface{}) (interface{}, *ErrorResponse) {
	db := ctx.Value("db").(*gorp.DbMap)
	noteId, err := strconv.Atoi(kami.Param(ctx, "noteId"))

	if err != nil {
		return nil, &ErrorResponse{
			http.StatusBadRequest,
			fmt.Sprintf("Invalid note id format: %v", err),
		}
	}

	note := new(model.Note)
	err = db.SelectOne(note, "select * from notes where id = ?", noteId)
	if err != nil {
		return nil, &ErrorResponse{
			http.StatusBadRequest,
			fmt.Sprintf("Query failed: %v", err),
		}
	}

	if _, err := db.Delete(note); err != nil {
		return nil, &ErrorResponse{
			http.StatusInternalServerError,
			fmt.Sprintf("Delete failed: %v", err),
		}
	}

	return nil, nil
}
Example #15
func updateNote(ctx context.Context, req interface{}) (interface{}, *ErrorResponse) {
	db := ctx.Value("db").(*gorp.DbMap)
	newNote := req.(*model.Note)
	noteId, err := strconv.Atoi(kami.Param(ctx, "noteId"))

	if err != nil {
		return nil, &ErrorResponse{
			http.StatusBadRequest,
			fmt.Sprintf("Invalid note id format: %v", err),
		}
	}

	note := new(model.Note)
	err = db.SelectOne(note, "select * from notes where id = ?", noteId)
	if err != nil {
		return nil, &ErrorResponse{
			http.StatusBadRequest,
			fmt.Sprintf("Query failed: %v", err),
		}
	}

	note.Title = newNote.Title
	note.Content = newNote.Content
	note.OwnerId = newNote.OwnerId
	note.UpdatedAt = time.Now().UnixNano()

	if _, err := db.Update(note); err != nil {
		return nil, &ErrorResponse{
			http.StatusInternalServerError,
			fmt.Sprintf("Update failed: %v", err),
		}
	}

	return note, nil
}
Example #16
func (m *MockHandlerContextRead) ServeHTTP(rw http.ResponseWriter, r *http.Request, ctx context.Context, n interfaces.HandlerFunc) {
	ctxValue, ok := ctx.Value("testKey").(string)
	if ok {
		rw.Write([]byte(ctxValue))
	}
	n(rw, r, ctx)
}
Example #17
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and the RPC is fail-fast.
func (ac *addrConn) wait(ctx context.Context, failFast bool) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		case ac.state == TransientFailure && failFast:
			ac.mu.Unlock()
			return nil, Errorf(codes.Unavailable, "grpc: RPC failed fast due to transport failure")
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, toRPCErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
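The other half of this ready-channel pattern is the notifier: whichever goroutine changes ac.state closes ac.ready to wake every waiter at once. A sketch of that side (grpc's real transition logic does more than this):

func (ac *addrConn) transition(s ConnectivityState) {
	ac.mu.Lock()
	ac.state = s
	if ac.ready != nil {
		close(ac.ready) // wakes all goroutines blocked in wait
		ac.ready = nil  // the next waiter allocates a fresh channel
	}
	ac.mu.Unlock()
}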
Example #18
// ImmediateCallerIDFromContext returns the ImmediateCallerID(querypb.VTGateCallerID)
// stored in the Context, if any
func ImmediateCallerIDFromContext(ctx context.Context) *querypb.VTGateCallerID {
	im, ok := ctx.Value(immediateCallerIDKey).(*querypb.VTGateCallerID)
	if ok && im != nil {
		return im
	}
	return nil
}
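The setter side of this lookup uses an unexported key so no other package can collide with it. Sketched below with an illustrative constructor name; vitess's callerid package defines its own:

type immediateCallerIDKeyType struct{}

var immediateCallerIDKey immediateCallerIDKeyType

// newImmediateCallerIDContext stores im under the package-private key.
func newImmediateCallerIDContext(ctx context.Context, im *querypb.VTGateCallerID) context.Context {
	return context.WithValue(ctx, immediateCallerIDKey, im)
}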
Example #19
func appUserToken(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	// get t from ctx
	t := ctx.Value("test").(*testing.T)

	// Test whether the user is authenticated
	if r.Header.Get("X-Identity-Status") != "Confirmed" {
		t.Errorf("At the app, the user token should already be confirmed, got: %v", r.Header.Get("X-Identity-Status"))
	}
	if r.Header.Get("X-User-Id") != "10a2e6e717a245d9acad3e5f97aeca3d" {
		t.Errorf("At the app, the user should be 10a2e6e717a245d9acad3e5f97aeca3d, got: %v", r.Header.Get("X-User-Id"))
	}
	if r.Header.Get("X-User-Name") != "testuser" {
		t.Errorf("At the app, the user should be testuser, got: %v", r.Header.Get("X-User-Name"))
	}
	if r.Header.Get("X-Domain-Id") != "default" {
		t.Errorf("At the app, the user should be default, got: %v", r.Header.Get("X-Domain-Id"))
	}

	// Test ctx's Context Parameter with key "UserAccessInfo"
	value := router.MiddlewareParam(ctx, UserAccessInfoKey)
	if value == nil {
		t.Error("ctx should contain user access info")
	} else {
		access, ok := value.(*client.AccessInfo)
		if !ok {
			t.Error("it is not accessinfo, what is it?")
		} else if access.Token != "usertoken" || access.TokenInfo.User.Domain.Name != "Default" {
			t.Error("ctx's accessinfo contains wrong information")
		}
	}

	w.Write([]byte("Test success!"))
}
Example #20
// EffectiveCallerIDFromContext returns the EffectiveCallerID(vtpb.CallerID)
// stored in the Context, if any
func EffectiveCallerIDFromContext(ctx context.Context) *vtpb.CallerID {
	ef, ok := ctx.Value(effectiveCallerIDKey).(*vtpb.CallerID)
	if ok && ef != nil {
		return ef
	}
	return nil
}
Example #21
func (n *network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
		wg.Done()
	}()

	n.rl = make([]netlink.Route, 0, 10)
	wg.Add(1)
	go func() {
		n.routeCheck(ctx)
		wg.Done()
	}()

	defer wg.Wait()

	for {
		select {
		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example #22
func statusFrom(ctx context.Context) statusType {
	s, ok := ctx.Value(statusKey).(statusType)
	if !ok {
		panic("missing status in context")
	}
	return s
}
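The matching setter is one line; statusFrom can afford to panic on a missing value because every request path is expected to pass through it first (a sketch, with statusKey and statusType as above):

func withStatus(ctx context.Context, s statusType) context.Context {
	return context.WithValue(ctx, statusKey, s)
}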
Example #23
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed.
func (ac *addrConn) wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #24
func (*hostnameHandler) Handle(ctx context.Context, prev <-chan ql.Line, next chan<- ql.Line, config map[string]interface{}) error {

	field := "hostname"
	ok := true

	fieldIface := config["field"]
	if fieldIface != nil {
		field, ok = fieldIface.(string)
		if !ok {
			log.Log(ctx).Warn("Could not parse hostname config, using field=hostname")
			field = "hostname"
		}
	}

	log.Log(ctx).Debug("Starting filter handler", "handler", "hostname", "field", field)

	hostname, _ := os.Hostname()

	go func() {
		for {
			select {
			case line, ok := <-prev:
				if !ok {
					return // upstream closed; stop forwarding
				}
				line.Data[field] = hostname
				next <- line
			case <-ctx.Done():
				return
			}
		}
	}()

	return nil
}
Example #25
func getCurFilters(c context.Context) []RawFilter {
	curFiltsI := c.Value(taskQueueFilterKey)
	if curFiltsI != nil {
		return curFiltsI.([]RawFilter)
	}
	return nil
}
Example #26
/*
Path returns the path that the Goji router uses to perform the PathPrefix
optimization. While this function does not distinguish between the absence of a
path and an empty path, Goji will automatically extract a path from the request
if none is present.

By convention, paths are stored in their escaped form (i.e., the value returned
by net/url.URL.EscapedPath, and not URL.Path) to ensure that Patterns have as
much discretion as possible (e.g., to behave differently for '/' and '%2f').
*/
func Path(ctx context.Context) string {
	pi := ctx.Value(internal.Path)
	if pi == nil {
		return ""
	}
	return pi.(string)
}
Example #27
func (sto *appengineStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)

	loan := ctxPool.Get()
	defer loan.Return()
	actx := loan

	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(actx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(actx, memKind, sto.namespace+"~", 0, nil)

	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(actx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		select {
		case dest <- blob.SizedRef{Ref: blob.ParseOrZero(key.StringID()[len(prefix):]), Size: uint32(row.Size)}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
Example #28
func runQuery(ctx context.Context, db *sql.DB, query string) (*sql.Rows, error) {
	// Buffered so a late success cannot block the goroutine after a timeout.
	done := make(chan struct{}, 1)
	var (
		rows   *sql.Rows
		errMsg error
	)
	go func() {
		for {
			rs, err := db.Query(query)
			if err != nil {
				errMsg = err
				select {
				case <-time.After(time.Second):
					continue // retry until the query succeeds
				case <-ctx.Done():
					return // the caller has already given up
				}
			}
			rows = rs
			errMsg = nil
			done <- struct{}{}
			return
		}
	}()
	select {
	case <-done:
		return rows, errMsg
	case <-ctx.Done():
		return nil, fmt.Errorf("runQuery %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
Example #29
func (ts *targetSet) runScraping(ctx context.Context) {
	ctx, ts.cancelScraping = context.WithCancel(ctx)

	ts.scrapePool.init(ctx)

Loop:
	for {
		// Throttle syncing to once per five seconds.
		select {
		case <-ctx.Done():
			break Loop
		case <-time.After(5 * time.Second):
		}

		select {
		case <-ctx.Done():
			break Loop
		case <-ts.syncCh:
			ts.mtx.RLock()
			ts.sync()
			ts.mtx.RUnlock()
		}
	}

	// Wait for all pending target scrapes to complete, to ensure there will
	// be no more storage writes after this point.
	ts.scrapePool.stop()
}
Example #30
func (vnet *VNET) dispatchICMP(ctx context.Context) chan<- *Packet {
	var in = make(chan *Packet)

	vnet.wg.Add(1)
	go func() {
		defer vnet.wg.Done()

		vnet.system.WaitForControllerMAC()
		log.Printf("ICMP: running")

		for {
			var pkt *Packet

			select {
			case pkt = <-in:
			case <-ctx.Done():
				return
			}

			vnet.handleICMP(pkt)
		}
	}()

	return in
}
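A hypothetical producer for the returned channel; the send itself should also respect ctx so it cannot block after the dispatcher has exited:

icmpIn := vnet.dispatchICMP(ctx)
select {
case icmpIn <- pkt:
case <-ctx.Done():
}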