Example #1
File: watch.go Project: xrq1990/etcd
// doWatch sends each watch registration request from requests, then switches
// to receiving watch events; every Send/Recv outcome is reported on results.
func doWatch(stream etcdserverpb.Watch_WatchClient, requests <-chan *etcdserverpb.WatchRequest) {
	for r := range requests {
		st := time.Now()
		err := stream.Send(r)
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- &result{
			errStr:   errStr,
			duration: time.Since(st),
		}
		bar.Increment()
	}
	wg.Done()

	// receive phase: report every event delivered on the stream
	for {
		_, err := stream.Recv()
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- &result{
			errStr: errStr,
		}
		bar.Increment()
	}
}
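The excerpt above leans on package-level state that the listing does not show. A minimal sketch of the assumed declarations follows; the names come from the excerpt itself, but the exact types are inferred from usage, so treat them as assumptions:

// Sketch of the package-level context assumed by the excerpt above,
// not the actual benchmark source.
type result struct {
	errStr   string        // non-empty when the RPC returned an error
	duration time.Duration // time spent in Send
}

var (
	results chan *result    // consumed by a reporting goroutine elsewhere
	bar     *pb.ProgressBar // progress bar, e.g. gopkg.in/cheggaaa/pb.v1
	wg      sync.WaitGroup  // tracks the sender goroutines
)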
Example #2
File: watch.go Project: vsayer/etcd
// doWatch registers watches from requests, signals completion via atomic
// counters, waits for the put phase to start, then measures each Recv.
func doWatch(stream etcdserverpb.Watch_WatchClient, requests <-chan etcdserverpb.WatchRequest) {
	for r := range requests {
		st := time.Now()
		err := stream.Send(&r)
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st)}
		bar.Increment()
	}
	atomic.AddInt32(&nrWatchCompleted, 1)
	if atomic.LoadInt32(&nrWatchCompleted) == int32(watchTotalStreams) {
		watchCompletedNotifier <- struct{}{}
	}

	// wait for the put phase to start before timing receives
	<-putStartNotifier

	for {
		st := time.Now()
		_, err := stream.Recv()
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st)}
		bar.Increment()

		atomic.AddInt32(&nrRecvCompleted, 1)
		if atomic.LoadInt32(&nrRecvCompleted) == int32(eventsTotal) {
			recvCompletedNotifier <- struct{}{}
		}
	}
}
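This variant coordinates its phases through atomic counters and notifier channels declared outside the excerpt. A sketch of the assumed declarations (identifiers taken from the code; types and buffering are assumptions):

// Assumed coordination state for the excerpt above; a sketch, not
// verified against the original benchmark source.
var (
	nrWatchCompleted  int32 // senders that finished registering watches
	nrRecvCompleted   int32 // events received across all streams
	watchTotalStreams int   // number of doWatch goroutines
	eventsTotal       int   // total events the benchmark expects

	watchCompletedNotifier = make(chan struct{}) // fired once all watches registered
	putStartNotifier       = make(chan struct{}) // closed to release the recv phase
	recvCompletedNotifier  = make(chan struct{}) // fired once all events arrived
)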
Example #3
File: watch.go Project: ncdc/kubernetes
// resumeWatchers rebuilds every registered watcher on a new client
func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
	w.mu.RLock()
	streams := make([]*watcherStream, 0, len(w.streams))
	for _, ws := range w.streams {
		streams = append(streams, ws)
	}
	w.mu.RUnlock()

	for _, ws := range streams {
		// drain recvc so no old WatchResponses (e.g., Created messages)
		// are processed while resuming
		ws.drain()

		// pause serveStream
		ws.resumec <- -1

		// reconstruct watcher from initial request
		if ws.lastRev != 0 {
			ws.initReq.rev = ws.lastRev
		}
		if err := wc.Send(ws.initReq.toPB()); err != nil {
			return err
		}

		// wait for request ack
		resp, err := wc.Recv()
		if err != nil {
			return err
		} else if len(resp.Events) != 0 || !resp.Created {
			return fmt.Errorf("watcher: unexpected response (%+v)", resp)
		}

		// id may be different since new remote watcher; update map
		w.mu.Lock()
		delete(w.streams, ws.id)
		ws.id = resp.WatchId
		w.streams[ws.id] = ws
		w.mu.Unlock()

		// unpause serveStream
		ws.resumec <- ws.lastRev
	}
	return nil
}
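resumeWatchers only touches a handful of watcherStream fields. Reconstructing those from the excerpt gives roughly the following shape; this is a sketch with inferred field types, and the real struct carries more state:

// watcherStream fields used by resumeWatchers, inferred from the excerpt.
type watcherStream struct {
	initReq watchRequest        // initial request, replayed with rev = lastRev
	id      int64               // server-assigned watch id; rewritten on resume
	lastRev int64               // last revision delivered; the resume point
	recvc   chan *WatchResponse // incoming responses; drained before resuming
	resumec chan int64          // -1 pauses serveStream, lastRev resumes it
}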
Example #4
File: watch.go Project: rhuss/gofabric8
// resumeWatchers rebuilds every registered watcher on a new client
func (w *watcher) resumeWatchers(wc pb.Watch_WatchClient) error {
	streams := []*watcherStream{}
	w.mu.RLock()
	for _, ws := range w.streams {
		streams = append(streams, ws)
	}
	w.mu.RUnlock()

	for _, ws := range streams {
		// reconstruct watcher from initial request
		if ws.lastRev != 0 {
			ws.initReq.rev = ws.lastRev
		}
		if err := wc.Send(ws.initReq.toPB()); err != nil {
			return err
		}

		// wait for request ack
		resp, err := wc.Recv()
		if err != nil {
			return err
	} else if len(resp.Events) != 0 || !resp.Created {
			return fmt.Errorf("watcher: unexpected response (%+v)", resp)
		}

		// id may be different since new remote watcher; update map
		w.mu.Lock()
		delete(w.streams, ws.id)
		ws.id = resp.WatchId
		w.streams[ws.id] = ws
		w.mu.Unlock()

		// signal serveStream to resume from lastRev
		ws.resumec <- ws.lastRev
	}
	return nil
}
Example #5
File: watch.go Project: chipironcin/etcd
// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
	var wc pb.Watch_WatchClient
	var closeErr error

	defer func() {
		w.owner.mu.Lock()
		w.closeErr = closeErr
		if w.owner.streams != nil {
			delete(w.owner.streams, w.ctxKey)
		}
		close(w.donec)
		w.owner.mu.Unlock()
		w.cancel()
	}()

	// start a stream with the etcd grpc server
	if wc, closeErr = w.newWatchClient(); closeErr != nil {
		return
	}

	var pendingReq, failedReq *watchRequest
	curReqC := w.reqc
	cancelSet := make(map[int64]struct{})

	for {
		select {
		// Watch() requested
		case pendingReq = <-curReqC:
			// no more watch requests until there's a response
			curReqC = nil
			if err := wc.Send(pendingReq.toPB()); err == nil {
				// pendingReq now waits on w.respc
				break
			}
			failedReq = pendingReq
		// New events from the watch client
		case pbresp := <-w.respc:
			switch {
			case pbresp.Created:
				// response to pending req, try to add
				w.addStream(pbresp, pendingReq)
				pendingReq = nil
				curReqC = w.reqc
			case pbresp.Canceled:
				delete(cancelSet, pbresp.WatchId)
				// shutdown serveStream, if any
				w.mu.Lock()
				if ws, ok := w.streams[pbresp.WatchId]; ok {
					close(ws.recvc)
					delete(w.streams, ws.id)
				}
				numStreams := len(w.streams)
				w.mu.Unlock()
				if numStreams == 0 {
					// don't leak watcher streams
					return
				}
			default:
				// dispatch to appropriate watch stream
				if ok := w.dispatchEvent(pbresp); ok {
					break
				}
				// watch response on unexpected watch id; cancel id
				if _, ok := cancelSet[pbresp.WatchId]; ok {
					break
				}
				cancelSet[pbresp.WatchId] = struct{}{}
				cr := &pb.WatchRequest_CancelRequest{
					CancelRequest: &pb.WatchCancelRequest{
						WatchId: pbresp.WatchId,
					},
				}
				req := &pb.WatchRequest{RequestUnion: cr}
				wc.Send(req)
			}
		// watch client failed to recv; spawn another if possible
		// TODO report watch client errors from errc?
		case err := <-w.errc:
			if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
				closeErr = err
				return
			}
			if wc, closeErr = w.newWatchClient(); closeErr != nil {
				return
			}
			curReqC = w.reqc
			if pendingReq != nil {
				failedReq = pendingReq
			}
			cancelSet = make(map[int64]struct{})
		case <-w.stopc:
			return
		}

		// requeue a request whose send failed, or that was still pending
		// when the stream died, for another attempt
		if failedReq != nil {
			go func(wr *watchRequest) {
				select {
				case w.reqc <- wr:
				case <-wr.ctx.Done():
				case <-w.donec:
				}
			}(failedReq)
			failedReq = nil
			pendingReq = nil
		}
	}
}
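Each client-side request is converted to the wire type with toPB before Send. A plausible sketch of that conversion, mirroring how the WatchRequest union is built in the cancel path above; the exact field set of watchRequest is an assumption:

// Sketch of watchRequest.toPB, patterned on the CancelRequest construction
// in the default branch above; not the verified implementation.
func (wr *watchRequest) toPB() *pb.WatchRequest {
	req := &pb.WatchCreateRequest{
		Key:           []byte(wr.key),
		StartRevision: wr.rev,
	}
	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
	return &pb.WatchRequest{RequestUnion: cr}
}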
Example #6
File: watch.go Project: jbeda/kubernetes
// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
	var wc pb.Watch_WatchClient
	var closeErr error

	// substreams marked to close but whose goroutines are still running;
	// needed to avoid double-closing recvc on grpc stream teardown
	closing := make(map[*watcherStream]struct{})

	defer func() {
		w.closeErr = closeErr
		// shutdown substreams and resuming substreams
		for _, ws := range w.substreams {
			if _, ok := closing[ws]; !ok {
				close(ws.recvc)
			}
		}
		for _, ws := range w.resuming {
			if _, ok := closing[ws]; ws != nil && !ok {
				close(ws.recvc)
			}
		}
		w.joinSubstreams()
		for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
			w.closeSubstream(<-w.closingc)
		}

		w.owner.closeStream(w)
	}()

	// start a stream with the etcd grpc server
	if wc, closeErr = w.newWatchClient(); closeErr != nil {
		return
	}

	cancelSet := make(map[int64]struct{})

	for {
		select {
		// Watch() requested
		case wreq := <-w.reqc:
			outc := make(chan WatchResponse, 1)
			ws := &watcherStream{
				initReq: *wreq,
				id:      -1,
				outc:    outc,
				// unbuffered so resumes won't cause repeat events
				recvc: make(chan *WatchResponse),
			}

			ws.donec = make(chan struct{})
			go w.serveSubstream(ws, w.resumec)

			// queue up for watcher creation/resume
			w.resuming = append(w.resuming, ws)
			if len(w.resuming) == 1 {
				// head of resume queue, can register a new watcher
				wc.Send(ws.initReq.toPB())
			}
		// New events from the watch client
		case pbresp := <-w.respc:
			switch {
			case pbresp.Created:
				// response to head of queue creation
				if ws := w.resuming[0]; ws != nil {
					w.addSubstream(pbresp, ws)
					w.dispatchEvent(pbresp)
					w.resuming[0] = nil
				}
				if ws := w.nextResume(); ws != nil {
					wc.Send(ws.initReq.toPB())
				}
			case pbresp.Canceled:
				delete(cancelSet, pbresp.WatchId)
				if ws, ok := w.substreams[pbresp.WatchId]; ok {
					// signal to stream goroutine to update closingc
					close(ws.recvc)
					closing[ws] = struct{}{}
				}
			default:
				// dispatch to appropriate watch stream
				if ok := w.dispatchEvent(pbresp); ok {
					break
				}
				// watch response on unexpected watch id; cancel id
				if _, ok := cancelSet[pbresp.WatchId]; ok {
					break
				}
				cancelSet[pbresp.WatchId] = struct{}{}
				cr := &pb.WatchRequest_CancelRequest{
					CancelRequest: &pb.WatchCancelRequest{
						WatchId: pbresp.WatchId,
					},
				}
				req := &pb.WatchRequest{RequestUnion: cr}
				wc.Send(req)
			}
		// watch client failed to recv; spawn another if possible
		case err := <-w.errc:
			if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
				closeErr = err
				return
			}
			if wc, closeErr = w.newWatchClient(); closeErr != nil {
				return
			}
			if ws := w.nextResume(); ws != nil {
				wc.Send(ws.initReq.toPB())
			}
			cancelSet = make(map[int64]struct{})
		case <-w.stopc:
			return
		case ws := <-w.closingc:
			w.closeSubstream(ws)
			delete(closing, ws)
			if len(w.substreams)+len(w.resuming) == 0 {
				// no more watchers on this stream, shutdown
				return
			}
		}
	}
}
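The closing map and closingc channel imply a contract with each substream goroutine: after run() closes ws.recvc, the goroutine must eventually hand its ws back on w.closingc so closeSubstream runs exactly once. A schematic sketch of that contract, not the real serveSubstream:

// Schematic of the shutdown protocol visible in run() above; the real
// serveSubstream also buffers and resumes, which this sketch omits.
func serveSubstreamSketch(w *watchGrpcStream, ws *watcherStream) {
	defer func() {
		w.closingc <- ws // run() pairs this with closeSubstream(ws)
	}()
	for resp := range ws.recvc { // loop ends when run() closes recvc
		ws.outc <- *resp // forward to the user-facing channel
	}
}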