// StartVbStreams implements Feeder{} interface.
func (bdcp *bucketDcp) StartVbStreams(
	opaque uint16, reqTs *protobuf.TsVbuuid) error {

	var err error

	if bdcp.bucket != nil {
		bdcp.bucket.Refresh()
	}

	vbnos := c.Vbno32to16(reqTs.GetVbnos())
	vbuuids, seqnos := reqTs.GetVbuuids(), reqTs.GetSeqnos()
	snapshots := reqTs.GetSnapshots()

	for i, vbno := range vbnos {
		flags, vbuuid := uint32(0), vbuuids[i]
		start, end := seqnos[i], uint64(0xFFFFFFFFFFFFFFFF)
		snapStart, snapEnd := snapshots[i].GetStart(), snapshots[i].GetEnd()
		e := bdcp.dcpFeed.DcpRequestStream(
			vbno, opaque, flags, vbuuid, start, end, snapStart, snapEnd)
		if e != nil {
			err = e
		}
		// FIXME/TODO: the sleep below avoids back-to-back dispatch of
		// StreamRequest to DCP, which seems to cause some problems.
		time.Sleep(time.Millisecond)
	}
	return err
}
// EndVbStreams implements Feeder{} interface.
func (bdcp *bucketDcp) EndVbStreams(
	opaque uint16, ts *protobuf.TsVbuuid) (err error) {

	if bdcp.bucket != nil {
		bdcp.bucket.Refresh()
	}

	vbnos := c.Vbno32to16(ts.GetVbnos())
	for _, vbno := range vbnos {
		if e := bdcp.dcpFeed.DcpCloseStream(vbno, opaque); e != nil {
			err = e
		}
	}
	return err
}
//
// Find the offset/index in the timestamp for the given vbucket no. Return
// -1 if no matching vbno is found.
//
func findTimestampOffsetForVb(ts *protobuf.TsVbuuid, vbno uint32) int {

	if ts == nil {
		return -1
	}

	for i, ts_vbno := range ts.GetVbnos() {
		if ts_vbno == vbno {
			return i
		}
	}

	return -1
}
//
// Compute a new request timestamp based on the response from the projector.
// If all the vbuckets are active for the given requestTs, this function
// returns nil.
//
func recomputeRequestTimestamp(requestTs *protobuf.TsVbuuid,
	rollbackTimestamps []*protobuf.TsVbuuid) *protobuf.TsVbuuid {

	newTs := protobuf.NewTsVbuuid(DEFAULT_POOL_NAME, requestTs.GetBucket(),
		len(requestTs.GetVbnos()))
	rollbackTs := findTimestampForBucket(rollbackTimestamps, requestTs.GetBucket())

	for i, vbno := range requestTs.GetVbnos() {
		offset := findTimestampOffsetForVb(rollbackTs, vbno)
		if offset != -1 {
			// There is a failover Ts for this vbno. Use that one for retry.
			newTs.Append(uint16(vbno), rollbackTs.Seqnos[offset],
				rollbackTs.Vbuuids[offset],
				rollbackTs.Snapshots[offset].GetStart(),
				rollbackTs.Snapshots[offset].GetEnd())
		} else {
			// The vb is not active; just copy from the original requestTs.
			newTs.Append(uint16(vbno), requestTs.Seqnos[i],
				requestTs.Vbuuids[i],
				requestTs.Snapshots[i].GetStart(),
				requestTs.Snapshots[i].GetEnd())
		}
	}

	return newTs
}
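//
// Restart the given vbuckets for a stream by sending a RestartVbuckets
// request to every projector node hosting those vbuckets. The request is
// retried with backoff. If the projector responds with a rollback timestamp
// covering the requested vbuckets, a MsgRollback is sent to the caller;
// if the topic/bucket is missing or the connection is closed, a
// MsgKVStreamRepair is sent so the caller can issue a fresh
// MutationTopicRequest; other errors are reported via MsgError.
//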
func (k *kvSender) restartVbuckets(streamId c.StreamId, restartTs *c.TsVbuuid,
	connErrVbs []Vbucket, respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getProjAddrsForVbuckets(restartTs.Bucket, restartTs.GetVbnos())
	if err != nil {
		logging.Errorf("KVSender::restartVbuckets %v %v Error in fetching cluster info %v",
			streamId, restartTs.Bucket, err)

		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	//convert TS to protobuf format
	var protoRestartTs *protobuf.TsVbuuid
	numVbuckets := k.config["numVbuckets"].Int()
	protoTs := protobuf.NewTsVbuuid(DEFAULT_POOL, restartTs.Bucket, numVbuckets)
	protoRestartTs = protoTs.FromTsVbuuid(restartTs)

	var rollbackTs *protobuf.TsVbuuid
	topic := getTopicForStreamId(streamId)
	rollback := false

	fn := func(r int, err error) error {

		for _, addr := range addrs {
			ap := newProjClient(addr)

			if res, ret := k.sendRestartVbuckets(ap, topic, connErrVbs, protoRestartTs); ret != nil {
				//retry for all errors
				logging.Errorf("KVSender::restartVbuckets %v %v Error Received %v from %v",
					streamId, restartTs.Bucket, ret, addr)
				err = ret
			} else {
				rollbackTs = updateRollbackTsFromResponse(restartTs.Bucket, rollbackTs, res)
			}
		}

		if rollbackTs != nil && checkVbListInTS(protoRestartTs.GetVbnos(), rollbackTs) {
			//if rollback, no need to retry
			rollback = true
			return nil
		} else {
			return err
		}
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()

	//if any of the requested vb is in rollback ts, send rollback
	//msg to caller
	if rollback {
		//convert from protobuf to native format
		nativeTs := rollbackTs.ToTsVbuuid(numVbuckets)

		respCh <- &MsgRollback{streamId: streamId,
			rollbackTs: nativeTs}
	} else if err != nil {
		//if there is a topicMissing/genServer.Closed error, a fresh
		//MutationTopicRequest is required.
		if err.Error() == projClient.ErrorTopicMissing.Error() ||
			err.Error() == c.ErrorClosed.Error() ||
			err.Error() == projClient.ErrorInvalidBucket.Error() {
			respCh <- &MsgKVStreamRepair{
				streamId: streamId,
				bucket:   restartTs.Bucket,
			}
		} else {
			respCh <- &MsgError{
				err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
					severity: FATAL,
					cause:    err}}
		}
	} else {
		respCh <- &MsgSuccess{}
	}
}