// protobuf2Vbmap converts a protobuf vbucket-connection map into its
// native counterpart, narrowing the 32-bit vbucket numbers to 16-bit.
func protobuf2Vbmap(vbmap *protobuf.VbConnectionMap) *c.VbConnectionMap {
	out := &c.VbConnectionMap{}
	out.Bucket = vbmap.GetBucket()
	out.Vbuckets = c.Vbno32to16(vbmap.GetVbuckets())
	out.Vbuuids = vbmap.GetVbuuids()
	return out
}
// StartVbStreams implements Feeder{} interface. func (bdcp *bucketDcp) StartVbStreams( opaque uint16, reqTs *protobuf.TsVbuuid) error { var err error if bdcp.bucket != nil { bdcp.bucket.Refresh() } vbnos := c.Vbno32to16(reqTs.GetVbnos()) vbuuids, seqnos := reqTs.GetVbuuids(), reqTs.GetSeqnos() for i, vbno := range vbnos { snapshots := reqTs.GetSnapshots() flags, vbuuid := uint32(0), vbuuids[i] start, end := seqnos[i], uint64(0xFFFFFFFFFFFFFFFF) snapStart, snapEnd := snapshots[i].GetStart(), snapshots[i].GetEnd() e := bdcp.dcpFeed.DcpRequestStream( vbno, opaque, flags, vbuuid, start, end, snapStart, snapEnd) if e != nil { err = e } // FIXME/TODO: the below sleep avoid back-to-back dispatch of // StreamRequest to DCP, which seem to cause some problems. time.Sleep(time.Millisecond) } return err }
// AllVbuckets16 return all vbuckets hosted by all kvnodes // in sort order. vbuckets are returned as 16-bit values. func (resp *VbmapResponse) AllVbuckets16() []uint16 { vbs := make([]uint16, 0) for _, vs := range resp.GetKvvbnos() { vbs = append(vbs, c.Vbno32to16(vs.GetVbnos())...) } vbuckets := c.Vbuckets(vbs) sort.Sort(vbuckets) return []uint16(vbuckets) }
// AllVbuckets32 return all vbuckets hosted by all kvnodes // in sort order. vbuckets are returned as 32-bit values. func (resp *VbmapResponse) AllVbuckets32() []uint32 { vbs := make([]uint32, 0) for _, vs := range resp.GetKvvbnos() { vbs = append(vbs, vs.GetVbnos()...) } vbuckets := c.Vbuckets(c.Vbno32to16(vbs)) sort.Sort(vbuckets) return vbuckets.To32() }
// - return couchbase SDK error if any. func (p *Projector) doFailoverLog( request *protobuf.FailoverLogRequest, opaque uint16) ap.MessageMarshaller { response := &protobuf.FailoverLogResponse{} pooln := request.GetPool() bucketn := request.GetBucket() vbuckets := request.GetVbnos() // log this request. prefix := p.logPrefix fmsg := "%v ##%x doFailoverLog() {%q, %q, %v}\n" logging.Infof(fmsg, prefix, opaque, pooln, bucketn, vbuckets) defer logging.Infof("%v ##%x doFailoverLog() returns ...\n", prefix, opaque) bucket, err := c.ConnectBucket(p.clusterAddr, pooln, bucketn) if err != nil { logging.Errorf("%v ##%x ConnectBucket(): %v\n", prefix, opaque, err) response.Err = protobuf.NewError(err) return response } defer bucket.Close() protoFlogs := make([]*protobuf.FailoverLog, 0, len(vbuckets)) vbnos := c.Vbno32to16(vbuckets) dcpConfig := map[string]interface{}{ "genChanSize": p.config["projector.dcp.genChanSize"].Int(), "dataChanSize": p.config["projector.dcp.dataChanSize"].Int(), } flogs, err := bucket.GetFailoverLogs(opaque, vbnos, dcpConfig) if err == nil { for vbno, flog := range flogs { vbuuids := make([]uint64, 0, len(flog)) seqnos := make([]uint64, 0, len(flog)) for _, x := range flog { vbuuids = append(vbuuids, x[0]) seqnos = append(seqnos, x[1]) } protoFlog := &protobuf.FailoverLog{ Vbno: proto.Uint32(uint32(vbno)), Vbuuids: vbuuids, Seqnos: seqnos, } protoFlogs = append(protoFlogs, protoFlog) } } else { logging.Errorf("%v ##%x GetFailoverLogs(): %v\n", prefix, opaque, err) response.Err = protobuf.NewError(err) return response } response.Logs = protoFlogs return response }
// EndVbStreams implements Feeder{} interface. func (bdcp *bucketDcp) EndVbStreams( opaque uint16, ts *protobuf.TsVbuuid) (err error) { if bdcp.bucket != nil { bdcp.bucket.Refresh() } vbnos := c.Vbno32to16(ts.GetVbnos()) for _, vbno := range vbnos { if e := bdcp.dcpFeed.DcpCloseStream(vbno, opaque); e != nil { err = e } } return err }
// GetVbmaps return a map of kvaddr -> list-of-vbuckets in node. func (resp *VbmapResponse) GetVbmaps() (map[string][]uint16, error) { vbm := make(map[string][]uint16) kvaddrs := resp.GetKvaddrs() kvvbnos := resp.GetKvvbnos() if len(kvaddrs) != len(kvvbnos) { return nil, ErrorInvalidVbmap } for i, kvaddr := range kvaddrs { vbm[kvaddr] = c.Vbno32to16(kvvbnos[i].GetVbnos()) } return vbm, nil }
func (k *kvSender) makeRestartTsFromKV(bucket string, vbnos []uint32) (*protobuf.TsVbuuid, error) { flogs, err := k.getFailoverLogs(bucket, vbnos) if err != nil { logging.Fatalf("KVSender::makeRestartTS Unexpected Error During Failover "+ "Log Request for Bucket %v. Err %v", bucket, err) return nil, err } ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos)) ts = ts.ComputeRestartTs(flogs.ToFailoverLog(c.Vbno32to16(vbnos))) return ts, nil }
// AddRollbackTimestamp will add a subset of vbucket's
// rollback-timestamp for a `bucket`.
//
// It appends a new rollback timestamp (built from rollbTs) to the
// response, then prunes the rolled-back vbuckets out of the matching
// bucket's active timestamps. Returns resp for chaining.
func (resp *TopicResponse) AddRollbackTimestamp(
	pool, bucket string, rollbTs *c.TsVbuuid) *TopicResponse {

	// add rollback timestamp
	ts := TsVbuuid{
		Pool:   proto.String(pool),
		Bucket: proto.String(bucket),
	}
	resp.RollbackTimestamps = append(
		resp.RollbackTimestamps, ts.FromTsVbuuid(rollbTs))

	// prune active timestamp, that received rollback.
	// NOTE(review): the loop below reads ts.GetVbnos(); this yields the
	// rolled-back vbuckets only if FromTsVbuuid above fills `ts` in
	// place (pointer receiver). If it instead returns a new value, `ts`
	// still carries only Pool and Bucket and the filter would be a
	// no-op — TODO confirm FromTsVbuuid's receiver semantics.
	actTss := make([]*TsVbuuid, len(resp.GetActiveTimestamps()))
	for i, actTs := range resp.GetActiveTimestamps() {
		if actTs.GetBucket() == bucket {
			vbnos := c.Vbno32to16(ts.GetVbnos())
			actTss[i] = actTs.FilterByVbuckets(vbnos)
		} else {
			// other buckets' active timestamps pass through untouched.
			actTss[i] = actTs
		}
	}
	resp.ActiveTimestamps = actTss
	return resp
}