func (ss *StreamState) initBucketInStream(streamId common.StreamId,
	bucket string) {

	numVbuckets := ss.config["numVbuckets"].Int()
	ss.streamBucketHWTMap[streamId][bucket] = common.NewTsVbuuid(bucket, numVbuckets)
	ss.streamBucketInMemTsCountMap[streamId][bucket] = 0
	ss.streamBucketNewTsReqdMap[streamId][bucket] = false
	ss.streamBucketFlushInProgressTsMap[streamId][bucket] = nil
	ss.streamBucketAbortInProgressMap[streamId][bucket] = false
	ss.streamBucketTsListMap[streamId][bucket] = list.New()
	ss.streamBucketLastFlushedTsMap[streamId][bucket] = nil
	ss.streamBucketLastSnapAlignFlushedTsMap[streamId][bucket] = nil
	ss.streamBucketFlushEnabledMap[streamId][bucket] = true
	ss.streamBucketDrainEnabledMap[streamId][bucket] = true
	ss.streamBucketVbStatusMap[streamId][bucket] = NewTimestamp(numVbuckets)
	ss.streamBucketVbRefCountMap[streamId][bucket] = NewTimestamp(numVbuckets)
	ss.streamBucketRestartVbRetryMap[streamId][bucket] = NewTimestamp(numVbuckets)
	ss.streamBucketRestartVbTsMap[streamId][bucket] = nil
	ss.streamBucketRestartVbErrMap[streamId][bucket] = false
	ss.streamBucketIndexCountMap[streamId][bucket] = 0
	ss.streamBucketRepairStopCh[streamId][bucket] = nil
	ss.streamBucketTimerStopCh[streamId][bucket] = make(StopChannel)
	ss.streamBucketLastPersistTime[streamId][bucket] = time.Now()
	ss.streamBucketRestartTsMap[streamId][bucket] = nil
	ss.streamBucketLastSnapMarker[streamId][bucket] = common.NewTsVbuuid(bucket, numVbuckets)

	ss.streamBucketStatus[streamId][bucket] = STREAM_ACTIVE

	logging.Debugf("StreamState::initBucketInStream \n\tNew Bucket %v Added for "+
		"Stream %v", bucket, streamId)
}
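// Note: initBucketInStream above assigns into ss.streamBucketHWTMap[streamId][bucket]
// and the other per-bucket maps, which assumes the per-stream inner maps already
// exist (assigning into a nil inner map panics in Go); they are presumably created
// when the stream itself is initialized. A minimal, self-contained sketch of that
// nested-map pattern follows; the names (state, initStream, initBucket) are
// illustrative only and not part of the indexer code.
package main

import "fmt"

type streamId int

type state struct {
	// hwt mirrors the streamId -> bucket -> value shape of the stream maps.
	hwt map[streamId]map[string]uint64
}

// initStream creates the inner map for a stream so per-bucket entries can be
// added later without panicking.
func (s *state) initStream(id streamId) {
	if s.hwt == nil {
		s.hwt = make(map[streamId]map[string]uint64)
	}
	s.hwt[id] = make(map[string]uint64)
}

// initBucket adds a bucket entry under an already-initialized stream.
func (s *state) initBucket(id streamId, bucket string) {
	s.hwt[id][bucket] = 0
}

func main() {
	s := &state{}
	s.initStream(1)
	s.initBucket(1, "default")
	fmt.Println(s.hwt) // map[1:map[default:0]]
}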
func newScannerTestHarness() (*scannerTestHarness, error) {
	// TODO: Set NUM_VBUCKETS = 8 in config
	h := new(scannerTestHarness)
	h.cmdch = make(chan Message)
	h.msgch = make(chan Message)
	si, errMsg := NewScanCoordinator(h.cmdch, h.msgch,
		c.SystemConfig.SectionConfig("indexer.", true))
	h.scanner = si.(*scanCoordinator)
	if errMsg.GetMsgType() != MSG_SUCCESS {
		return nil, (errMsg.(*MsgError)).GetError().cause
	}

	h.scanner.indexInstMap = make(c.IndexInstMap)
	h.scanner.indexPartnMap = make(IndexPartnMap)

	// FIXME:
	// This is a hack to comply with the existing timestamp datastructure.
	// We need to come up with the right timestamp datastructure to be
	// used for index queries.
	h.scanTS = c.NewTsVbuuid("default", 8)
	h.scanTS.Snapshots = [][2]uint64{
		[2]uint64{0, 1},
		[2]uint64{0, 2},
		[2]uint64{0, 3},
		[2]uint64{0, 4},
		[2]uint64{0, 5},
		[2]uint64{0, 6},
		[2]uint64{0, 7},
		[2]uint64{0, 8},
	}

	go h.handleScanTimestamps()
	return h, nil
}
//initBucketFilter initializes the bucket filter
func (r *mutationStreamReader) initBucketFilter(bucketFilter map[string]*common.TsVbuuid) {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	//allocate a new filter for the buckets which don't
	//have a filter yet
	for b, q := range r.bucketQueueMap {
		if _, ok := r.bucketFilterMap[b]; !ok {
			logging.Tracef("MutationStreamReader::initBucketFilter Added new filter "+
				"for Bucket %v Stream %v", b, r.streamId)

			//if there is a non-nil filter, use that. otherwise use a zero filter.
			if filter, ok := bucketFilter[b]; ok && filter != nil {
				r.bucketFilterMap[b] = filter.Copy()
			} else {
				r.bucketFilterMap[b] = common.NewTsVbuuid(b, int(q.queue.GetNumVbuckets()))
			}

			r.bucketSyncDue[b] = false
		}
	}

	//remove the bucket filters for which the bucket doesn't exist anymore
	for b := range r.bucketFilterMap {
		if _, ok := r.bucketQueueMap[b]; !ok {
			logging.Tracef("MutationStreamReader::initBucketFilter Deleted filter "+
				"for Bucket %v Stream %v", b, r.streamId)
			delete(r.bucketFilterMap, b)
			delete(r.bucketSyncDue, b)
		}
	}
}
//
// Create a timestamp history for a particular stream, bucket
//
func newTimestampHistory(bucket string) *timestampHistory {
	result := &timestampHistory{history: make([]*common.TsVbuuid, TIMESTAMP_HISTORY_COUNT),
		last:    nil,
		current: 0}
	result.history[result.current] = common.NewTsVbuuid(bucket, NUM_VB)
	return result
}
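// The struct above keeps a fixed-size slice of timestamps plus a current cursor,
// which suggests the history is used as a circular (ring) buffer: once the slice
// is full, new timestamps overwrite the oldest slot. The self-contained sketch
// below illustrates that pattern with simplified types; the names (ringHistory,
// Add, Latest) are hypothetical and not part of the indexer code.
package main

import "fmt"

type entry struct {
	seqnos []uint64
}

type ringHistory struct {
	history []*entry
	current int
}

func newRingHistory(size int) *ringHistory {
	return &ringHistory{history: make([]*entry, size)}
}

// Add stores a new entry in the next slot, wrapping around when the end of
// the slice is reached.
func (r *ringHistory) Add(e *entry) {
	r.current = (r.current + 1) % len(r.history)
	r.history[r.current] = e
}

// Latest returns the most recently added entry (nil if none has been added).
func (r *ringHistory) Latest() *entry {
	return r.history[r.current]
}

func main() {
	h := newRingHistory(4)
	for i := 0; i < 6; i++ {
		h.Add(&entry{seqnos: []uint64{uint64(i)}})
	}
	fmt.Println(h.Latest().seqnos) // [5]; the two oldest entries were overwritten
}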
// ToTsVbuuid converts a timestamp from protobuf format to common.TsVbuuid;
// the latter holds entries for the full set of vbuckets.
func (ts *TsVbuuid) ToTsVbuuid(maxVbuckets int) *c.TsVbuuid {
	nativeTs := c.NewTsVbuuid(ts.GetBucket(), maxVbuckets)
	seqnos, vbuuids, ss := ts.GetSeqnos(), ts.GetVbuuids(), ts.GetSnapshots()
	for i, vbno := range ts.GetVbnos() {
		nativeTs.Seqnos[vbno] = seqnos[i]
		nativeTs.Vbuuids[vbno] = vbuuids[i]
		nativeTs.Snapshots[vbno] = [2]uint64{ss[i].GetStart(), ss[i].GetEnd()}
	}
	return nativeTs
}
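// ToTsVbuuid copies a sparse, position-indexed timestamp (only the vbuckets
// listed in Vbnos are present) into a dense, vbucket-indexed one. The
// self-contained sketch below shows the same sparse-to-dense mapping with
// simplified types; sparseTs/denseTs and their fields are illustrative only
// and do not mirror the real protobuf or common.TsVbuuid definitions.
package main

import "fmt"

type sparseTs struct {
	Vbnos  []uint32 // which vbuckets are present
	Seqnos []uint64 // Seqnos[i] belongs to vbucket Vbnos[i]
}

type denseTs struct {
	Seqnos []uint64 // indexed directly by vbucket number
}

// toDense spreads the position-indexed entries out to their vbucket slots,
// leaving absent vbuckets at zero.
func toDense(src sparseTs, maxVbuckets int) denseTs {
	dst := denseTs{Seqnos: make([]uint64, maxVbuckets)}
	for i, vbno := range src.Vbnos {
		dst.Seqnos[vbno] = src.Seqnos[i] // position i maps to vbucket vbno
	}
	return dst
}

func main() {
	src := sparseTs{Vbnos: []uint32{0, 3, 7}, Seqnos: []uint64{10, 20, 30}}
	fmt.Println(toDense(src, 8).Seqnos) // [10 0 0 20 0 0 0 30]
}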
//initBucketFilter initializes the bucket filter
func (r *mutationStreamReader) initBucketFilter(bucketFilter map[string]*common.TsVbuuid) {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	//allocate a new filter for the buckets which don't
	//have a filter yet
	for b, q := range r.bucketQueueMap {
		if _, ok := r.bucketFilterMap[b]; !ok {
			logging.Debugf("MutationStreamReader::initBucketFilter Added new filter "+
				"for Bucket %v Stream %v", b, r.streamId)

			//if there is a non-nil filter, use that. otherwise use a zero filter.
			if filter, ok := bucketFilter[b]; ok && filter != nil {
				r.bucketFilterMap[b] = filter.Copy()
				r.bucketPrevSnapMap[b] = filter.Copy()
				//reset vbuuids to 0 in the filter. mutations for a vbucket are
				//only processed after StreamBegin is received, which will set
				//the vbuuid again.
				for i := 0; i < len(filter.Vbuuids); i++ {
					r.bucketFilterMap[b].Vbuuids[i] = 0
				}
			} else {
				r.bucketFilterMap[b] = common.NewTsVbuuid(b, int(q.queue.GetNumVbuckets()))
				r.bucketPrevSnapMap[b] = common.NewTsVbuuid(b, int(q.queue.GetNumVbuckets()))
			}

			r.bucketSyncDue[b] = false
		}
	}

	//remove the bucket filters for which the bucket doesn't exist anymore
	for b := range r.bucketFilterMap {
		if _, ok := r.bucketQueueMap[b]; !ok {
			logging.Debugf("MutationStreamReader::initBucketFilter Deleted filter "+
				"for Bucket %v Stream %v", b, r.streamId)
			delete(r.bucketFilterMap, b)
			delete(r.bucketPrevSnapMap, b)
			delete(r.bucketSyncDue, b)
		}
	}
}
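// The comment above documents the gating behavior: once the filter's vbuuid is
// reset to 0, mutations for that vbucket are ignored until a StreamBegin restores
// the vbuuid. A minimal sketch of that idea follows, assuming a filter that tracks
// one vbuuid and one seqno per vbucket; the names and the exact check are
// illustrative, not the reader's actual implementation.
package main

import "fmt"

type vbFilter struct {
	vbuuids []uint64
	seqnos  []uint64
}

// onStreamBegin records the vbuuid, re-enabling mutation processing for vb.
func (f *vbFilter) onStreamBegin(vb int, vbuuid uint64) {
	f.vbuuids[vb] = vbuuid
}

// allow returns true only if the vbucket has an active vbuuid and the
// mutation moves the seqno forward.
func (f *vbFilter) allow(vb int, seqno uint64) bool {
	if f.vbuuids[vb] == 0 {
		return false // no StreamBegin seen yet; drop the mutation
	}
	if seqno <= f.seqnos[vb] {
		return false // duplicate or stale
	}
	f.seqnos[vb] = seqno
	return true
}

func main() {
	f := &vbFilter{vbuuids: make([]uint64, 4), seqnos: make([]uint64, 4)}
	fmt.Println(f.allow(1, 5)) // false: vbuuid still 0
	f.onStreamBegin(1, 1234)
	fmt.Println(f.allow(1, 5)) // true
	fmt.Println(f.allow(1, 5)) // false: seqno did not advance
}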
func (s *storageMgr) addNilSnapshot(idxInstId common.IndexInstId, bucket string) {
	if _, ok := s.indexSnapMap[idxInstId]; !ok {
		ts := common.NewTsVbuuid(bucket, s.config["numVbuckets"].Int())
		snap := &indexSnapshot{
			instId: idxInstId,
			ts:     ts, // nil snapshot should have ZERO Crc64 :)
			epoch:  true,
		}
		s.indexSnapMap[idxInstId] = snap
		s.notifySnapshotCreation(snap)
	}
}
func unmarshallTimestamp(str string) (*common.TsVbuuid, error) {
	data, err := base64.StdEncoding.DecodeString(str)
	if err != nil {
		return nil, err
	}

	source := new(protobuf.TsVbuuid)
	if err := proto.Unmarshal(data, source); err != nil {
		return nil, err
	}

	target := common.NewTsVbuuid(source.GetBucket(), NUM_VB)
	for _, vbno := range source.Vbnos {
		target.Seqnos[vbno] = source.Seqnos[vbno]
		target.Vbuuids[vbno] = source.Vbuuids[vbno]
	}

	return target, nil
}
func (s *scanCoordinator) newRequest(protoReq interface{},
	cancelCh <-chan interface{}) (r *ScanRequest, err error) {

	var indexInst *common.IndexInst
	r = new(ScanRequest)
	r.ScanId = platform.AddUint64(&s.reqCounter, 1)
	r.LogPrefix = fmt.Sprintf("SCAN##%d", r.ScanId)

	cfg := s.config.Load()
	timeout := time.Millisecond * time.Duration(cfg["settings.scan_timeout"].Int())
	if timeout != 0 {
		r.ExpiredTime = time.Now().Add(timeout)
		r.TimeoutCh = time.After(timeout)
	}

	r.CancelCh = cancelCh

	isNil := func(k []byte) bool {
		if len(k) == 0 || (!r.isPrimary && string(k) == "[]") {
			return true
		}
		return false
	}

	newKey := func(k []byte) (IndexKey, error) {
		if len(k) == 0 {
			return nil, fmt.Errorf("Key is null")
		}

		if r.isPrimary {
			return NewPrimaryKey(k)
		} else {
			return NewSecondaryKey(k)
		}
	}

	newLowKey := func(k []byte) (IndexKey, error) {
		if isNil(k) {
			return MinIndexKey, nil
		}

		return newKey(k)
	}

	newHighKey := func(k []byte) (IndexKey, error) {
		if isNil(k) {
			return MaxIndexKey, nil
		}

		return newKey(k)
	}

	fillRanges := func(low, high []byte, keys [][]byte) {
		var key IndexKey
		var localErr error
		defer func() {
			if err == nil {
				err = localErr
			}
		}()

		// range
		r.LowBytes = low
		r.HighBytes = high

		if r.Low, localErr = newLowKey(low); localErr != nil {
			localErr = fmt.Errorf("Invalid low key %s (%s)", string(low), localErr)
			return
		}

		if r.High, localErr = newHighKey(high); localErr != nil {
			localErr = fmt.Errorf("Invalid high key %s (%s)", string(high), localErr)
			return
		}

		// point query for keys
		for _, k := range keys {
			r.KeysBytes = append(r.KeysBytes, k)
			if key, localErr = newKey(k); localErr != nil {
				localErr = fmt.Errorf("Invalid equal key %s (%s)", string(k), localErr)
				return
			}
			r.Keys = append(r.Keys, key)
		}
	}

	setConsistency := func(
		cons common.Consistency, vector *protobuf.TsConsistency) {

		r.Consistency = &cons
		cfg := s.config.Load()
		if cons == common.QueryConsistency && vector != nil {
			r.Ts = common.NewTsVbuuid("", cfg["numVbuckets"].Int())
			// if vector == nil, it is similar to AnyConsistency
			for i, vbno := range vector.Vbnos {
				r.Ts.Seqnos[vbno] = vector.Seqnos[i]
				r.Ts.Vbuuids[vbno] = vector.Vbuuids[i]
			}

		} else if cons == common.SessionConsistency {
			r.Ts = common.NewTsVbuuid("", cfg["numVbuckets"].Int())
			r.Ts.Seqnos = vector.Seqnos // full set of seqnos.
			r.Ts.Crc64 = vector.GetCrc64()
		}
	}

	setIndexParams := func() {
		var localErr error
		defer func() {
			if err == nil {
				err = localErr
			}
		}()

		s.mu.RLock()
		defer s.mu.RUnlock()

		stats := s.stats.Get()
		indexInst, localErr = s.findIndexInstance(r.DefnID)
		if localErr == nil {
			r.isPrimary = indexInst.Defn.IsPrimary
			r.IndexName, r.Bucket = indexInst.Defn.Name, indexInst.Defn.Bucket
			r.IndexInstId = indexInst.InstId

			if r.Ts != nil {
				r.Ts.Bucket = r.Bucket
			}

			if indexInst.State != common.INDEX_STATE_ACTIVE {
				localErr = ErrIndexNotReady
			} else {
				r.Stats = stats.indexes[r.IndexInstId]
			}
		}
	}

	switch req := protoReq.(type) {
	case *protobuf.StatisticsRequest:
		r.DefnID = req.GetDefnID()
		r.ScanType = StatsReq
		r.Incl = Inclusion(req.GetSpan().GetRange().GetInclusion())
		setIndexParams()
		fillRanges(
			req.GetSpan().GetRange().GetLow(),
			req.GetSpan().GetRange().GetHigh(),
			req.GetSpan().GetEquals())
	case *protobuf.CountRequest:
		r.DefnID = req.GetDefnID()
		cons := common.Consistency(req.GetCons())
		vector := req.GetVector()
		setConsistency(cons, vector)
		r.ScanType = CountReq
		r.Incl = Inclusion(req.GetSpan().GetRange().GetInclusion())
		setIndexParams()
		fillRanges(
			req.GetSpan().GetRange().GetLow(),
			req.GetSpan().GetRange().GetHigh(),
			req.GetSpan().GetEquals())
	case *protobuf.ScanRequest:
		r.DefnID = req.GetDefnID()
		cons := common.Consistency(req.GetCons())
		vector := req.GetVector()
		setConsistency(cons, vector)
		r.ScanType = ScanReq
		r.Incl = Inclusion(req.GetSpan().GetRange().GetInclusion())
		setIndexParams()
		fillRanges(
			req.GetSpan().GetRange().GetLow(),
			req.GetSpan().GetRange().GetHigh(),
			req.GetSpan().GetEquals())
		r.Limit = req.GetLimit()
	case *protobuf.ScanAllRequest:
		r.DefnID = req.GetDefnID()
		cons := common.Consistency(req.GetCons())
		vector := req.GetVector()
		setConsistency(cons, vector)
		r.ScanType = ScanAllReq
		r.Limit = req.GetLimit()
		setIndexParams()
	default:
		err = ErrUnsupportedRequest
	}

	return
}
// This function gets the list of vb and seqno to repair the stream.
// Termination condition for stream repair:
// 1) All vb are in StreamBegin state
// 2) All vb have ref count == 1
// 3) There is no error in stream repair
func (ss *StreamState) getRepairTsForBucket(streamId common.StreamId,
	bucket string) (*common.TsVbuuid, bool, []Vbucket) {

	// always repair if the last repair was not successful
	anythingToRepair := ss.streamBucketRestartVbErrMap[streamId][bucket]

	numVbuckets := ss.config["numVbuckets"].Int()
	repairTs := common.NewTsVbuuid(bucket, numVbuckets)
	var shutdownVbs []Vbucket = nil
	var count = 0

	hwtTs := ss.streamBucketHWTMap[streamId][bucket]
	hasConnError := ss.hasConnectionError(streamId, bucket)

	// First step: Find out if there is any StreamEnd or ConnError on any vb.
	for i, s := range ss.streamBucketVbStatusMap[streamId][bucket] {
		if s == VBS_STREAM_END || s == VBS_CONN_ERROR {
			ss.addRepairTs(repairTs, hwtTs, Vbucket(i))
			count++
			anythingToRepair = true

			if hasConnError {
				// Make sure that we shutdown the vb for BOTH StreamEnd and
				// ConnErr. This covers the case where the indexer may miss
				// a StreamBegin from the new owner due to a connection error.
				// Dataport will not be able to tell the indexer that the vb
				// needs to start, since the StreamBegin never arrives.
				shutdownVbs = append(shutdownVbs, Vbucket(i))
			}
		}
	}

	// Second step: Find out if any StreamEnd is over the max retry limit. If so,
	// add it to shutdownVbs (for shutdown/restart). This is only needed if there
	// is no vb marked with a conn error, because a vb with StreamEnd would
	// already be in shutdownVbs if there is a connErr.
	if !hasConnError {
		for i, s := range ss.streamBucketVbStatusMap[streamId][bucket] {
			if s == VBS_STREAM_END {
				vbs := ss.streamBucketRestartVbRetryMap[streamId][bucket]
				vbs[i] = Seqno(int(vbs[i]) + 1)

				if int(vbs[i]) > REPAIR_RETRY_BEFORE_SHUTDOWN {
					logging.Debugf("StreamState::getRepairTsForBucket\n\t"+
						"Bucket %v StreamId %v Vbucket %v repair has been retried %v times.",
						bucket, streamId, i, vbs[i])
					ss.clearRestartVbRetry(streamId, bucket, Vbucket(i))
					shutdownVbs = append(shutdownVbs, Vbucket(i))
				}
			}
		}
	}

	// Third step: If there is nothing to repair, then double check that every vb
	// has exactly one owner. If not, the accounting is wrong (most likely due to
	// a connection error). Mark the vb as ConnErr and continue to repair.
	// Note: This will also take care of vb in VBS_INIT state.
	if !anythingToRepair {
		for i := range ss.streamBucketVbStatusMap[streamId][bucket] {
			refCount := ss.streamBucketVbRefCountMap[streamId][bucket][i]
			if refCount != 1 {
				logging.Debugf("StreamState::getRepairTsForBucket\n\t"+
					"Bucket %v StreamId %v Vbucket %v has ref count (%v != 1). Convert to CONN_ERROR.",
					bucket, streamId, i, refCount)
				// Make it a ConnErr such that a subsequent retry will
				// force a shutdown/restart sequence.
				ss.makeConnectionError(streamId, bucket, Vbucket(i))
				ss.addRepairTs(repairTs, hwtTs, Vbucket(i))
				count++
				shutdownVbs = append(shutdownVbs, Vbucket(i))
				anythingToRepair = true
			}
		}
	}

	// Fourth step: If there is something to repair, but the indexer has received
	// StreamBegin for all vb, then retry with the last timestamp.
	if anythingToRepair && count == 0 {
		logging.Debugf("StreamState::getRepairTsForBucket\n\t"+
			"Bucket %v StreamId %v previous repair fails. Retry using previous repairTs",
			bucket, streamId)

		ts := ss.streamBucketRestartVbTsMap[streamId][bucket]
		if ts != nil {
			repairTs = ts.Copy()
		} else {
			repairTs = hwtTs.Copy()
		}

		shutdownVbs = nil
		vbnos := repairTs.GetVbnos()
		for _, vbno := range vbnos {
			shutdownVbs = append(shutdownVbs, Vbucket(vbno))
		}
	}

	if !anythingToRepair {
		ss.streamBucketRestartVbTsMap[streamId][bucket] = nil
		ss.clearRestartVbError(streamId, bucket)
	} else {
		ss.streamBucketRestartVbTsMap[streamId][bucket] = repairTs.Copy()
	}

	ss.adjustNonSnapAlignedVbs(repairTs, streamId, bucket)

	return repairTs, anythingToRepair, shutdownVbs
}
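// The first step above scans the per-vbucket status and, for every vbucket in
// StreamEnd or ConnError state, adds it to the repair timestamp and (when a
// connection error is present) to the shutdown list. A self-contained sketch of
// just that selection logic follows, using simplified status constants and return
// values; the names below are illustrative and not the indexer's types.
package main

import "fmt"

const (
	vbActive = iota
	vbStreamEnd
	vbConnError
)

// selectRepairVbs returns the vbuckets that need repair and, when a connection
// error is present, the vbuckets that must also be shut down before restart.
func selectRepairVbs(status []int, hasConnError bool) (repair, shutdown []int) {
	for vb, s := range status {
		if s == vbStreamEnd || s == vbConnError {
			repair = append(repair, vb)
			if hasConnError {
				shutdown = append(shutdown, vb)
			}
		}
	}
	return
}

func main() {
	status := []int{vbActive, vbStreamEnd, vbActive, vbConnError}
	repair, shutdown := selectRepairVbs(status, true)
	fmt.Println(repair, shutdown) // [1 3] [1 3]
}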