Example #1
//updateHWT will update the HW Timestamp for a bucket in the stream
//based on the Sync message received.
func (ss *StreamState) updateHWT(streamId common.StreamId,
	bucket string, hwt *common.TsVbuuid) {

	ts := ss.streamBucketHWTMap[streamId][bucket]

	for i, seq := range hwt.Seqnos {
		//if seqno has incremented, update it
		if seq > ts.Seqnos[i] {
			ts.Seqnos[i] = seq
			ss.streamBucketNewTsReqdMap[streamId][bucket] = true
		}
		//if snapEnd is greater than the current hwt snapEnd, update the snapshot marker
		if hwt.Snapshots[i][1] > ts.Snapshots[i][1] {
			lastSnap := ss.streamBucketLastSnapMarker[streamId][bucket]
			//store the current snap marker in the lastSnapMarker map
			lastSnap.Snapshots[i][0] = ts.Snapshots[i][0]
			lastSnap.Snapshots[i][1] = ts.Snapshots[i][1]

			//store the new snap marker in hwt
			ts.Snapshots[i][0] = hwt.Snapshots[i][0]
			ts.Snapshots[i][1] = hwt.Snapshots[i][1]
			ss.streamBucketNewTsReqdMap[streamId][bucket] = true
		}
	}

	logging.LazyTrace(func() string {
		return fmt.Sprintf("StreamState::updateHWT HWT Updated : %v", ts)
	})
}
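For reference, a minimal standalone sketch of the same high-watermark merge idea: per-vbucket seqnos only move forward, and when a newer snapshot marker arrives, the marker being replaced is remembered. The vbTimestamp type and mergeHWT function below are simplified stand-ins, not the indexer's common.TsVbuuid or the stream-state maps.

package main

import "fmt"

type vbTimestamp struct {
	Seqnos    []uint64
	Snapshots [][2]uint64 // [start, end] snapshot marker per vbucket
}

// mergeHWT folds an incoming timestamp into the stored high watermark:
// seqnos only move forward, and when a newer snapshot marker arrives the
// marker being replaced is saved in lastSnap. It reports whether anything changed.
func mergeHWT(hwt, incoming, lastSnap *vbTimestamp) (changed bool) {
	for i, seq := range incoming.Seqnos {
		if seq > hwt.Seqnos[i] {
			hwt.Seqnos[i] = seq
			changed = true
		}
		if incoming.Snapshots[i][1] > hwt.Snapshots[i][1] {
			lastSnap.Snapshots[i] = hwt.Snapshots[i] // keep the previous marker
			hwt.Snapshots[i] = incoming.Snapshots[i]
			changed = true
		}
	}
	return changed
}

func main() {
	hwt := &vbTimestamp{Seqnos: []uint64{10, 20}, Snapshots: [][2]uint64{{0, 10}, {0, 20}}}
	last := &vbTimestamp{Seqnos: make([]uint64, 2), Snapshots: make([][2]uint64, 2)}
	incoming := &vbTimestamp{Seqnos: []uint64{15, 18}, Snapshots: [][2]uint64{{10, 15}, {0, 20}}}
	fmt.Println(mergeHWT(hwt, incoming, last), hwt.Seqnos, hwt.Snapshots, last.Snapshots)
}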
Example #2
//handleSingleMutation enqueues the mutation in the mutation queue
func (r *mutationStreamReader) handleSingleMutation(mut *MutationKeys, stopch StopChannel) {

	logging.LazyTrace(func() string {
		return fmt.Sprintf("MutationStreamReader::handleSingleMutation received mutation %v", mut)
	})

	//based on the bucket, enqueue the mutation in the right queue
	if q, ok := r.bucketQueueMap[mut.meta.bucket]; ok {
		q.queue.Enqueue(mut, mut.meta.vbucket, stopch)

		stats := r.stats.Get()
		if rstats, ok := stats.buckets[mut.meta.bucket]; ok {
			rstats.mutationQueueSize.Add(1)
			rstats.numMutationsQueued.Add(1)
		}

	} else {
		logging.Warnf("MutationStreamReader::handleSingleMutation got mutation for "+
			"unknown bucket. Skipped  %v", mut)
	}

}
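A compact, self-contained sketch of the same per-bucket routing and stats pattern; bucketQueue, readerStats, and the mutation struct below are hypothetical simplifications of the indexer's MutationQueue and bucket stats, not the real types.

package main

import (
	"fmt"
	"sync/atomic"
)

type mutation struct {
	bucket  string
	vbucket int
	docid   string
}

type bucketQueue struct {
	ch chan *mutation
}

type readerStats struct {
	mutationQueueSize  int64
	numMutationsQueued int64
}

type streamReader struct {
	bucketQueueMap map[string]*bucketQueue
	stats          map[string]*readerStats
}

// handleSingleMutation routes the mutation to its bucket's queue and bumps the
// per-bucket counters; mutations for unknown buckets are dropped with a warning.
func (r *streamReader) handleSingleMutation(mut *mutation) {
	q, ok := r.bucketQueueMap[mut.bucket]
	if !ok {
		fmt.Printf("unknown bucket, skipped %+v\n", mut)
		return
	}
	q.ch <- mut
	if s, ok := r.stats[mut.bucket]; ok {
		atomic.AddInt64(&s.mutationQueueSize, 1)
		atomic.AddInt64(&s.numMutationsQueued, 1)
	}
}

func main() {
	r := &streamReader{
		bucketQueueMap: map[string]*bucketQueue{"default": {ch: make(chan *mutation, 8)}},
		stats:          map[string]*readerStats{"default": {}},
	}
	r.handleSingleMutation(&mutation{bucket: "default", vbucket: 3, docid: "doc1"})
	r.handleSingleMutation(&mutation{bucket: "unknown-bucket", vbucket: 7, docid: "doc2"})
	fmt.Println("queued:", atomic.LoadInt64(&r.stats["default"].numMutationsQueued))
}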
Example #3
//flushSingleVbucketUptoSeqno is the actual implementation which flushes the given queue
//for a single vbucket till the given seqno or till the stop signal (whichever is earlier)
func (f *flusher) flushSingleVbucketUptoSeqno(q MutationQueue, streamId common.StreamId,
	bucket string, vbucket Vbucket, seqno Seqno, persist bool, stopch StopChannel,
	workerMsgCh MsgChannel, wg *sync.WaitGroup) {

	defer wg.Done()

	logging.LazyTrace(func() string {
		return fmt.Sprintf("Flusher::flushSingleVbucketUptoSeqno Started worker to flush vbucket: "+
			"%v till Seqno: %v for Stream: %v", vbucket, seqno, streamId)
	})

	mutch, err := q.DequeueUptoSeqno(vbucket, seqno)
	if err != nil {
		//TODO
	}

	ok := true
	var mut *MutationKeys
	bucketStats := f.stats.buckets[bucket]

	//Read till the channel is closed by the queue, indicating it has sent all the
	//sequence numbers requested
	for ok {
		select {
		case mut, ok = <-mutch:
			if ok {
				if !persist {
					//No persistence is required. Just skip this mutation.
					continue
				}
				f.flushSingleMutation(mut, streamId)
				mut.Free()
				if bucketStats != nil {
					bucketStats.mutationQueueSize.Add(-1)
				}
			}
		}
	}
}
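The loop above is the usual drain-until-closed pattern: the queue closes the channel once it has delivered everything up to the requested seqno, and the reader loops until the receive reports the channel closed. A self-contained sketch of that pattern, with dequeueUptoSeqno as an illustrative stand-in rather than the real MutationQueue API:

package main

import "fmt"

type item struct{ seqno uint64 }

// dequeueUptoSeqno emulates a queue handing back a channel that yields items
// up to the given seqno and is then closed by the producer.
func dequeueUptoSeqno(items []item, seqno uint64) <-chan item {
	ch := make(chan item)
	go func() {
		defer close(ch) // closing signals "all requested seqnos delivered"
		for _, it := range items {
			if it.seqno > seqno {
				return
			}
			ch <- it
		}
	}()
	return ch
}

func main() {
	ch := dequeueUptoSeqno([]item{{1}, {2}, {3}, {4}}, 3)
	flushed := 0
	var last uint64
	for it := range ch { // loop ends when the producer closes the channel
		last = it.seqno
		flushed++
	}
	fmt.Printf("flushed %d mutations, last seqno %d\n", flushed, last)
}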
Example #4
func (f *flusher) flush(mutk *MutationKeys, streamId common.StreamId) {

	logging.LazyTrace(func() string {
		return fmt.Sprintf("Flusher::flush Flushing Stream %v Mutations %v", streamId, mutk)
	})

	var processedUpserts []common.IndexInstId
	for _, mut := range mutk.mut {

		var idxInst common.IndexInst
		var ok bool
		if idxInst, ok = f.indexInstMap[mut.uuid]; !ok {
			logging.LazyTrace(func() string {
				return fmt.Sprintf("Flusher::flush Unknown Index Instance Id %v. "+
					"Skipped Mutation Key %v", mut.uuid, mut.key)
			})
			continue
		}

		//Skip this mutation if the index doesn't belong to the stream being flushed
		if streamId != idxInst.Stream && streamId != common.CATCHUP_STREAM {
			logging.LazyTrace(func() string {
				return fmt.Sprintf("Flusher::flush \n\tFound Mutation For IndexId: %v Stream: %v In "+
					"Stream: %v. Skipped Mutation Key %v", idxInst.InstId, idxInst.Stream,
					streamId, mut.key)
			})
			continue
		}

		//Skip mutations for indexes in DELETED state. This may happen if cleanup
		//couldn't complete when processing drop index.
		if idxInst.State == common.INDEX_STATE_DELETED {
			logging.LazyTrace(func() string {
				return fmt.Sprintf("Flusher::flush \n\tFound Mutation For IndexId: %v In "+
					"DELETED State. Skipped Mutation Key %v", idxInst.InstId, mut.key)
			})
			continue
		}

		switch mut.command {

		case common.Upsert:
			processedUpserts = append(processedUpserts, mut.uuid)

			f.processUpsert(mut, mutk.docid)

		case common.Deletion:
			f.processDelete(mut, mutk.docid)

		case common.UpsertDeletion:

			var skipUpsertDeletion bool
			//if Upsert has been processed for this IndexInstId,
			//skip processing UpsertDeletion
			for _, id := range processedUpserts {
				if id == mut.uuid {
					skipUpsertDeletion = true
				}
			}

			if skipUpsertDeletion {
				continue
			} else {
				f.processDelete(mut, mutk.docid)
			}

		default:
			logging.Errorf("Flusher::flush Unknown mutation type received. Skipped %v",
				mut.key)
		}
	}
}
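A minimal sketch of the dispatch logic above, focused on the UpsertDeletion rule: if an Upsert for the same index instance was already processed for this document, the UpsertDeletion is skipped. The command constants and keyMutation type below are illustrative stand-ins, not the indexer's types.

package main

import "fmt"

const (
	cmdUpsert = iota
	cmdDeletion
	cmdUpsertDeletion
)

type keyMutation struct {
	instId  uint64
	command int
}

// flushDoc processes one document's key mutations, skipping an UpsertDeletion
// when an Upsert was already processed for the same index instance.
func flushDoc(docid string, muts []keyMutation) {
	var processedUpserts []uint64
	for _, m := range muts {
		switch m.command {
		case cmdUpsert:
			processedUpserts = append(processedUpserts, m.instId)
			fmt.Printf("upsert doc %s into index %d\n", docid, m.instId)
		case cmdDeletion:
			fmt.Printf("delete doc %s from index %d\n", docid, m.instId)
		case cmdUpsertDeletion:
			skip := false
			for _, id := range processedUpserts {
				if id == m.instId {
					skip = true
					break
				}
			}
			if skip {
				continue
			}
			fmt.Printf("upsert-deletion: delete doc %s from index %d\n", docid, m.instId)
		default:
			fmt.Printf("unknown command, skipped doc %s\n", docid)
		}
	}
}

func main() {
	flushDoc("doc1", []keyMutation{
		{instId: 1, command: cmdUpsert},
		{instId: 1, command: cmdUpsertDeletion}, // skipped: upsert already processed
		{instId: 2, command: cmdUpsertDeletion}, // processed as a delete
	})
}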
Example #5
//handleSingleKeyVersion processes a single mutation based on the command type.
//A mutation is put in a worker queue and a control message is sent to the supervisor.
func (r *mutationStreamReader) handleSingleKeyVersion(bucket string, vbucket Vbucket, vbuuid Vbuuid,
	kv *protobuf.KeyVersions) {

	meta := NewMutationMeta()
	meta.bucket = bucket
	meta.vbucket = vbucket
	meta.vbuuid = vbuuid
	meta.seqno = Seqno(kv.GetSeqno())

	defer meta.Free()

	var mutk *MutationKeys
	r.skipMutation = false
	r.evalFilter = true

	logging.LazyTrace(func() string {
		return fmt.Sprintf("MutationStreamReader::handleSingleKeyVersion received KeyVersions %v", kv)
	})

	for i, cmd := range kv.GetCommands() {

		//based on the type of command take appropriate action
		switch byte(cmd) {

		//case protobuf.Command_Upsert, protobuf.Command_Deletion, protobuf.Command_UpsertDeletion:
		case common.Upsert, common.Deletion, common.UpsertDeletion:

			//As there can be multiple keys in a KeyVersion for a mutation,
			//the filter needs to be evaluated and set only once.
			if r.evalFilter {
				r.evalFilter = false
				//check the bucket filter to see if this mutation can be processed
				//valid mutation will increment seqno of the filter
				if !r.checkAndSetBucketFilter(meta) {
					r.skipMutation = true
				}
			}

			if r.skipMutation {
				continue
			}

			r.logReaderStat()

			//allocate new mutation first time
			if mutk == nil {
				//TODO use free list here to reuse the struct and reduce garbage
				mutk = NewMutationKeys()
				mutk.meta = meta.Clone()
				mutk.docid = kv.GetDocid()
				mutk.mut = mutk.mut[:0]
			}

			mut := NewMutation()
			mut.uuid = common.IndexInstId(kv.GetUuids()[i])
			mut.key = kv.GetKeys()[i]
			mut.command = byte(kv.GetCommands()[i])

			mutk.mut = append(mutk.mut, mut)

		case common.DropData:
			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_DROP_DATA,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.StreamBegin:

			r.updateVbuuidInFilter(meta)

			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_BEGIN,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.StreamEnd:
			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_END,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.Snapshot:
			//get snapshot information from message
			r.snapType, r.snapStart, r.snapEnd = kv.Snapshot()

			// Snapshot markers can be processed only if
			// they belong to the on-disk or in-memory type.
			if r.snapType&(0x1|0x2) != 0 {
				r.updateSnapInFilter(meta, r.snapStart, r.snapEnd)
			}

		}
	}

	//place the mutation keys in the right worker's queue
	if mutk != nil {
		r.workerch[int(vbucket)%r.numWorkers] <- mutk
	}

}
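The final dispatch routes each assembled mutation to a worker channel chosen by vbucket modulo the worker count, which keeps all mutations of a given vbucket on the same worker. A small self-contained sketch of that routing, with illustrative names only:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numWorkers = 4
	workerch := make([]chan int, numWorkers) // each entry carries a vbucket id
	var wg sync.WaitGroup

	// Start one goroutine per worker channel.
	for w := 0; w < numWorkers; w++ {
		workerch[w] = make(chan int, 16)
		wg.Add(1)
		go func(id int, ch <-chan int) {
			defer wg.Done()
			for vb := range ch {
				fmt.Printf("worker %d handling mutation for vbucket %d\n", id, vb)
			}
		}(w, workerch[w])
	}

	// Route mutations: a fixed vbucket always maps to the same worker.
	for _, vbucket := range []int{0, 5, 9, 5, 1023} {
		workerch[vbucket%numWorkers] <- vbucket
	}

	for _, ch := range workerch {
		close(ch)
	}
	wg.Wait()
}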