Example No. 1
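//handleUpdateIndexInstMap refreshes the scan coordinator's stats object and
//its copy of the index instance map, then acks the supervisor.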
func (s *scanCoordinator) handleUpdateIndexInstMap(cmd Message) {
	s.mu.Lock()
	defer s.mu.Unlock()

	req := cmd.(*MsgUpdateInstMap)
	logging.Tracef("ScanCoordinator::handleUpdateIndexInstMap %v", cmd)
	indexInstMap := req.GetIndexInstMap()
	s.stats.Set(req.GetStatsObject())
	s.indexInstMap = common.CopyIndexInstMap(indexInstMap)

	s.supvCmdch <- &MsgSuccess{}
}
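Each of these handlers follows the same supervisor-command shape: take the component's lock, copy the incoming map so the component keeps its own private snapshot, and acknowledge on the supervisor command channel. Below is a minimal, self-contained sketch of that shape; the Message, updateCmd, and worker types are illustrative stand-ins, not the indexer's own types.

package main

import (
	"fmt"
	"sync"
)

// Message is a stand-in for the indexer's Message interface.
type Message interface{}

// updateCmd carries a new instance map from the supervisor (hypothetical).
type updateCmd struct {
	instMap map[uint64]string
}

// worker mimics a component that owns a private copy of the instance map.
type worker struct {
	mu        sync.Mutex
	instMap   map[uint64]string
	supvCmdch chan Message // supervisor waits here for the ack
}

// handleUpdate mirrors the handlers above: lock, copy, ack.
func (w *worker) handleUpdate(cmd Message) {
	w.mu.Lock()
	defer w.mu.Unlock()

	req := cmd.(*updateCmd)

	// Copy the map so later changes by the sender cannot race with readers.
	cp := make(map[uint64]string, len(req.instMap))
	for k, v := range req.instMap {
		cp[k] = v
	}
	w.instMap = cp

	w.supvCmdch <- struct{}{} // ack, analogous to &MsgSuccess{}
}

func main() {
	w := &worker{supvCmdch: make(chan Message, 1)}
	w.handleUpdate(&updateCmd{instMap: map[uint64]string{1: "idx_name"}})
	<-w.supvCmdch
	fmt.Println("instance map updated:", w.instMap)
}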
Example No. 2
//Persist will keep flushing the mutation queue until the caller closes
//the stop channel.  This function will be used when:
//1. Flushing Backfill Catchup Queue
//
//Can be stopped anytime by closing the StopChannel.
//Any error condition is reported back on the MsgChannel.
//Caller can wait on MsgChannel after closing StopChannel to get notified
//about shutdown completion.
func (f *flusher) Persist(q MutationQueue, streamId common.StreamId,
	bucket string, indexInstMap common.IndexInstMap, indexPartnMap IndexPartnMap,
	stopch StopChannel) MsgChannel {

	logging.Infof("Flusher::Persist %v %v", streamId, bucket)

	f.indexInstMap = common.CopyIndexInstMap(indexInstMap)
	f.indexPartnMap = CopyIndexPartnMap(indexPartnMap)

	msgch := make(MsgChannel)
	go f.flushQueue(q, streamId, bucket, nil, nil, true, stopch, msgch)
	return msgch
}
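The comment above describes a stop-channel contract: the flusher runs until the caller closes the stop channel and reports back on the returned message channel. The sketch below illustrates that contract in isolation; flushForever and its string messages are hypothetical stand-ins for the real flushQueue goroutine and its MsgChannel payloads.

package main

import (
	"fmt"
	"time"
)

// flushForever is a hypothetical stand-in for the goroutine started by
// Persist: it keeps working until stopch is closed, then signals completion
// on msgch.
func flushForever(stopch chan struct{}, msgch chan string) {
	for {
		select {
		case <-stopch:
			msgch <- "shutdown complete" // analogous to a message on MsgChannel
			return
		default:
			time.Sleep(10 * time.Millisecond) // flush one batch (simulated)
		}
	}
}

func main() {
	stopch := make(chan struct{})
	msgch := make(chan string)
	go flushForever(stopch, msgch)

	time.Sleep(50 * time.Millisecond) // let the flusher run for a while

	// Per the Persist comment: close StopChannel, then wait on MsgChannel
	// to be notified that shutdown completed.
	close(stopch)
	fmt.Println(<-msgch)
}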
Example No. 3
//handleUpdateIndexInstMap updates the mutation manager's copy of the
//indexInstMap and refreshes its stats object
func (m *mutationMgr) handleUpdateIndexInstMap(cmd Message) {

	logging.Infof("MutationMgr::handleUpdateIndexInstMap %v", cmd)

	m.lock.Lock()
	defer m.lock.Unlock()

	req := cmd.(*MsgUpdateInstMap)
	indexInstMap := req.GetIndexInstMap()
	m.indexInstMap = common.CopyIndexInstMap(indexInstMap)
	m.stats.Set(req.GetStatsObject())
	m.supvCmdch <- &MsgSuccess{}

}
Example No. 4
//PersistUptoTS will flush the mutation queue up to the
//Timestamp provided.  This function will be used when:
//1. Flushing Maintenance Queue
//2. Flushing Maintenance Catchup Queue
//3. Flushing Backfill Queue
//
//Can be stopped anytime by closing StopChannel.
//Sends SUCCESS on the MsgChannel when it is done flushing up to the timestamp.
//Any error condition is reported back on the MsgChannel.
//Caller can wait on MsgChannel after closing StopChannel to get notified
//about shutdown completion.
func (f *flusher) PersistUptoTS(q MutationQueue, streamId common.StreamId,
	bucket string, indexInstMap common.IndexInstMap, indexPartnMap IndexPartnMap,
	ts Timestamp, changeVec []bool, stopch StopChannel) MsgChannel {

	logging.Debugf("Flusher::PersistUptoTS %v %v Timestamp: %v",
		streamId, bucket, ts)

	f.indexInstMap = common.CopyIndexInstMap(indexInstMap)
	f.indexPartnMap = CopyIndexPartnMap(indexPartnMap)

	msgch := make(MsgChannel)
	go f.flushQueue(q, streamId, bucket, ts, changeVec, true, stopch, msgch)
	return msgch
}
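Unlike Persist, PersistUptoTS flushes a bounded amount of work and then reports SUCCESS (or an error) on the message channel. A minimal sketch of that bounded-flush contract, with flushUptoSeq and the result type as hypothetical stand-ins for flushQueue and the indexer's message types:

package main

import (
	"errors"
	"fmt"
)

// result is a hypothetical stand-in for the success/error messages that
// PersistUptoTS reports back on its MsgChannel.
type result struct {
	err error // nil means SUCCESS
}

// flushUptoSeq flushes sequence numbers 1..ts unless stopch is closed first,
// then reports SUCCESS or the failure on msgch.
func flushUptoSeq(ts uint64, stopch chan struct{}, msgch chan result) {
	for seq := uint64(1); seq <= ts; seq++ {
		select {
		case <-stopch:
			msgch <- result{err: errors.New("aborted before reaching ts")}
			return
		default:
			// persist the mutation at this sequence number (simulated)
		}
	}
	msgch <- result{} // SUCCESS: everything up to ts is flushed
}

func main() {
	stopch := make(chan struct{})
	msgch := make(chan result)
	go flushUptoSeq(1000, stopch, msgch)

	// The caller could close(stopch) to abort early; here we just wait.
	if r := <-msgch; r.err != nil {
		fmt.Println("flush failed:", r.err)
	} else {
		fmt.Println("flush up to timestamp done")
	}
}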
Example No. 5
//handleCreateSnapshot will create the necessary snapshots
//after flush has completed
func (s *storageMgr) handleCreateSnapshot(cmd Message) {

	s.supvCmdch <- &MsgSuccess{}

	logging.Tracef("StorageMgr::handleCreateSnapshot %v", cmd)

	msgFlushDone := cmd.(*MsgMutMgrFlushDone)

	bucket := msgFlushDone.GetBucket()
	tsVbuuid := msgFlushDone.GetTS()
	streamId := msgFlushDone.GetStreamId()

	numVbuckets := s.config["numVbuckets"].Int()
	snapType := tsVbuuid.GetSnapType()
	tsVbuuid.Crc64 = common.HashVbuuid(tsVbuuid.Vbuuids)

	if snapType == common.NO_SNAP {
		logging.Infof("StorageMgr::handleCreateSnapshot Skip Snapshot For %v "+
			"%v SnapType %v", streamId, bucket, snapType)

		s.supvRespch <- &MsgMutMgrFlushDone{mType: STORAGE_SNAP_DONE,
			streamId: streamId,
			bucket:   bucket,
			ts:       tsVbuuid}
		return
	}

	s.muSnap.Lock()
	defer s.muSnap.Unlock()

	//pass copy of maps to worker
	indexSnapMap := copyIndexSnapMap(s.indexSnapMap)
	indexInstMap := common.CopyIndexInstMap(s.indexInstMap)
	indexPartnMap := CopyIndexPartnMap(s.indexPartnMap)
	stats := s.stats.Get()

	go s.createSnapshotWorker(streamId, bucket, tsVbuuid, indexSnapMap,
		numVbuckets, indexInstMap, indexPartnMap, stats)

}
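The handler holds muSnap only long enough to copy the shared maps, then hands those copies to createSnapshotWorker so the goroutine never touches state that other handlers may mutate. A self-contained sketch of that copy-then-spawn pattern, with snapshotMgr and its map as hypothetical stand-ins:

package main

import (
	"fmt"
	"sync"
)

// snapshotMgr is a hypothetical stand-in for storageMgr: it owns shared
// state that must not be handed directly to background goroutines.
type snapshotMgr struct {
	mu    sync.Mutex
	state map[string]int
}

// createSnapshotAsync copies the shared map while holding the lock, then
// gives the private copy to a worker goroutine, mirroring how
// handleCreateSnapshot copies its maps before calling createSnapshotWorker.
func (s *snapshotMgr) createSnapshotAsync(done chan struct{}) {
	s.mu.Lock()
	cp := make(map[string]int, len(s.state))
	for k, v := range s.state {
		cp[k] = v
	}
	s.mu.Unlock()

	go func() {
		// The worker reads only its copy, so other handlers can keep
		// mutating s.state without a data race.
		fmt.Println("snapshotting", len(cp), "entries")
		close(done)
	}()
}

func main() {
	s := &snapshotMgr{state: map[string]int{"idx1": 100, "idx2": 200}}
	done := make(chan struct{})
	s.createSnapshotAsync(done)
	<-done
}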
Example No. 6
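//handleUpdateIndexInstMap updates the storage manager's copy of the index
//instance map, fails snapshot waiters and drops snapshots of deleted indexes,
//and persists the map to the metadata store when the manager is disabled.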
func (s *storageMgr) handleUpdateIndexInstMap(cmd Message) {

	logging.Tracef("StorageMgr::handleUpdateIndexInstMap %v", cmd)
	req := cmd.(*MsgUpdateInstMap)
	indexInstMap := req.GetIndexInstMap()
	s.stats.Set(req.GetStatsObject())
	s.indexInstMap = common.CopyIndexInstMap(indexInstMap)

	s.muSnap.Lock()
	defer s.muSnap.Unlock()

	// Remove all snapshot waiters for indexes that do not exist anymore
	for id, ws := range s.waitersMap {
		if inst, ok := s.indexInstMap[id]; !ok ||
			inst.State == common.INDEX_STATE_DELETED {
			for _, w := range ws {
				w.Error(ErrIndexNotFound)
			}
			delete(s.waitersMap, id)
		}
	}

	// Clean up the snapshots of all invalid (deleted or missing) indexes
	for idxInstId, is := range s.indexSnapMap {
		if inst, ok := s.indexInstMap[idxInstId]; !ok ||
			inst.State == common.INDEX_STATE_DELETED {
			DestroyIndexSnapshot(is)
			delete(s.indexSnapMap, idxInstId)
			s.notifySnapshotDeletion(idxInstId)
		}
	}

	// Add zero-item index snapshots for newly added indexes
	for idxInstId, inst := range s.indexInstMap {
		s.addNilSnapshot(idxInstId, inst.Defn.Bucket)
	}

	//if the manager is not enabled, store the updated InstMap in
	//the meta file
	if !s.config["enableManager"].Bool() {

		instMap := common.CopyIndexInstMap(s.indexInstMap)

		for id, inst := range instMap {
			inst.Pc = nil
			instMap[id] = inst
		}

		//store indexInstMap in metadata store
		var instBytes bytes.Buffer
		var err error

		enc := gob.NewEncoder(&instBytes)
		err = enc.Encode(instMap)
		if err != nil {
			logging.Errorf("StorageMgr::handleUpdateIndexInstMap \n\t Error Marshalling "+
				"IndexInstMap %v. Err %v", instMap, err)
		}

		if err = s.meta.SetKV([]byte(INST_MAP_KEY_NAME), instBytes.Bytes()); err != nil {
			logging.Errorf("StorageMgr::handleUpdateIndexInstMap \n\tError "+
				"Storing IndexInstMap %v", err)
		}

		s.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
	}

	s.supvCmdch <- &MsgSuccess{}
}
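When the manager is disabled, the handler serializes the instance map with encoding/gob and writes the bytes to the metadata store under INST_MAP_KEY_NAME. The sketch below shows the same gob round trip on a simplified map; instInfo is a hypothetical stand-in for an index instance entry, and the buffer stands in for the meta store.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// instInfo is a simplified, hypothetical stand-in for an index instance entry.
type instInfo struct {
	Name  string
	State int
}

func main() {
	instMap := map[uint64]instInfo{
		1: {Name: "idx_name", State: 2},
	}

	// Encode the map with gob, as handleUpdateIndexInstMap does before
	// writing the bytes to the metadata store.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(instMap); err != nil {
		log.Fatalf("error marshalling inst map: %v", err)
	}

	// On recovery the stored bytes decode back into the same map.
	var restored map[uint64]instInfo
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&restored); err != nil {
		log.Fatalf("error unmarshalling inst map: %v", err)
	}
	fmt.Println(restored[1].Name) // idx_name
}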