Example #1
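// GetNextIndexInstId returns a new, unique IndexInstId derived from a freshly
// generated UUID.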
func (c *MetadataRepo) GetNextIndexInstId() (common.IndexInstId, error) {

	id, err := common.NewUUID()
	if err != nil {
		return common.IndexInstId(0), err
	}

	return common.IndexInstId(id.Uint64()), nil
}
Example #2
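// OnIndexBuild asks the indexer to build the given index definitions on the given
// buckets. It blocks until a response arrives on the admin channel and returns a
// map of per-instance build errors (empty if the build was accepted cleanly).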
func (meta *metaNotifier) OnIndexBuild(indexDefnList []common.IndexDefnId, buckets []string) map[common.IndexInstId]error {

	logging.Infof("clustMgrAgent::OnIndexBuild Notification "+
		"Received for Build Index %v", indexDefnList)

	respCh := make(MsgChannel)

	var indexInstList []common.IndexInstId
	for _, defnId := range indexDefnList {
		indexInstList = append(indexInstList, common.IndexInstId(defnId))
	}

	meta.adminCh <- &MsgBuildIndex{indexInstList: indexInstList,
		respCh:     respCh,
		bucketList: buckets}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case CLUST_MGR_BUILD_INDEX_DDL_RESPONSE:
			errMap := res.(*MsgBuildIndexResponse).GetErrorMap()
			logging.Infof("clustMgrAgent::OnIndexBuild returns "+
				"for Build Index %v", indexDefnList)
			return errMap

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexBuild Error "+
				"for Build Index %v. Error %v.", indexDefnList, res)
			err := res.(*MsgError).GetError()
			errMap := make(map[common.IndexInstId]error)
			for _, instId := range indexDefnList {
				errMap[common.IndexInstId(instId)] = errors.New(err.String())
			}
			return errMap

		default:
			logging.Fatalf("clustMgrAgent::OnIndexBuild Unknown Response "+
				"Received for Build Index %v. Response %v", indexDefnList, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexBuild Unexpected Channel Close "+
			"for Create Index %v", indexDefnList)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
Example #3
// Create a mock index that uses a feeder function to provide query results.
// Creates an index with single partition, single slice with a snapshot.
func (s *scannerTestHarness) createIndex(name, bucket string, feeder snapshotFeeder) c.IndexDefnId {
	s.indexCount++

	pc := c.NewKeyPartitionContainer()
	pId := c.PartitionId(0)
	endpt := c.Endpoint("localhost:1000")
	pDef := c.KeyPartitionDefn{Id: pId, Endpts: []c.Endpoint{endpt}}
	pc.AddPartition(pId, pDef)

	instId := c.IndexInstId(s.indexCount)
	defnId := c.IndexDefnId(0xABBA)
	indDefn := c.IndexDefn{Name: name, Bucket: bucket, DefnId: defnId}
	indInst := c.IndexInst{InstId: instId, State: c.INDEX_STATE_ACTIVE,
		Defn: indDefn, Pc: pc,
	}
	// TODO: Use cmdch to update map
	s.scanner.indexInstMap[instId] = indInst

	sc := NewHashedSliceContainer()
	partInst := PartitionInst{Defn: pDef, Sc: sc}
	partInstMap := PartitionInstMap{pId: partInst}

	snap := &mockSnapshot{feeder: feeder}
	snap.SetTimestamp(s.scanTS)

	slice := &mockSlice{}
	slId := SliceId(0)
	sc.AddSlice(slId, slice)
	// TODO: Use cmdch to update map
	s.scanner.indexPartnMap[instId] = partInstMap
	return defnId
}
Example #4
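// handleGetGlobalTopology builds an IndexInstMap from the latest index definitions
// and per-bucket topology known to the manager and sends it back to the supervisor.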
func (c *clustMgrAgent) handleGetGlobalTopology(cmd Message) {

	logging.Debugf("ClustMgr:handleGetGlobalTopology %v", cmd)

	//get the latest topology from manager
	metaIter, err := c.mgr.NewIndexDefnIterator()
	if err != nil {
		common.CrashOnError(err)
	}
	defer metaIter.Close()

	indexInstMap := make(common.IndexInstMap)

	for _, defn, err := metaIter.Next(); err == nil; _, defn, err = metaIter.Next() {

		idxDefn := *defn

		t, e := c.mgr.GetTopologyByBucket(idxDefn.Bucket)
		if e != nil {
			common.CrashOnError(e)
		}

		inst := t.GetIndexInstByDefn(idxDefn.DefnId)

		if inst == nil {
			logging.Warnf("ClustMgr:handleGetGlobalTopology Index Instance Not "+
				"Found For Index Definition %v. Ignored.", idxDefn)
			continue
		}

		//for the indexer, the Ready state doesn't matter; until the index
		//is built, it stays in the Created state.
		var state common.IndexState
		instState := common.IndexState(inst.State)
		if instState == common.INDEX_STATE_READY {
			state = common.INDEX_STATE_CREATED
		} else {
			state = instState
		}

		idxInst := common.IndexInst{InstId: common.IndexInstId(inst.InstId),
			Defn:   idxDefn,
			State:  state,
			Stream: common.StreamId(inst.StreamId),
		}

		indexInstMap[idxInst.InstId] = idxInst

	}

	c.supvCmdch <- &MsgClustMgrTopology{indexInstMap: indexInstMap}
}
Example #5
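// OnIndexCreate notifies the indexer of a newly created index definition and waits
// for the create-index DDL response on the admin channel.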
func (meta *metaNotifier) OnIndexCreate(indexDefn *common.IndexDefn) error {

	logging.Infof("clustMgrAgent::OnIndexCreate Notification "+
		"Received for Create Index %v", indexDefn)

	pc := meta.makeDefaultPartitionContainer()

	idxInst := common.IndexInst{InstId: common.IndexInstId(indexDefn.DefnId),
		Defn:  *indexDefn,
		State: common.INDEX_STATE_CREATED,
		Pc:    pc,
	}

	respCh := make(MsgChannel)

	meta.adminCh <- &MsgCreateIndex{mType: CLUST_MGR_CREATE_INDEX_DDL,
		indexInst: idxInst,
		respCh:    respCh}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case MSG_SUCCESS:
			logging.Infof("clustMgrAgent::OnIndexCreate Success "+
				"for Create Index %v", indexDefn)
			return nil

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexCreate Error "+
				"for Create Index %v. Error %v.", indexDefn, res)
			err := res.(*MsgError).GetError()
			return err.cause

		default:
			logging.Fatalf("clustMgrAgent::OnIndexCreate Unknown Response "+
				"Received for Create Index %v. Response %v", indexDefn, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {

		logging.Fatalf("clustMgrAgent::OnIndexCreate Unexpected Channel Close "+
			"for Create Index %v", indexDefn)
		common.CrashOnError(errors.New("Unknown Response"))
	}

	return nil
}
Example #6
// handleDrop services a DropIndex request from the cbq client: it forwards the
// drop to the indexer and writes the result back as an IndexMetaResponse.
func (cbq *cbqBridge) handleDrop(w http.ResponseWriter, r *http.Request) {
	var res IndexMetaResponse

	indexinfo := indexRequest(r).Index

	logging.Debugf("CbqBridge::handleDrop Received DropIndex %v", indexinfo)

	defnID := indexinfo.DefnID

	respCh := make(MsgChannel)
	cbq.supvRespch <- &MsgDropIndex{mType: CBQ_DROP_INDEX_DDL,
		indexInstId: common.IndexInstId(defnID),
		respCh:      respCh}

	//wait for response from indexer
	msg := <-respCh
	if msg.GetMsgType() == MSG_SUCCESS {
		res = IndexMetaResponse{
			Status: RESP_SUCCESS,
		}
		delete(cbq.indexMap, common.IndexInstId(defnID))
	} else {
		err := msg.(*MsgError).GetError()

		logging.Debugf("CbqBridge: DropIndex Received Error %s", err.cause)

		ierr := IndexError{Code: string(RESP_ERROR),
			Msg: err.cause.Error()}

		res = IndexMetaResponse{
			Status: RESP_ERROR,
			Errors: []IndexError{ierr},
		}
	}
	sendResponse(w, res)

}
Example #7
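// OnIndexDelete asks the indexer to drop the index identified by defnId (treated as
// its instance id for now) and waits for the response on the admin channel.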
func (meta *metaNotifier) OnIndexDelete(defnId common.IndexDefnId, bucket string) error {

	logging.Infof("clustMgrAgent::OnIndexDelete Notification "+
		"Received for Drop IndexId %v", defnId)

	respCh := make(MsgChannel)

	//Treat DefnId as InstId for now
	meta.adminCh <- &MsgDropIndex{mType: CLUST_MGR_DROP_INDEX_DDL,
		indexInstId: common.IndexInstId(defnId),
		respCh:      respCh,
		bucket:      bucket}

	//wait for response
	if res, ok := <-respCh; ok {

		switch res.GetMsgType() {

		case MSG_SUCCESS:
			logging.Infof("clustMgrAgent::OnIndexDelete Success "+
				"for Drop IndexId %v", defnId)
			return nil

		case MSG_ERROR:
			logging.Errorf("clustMgrAgent::OnIndexDelete Error "+
				"for Drop IndexId %v. Error %v", defnId, res)
			err := res.(*MsgError).GetError()
			return err.cause

		default:
			logging.Fatalf("clustMgrAgent::OnIndexDelete Unknown Response "+
				"Received for Drop IndexId %v. Response %v", defnId, res)
			common.CrashOnError(errors.New("Unknown Response"))

		}

	} else {
		logging.Fatalf("clustMgrAgent::OnIndexDelete Unexpected Channel Close "+
			"for Drop IndexId %v", defnId)
		common.CrashOnError(errors.New("Unknown Response"))

	}

	return nil
}
Example #8
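// updateIndexMetadataNoLock refreshes the cached instance metadata for defnId from
// the given instance distribution; locking is the caller's responsibility.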
func (r *metadataRepo) updateIndexMetadataNoLock(defnId c.IndexDefnId, inst *IndexInstDistribution) {

	meta, ok := r.indices[defnId]
	if ok {
		idxInst := new(InstanceDefn)
		idxInst.InstId = c.IndexInstId(inst.InstId)
		idxInst.State = c.IndexState(inst.State)
		idxInst.Error = inst.Error
		idxInst.BuildTime = inst.BuildTime

		for _, partition := range inst.Partitions {
			for _, slice := range partition.SinglePartition.Slices {
				idxInst.IndexerId = c.IndexerId(slice.IndexerId)
				break
			}
		}
		meta.Instances = []*InstanceDefn{idxInst}
	}
}
Example #9
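// newIndexMetaData converts an indexInfo into a client IndexMetadata with a single
// instance pointing at the given queryport endpoint.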
func newIndexMetaData(info *indexInfo, queryport string) *mclient.IndexMetadata {
	defn := &common.IndexDefn{
		DefnId:       common.IndexDefnId(info.DefnID),
		Name:         info.Name,
		Using:        common.IndexType(info.Using),
		Bucket:       info.Bucket,
		IsPrimary:    info.IsPrimary,
		ExprType:     common.ExprType(info.ExprType),
		SecExprs:     info.SecExprs,
		PartitionKey: info.PartnExpr,
	}
	instances := []*mclient.InstanceDefn{
		&mclient.InstanceDefn{
			InstId: common.IndexInstId(info.DefnID), // TODO: defnID as InstID
			State:  common.INDEX_STATE_READY,
			Endpts: []common.Endpoint{common.Endpoint(queryport)},
		},
	}
	imeta := &mclient.IndexMetadata{
		Definition: defn,
		Instances:  instances,
	}
	return imeta
}
Example #10
//handleSingleKeyVersion processes a single mutation based on the command type.
//The mutation is put in a worker queue and a control message is sent to the supervisor.
func (r *mutationStreamReader) handleSingleKeyVersion(bucket string, vbucket Vbucket, vbuuid Vbuuid,
	kv *protobuf.KeyVersions) {

	meta := NewMutationMeta()
	meta.bucket = bucket
	meta.vbucket = vbucket
	meta.vbuuid = vbuuid
	meta.seqno = Seqno(kv.GetSeqno())

	defer meta.Free()

	var mutk *MutationKeys
	r.skipMutation = false
	r.evalFilter = true

	logging.LazyTrace(func() string {
		return fmt.Sprintf("MutationStreamReader::handleSingleKeyVersion received KeyVersions %v", kv)
	})

	for i, cmd := range kv.GetCommands() {

		//based on the type of command take appropriate action
		switch byte(cmd) {

		//case protobuf.Command_Upsert, protobuf.Command_Deletion, protobuf.Command_UpsertDeletion:
		case common.Upsert, common.Deletion, common.UpsertDeletion:

			//As there can be multiple keys in a KeyVersion for a mutation,
			//the filter needs to be evaluated and set only once.
			if r.evalFilter {
				r.evalFilter = false
				//check the bucket filter to see if this mutation can be processed
				//valid mutation will increment seqno of the filter
				if !r.checkAndSetBucketFilter(meta) {
					r.skipMutation = true
				}
			}

			if r.skipMutation {
				continue
			}

			r.logReaderStat()

			//allocate new mutation first time
			if mutk == nil {
				//TODO use free list here to reuse the struct and reduce garbage
				mutk = NewMutationKeys()
				mutk.meta = meta.Clone()
				mutk.docid = kv.GetDocid()
				mutk.mut = mutk.mut[:0]
			}

			mut := NewMutation()
			mut.uuid = common.IndexInstId(kv.GetUuids()[i])
			mut.key = kv.GetKeys()[i]
			mut.command = byte(kv.GetCommands()[i])

			mutk.mut = append(mutk.mut, mut)

		case common.DropData:
			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_DROP_DATA,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.StreamBegin:

			r.updateVbuuidInFilter(meta)

			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_BEGIN,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.StreamEnd:
			//send message to supervisor to take decision
			msg := &MsgStream{mType: STREAM_READER_STREAM_END,
				streamId: r.streamId,
				meta:     meta.Clone()}
			r.supvRespch <- msg

		case common.Snapshot:
			//get snapshot information from message
			r.snapType, r.snapStart, r.snapEnd = kv.Snapshot()

			// A snapshot marker can be processed only if it belongs
			// to the ondisk type or the inmemory type.
			if r.snapType&(0x1|0x2) != 0 {
				r.updateSnapInFilter(meta, r.snapStart, r.snapEnd)
			}

		}
	}

	//place secKey in the right worker's queue
	if mutk != nil {
		r.workerch[int(vbucket)%r.numWorkers] <- mutk
	}

}
Example #11
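// IndexInstId returns the index instance id for this snapshot; memDBSnapshot always
// reports the zero value.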
func (s *memDBSnapshot) IndexInstId() c.IndexInstId {
	return c.IndexInstId(0)
}
Example #12
// handleCreate services a CreateIndex request from the cbq client: it builds the
// index definition and instance, forwards them to the indexer and writes back the result.
func (cbq *cbqBridge) handleCreate(w http.ResponseWriter, r *http.Request) {
	var res IndexMetaResponse

	indexinfo := indexRequest(r).Index

	logging.Debugf("CbqBridge::handleCreate Received CreateIndex %v", indexinfo)

	//generate a new unique id
	defnID := rand.Int()

	idxDefn := common.IndexDefn{DefnId: common.IndexDefnId(defnID),
		Name:            indexinfo.Name,
		Using:           common.ForestDB,
		Bucket:          indexinfo.Bucket,
		IsPrimary:       indexinfo.IsPrimary,
		SecExprs:        indexinfo.SecExprs,
		ExprType:        common.N1QL,
		PartitionScheme: common.SINGLE,
		PartitionKey:    indexinfo.PartnExpr,
		WhereExpr:       indexinfo.WhereExpr}

	idxInst := common.IndexInst{InstId: common.IndexInstId(defnID),
		Defn:  idxDefn,
		State: common.INDEX_STATE_INITIAL,
	}

	if !cbq.config["enableManager"].Bool() {
		pc := common.NewKeyPartitionContainer()

		//Add one partition for now
		partnId := common.PartitionId(0)
		addr := net.JoinHostPort("", cbq.config["streamMaintPort"].String())
		endpt := []common.Endpoint{common.Endpoint(addr)}
		partnDefn := common.KeyPartitionDefn{Id: partnId,
			Endpts: endpt}
		pc.AddPartition(partnId, partnDefn)

		idxInst.Pc = pc
	}

	indexinfo.DefnID = uint64(defnID)

	respCh := make(MsgChannel)
	cbq.supvRespch <- &MsgCreateIndex{mType: CBQ_CREATE_INDEX_DDL,
		indexInst: idxInst,
		respCh:    respCh}

	//wait for response from indexer
	msg := <-respCh
	if msg.GetMsgType() == MSG_SUCCESS {
		res = IndexMetaResponse{
			Status:  RESP_SUCCESS,
			Indexes: []IndexInfo{indexinfo},
		}
		cbq.indexMap[idxInst.InstId] = indexinfo
	} else {
		err := msg.(*MsgError).GetError()

		logging.Debugf("CbqBridge::handleCreate Received Error %s", err.cause)

		ierr := IndexError{Code: string(RESP_ERROR),
			Msg: err.cause.Error()}

		res = IndexMetaResponse{
			Status: RESP_ERROR,
			Errors: []IndexError{ierr},
		}
	}
	sendResponse(w, res)
}
Example #13
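// CreateIndex validates the create request, persists the definition and its topology,
// notifies the indexer and, unless the index is deferred, triggers the initial build.
// Any failure rolls back the metadata changes made so far.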
func (m *LifecycleMgr) CreateIndex(defn *common.IndexDefn) error {

	if !defn.Deferred && !m.canBuildIndex(defn.Bucket) {
		logging.Errorf("LifecycleMgr.handleCreateIndex() : Cannot create index %s.%s while another index is being built",
			defn.Bucket, defn.Name)
		return errors.New(fmt.Sprintf("Cannot create Index %s.%s while another index is being built.",
			defn.Bucket, defn.Name))
	}

	existDefn, err := m.repo.GetIndexDefnByName(defn.Bucket, defn.Name)
	if err != nil {
		logging.Errorf("LifecycleMgr.handleCreateIndex() : createIndex fails. Reason = %v", err)
		return err
	}

	if existDefn != nil {
		topology, err := m.repo.GetTopologyByBucket(existDefn.Bucket)
		if err != nil {
			logging.Errorf("LifecycleMgr.handleCreateIndex() : fails to find index instance. Reason = %v", err)
			return err
		}

		state, _ := topology.GetStatusByDefn(existDefn.DefnId)
		if state != common.INDEX_STATE_NIL && state != common.INDEX_STATE_DELETED {
			return errors.New(fmt.Sprintf("Index %s.%s already exist", defn.Bucket, defn.Name))
		}
	}

	// Fetch the bucket UUID. This confirms that the bucket exists, but it cannot confirm
	// that the bucket still exists in the cluster (due to a race condition or network partition).
	bucketUUID, err := m.verifyBucket(defn.Bucket)
	if err != nil || bucketUUID == common.BUCKET_UUID_NIL {
		return errors.New("Bucket does not exist or temporarily unavaible for creating new index." +
			" Please retry the operation at a later time.")
	}
	defn.BucketUUID = bucketUUID

	if err := m.repo.CreateIndex(defn); err != nil {
		logging.Errorf("LifecycleMgr.handleCreateIndex() : createIndex fails. Reason = %v", err)
		return err
	}

	if err := m.repo.addIndexToTopology(defn, common.IndexInstId(defn.DefnId)); err != nil {
		logging.Errorf("LifecycleMgr.handleCreateIndex() : createIndex fails. Reason = %v", err)
		m.repo.DropIndexById(defn.DefnId)
		return err
	}

	if m.notifier != nil {
		if err := m.notifier.OnIndexCreate(defn); err != nil {
			logging.Errorf("LifecycleMgr.handleCreateIndex() : createIndex fails. Reason = %v", err)
			m.repo.DropIndexById(defn.DefnId)
			m.repo.deleteIndexFromTopology(defn.Bucket, defn.DefnId)
			return err
		}
	}

	if err := m.updateIndexState(defn.Bucket, defn.DefnId, common.INDEX_STATE_READY); err != nil {
		logging.Errorf("LifecycleMgr.handleCreateIndex() : createIndex fails. Reason = %v", err)

		if m.notifier != nil {
			m.notifier.OnIndexDelete(defn.DefnId, defn.Bucket)
		}
		m.repo.DropIndexById(defn.DefnId)
		m.repo.deleteIndexFromTopology(defn.Bucket, defn.DefnId)
		return err
	}

	if !defn.Deferred {
		if m.notifier != nil {
			logging.Debugf("LifecycleMgr.handleCreateIndex() : start Index Build")
			if errMap := m.notifier.OnIndexBuild([]common.IndexDefnId{defn.DefnId}, []string{defn.Bucket}); len(errMap) != 0 {
				err := errMap[common.IndexInstId(defn.DefnId)]
				logging.Errorf("LifecycleMgr.hanaleCreateIndex() : createIndex fails. Reason = %v", err)

				m.notifier.OnIndexDelete(defn.DefnId, defn.Bucket)
				m.repo.DropIndexById(defn.DefnId)
				m.repo.deleteIndexFromTopology(defn.Bucket, defn.DefnId)
				return err
			}
		}
	}

	logging.Debugf("LifecycleMgr.handleCreateIndex() : createIndex completes")

	return nil
}