Example #1
// GetNextPartitionId returns a new partition id derived from a freshly
// generated UUID. Despite the name, ids are random rather than sequential.
func (c *MetadataRepo) GetNextPartitionId() (common.PartitionId, error) {

	id, err := common.NewUUID()
	if err != nil {
		return common.PartitionId(0), err
	}

	return common.PartitionId(id.Uint64()), nil
}
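
The repo derives a partition id by decoding a freshly generated UUID into a uint64 instead of keeping a counter. Below is a minimal standalone sketch of that pattern using only the standard library; PartitionId and newRandomPartitionId are stand-ins here, and the real code goes through common.NewUUID.

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

// PartitionId mirrors common.PartitionId for this sketch.
type PartitionId uint64

// newRandomPartitionId draws 8 cryptographically random bytes and
// interprets them as a uint64, roughly what common.NewUUID().Uint64()
// is presumed to provide.
func newRandomPartitionId() (PartitionId, error) {
	var buf [8]byte
	if _, err := rand.Read(buf[:]); err != nil {
		return PartitionId(0), err
	}
	return PartitionId(binary.LittleEndian.Uint64(buf[:])), nil
}

func main() {
	id, err := newRandomPartitionId()
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}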
Example #2
// makeDefaultPartitionContainer builds a key partition container holding a
// single partition whose endpoint is the maintenance stream port.
func (meta *metaNotifier) makeDefaultPartitionContainer() common.PartitionContainer {

	pc := common.NewKeyPartitionContainer()

	//Add one partition for now
	addr := net.JoinHostPort("", meta.config["streamMaintPort"].String())
	endpt := []common.Endpoint{common.Endpoint(addr)}

	partnDefn := common.KeyPartitionDefn{Id: common.PartitionId(1),
		Endpts: endpt}
	pc.AddPartition(common.PartitionId(1), partnDefn)

	return pc
}
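
Note the empty host passed to net.JoinHostPort: the resulting endpoint names a port on every local interface. A runnable sketch of just that call, with a placeholder port standing in for whatever streamMaintPort is configured to:

package main

import (
	"fmt"
	"net"
)

func main() {
	// An empty host yields a wildcard-style address such as ":9105",
	// which is how the maintenance-stream endpoint above is formed.
	// 9105 is a placeholder, not necessarily the configured port.
	fmt.Println(net.JoinHostPort("", "9105")) // prints :9105
}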
Example #3
// Create a mock index that uses a feeder function to provide query results.
// Creates an index with a single partition and a single slice holding one snapshot.
func (s *scannerTestHarness) createIndex(name, bucket string, feeder snapshotFeeder) c.IndexDefnId {
	s.indexCount++

	pc := c.NewKeyPartitionContainer()
	pId := c.PartitionId(0)
	endpt := c.Endpoint("localhost:1000")
	pDef := c.KeyPartitionDefn{Id: pId, Endpts: []c.Endpoint{endpt}}
	pc.AddPartition(pId, pDef)

	instId := c.IndexInstId(s.indexCount)
	defnId := c.IndexDefnId(0xABBA)
	indDefn := c.IndexDefn{Name: name, Bucket: bucket, DefnId: defnId}
	indInst := c.IndexInst{InstId: instId, State: c.INDEX_STATE_ACTIVE,
		Defn: indDefn, Pc: pc,
	}
	// TODO: Use cmdch to update map
	s.scanner.indexInstMap[instId] = indInst

	sc := NewHashedSliceContainer()
	partInst := PartitionInst{Defn: pDef, Sc: sc}
	partInstMap := PartitionInstMap{pId: partInst}

	snap := &mockSnapshot{feeder: feeder}
	snap.SetTimestamp(s.scanTS)

	slice := &mockSlice{}
	slId := SliceId(0)
	sc.AddSlice(slId, slice)
	// TODO: Use cmdch to update map
	s.scanner.indexPartnMap[instId] = partInstMap
	return defnId
}
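
The harness wires the instance into two registries: indexInstMap (instance metadata) and indexPartnMap (instance -> partition -> slice). A minimal sketch of that two-level map shape, with stand-in types in place of the real PartitionInst and slice-container definitions:

package main

import "fmt"

// Stand-ins for the identifier types used by the harness; the real
// definitions live in the indexing packages.
type (
	IndexInstId uint64
	PartitionId uint64
	SliceId     uint64
)

type PartitionInst struct{ slices map[SliceId]string }
type PartitionInstMap map[PartitionId]PartitionInst

func main() {
	// The scanner keeps a two-level registry, instance -> partitions ->
	// slices, which is what the two map writes in createIndex populate.
	indexPartnMap := map[IndexInstId]PartitionInstMap{}

	instId, pId, slId := IndexInstId(1), PartitionId(0), SliceId(0)
	indexPartnMap[instId] = PartitionInstMap{
		pId: {slices: map[SliceId]string{slId: "mock slice"}},
	}

	fmt.Println(indexPartnMap[instId][pId].slices[slId]) // mock slice
}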
Example #4
// Update index-snapshot map using index partition map
// This function should be called only during initialization
// of storage manager and during rollback.
// FIXME: The current implementation makes the major assumption that
// only a single slice is supported.
func (s *storageMgr) updateIndexSnapMap(indexPartnMap IndexPartnMap,
	streamId common.StreamId, bucket string) {

	s.muSnap.Lock()
	defer s.muSnap.Unlock()

	var tsVbuuid *common.TsVbuuid
	for idxInstId, partnMap := range indexPartnMap {

		//if bucket and stream have been provided
		if bucket != "" && streamId != common.ALL_STREAMS {
			idxInst := s.indexInstMap[idxInstId]
			//skip the index if either bucket or stream don't match
			if idxInst.Defn.Bucket != bucket || idxInst.Stream != streamId {
				continue
			}
			//skip deleted indexes
			if idxInst.State == common.INDEX_STATE_DELETED {
				continue
			}
		}

		//there is only one partition for now
		partnInst := partnMap[0]
		sc := partnInst.Sc

		//there is only one slice for now
		slice := sc.GetSliceById(0)
		infos, err := slice.GetSnapshots()
		// TODO: Proper error handling if possible
		if err != nil {
			panic("Unable to read snapinfo -" + err.Error())
		}

		DestroyIndexSnapshot(s.indexSnapMap[idxInstId])
		delete(s.indexSnapMap, idxInstId)
		s.notifySnapshotDeletion(idxInstId)

		snapInfoContainer := NewSnapshotInfoContainer(infos)
		latestSnapshotInfo := snapInfoContainer.GetLatest()

		if latestSnapshotInfo != nil {
			logging.Infof("StorageMgr::updateIndexSnapMap IndexInst:%v Attempting to open snapshot (%v)",
				idxInstId, latestSnapshotInfo)
			latestSnapshot, err := slice.OpenSnapshot(latestSnapshotInfo)
			if err != nil {
				panic("Unable to open snapshot -" + err.Error())
			}
			ss := &sliceSnapshot{
				id:   SliceId(0),
				snap: latestSnapshot,
			}

			tsVbuuid = latestSnapshotInfo.Timestamp()

			sid := SliceId(0)
			pid := common.PartitionId(0)

			ps := &partitionSnapshot{
				id:     pid,
				slices: map[SliceId]SliceSnapshot{sid: ss},
			}

			is := &indexSnapshot{
				instId: idxInstId,
				ts:     tsVbuuid,
				partns: map[common.PartitionId]PartitionSnapshot{pid: ps},
			}
			s.indexSnapMap[idxInstId] = is
			s.notifySnapshotCreation(is)
		} else {
			s.addNilSnapshot(idxInstId, bucket)
		}
	}
}
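
GetLatest is the pivot of this function: a non-nil result opens the newest snapshot, while nil falls through to addNilSnapshot. A small sketch of the assumed GetLatest semantics, scanning for the most recent snapshot info; snapInfo and latest are stand-ins, not the real container API.

package main

import (
	"fmt"
	"time"
)

// snapInfo is a stand-in for a slice's snapshot metadata; only the
// field this sketch needs is modeled.
type snapInfo struct{ committed time.Time }

// latest returns the most recent info, or nil for an empty list; the
// nil result is what routes updateIndexSnapMap into the addNilSnapshot
// branch. (Assumed semantics of SnapshotInfoContainer.GetLatest.)
func latest(infos []snapInfo) *snapInfo {
	var best *snapInfo
	for i := range infos {
		if best == nil || infos[i].committed.After(best.committed) {
			best = &infos[i]
		}
	}
	return best
}

func main() {
	fmt.Println(latest(nil)) // <nil>: no snapshot on disk yet
	infos := []snapInfo{
		{committed: time.Now().Add(-time.Hour)},
		{committed: time.Now()},
	}
	fmt.Println(latest(infos).committed) // the newer of the two
}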
Example #5
// handleCreate services CreateIndex requests arriving over the cbq bridge's
// HTTP interface and forwards them to the indexer.
func (cbq *cbqBridge) handleCreate(w http.ResponseWriter, r *http.Request) {
	var res IndexMetaResponse

	indexinfo := indexRequest(r).Index

	logging.Debugf("CbqBridge::handleCreate Received CreateIndex %v", indexinfo)

	//generate a new id (random, so uniqueness is not guaranteed)
	defnID := rand.Int()

	idxDefn := common.IndexDefn{DefnId: common.IndexDefnId(defnID),
		Name:            indexinfo.Name,
		Using:           common.ForestDB,
		Bucket:          indexinfo.Bucket,
		IsPrimary:       indexinfo.IsPrimary,
		SecExprs:        indexinfo.SecExprs,
		ExprType:        common.N1QL,
		PartitionScheme: common.SINGLE,
		PartitionKey:    indexinfo.PartnExpr,
		WhereExpr:       indexinfo.WhereExpr}

	idxInst := common.IndexInst{InstId: common.IndexInstId(defnID),
		Defn:  idxDefn,
		State: common.INDEX_STATE_INITIAL,
	}

	if !cbq.config["enableManager"].Bool() {
		pc := common.NewKeyPartitionContainer()

		//Add one partition for now
		partnId := common.PartitionId(0)
		addr := net.JoinHostPort("", cbq.config["streamMaintPort"].String())
		endpt := []common.Endpoint{common.Endpoint(addr)}
		partnDefn := common.KeyPartitionDefn{Id: partnId,
			Endpts: endpt}
		pc.AddPartition(partnId, partnDefn)

		idxInst.Pc = pc
	}

	indexinfo.DefnID = uint64(defnID)

	respCh := make(MsgChannel)
	cbq.supvRespch <- &MsgCreateIndex{mType: CBQ_CREATE_INDEX_DDL,
		indexInst: idxInst,
		respCh:    respCh}

	//wait for response from indexer
	msg := <-respCh
	if msg.GetMsgType() == MSG_SUCCESS {
		res = IndexMetaResponse{
			Status:  RESP_SUCCESS,
			Indexes: []IndexInfo{indexinfo},
		}
		cbq.indexMap[idxInst.InstId] = indexinfo
	} else {
		err := msg.(*MsgError).GetError()

		logging.Debugf("CbqBridge::handleCreate Received Error %s", err.cause)

		ierr := IndexError{Code: string(RESP_ERROR),
			Msg: err.cause.Error()}

		res = IndexMetaResponse{
			Status: RESP_ERROR,
			Errors: []IndexError{ierr},
		}
	}
	sendResponse(w, res)
}
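
The handler's concurrency pattern is worth isolating: each request carries its own reply channel, so handleCreate can block on exactly its own response while the supervisor serves many callers. A self-contained sketch of that pattern, where request and the inline goroutine are stand-ins for MsgCreateIndex and the indexer:

package main

import "fmt"

// request is a stand-in for MsgCreateIndex: a message that carries its
// own reply channel, the same request/response pattern handleCreate
// uses with supvRespch and respCh.
type request struct {
	payload string
	respCh  chan string
}

func main() {
	supvCh := make(chan request)

	// A stand-in "indexer" that answers each request on the channel
	// embedded in the request itself.
	go func() {
		for req := range supvCh {
			req.respCh <- "ok: " + req.payload
		}
	}()

	respCh := make(chan string)
	supvCh <- request{payload: "create index", respCh: respCh}
	fmt.Println(<-respCh) // blocks until the worker replies, as handleCreate does
}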