Example #1
//
// Create a timer that keeps track of the timestamp history across streams and buckets
//
func newTimer(repo *MetadataRepo) *Timer {

	timestamps := make(map[common.StreamId]timestampHistoryBucketMap)
	tickers := make(map[common.StreamId]tickerBucketMap)
	stopchs := make(map[common.StreamId]stopchBucketMap)
	outch := make(chan *timestampSerializable, TIMESTAMP_CHANNEL_SIZE)

	timer := &Timer{timestamps: timestamps,
		tickers: tickers,
		stopchs: stopchs,
		outch:   outch,
		ready:   false}

	savedTimestamps, err := repo.GetStabilityTimestamps()
	if err == nil {
		for _, timestamp := range savedTimestamps.Timestamps {
			ts, err := unmarshallTimestamp(timestamp.Timestamp)
			if err != nil {
				logging.Errorf("Timer.newTimer() : unable to unmarshall timestamp for bucket %v.  Skip initialization.",
					timestamp.Bucket)
				continue
			}
			timer.start(common.StreamId(timestamp.StreamId), timestamp.Bucket)
			for vb, seqno := range ts.Seqnos {
				timer.increment(common.StreamId(timestamp.StreamId), timestamp.Bucket, uint32(vb), ts.Vbuuids[vb], seqno)
			}
			logging.Errorf("Timer.newTimer() : initialized timestamp for bucket %v from repository.", timestamp.Bucket)
		}
	} else {
		// TODO : Determine timestamp not exist versus forestdb error
		logging.Errorf("Timer.newTimer() : cannot get stability timestamp from repository. Skip initialization.")
	}

	return timer
}
Example #2
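// startBucket connects to the named bucket, opens a DCP feed, fetches its
// failover logs (printing them if requested), starts the DCP streams, and
// forwards every feed event on rch.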
func startBucket(cluster, bucketn string, rch chan []interface{}) int {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("Recovered from panic %v", r)
			logging.Errorf(logging.StackTrace())
		}
	}()

	logging.Infof("Connecting with %q\n", bucketn)
	b, err := common.ConnectBucket(cluster, "default", bucketn)
	mf(err, "bucket")

	dcpConfig := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := b.StartDcpFeed("rawupr", uint32(0), 0xABCD, dcpConfig)
	mf(err, "- upr")

	vbnos := listOfVbnos(options.maxVbno)
	flogs, err := b.GetFailoverLogs(0xABCD, vbnos, dcpConfig)
	mf(err, "- dcp failoverlogs")
	if options.printflogs {
		printFlogs(vbnos, flogs)
	}
	go startDcp(dcpFeed, flogs)

	for {
		e, ok := <-dcpFeed.C
		if !ok {
			// feed channel closed; stop forwarding events
			logging.Infof("Closing for bucket %q\n", bucketn)
			break
		}
		rch <- []interface{}{bucketn, e}
	}
	return 0
}
Example #3
//updates snapshot information in bucket filter
func (r *mutationStreamReader) updateSnapInFilter(meta *MutationMeta,
	snapStart uint64, snapEnd uint64) {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	if filter, ok := r.bucketFilterMap[meta.bucket]; ok {
		if snapEnd > filter.Snapshots[meta.vbucket][1] {

			//store the existing snap marker in prevSnap map
			prevSnap := r.bucketPrevSnapMap[meta.bucket]
			prevSnap.Snapshots[meta.vbucket][0] = filter.Snapshots[meta.vbucket][0]
			prevSnap.Snapshots[meta.vbucket][1] = filter.Snapshots[meta.vbucket][1]
			prevSnap.Vbuuids[meta.vbucket] = filter.Vbuuids[meta.vbucket]

			filter.Snapshots[meta.vbucket][0] = snapStart
			filter.Snapshots[meta.vbucket][1] = snapEnd
		} else {
			logging.Errorf("MutationStreamReader::updateSnapInFilter Skipped "+
				"Snapshot %v-%v for vb %v %v %v. Current Filter %v", snapStart,
				snapEnd, meta.vbucket, meta.bucket, r.streamId,
				filter.Snapshots[meta.vbucket][1])
		}
	} else {
		logging.Errorf("MutationStreamReader::updateSnapInFilter Missing"+
			"bucket %v in Filter for Stream %v", meta.bucket, r.streamId)
	}

}
Example #4
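// newMetaBridgeClient builds a metadataClient for the cluster, wires it to a
// new metadata provider, loads the current indexer list and starts watching
// for cluster changes.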
func newMetaBridgeClient(
	cluster string, config common.Config) (c *metadataClient, err error) {

	b := &metadataClient{
		cluster:    cluster,
		finch:      make(chan bool),
		adminports: make(map[string]common.IndexerId),
		loads:      make(map[common.IndexDefnId]*loadHeuristics),
	}
	b.topology = make(map[common.IndexerId]map[common.IndexDefnId]*mclient.IndexMetadata)
	b.servicesNotifierRetryTm = config["servicesNotifierRetryTm"].Int()
	// initialize meta-data-provider.
	uuid, err := common.NewUUID()
	if err != nil {
		logging.Errorf("Could not generate UUID in common.NewUUID\n")
		return nil, err
	}
	b.mdClient, err = mclient.NewMetadataProvider(uuid.Str())
	if err != nil {
		return nil, err
	}

	if err := b.updateIndexerList(false); err != nil {
		logging.Errorf("updateIndexerList(): %v\n", err)
		b.mdClient.Close()
		return nil, err
	}

	b.Refresh()
	go b.watchClusterChanges() // will also update the indexer list
	return b, nil
}
Example #5
// NewGSIIndexer manages a new set of indexes under namespace->keyspace,
// also known as pool->bucket.
// It returns an error when:
// - the GSI cluster is not available.
// - there are network partitions / errors.
func NewGSIIndexer(
	clusterURL, namespace, keyspace string) (datastore.Indexer, errors.Error) {

	l.SetLogLevel(l.Info)

	gsi := &gsiKeyspace{
		clusterURL:     clusterURL,
		namespace:      namespace,
		keyspace:       keyspace,
		indexes:        make(map[uint64]*secondaryIndex), // defnID -> index
		primaryIndexes: make(map[uint64]*secondaryIndex),
	}
	gsi.logPrefix = fmt.Sprintf("GSIC[%s; %s]", namespace, keyspace)

	// get the singleton-client
	client, err := getSingletonClient(clusterURL)
	if err != nil {
		l.Errorf("%v GSI instantiation failed: %v", gsi.logPrefix, err)
		return nil, errors.NewError(err, "GSI client instantiation failed")
	}
	gsi.gsiClient = client
	// refresh indexes for this service->namespace->keyspace
	if err := gsi.Refresh(); err != nil {
		l.Errorf("%v Refresh() failed: %v", gsi.logPrefix, err)
		return nil, err
	}
	l.Debugf("%v instantiated ...", gsi.logPrefix)
	return gsi, nil
}
Example #6
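// insertPrimaryIndex inserts the primary-index entry for docid into the main
// index, skipping the write when the key already exists.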
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
	var err error

	logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

	//check if the docid exists in the main index
	t0 := time.Now()
	if _, err = fdb.main[workerId].GetKV(key); err == nil {
		fdb.idxStats.Timings.stKVGet.Put(time.Now().Sub(t0))
		//skip
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
			"Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
	} else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"mainindex entry %v", fdb.id, fdb.idxInstId, err)
	} else if err == forestdb.RESULT_KEY_NOT_FOUND {
		//set in main index
		t0 := time.Now()
		if err = fdb.main[workerId].SetKV(key, nil); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
				"Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
		}
		fdb.idxStats.Timings.stKVSet.Put(time.Now().Sub(t0))
		platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
	}
}
Example #7
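// Send frames the payload length and transport flags into buf, then writes
// the header followed by the payload on the connection.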
func Send(conn transporter, buf []byte, flags TransportFlag, payload []byte) (err error) {
	var n int

	// transport framing
	l := pktLenSize + pktFlagSize
	if maxLen := len(buf); l > maxLen {
		logging.Errorf("sending packet length %v > %v\n", l, maxLen)
		err = ErrorPacketOverflow
		return
	}

	a, b := pktLenOffset, pktLenOffset+pktLenSize
	binary.BigEndian.PutUint32(buf[a:b], uint32(len(payload)))
	a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
	binary.BigEndian.PutUint16(buf[a:b], uint16(flags))
	if n, err = conn.Write(buf[:pktDataOffset]); err == nil {
		if n, err = conn.Write(payload); err == nil && n != len(payload) {
			logging.Errorf("transport wrote only %v bytes for payload\n", n)
			err = ErrorPacketWrite
		}
		laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
		logging.Tracef("wrote %v bytes on connection %v->%v", len(payload), laddr, raddr)

	} else if n != pktDataOffset {
		logging.Errorf("transport wrote only %v bytes for header\n", n)
		err = ErrorPacketWrite
	}
	return
}
Example #8
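// dcpRequestStream resolves the master node feed for vb and forwards the
// stream request to it.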
func (feed *DcpFeed) dcpRequestStream(
	vb uint16, opaque uint16, flags uint32,
	vbuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {

	prefix := feed.logPrefix
	vbm := feed.bucket.VBServerMap()
	if l := len(vbm.VBucketMap); int(vb) >= l {
		fmsg := "%v ##%x invalid vbucket id %d >= %d\n"
		logging.Errorf(fmsg, prefix, opaque, vb, l)
		return ErrorInvalidVbucket
	}

	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		fmsg := "%v ##%x notFound master node for vbucket %d\n"
		logging.Errorf(fmsg, prefix, opaque, vb)
		return ErrorInvalidVbucket
	}
	singleFeed, ok := feed.nodeFeeds[master]
	if !ok {
		fmsg := "%v ##%x notFound DcpFeed host: %q vb:%d\n"
		logging.Errorf(fmsg, prefix, opaque, master, vb)
		return memcached.ErrorInvalidFeed
	}
	err := singleFeed.dcpFeed.DcpRequestStream(
		vb, opaque, flags, vbuuid, startSequence, endSequence,
		snapStart, snapEnd)
	if err != nil {
		return err
	}
	return nil
}
Example #9
func (feed *DcpFeed) dcpCloseStream(vb, opaqueMSB uint16) error {
	prefix := feed.logPrefix
	vbm := feed.bucket.VBServerMap()
	if l := len(vbm.VBucketMap); int(vb) >= l {
		fmsg := "%v ##%x invalid vbucket id %d >= %d\n"
		logging.Errorf(fmsg, prefix, opaqueMSB, vb, l)
		return ErrorInvalidVbucket
	}

	masterID := vbm.VBucketMap[vb][0]
	master := feed.bucket.getMasterNode(masterID)
	if master == "" {
		fmsg := "%v ##%x notFound master node for vbucket %d\n"
		logging.Errorf(fmsg, prefix, opaqueMSB, vb)
		return ErrorInvalidVbucket
	}
	singleFeed, ok := feed.nodeFeeds[master]
	if !ok {
		fmsg := "%v ##%x notFound DcpFeed host: %q vb:%d"
		logging.Errorf(fmsg, prefix, opaqueMSB, master, vb)
		return memcached.ErrorInvalidFeed
	}
	if err := singleFeed.dcpFeed.CloseStream(vb, opaqueMSB); err != nil {
		return err
	}
	return nil
}
Example #10
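// Receive reads a framed packet from the connection into buf and returns the
// transport flags along with the payload bytes.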
func Receive(conn transporter, buf []byte) (flags TransportFlag, payload []byte, err error) {
	// transport de-framing
	if err = fullRead(conn, buf[:pktDataOffset]); err != nil {
		if err == io.EOF {
			logging.Tracef("receiving packet: %v\n", err)
		} else {
			logging.Errorf("receiving packet: %v\n", err)
		}
		return
	}
	a, b := pktLenOffset, pktLenOffset+pktLenSize
	pktlen := binary.BigEndian.Uint32(buf[a:b])

	a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
	flags = TransportFlag(binary.BigEndian.Uint16(buf[a:b]))
	if maxLen := uint32(len(buf)); pktlen > maxLen {
		logging.Errorf("receiving packet length %v > %v\n", pktlen, maxLen)
		err = ErrorPacketOverflow
		return
	}
	if err = fullRead(conn, buf[:pktlen]); err != nil {
		if err == io.EOF {
			logging.Tracef("receiving packet: %v\n", err)
		} else {
			logging.Errorf("receiving packet: %v\n", err)
		}
		return
	}

	return flags, buf[:pktlen], err
}
Example #11
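// deleteIndexesFromStream asks every projector to drop the given index
// instances from the stream's topic, retrying on failure and reporting the
// outcome on respCh.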
func (k *kvSender) deleteIndexesFromStream(streamId c.StreamId, indexInstList []c.IndexInst,
	respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error in fetching cluster info %v",
			streamId, indexInstList[0].Defn.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	var uuids []uint64
	for _, indexInst := range indexInstList {
		uuids = append(uuids, uint64(indexInst.InstId))
	}

	topic := getTopicForStreamId(streamId)

	fn := func(r int, err error) error {

		//clear the error before every retry
		err = nil
		for _, addr := range addrs {
			execWithStopCh(func() {
				ap := newProjClient(addr)
				if ret := sendDelInstancesRequest(ap, topic, uuids); ret != nil {
					logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v from %v",
						streamId, indexInstList[0].Defn.Bucket, ret, addr)
					//Treat TopicMissing/GenServer.Closed/InvalidBucket as success
					if ret.Error() == projClient.ErrorTopicMissing.Error() ||
						ret.Error() == c.ErrorClosed.Error() ||
						ret.Error() == projClient.ErrorInvalidBucket.Error() {
						logging.Infof("KVSender::deleteIndexesFromStream %v %v Treating %v As Success",
							streamId, indexInstList[0].Defn.Bucket, ret)
					} else {
						err = ret
					}
				}
			}, stopCh)
		}
		return err
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()
	if err != nil {
		logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v",
			streamId, indexInstList[0].Defn.Bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	respCh <- &MsgSuccess{}
}
Example #12
func (m *LifecycleMgr) UpdateIndexInstance(bucket string, defnId common.IndexDefnId, state common.IndexState,
	streamId common.StreamId, errStr string, buildTime []uint64) error {

	topology, err := m.repo.GetTopologyByBucket(bucket)
	if err != nil {
		logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
		return err
	}

	changed := false
	if state != common.INDEX_STATE_NIL {
		changed = topology.UpdateStateForIndexInstByDefn(common.IndexDefnId(defnId), common.IndexState(state)) || changed
	}

	if streamId != common.NIL_STREAM {
		changed = topology.UpdateStreamForIndexInstByDefn(common.IndexDefnId(defnId), common.StreamId(streamId)) || changed
	}

	changed = topology.SetErrorForIndexInstByDefn(common.IndexDefnId(defnId), errStr) || changed

	if changed {
		if err := m.repo.SetTopologyByBucket(bucket, topology); err != nil {
			logging.Errorf("LifecycleMgr.handleTopologyChange() : index instance update fails. Reason = %v", err)
			return err
		}
	}

	return nil
}
Example #13
// Start is part of Server interface.
func (s *httpServer) Start() (err error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.lis != nil {
		logging.Errorf("%v already started ...\n", s.logPrefix)
		return ErrorServerStarted
	}

	if s.lis, err = net.Listen("tcp", s.srv.Addr); err != nil {
		logging.Errorf("%v listen failed %v\n", s.logPrefix, err)
		return err
	}

	// Server routine
	go func() {
		defer s.shutdown()

		logging.Infof("%s starting ...\n", s.logPrefix)
		err := s.srv.Serve(s.lis) // serve until listener is closed.
		// TODO: look into error message and skip logging if Stop().
		if err != nil {
			logging.Errorf("%s %v\n", s.logPrefix, err)
		}
	}()

	logging.PeriodicProfile(logging.Trace, s.srv.Addr, "goroutine")
	return
}
Example #14
// Range scan index between low and high.
func (c *GsiScanClient) Range(
	defnID uint64, low, high common.SecondaryKey, inclusion Inclusion,
	distinct bool, limit int64, cons common.Consistency, vector *TsConsistency,
	callb ResponseHandler) (error, bool) {

	// serialize low and high values.
	l, err := json.Marshal(low)
	if err != nil {
		return err, false
	}
	h, err := json.Marshal(high)
	if err != nil {
		return err, false
	}

	connectn, err := c.pool.Get()
	if err != nil {
		return err, false
	}
	healthy := true
	defer func() { c.pool.Return(connectn, healthy) }()

	conn, pkt := connectn.conn, connectn.pkt

	req := &protobuf.ScanRequest{
		DefnID: proto.Uint64(defnID),
		Span: &protobuf.Span{
			Range: &protobuf.Range{
				Low: l, High: h, Inclusion: proto.Uint32(uint32(inclusion)),
			},
		},
		Distinct: proto.Bool(distinct),
		Limit:    proto.Int64(limit),
		Cons:     proto.Uint32(uint32(cons)),
	}
	if vector != nil {
		req.Vector = protobuf.NewTsConsistency(
			vector.Vbnos, vector.Seqnos, vector.Vbuuids, vector.Crc64)
	}
	// ---> protobuf.ScanRequest
	if err := c.sendRequest(conn, pkt, req); err != nil {
		fmsg := "%v Range() request transport failed `%v`\n"
		logging.Errorf(fmsg, c.logPrefix, err)
		healthy = false
		return err, false
	}

	cont, partial := true, false
	for cont {
		// <--- protobuf.ResponseStream
		cont, healthy, err = c.streamResponse(conn, pkt, callb)
		if err != nil { // if err, cont should have been set to false
			fmsg := "%v Range() response failed `%v`\n"
			logging.Errorf(fmsg, c.logPrefix, err)
		} else { // partially succeeded
			partial = true
		}
	}
	return err, partial
}
Example #15
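// processDelete locates the slice owning the mutation's partition and deletes
// docid from it.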
func (f *flusher) processDelete(mut *Mutation, docid []byte) {

	idxInst, _ := f.indexInstMap[mut.uuid]

	partnId := idxInst.Pc.GetPartitionIdByPartitionKey(mut.partnkey)

	var partnInstMap PartitionInstMap
	var ok bool
	if partnInstMap, ok = f.indexPartnMap[mut.uuid]; !ok {
		logging.Errorf("Flusher:processDelete Missing Partition Instance Map"+
			"for IndexInstId: %v. Skipped Mutation Key: %v", mut.uuid, mut.key)
		return
	}

	if partnInst, ok := partnInstMap[partnId]; ok {
		slice := partnInst.Sc.GetSliceByIndexKey(common.IndexKey(mut.key))
		if err := slice.Delete(docid); err != nil {
			logging.Errorf("Flusher::processDelete Error Deleting DocId: %v "+
				"from Slice: %v", docid, slice.Id())
		}
	} else {
		logging.Errorf("Flusher::processDelete Partition Instance not found "+
			"for Id: %v. Skipped Mutation Key: %v", partnId, mut.key)
	}
}
Example #16
// Lookup scan index between low and high.
func (c *GsiScanClient) Lookup(
	defnID uint64, values []common.SecondaryKey,
	distinct bool, limit int64,
	cons common.Consistency, vector *TsConsistency,
	callb ResponseHandler) (error, bool) {

	// serialize lookup value.
	equals := make([][]byte, 0, len(values))
	for _, value := range values {
		val, err := json.Marshal(value)
		if err != nil {
			return err, false
		}
		equals = append(equals, val)
	}

	connectn, err := c.pool.Get()
	if err != nil {
		return err, false
	}
	healthy := true
	defer func() { c.pool.Return(connectn, healthy) }()

	conn, pkt := connectn.conn, connectn.pkt

	req := &protobuf.ScanRequest{
		DefnID:   proto.Uint64(defnID),
		Span:     &protobuf.Span{Equals: equals},
		Distinct: proto.Bool(distinct),
		Limit:    proto.Int64(limit),
		Cons:     proto.Uint32(uint32(cons)),
	}
	if vector != nil {
		req.Vector = protobuf.NewTsConsistency(
			vector.Vbnos, vector.Seqnos, vector.Vbuuids, vector.Crc64)
	}

	// ---> protobuf.ScanRequest
	if err := c.sendRequest(conn, pkt, req); err != nil {
		fmsg := "%v Lookup() request transport failed `%v`\n"
		logging.Errorf(fmsg, c.logPrefix, err)
		healthy = false
		return err, false
	}

	cont, partial := true, false
	for cont {
		// <--- protobuf.ResponseStream
		cont, healthy, err = c.streamResponse(conn, pkt, callb)
		if err != nil { // if err, cont should have been set to false
			fmsg := "%v Lookup() response failed `%v`\n"
			logging.Errorf(fmsg, c.logPrefix, err)
		} else { // partially succeeded
			partial = true
		}
	}
	return err, partial
}
Example #17
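// addDBSbucket connects to bucketn, caches its vbucket count and opens a DCP
// feed per kv-node for the dcp_seqno cache.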
func addDBSbucket(cluster, pooln, bucketn string) (err error) {
	var bucket *couchbase.Bucket

	bucket, err = ConnectBucket(cluster, pooln, bucketn)
	if err != nil {
		logging.Errorf("Unable to connect with bucket %q\n", bucketn)
		return err
	}
	dcp_buckets_seqnos.buckets[bucketn] = bucket

	// get all kv-nodes
	if err = bucket.Refresh(); err != nil {
		logging.Errorf("bucket.Refresh(): %v\n", err)
		return err
	}

	// get current list of kv-nodes
	var m map[string][]uint16
	m, err = bucket.GetVBmap(nil)
	if err != nil {
		logging.Errorf("GetVBmap() failed: %v\n", err)
		return err
	}
	// calculate and cache the number of vbuckets.
	if dcp_buckets_seqnos.numVbs == 0 { // to happen only first time.
		for _, vbnos := range m {
			dcp_buckets_seqnos.numVbs += len(vbnos)
		}
	}

	// make sure a feed is available for all kv-nodes
	var kvfeed *couchbase.DcpFeed

	kvfeeds := make(map[string]*couchbase.DcpFeed)
	config := map[string]interface{}{"genChanSize": 10, "dataChanSize": 10}
	for kvaddr := range m {
		uuid, _ := NewUUID()
		name := uuid.Str()
		if name == "" {
			err = fmt.Errorf("invalid uuid")
			logging.Errorf("NewUUID() failed: %v\n", err)
			return err
		}
		name = "dcp-get-seqnos:" + name
		kvfeed, err = bucket.StartDcpFeedOver(
			name, uint32(0), []string{kvaddr}, uint16(0xABBA), config)
		if err != nil {
			logging.Errorf("StartDcpFeedOver(): %v\n", err)
			return err
		}
		kvfeeds[kvaddr] = kvfeed
	}
	dcp_buckets_seqnos.feeds[bucketn] = kvfeeds

	logging.Infof("{bucket,feeds} %q created for dcp_seqno cache...\n", bucketn)
	return nil
}
Example #18
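// handleEvent routes a DCP event to the vbucket routine's endpoints and
// returns the (possibly updated) mutation sequence number.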
func (vr *VbucketRoutine) handleEvent(m *mc.DcpEvent, seqno uint64) uint64 {
	logging.Tracef(
		traceMutFormat,
		vr.logPrefix, m.Opaque, m.Seqno, m.Opcode, string(m.Key))

	switch m.Opcode {
	case mcd.DCP_STREAMREQ: // broadcast StreamBegin
		if data := vr.makeStreamBeginData(seqno); data != nil {
			vr.broadcast2Endpoints(data)
		} else {
			fmsg := "%v ##%x StreamBeginData NOT PUBLISHED\n"
			logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
		}

	case mcd.DCP_SNAPSHOT: // broadcast Snapshot
		typ, start, end := m.SnapshotType, m.SnapstartSeq, m.SnapendSeq
		logging.Tracef(ssFormat, vr.logPrefix, m.Opaque, start, end, typ)
		if data := vr.makeSnapshotData(m, seqno); data != nil {
			vr.broadcast2Endpoints(data)
		} else {
			fmsg := "%v ##%x Snapshot NOT PUBLISHED\n"
			logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
		}

	case mcd.DCP_MUTATION, mcd.DCP_DELETION, mcd.DCP_EXPIRATION:
		seqno = m.Seqno // sequence number gets incremented only here
		// prepare a data for each endpoint.
		dataForEndpoints := make(map[string]interface{})
		// for each engine distribute transformations to endpoints.
		fmsg := "%v ##%x TransformRoute: %v\n"
		for _, engine := range vr.engines {
			err := engine.TransformRoute(vr.vbuuid, m, dataForEndpoints)
			if err != nil {
				logging.Errorf(fmsg, vr.logPrefix, m.Opaque, err)
				continue
			}
		}
		// send data to corresponding endpoint.
		for raddr, data := range dataForEndpoints {
			if endpoint, ok := vr.endpoints[raddr]; ok {
				// FIXME: without the coordinator doing shared topic
				// management, we will allow the feed to block.
				// Otherwise, send might fail due to ErrorChannelFull
				// or ErrorClosed
				if err := endpoint.Send(data); err != nil {
					msg := "%v ##%x endpoint(%q).Send() failed: %v"
					logging.Debugf(msg, vr.logPrefix, m.Opaque, raddr, err)
					endpoint.Close()
					delete(vr.endpoints, raddr)
				}
			}
		}
	}
	return seqno
}
Example #19
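// closeMutationStream sends a shutdown-topic request for the stream to every
// projector, retrying on failure and reporting the outcome on respCh.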
func (k *kvSender) closeMutationStream(streamId c.StreamId, bucket string,
	respCh MsgChannel, stopCh StopChannel) {

	addrs, err := k.getAllProjectorAddrs()
	if err != nil {
		logging.Errorf("KVSender::closeMutationStream %v %v Error in fetching cluster info %v",
			streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	topic := getTopicForStreamId(streamId)

	fn := func(r int, err error) error {

		//clear the error before every retry
		err = nil
		for _, addr := range addrs {
			execWithStopCh(func() {
				ap := newProjClient(addr)
				if ret := sendShutdownTopic(ap, topic); ret != nil {
					logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v from %v",
						streamId, bucket, ret, addr)
					//Treat TopicMissing/GenServer.Closed as success
					if ret.Error() == projClient.ErrorTopicMissing.Error() ||
						ret.Error() == c.ErrorClosed.Error() {
						logging.Infof("KVSender::closeMutationStream %v %v Treating %v As Success",
							streamId, bucket, ret)
					} else {
						err = ret
					}
				}
			}, stopCh)
		}
		return err
	}

	rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn)
	err = rh.Run()
	if err != nil {
		logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v", streamId, bucket, err)
		respCh <- &MsgError{
			err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR,
				severity: FATAL,
				cause:    err}}
		return
	}

	respCh <- &MsgSuccess{}

}
Example #20
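// runTimestampKeeper consumes stability timestamps from the timer, persists
// them to the repository periodically and broadcasts each one to the
// coordinator.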
func (m *IndexManager) runTimestampKeeper() {

	defer logging.Debugf("IndexManager.runTimestampKeeper() : terminate")

	inboundch := m.timer.getOutputChannel()

	persistTimestamp := true // save the first timestamp always
	lastPersistTime := uint64(time.Now().UnixNano())

	timestamps, err := m.repo.GetStabilityTimestamps()
	if err != nil {
		// TODO : Determine timestamp not exist versus forestdb error
		logging.Errorf("IndexManager.runTimestampKeeper() : cannot get stability timestamp from repository. Create a new one.")
		timestamps = createTimestampListSerializable()
	}

	for {
		select {
		case <-m.timekeeperStopCh:
			return

		case timestamp, ok := <-inboundch:

			if !ok {
				return
			}

			gometaC.SafeRun("IndexManager.runTimestampKeeper()",
				func() {
					timestamps.addTimestamp(timestamp)
					persistTimestamp = persistTimestamp ||
						uint64(time.Now().UnixNano())-lastPersistTime > m.timestampPersistInterval
					if persistTimestamp {
						if err := m.repo.SetStabilityTimestamps(timestamps); err != nil {
							logging.Errorf("IndexManager.runTimestampKeeper() : cannot set stability timestamp into repository.")
						} else {
							logging.Debugf("IndexManager.runTimestampKeeper() : saved stability timestamp to repository")
							persistTimestamp = false
							lastPersistTime = uint64(time.Now().UnixNano())
						}
					}

					data, err := marshallTimestampSerializable(timestamp)
					if err != nil {
						logging.Debugf(
							"IndexManager.runTimestampKeeper(): error when marshalling timestamp. Ignore timestamp.  Error=%s",
							err.Error())
					} else {
						m.coordinator.NewRequest(uint32(OPCODE_NOTIFY_TIMESTAMP), "Stability Timestamp", data)
					}
				})
		}
	}
}
Example #21
// GetFailoverLogs get the failover logs for a set of vbucket ids
func (b *Bucket) GetFailoverLogs(
	opaque uint16,
	vBuckets []uint16, config map[string]interface{}) (FailoverLog, error) {
	// map vbids to their corresponding hosts
	vbHostList := make(map[string][]uint16)
	vbm := b.VBServerMap()
	for _, vb := range vBuckets {
		if l := len(vbm.VBucketMap); int(vb) >= l {
			fmsg := "DCPF[] ##%x invalid vbucket id %d >= %d"
			logging.Errorf(fmsg, opaque, vb, l)
			return nil, ErrorInvalidVbucket
		}

		masterID := vbm.VBucketMap[vb][0]
		master := b.getMasterNode(masterID)
		if master == "" {
			fmsg := "DCP[] ##%x master node not found for vbucket %d"
			logging.Errorf(fmsg, opaque, vb)
			return nil, ErrorInvalidVbucket
		}

		vbList := vbHostList[master]
		if vbList == nil {
			vbList = make([]uint16, 0)
		}
		vbList = append(vbList, vb)
		vbHostList[master] = vbList
	}

	failoverLogMap := make(FailoverLog)
	for _, serverConn := range b.getConnPools() {
		vbList := vbHostList[serverConn.host]
		if vbList == nil {
			continue
		}

		name := NewDcpFeedName(fmt.Sprintf("getfailoverlog-%s-%v", b.Name, time.Now().UnixNano()))
		singleFeed, err := serverConn.StartDcpFeed(name, 0, nil, opaque, config)
		if err != nil {
			return nil, err
		}
		defer singleFeed.Close()

		failoverlogs, err := singleFeed.DcpGetFailoverLog(opaque, vbList)
		if err != nil {
			return nil, err
		}
		for vb, log := range failoverlogs {
			failoverLogMap[vb] = *log
		}
	}
	return failoverLogMap, nil
}
Example #22
// - return couchbase SDK error if any.
func (p *Projector) doFailoverLog(
	request *protobuf.FailoverLogRequest, opaque uint16) ap.MessageMarshaller {

	response := &protobuf.FailoverLogResponse{}

	pooln := request.GetPool()
	bucketn := request.GetBucket()
	vbuckets := request.GetVbnos()

	// log this request.
	prefix := p.logPrefix
	fmsg := "%v ##%x doFailoverLog() {%q, %q, %v}\n"
	logging.Infof(fmsg, prefix, opaque, pooln, bucketn, vbuckets)
	defer logging.Infof("%v ##%x doFailoverLog() returns ...\n", prefix, opaque)

	bucket, err := c.ConnectBucket(p.clusterAddr, pooln, bucketn)
	if err != nil {
		logging.Errorf("%v ##%x ConnectBucket(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	defer bucket.Close()

	protoFlogs := make([]*protobuf.FailoverLog, 0, len(vbuckets))
	vbnos := c.Vbno32to16(vbuckets)
	dcpConfig := map[string]interface{}{
		"genChanSize":  p.config["projector.dcp.genChanSize"].Int(),
		"dataChanSize": p.config["projector.dcp.dataChanSize"].Int(),
	}
	flogs, err := bucket.GetFailoverLogs(opaque, vbnos, dcpConfig)
	if err == nil {
		for vbno, flog := range flogs {
			vbuuids := make([]uint64, 0, len(flog))
			seqnos := make([]uint64, 0, len(flog))
			for _, x := range flog {
				vbuuids = append(vbuuids, x[0])
				seqnos = append(seqnos, x[1])
			}
			protoFlog := &protobuf.FailoverLog{
				Vbno:    proto.Uint32(uint32(vbno)),
				Vbuuids: vbuuids,
				Seqnos:  seqnos,
			}
			protoFlogs = append(protoFlogs, protoFlog)
		}
	} else {
		logging.Errorf("%v ##%x GetFailoverLogs(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	response.Logs = protoFlogs
	return response
}
Example #23
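// doDcpGetFailoverLog issues a DCP_FAILOVERLOG request per vbucket and
// collects the parsed failover log for each one.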
func (feed *DcpFeed) doDcpGetFailoverLog(
	opaque uint16,
	vblist []uint16,
	rcvch chan []interface{}) (map[uint16]*FailoverLog, error) {

	rq := &transport.MCRequest{
		Opcode: transport.DCP_FAILOVERLOG,
		Opaque: opaqueFailover,
	}
	failoverLogs := make(map[uint16]*FailoverLog)
	for _, vBucket := range vblist {
		rq.VBucket = vBucket
		if err := feed.conn.Transmit(rq); err != nil {
			fmsg := "%v ##%x doDcpGetFailoverLog.Transmit(): %v"
			logging.Errorf(fmsg, feed.logPrefix, opaque, err)
			return nil, err
		}
		msg, ok := <-rcvch
		if !ok {
			fmsg := "%v ##%x doDcpGetFailoverLog.rcvch closed"
			logging.Errorf(fmsg, feed.logPrefix, opaque)
			return nil, ErrorConnection
		}
		pkt := msg[0].(*transport.MCRequest)
		req := &transport.MCResponse{
			Opcode: pkt.Opcode,
			Cas:    pkt.Cas,
			Opaque: pkt.Opaque,
			Status: transport.Status(pkt.VBucket),
			Extras: pkt.Extras,
			Key:    pkt.Key,
			Body:   pkt.Body,
		}
		if req.Opcode != transport.DCP_FAILOVERLOG {
			fmsg := "%v ##%x for failover log request unexpected #opcode %v"
			logging.Errorf(fmsg, feed.logPrefix, opaque, req.Opcode)
			return nil, ErrorInvalidFeed

		} else if req.Status != transport.SUCCESS {
			fmsg := "%v ##%x for failover log request unexpected #status %v"
			logging.Errorf(fmsg, feed.logPrefix, opaque, req.Status)
			return nil, ErrorInvalidFeed
		}
		flog, err := parseFailoverLog(req.Body)
		if err != nil {
			fmsg := "%v ##%x parse failover logs for vb %d"
			logging.Errorf(fmsg, feed.logPrefix, opaque, vBucket)
			return nil, ErrorInvalidFeed
		}
		failoverLogs[vBucket] = flog
	}
	return failoverLogs, nil
}
Example #24
// ResetConfig accepts a full-set or subset of global configuration
// and updates projector related fields.
func (p *Projector) ResetConfig(config c.Config) {
	p.rw.Lock()
	defer p.rw.Unlock()
	defer logging.Infof("%v\n", c.LogRuntime())

	// reset configuration.
	if cv, ok := config["projector.settings.log_level"]; ok {
		logging.SetLogLevel(logging.Level(cv.String()))
	}
	if cv, ok := config["projector.maxCpuPercent"]; ok {
		c.SetNumCPUs(cv.Int())
	}
	p.config = p.config.Override(config)

	// CPU-profiling
	cpuProfile, ok := config["projector.cpuProfile"]
	if ok && cpuProfile.Bool() && p.cpuProfFd == nil {
		cpuProfFname, ok := config["projector.cpuProfFname"]
		if ok {
			fname := cpuProfFname.String()
			logging.Infof("%v cpu profiling => %q\n", p.logPrefix, fname)
			p.cpuProfFd = p.startCPUProfile(fname)

		} else {
			logging.Errorf("Missing cpu-profile o/p filename\n")
		}

	} else if ok && !cpuProfile.Bool() {
		if p.cpuProfFd != nil {
			pprof.StopCPUProfile()
			logging.Infof("%v cpu profiling stopped\n", p.logPrefix)
		}
		p.cpuProfFd = nil

	} else if ok {
		logging.Warnf("%v cpu profiling already active !!\n", p.logPrefix)
	}

	// MEM-profiling
	memProfile, ok := config["projector.memProfile"]
	if ok && memProfile.Bool() {
		memProfFname, ok := config["projector.memProfFname"]
		if ok {
			fname := memProfFname.String()
			if p.takeMEMProfile(fname) {
				logging.Infof("%v mem profile => %q\n", p.logPrefix, fname)
			}
		} else {
			logging.Errorf("Missing mem-profile o/p filename\n")
		}
	}
}
Example #25
// start cpu profiling.
func (p *Projector) startCPUProfile(filename string) *os.File {
	if filename == "" {
		fmsg := "%v empty cpu profile filename\n"
		logging.Errorf(fmsg, p.logPrefix, filename)
		return nil
	}
	fd, err := os.Create(filename)
	if err != nil {
		logging.Errorf("%v unable to create %q: %v\n", p.logPrefix, filename, err)
	}
	pprof.StartCPUProfile(fd)
	return fd
}
Example #26
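// genServer is the feed's request loop: it dispatches commands arriving on
// reqch and closes the node feeds and output channel on exit.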
func (feed *DcpFeed) genServer(reqch chan []interface{}, opaque uint16) {
	defer func() { // panic safe
		close(feed.finch)
		if r := recover(); r != nil {
			logging.Errorf("%v ##%x crashed: %v\n", feed.logPrefix, opaque, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		for _, nodeFeed := range feed.nodeFeeds {
			nodeFeed.dcpFeed.Close()
		}
		feed.nodeFeeds = nil
		close(feed.output)
	}()

loop:
	for {
		select {
		case msg := <-reqch:
			cmd := msg[0].(byte)
			switch cmd {
			case ufCmdRequestStream:
				vb, opaque := msg[1].(uint16), msg[2].(uint16)
				flags, vbuuid := msg[3].(uint32), msg[4].(uint64)
				startSeq, endSeq := msg[5].(uint64), msg[6].(uint64)
				snapStart, snapEnd := msg[7].(uint64), msg[8].(uint64)
				err := feed.dcpRequestStream(
					vb, opaque, flags, vbuuid, startSeq, endSeq,
					snapStart, snapEnd)
				respch := msg[9].(chan []interface{})
				respch <- []interface{}{err}

			case ufCmdCloseStream:
				vb, opaqueMSB := msg[1].(uint16), msg[2].(uint16)
				err := feed.dcpCloseStream(vb, opaqueMSB)
				respch := msg[3].(chan []interface{})
				respch <- []interface{}{err}

			case ufCmdGetSeqnos:
				respch := msg[1].(chan []interface{})
				seqnos, err := feed.dcpGetSeqnos()
				respch <- []interface{}{seqnos, err}

			case ufCmdClose:
				respch := msg[1].(chan []interface{})
				respch <- []interface{}{nil}
				break loop
			}
		}
	}
}
Example #27
func (cp *connectionPool) Close() (err error) {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("%v Close() crashed: %v\n", cp.logPrefix, r)
			logging.Errorf("%s", logging.StackTrace())
		}
	}()
	close(cp.connections)
	for connectn := range cp.connections {
		connectn.conn.Close()
	}
	logging.Infof("%v ... stopped\n", cp.logPrefix)
	return
}
Example #28
//Rollback slice to given snapshot. Return error if
//not possible
func (fdb *fdbSlice) Rollback(info SnapshotInfo) error {

	//get the seqnum from snapshot
	snapInfo := info.(*fdbSnapshotInfo)

	infos, err := fdb.getSnapshotsMeta()
	if err != nil {
		return err
	}

	sic := NewSnapshotInfoContainer(infos)
	sic.RemoveRecentThanTS(info.Timestamp())

	//rollback meta-store first, if main/back index rollback fails, recovery
	//will pick up the rolled-back meta information.
	err = fdb.meta.Rollback(snapInfo.MetaSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Meta Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}

	//call forestdb to rollback for each kv store
	err = fdb.main[0].Rollback(snapInfo.MainSeq)
	if err != nil {
		logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
			"Main Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
		return err
	}

	fdb.setCommittedCount()

	//rollback back-index only for non-primary indexes
	if !fdb.isPrimary {
		err = fdb.back[0].Rollback(snapInfo.BackSeq)
		if err != nil {
			logging.Errorf("ForestDBSlice::Rollback \n\tSliceId %v IndexInstId %v. Error Rollback "+
				"Back Index to Snapshot %v. Error %v", fdb.id, fdb.idxInstId, info, err)
			return err
		}
	}

	// Update valid snapshot list and commit
	err = fdb.updateSnapshotsMeta(sic.List())
	if err != nil {
		return err
	}

	return fdb.dbfile.Commit(forestdb.COMMIT_MANUAL_WAL_FLUSH)
}
Example #29
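// BuildIndexes collects the buckets for the given index definitions and asks
// the notifier to build them, recording any per-instance build errors.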
func (m *LifecycleMgr) BuildIndexes(ids []common.IndexDefnId) error {

	buckets := []string(nil)
	for _, id := range ids {
		defn, err := m.repo.GetIndexDefnById(id)
		if err != nil {
			logging.Errorf("LifecycleMgr.handleBuildIndexes() : buildIndex fails. Reason = %v", err)
			return err
		}

		found := false
		for _, bucket := range buckets {
			if bucket == defn.Bucket {
				found = true
			}
		}

		if !found {
			buckets = append(buckets, defn.Bucket)
		}
	}

	if m.notifier != nil {
		if errMap := m.notifier.OnIndexBuild(ids, buckets); len(errMap) != 0 {
			logging.Errorf("LifecycleMgr.hanaleBuildIndexes() : buildIndex fails. Reason = %v", errMap)
			result := error(nil)

			for instId, build_err := range errMap {
				defnId := common.IndexDefnId(instId)

				if defn, err := m.repo.GetIndexDefnById(defnId); err == nil {
					m.UpdateIndexInstance(defn.Bucket, defnId, common.INDEX_STATE_NIL, common.NIL_STREAM, build_err.Error(), nil)
				}

				if result == nil {
					result = build_err
				} else if result.Error() != build_err.Error() {
					result = errors.New("Build index fails. Please check index status for error.")
				}
			}

			return result
		}
	}

	logging.Debugf("LifecycleMgr.handleBuildIndexes() : buildIndex completes")

	return nil
}
Example #30
//
// Go-routine to bootstrap projectors for the shared stream, as well as
// continuous maintenance of the shared stream. It listens for topology
// updates and updates the projectors in response.
//
func (s *StreamManager) run() {

	// register to index manager for receiving topology change
	changeCh, err := s.indexMgr.StartListenTopologyUpdate("Stream Manager")
	if err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Fail to listen to topology changes from repository.  Error = %v", err))
	}

	// load topology
	if err := s.loadTopology(); err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Fail to load topology from repository.  Error = %v", err))
	}

	// initialize stream
	if err := s.initializeMaintenanceStream(); err != nil {
		panic(fmt.Sprintf("StreamManager.run(): Fail to initialize maintenance stream.  Error = %v", err))
	}

	for {
		select {
		case data, ok := <-changeCh:
			if !ok {
				logging.Debugf("StreamManager.run(): topology change channel is closed.  Terminates.")
				return
			}

			func() {
				defer func() {
					if r := recover(); r != nil {
						logging.Warnf("panic in StreamManager.run() : %s.  Ignored.", r)
					}
				}()

				topology, err := unmarshallIndexTopology(data.([]byte))
				if err != nil {
					logging.Errorf("StreamManager.run(): unable to unmarshall topology.  Topology change is ignored by stream manager.")
				} else {
					err := s.handleTopologyChange(topology)
					if err != nil {
						logging.Errorf("StreamManager.run(): receive error from handleTopologyChange.  Error = %v.  Ignore", err)
					}
				}
			}()

		case <-s.stopch:
			return
		}
	}
}