Example No. 1
func Receive(conn transporter, buf []byte) (flags TransportFlag, payload []byte, err error) {
	// transport de-framing
	if err = fullRead(conn, buf[:pktDataOffset]); err != nil {
		if err == io.EOF {
			logging.Tracef("receiving packet: %v\n", err)
		} else {
			logging.Errorf("receiving packet: %v\n", err)
		}
		return
	}
	a, b := pktLenOffset, pktLenOffset+pktLenSize
	pktlen := binary.BigEndian.Uint32(buf[a:b])

	a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
	flags = TransportFlag(binary.BigEndian.Uint16(buf[a:b]))
	if maxLen := uint32(len(buf)); pktlen > maxLen {
		logging.Errorf("receiving packet length %v > %v\n", pktlen, maxLen)
		err = ErrorPacketOverflow
		return
	}
	if err = fullRead(conn, buf[:pktlen]); err != nil {
		if err == io.EOF {
			logging.Tracef("receiving packet: %v\n", err)
		} else {
			logging.Errorf("receiving packet: %v\n", err)
		}
		return
	}

	return flags, buf[:pktlen], err
}
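Receive depends on a transporter connection and a fullRead helper defined elsewhere in the transport package. A minimal sketch of what they could look like, assuming transporter is a net.Conn-like contract and fullRead blocks until the buffer is filled; the real definitions may differ:

// Sketch of the assumed connection contract; the canonical version
// lives in the transport package itself.
type transporter interface {
	io.ReadWriter
	LocalAddr() net.Addr
	RemoteAddr() net.Addr
}

// fullRead reads until buf is completely filled or the connection fails.
// io.ReadFull returns io.EOF only when nothing was read, which matches
// the EOF handling in Receive above.
func fullRead(conn transporter, buf []byte) error {
	_, err := io.ReadFull(conn, buf)
	return err
}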
Example No. 2
func (fdb *fdbSlice) insertPrimaryIndex(key []byte, docid []byte, workerId int) {
	var err error

	logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Set Key - %s", fdb.id, fdb.idxInstId, docid)

	//check if the docid exists in the main index
	t0 := time.Now()
	if _, err = fdb.main[workerId].GetKV(key); err == nil {
		fdb.idxStats.Timings.stKVGet.Put(time.Since(t0))
		//skip
		logging.Tracef("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Key %v Already Exists. "+
			"Primary Index Update Skipped.", fdb.id, fdb.idxInstId, string(docid))
	} else if err != nil && err != forestdb.RESULT_KEY_NOT_FOUND {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error locating "+
			"mainindex entry %v", fdb.id, fdb.idxInstId, err)
	} else if err == forestdb.RESULT_KEY_NOT_FOUND {
		//set in main index
		t0 := time.Now()
		if err = fdb.main[workerId].SetKV(key, nil); err != nil {
			fdb.checkFatalDbError(err)
			logging.Errorf("ForestDBSlice::insert \n\tSliceId %v IndexInstId %v Error in Main Index Set. "+
				"Skipped Key %s. Error %v", fdb.id, fdb.idxInstId, string(docid), err)
		}
		fdb.idxStats.Timings.stKVSet.Put(time.Since(t0))
		platform.AddInt64(&fdb.insert_bytes, int64(len(key)))
	}
}
Example No. 3
//initBucketFilter initializes the bucket filter
func (r *mutationStreamReader) initBucketFilter(bucketFilter map[string]*common.TsVbuuid) {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	//allocate a new filter for the buckets which don't
	//have a filter yet
	for b, q := range r.bucketQueueMap {
		if _, ok := r.bucketFilterMap[b]; !ok {
			logging.Tracef("MutationStreamReader::initBucketFilter Added new filter "+
				"for Bucket %v Stream %v", b, r.streamId)

			//if there is a non-nil filter, use it; otherwise use a zero filter.
			if filter, ok := bucketFilter[b]; ok && filter != nil {
				r.bucketFilterMap[b] = filter.Copy()
			} else {
				r.bucketFilterMap[b] = common.NewTsVbuuid(b, int(q.queue.GetNumVbuckets()))
			}

			r.bucketSyncDue[b] = false
		}
	}

	//remove the filters for buckets which no longer exist
	for b := range r.bucketFilterMap {
		if _, ok := r.bucketQueueMap[b]; !ok {
			logging.Tracef("MutationStreamReader::initBucketFilter Deleted filter "+
				"for Bucket %v Stream %v", b, r.streamId)
			delete(r.bucketFilterMap, b)
			delete(r.bucketSyncDue, b)
		}
	}

}
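initBucketFilter and the later filter methods (Examples 6 and 21) share a small set of reader fields. A sketch of that subset, inferred from usage in these examples rather than copied from the real struct:

// Inferred subset of mutationStreamReader; the real struct carries more
// state, and the queue type here is an assumption.
type mutationStreamReader struct {
	streamId        common.StreamId
	syncLock        sync.Mutex
	bucketQueueMap  map[string]*bucketQueue     // per-bucket mutation queues
	bucketFilterMap map[string]*common.TsVbuuid // per-vbucket seqno/vbuuid filter
	bucketSyncDue   map[string]bool             // sync message pending per bucket
}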
Example No. 4
func (vr *VbucketRoutine) handleEvent(m *mc.DcpEvent, seqno uint64) uint64 {
	logging.Tracef(
		traceMutFormat,
		vr.logPrefix, m.Opaque, m.Seqno, m.Opcode, string(m.Key))

	switch m.Opcode {
	case mcd.DCP_STREAMREQ: // broadcast StreamBegin
		if data := vr.makeStreamBeginData(seqno); data != nil {
			vr.broadcast2Endpoints(data)
		} else {
			fmsg := "%v ##%x StreamBeginData NOT PUBLISHED\n"
			logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
		}

	case mcd.DCP_SNAPSHOT: // broadcast Snapshot
		typ, start, end := m.SnapshotType, m.SnapstartSeq, m.SnapendSeq
		logging.Tracef(ssFormat, vr.logPrefix, m.Opaque, start, end, typ)
		if data := vr.makeSnapshotData(m, seqno); data != nil {
			vr.broadcast2Endpoints(data)
		} else {
			fmsg := "%v ##%x Snapshot NOT PUBLISHED\n"
			logging.Errorf(fmsg, vr.logPrefix, m.Opaque)
		}

	case mcd.DCP_MUTATION, mcd.DCP_DELETION, mcd.DCP_EXPIRATION:
		seqno = m.Seqno // the sequence number advances only on these events
		// prepare data for each endpoint.
		dataForEndpoints := make(map[string]interface{})
		// for each engine, distribute transformations to endpoints.
		fmsg := "%v ##%x TransformRoute: %v\n"
		for _, engine := range vr.engines {
			err := engine.TransformRoute(vr.vbuuid, m, dataForEndpoints)
			if err != nil {
				logging.Errorf(fmsg, vr.logPrefix, m.Opaque, err)
				continue
			}
		}
		// send data to corresponding endpoint.
		for raddr, data := range dataForEndpoints {
			if endpoint, ok := vr.endpoints[raddr]; ok {
				// FIXME: without the coordinator doing shared topic
				// management, we will allow the feed to block.
				// Otherwise, send might fail due to ErrorChannelFull
				// or ErrorClosed
				if err := endpoint.Send(data); err != nil {
					msg := "%v ##%x endpoint(%q).Send() failed: %v"
					logging.Debugf(msg, vr.logPrefix, m.Opaque, raddr, err)
					endpoint.Close()
					delete(vr.endpoints, raddr)
				}
			}
		}
	}
	return seqno
}
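handleEvent returns the latest mutation seqno so the caller can carry it across events; for non-mutation opcodes the input seqno passes through unchanged. A hypothetical driver loop (the event channel and its wiring are assumptions, not the routine's actual plumbing):

// Hypothetical caller: thread the seqno through successive events.
var seqno uint64
for m := range eventCh { // eventCh chan *mc.DcpEvent, assumed
	seqno = vr.handleEvent(m, seqno)
}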
Example No. 5
func (vr *VbucketRoutine) printCtrl(v interface{}) {
	switch val := v.(type) {
	case map[string]c.RouterEndpoint:
		for raddr := range val {
			fmsg := "%v ##%x knows endpoint %v\n"
			logging.Tracef(fmsg, vr.logPrefix, vr.opaque, raddr)
		}
	case map[uint64]*Engine:
		for uuid := range val {
			fmsg := "%v ##%x knows engine %v\n"
			logging.Tracef(fmsg, vr.logPrefix, vr.opaque, uuid)
		}
	}
}
Example No. 6
//checkAndSetBucketFilter checks if a mutation can be processed
//based on the current filter. The filter is also updated with the new
//seqno/vbuuid if the mutation can be processed.
func (r *mutationStreamReader) checkAndSetBucketFilter(meta *MutationMeta) bool {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	if filter, ok := r.bucketFilterMap[meta.bucket]; ok {
		//the filter only checks if the seqno of the incoming mutation is
		//greater than the seqno in the existing filter. There should also be
		//a valid StreamBegin (non-zero vbuuid) for the vbucket. The vbuuid
		//check ensures that, after a stream restart for a bucket, mutations
		//get processed only after StreamBegin. There can be residual
		//mutations in the projector endpoint queue after a bucket gets
		//deleted from the stream (in the multi-bucket case). The vbuuid
		//doesn't get reset on StreamEnd/StreamBegin; the filter can be
		//extended with that check if required.
		if uint64(meta.seqno) > filter.Seqnos[meta.vbucket] &&
			filter.Vbuuids[meta.vbucket] != 0 {
			filter.Seqnos[meta.vbucket] = uint64(meta.seqno)
			filter.Vbuuids[meta.vbucket] = uint64(meta.vbuuid)
			r.bucketSyncDue[meta.bucket] = true
			return true
		} else {
			logging.Tracef("MutationStreamReader::checkAndSetBucketFilter Skipped "+
				"Mutation %v for Bucket %v Stream %v. Current Filter %v", meta,
				meta.bucket, r.streamId, filter.Seqnos[meta.vbucket])
			return false
		}
	} else {
		logging.Errorf("MutationStreamReader::checkAndSetBucketFilter Missing"+
			"bucket %v in Filter for Stream %v", meta.bucket, r.streamId)
		return false
	}
}
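To make the filter rule concrete, a small illustration with hypothetical values, using the same predicate as above:

// Hypothetical: vbucket 5 sits at seqno 9 with a non-zero vbuuid, so a
// mutation at seqno 10 passes; seqno 9, or a zero vbuuid, would not.
filter := common.NewTsVbuuid("default", 1024)
filter.Seqnos[5], filter.Vbuuids[5] = 9, 0x1234
ok := uint64(10) > filter.Seqnos[5] && filter.Vbuuids[5] != 0 // ok == true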
Example No. 7
// receive requests from remote; when this function returns,
// the connection is expected to be closed.
func (s *Server) doReceive(conn net.Conn, rcvch chan<- interface{}) {
	raddr := conn.RemoteAddr()

	// transport buffer for receiving
	flags := transport.TransportFlag(0).SetProtobuf()
	rpkt := transport.NewTransportPacket(s.maxPayload, flags)
	rpkt.SetDecoder(transport.EncodingProtobuf, protobuf.ProtobufDecode)

	logging.Infof("%v connection %q doReceive() ...\n", s.logPrefix, raddr)

loop:
	for {
		// TODO: Fix read timeout correctly
		// timeoutMs := s.readDeadline * time.Millisecond
		// conn.SetReadDeadline(time.Now().Add(timeoutMs))

		req, err := rpkt.Receive(conn)
		// TODO: handle close-connection and don't print error message.
		if err != nil {
			if err == io.EOF {
				logging.Tracef("%v connection %q exited %v\n", s.logPrefix, raddr, err)
			} else {
				logging.Errorf("%v connection %q exited %v\n", s.logPrefix, raddr, err)
			}
			break loop
		}
		select {
		case rcvch <- req:
		case <-s.killch:
			break loop
		}
	}
	close(rcvch)
}
Example No. 8
func Send(conn transporter, buf []byte, flags TransportFlag, payload []byte) (err error) {
	var n int

	// transport framing
	l := pktLenSize + pktFlagSize
	if maxLen := len(buf); l > maxLen {
		logging.Errorf("sending packet length %v > %v\n", l, maxLen)
		err = ErrorPacketOverflow
		return
	}

	a, b := pktLenOffset, pktLenOffset+pktLenSize
	binary.BigEndian.PutUint32(buf[a:b], uint32(len(payload)))
	a, b = pktFlagOffset, pktFlagOffset+pktFlagSize
	binary.BigEndian.PutUint16(buf[a:b], uint16(flags))
	if n, err = conn.Write(buf[:pktDataOffset]); err == nil {
		if n, err = conn.Write(payload); err == nil && n != len(payload) {
			logging.Errorf("transport wrote only %v bytes for payload\n", n)
			err = ErrorPacketWrite
		}
		laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
		logging.Tracef("wrote %v bytes on connection %v->%v", len(payload), laddr, raddr)

	} else if n != pktDataOffset {
		logging.Errorf("transport wrote only %v bytes for header\n", n)
		err = ErrorPacketWrite
	}
	return
}
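Send and Receive agree on a single framing layout: a 4-byte big-endian payload length, a 2-byte big-endian flag word, then the payload. The offset constants below are inferred from the field sizes used above, not copied from the real source:

// Inferred framing constants; the header occupies the first 6 bytes.
const (
	pktLenOffset  = 0                           // uint32 payload length
	pktLenSize    = 4
	pktFlagOffset = pktLenOffset + pktLenSize   // uint16 transport flags
	pktFlagSize   = 2
	pktDataOffset = pktFlagOffset + pktFlagSize // payload starts here
)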
Example No. 9
func makeResponsehandler(
	client *qclient.GsiClient, conn *datastore.IndexConnection) qclient.ResponseHandler {

	entryChannel := conn.EntryChannel()
	stopChannel := conn.StopChannel()

	return func(data qclient.ResponseReader) bool {

		if err := data.Error(); err != nil {
			conn.Error(n1qlError(client, err))
			return false
		}
		skeys, pkeys, err := data.GetEntries()
		if err == nil {
			for i, skey := range skeys {
				// Primary-key is mandatory.
				e := &datastore.IndexEntry{
					PrimaryKey: string(pkeys[i]),
				}
				e.EntryKey = skey2Values(skey)

				fmsg := "current enqueued length: %d (max %d)\n"
				l.Tracef(fmsg, len(entryChannel), cap(entryChannel))
				select {
				case entryChannel <- e:
				case <-stopChannel:
					return false
				}
			}
			return true
		}
		conn.Error(errors.NewError(nil, err.Error()))
		return false
	}
}
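The handler relies on a skey2Values helper to turn each secondary key into the datastore's value slice. A hypothetical sketch, assuming a secondary key is a []interface{} of key parts; the real helper and exact types may differ:

// Hypothetical skey2Values: one datastore value per secondary-key part.
func skey2Values(skey []interface{}) value.Values {
	vals := make(value.Values, len(skey))
	for i := range skey {
		vals[i] = value.NewValue(skey[i])
	}
	return vals
}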
Example No. 10
// Receive payload from remote, decompress and decode it, and return the
// resulting payload.
func (pkt *TransportPacket) Receive(conn transporter) (payload interface{}, err error) {
	var data []byte
	var flags TransportFlag

	flags, data, err = Receive(conn, pkt.buf)
	if err != nil {
		return
	}

	// Special packet to indicate end of response.
	if len(data) == 0 && flags == 0 {
		return nil, nil
	}

	pkt.flags = flags

	laddr, raddr := conn.LocalAddr(), conn.RemoteAddr()
	logging.Tracef("read %v bytes on connection %v<-%v", len(data), laddr, raddr)

	// de-compression
	if data, err = pkt.decompress(data); err != nil {
		return
	}
	// decoding
	if payload, err = pkt.decode(data); err != nil {
		return
	}
	return
}
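This method touches pkt.buf, pkt.flags, and the decompress/decode hooks. A sketch of the packet type inferred from that usage and from SetDecoder in Example No. 7; the real struct also carries the encode/compress side used by Send:

// Inferred shape of TransportPacket; the decoder key type is an
// assumption based on SetDecoder(EncodingProtobuf, ...).
type TransportPacket struct {
	flags    TransportFlag
	buf      []byte // reusable receive buffer, sized to the max payload
	decoders map[TransportFlag]func(data []byte) (interface{}, error)
}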
Example No. 11
func (s *storageMgr) handleUpdateIndexPartnMap(cmd Message) {

	logging.Tracef("StorageMgr::handleUpdateIndexPartnMap %v", cmd)
	indexPartnMap := cmd.(*MsgUpdatePartnMap).GetIndexPartnMap()
	s.indexPartnMap = CopyIndexPartnMap(indexPartnMap)

	s.supvCmdch <- &MsgSuccess{}
}
Example No. 12
func (s *scanCoordinator) handleUpdateIndexPartnMap(cmd Message) {
	s.mu.Lock()
	defer s.mu.Unlock()

	logging.Tracef("ScanCoordinator::handleUpdateIndexPartnMap %v", cmd)
	indexPartnMap := cmd.(*MsgUpdatePartnMap).GetIndexPartnMap()
	s.indexPartnMap = CopyIndexPartnMap(indexPartnMap)

	s.supvCmdch <- &MsgSuccess{}
}
Example No. 13
func (s *httpServer) connState(conn net.Conn, state http.ConnState) {
	raddr := conn.RemoteAddr()
	logging.Tracef("%s connState for %q : %v\n", s.logPrefix, raddr, state)

	s.mu.Lock()
	defer s.mu.Unlock()

	if s.lis != nil && state == http.StateNew {
		s.conns = append(s.conns, conn)
	}
}
Example No. 14
func receive(rch chan []interface{}) {
	// bucket -> Opcode -> #count
	counts := make(map[string]map[mcd.CommandCode]int)

	var tick <-chan time.Time
	if options.stats > 0 {
		tick = time.Tick(time.Millisecond * time.Duration(options.stats))
	}

	finTimeout := time.After(time.Millisecond * time.Duration(options.duration))
loop:
	for {
		select {
		case msg, ok := <-rch:
			if !ok {
				break loop
			}
			bucket, e := msg[0].(string), msg[1].(*mc.DcpEvent)
			if e.Opcode == mcd.DCP_MUTATION {
				logging.Tracef("DcpMutation KEY -- %v\n", string(e.Key))
				logging.Tracef("     %v\n", string(e.Value))
			}
			if _, ok := counts[bucket]; !ok {
				counts[bucket] = make(map[mcd.CommandCode]int)
			}
			counts[bucket][e.Opcode]++

		case <-tick:
			for bucket, m := range counts {
				logging.Infof("%q %s\n", bucket, sprintCounts(m))
			}
			logging.Infof("\n")

		case <-finTimeout:
			break loop
		}
	}
}
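The tick branch prints per-bucket counters through a sprintCounts helper. A hypothetical sketch of it, rendering the opcode counts on one stable, sorted line:

// Hypothetical sprintCounts: one-line rendering of opcode counts.
func sprintCounts(counts map[mcd.CommandCode]int) string {
	parts := make([]string, 0, len(counts))
	for opcode, n := range counts {
		parts = append(parts, fmt.Sprintf("%v:%v", opcode, n))
	}
	sort.Strings(parts) // stable output across runs
	return strings.Join(parts, " ")
}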
Example No. 15
// send mutations for a set of vbuckets, update vbucket channels based on
// StreamBegin and StreamEnd.
func (c *Client) sendKeyVersions(
	vbs []*common.VbKeyVersions,
	vbChans map[string]chan interface{},
	quitch chan []string) []string {

	var idx int

	for _, vb := range vbs {
		if len(vb.Kvs) == 0 {
			logging.Warnf("%v empty mutations\n", c.logPrefix)
			continue
		}

		fin, l := false, len(vb.Kvs)

		if vb.Kvs[0].Commands[0] == common.StreamBegin { // first mutation
			vbChans[vb.Uuid], idx = c.addVbucket(vb.Uuid)
			logging.Tracef(
				"%v mapped vbucket {%v,%v}\n",
				c.logPrefixes[idx], vb.Bucket, vb.Vbucket)
		}

		if vb.Kvs[l-1].Commands[0] == common.StreamEnd { // last mutation
			fin = true
		}

		select {
		case vbChans[vb.Uuid] <- vb:
			if fin {
				logging.Tracef(
					"%v {%v,%v} ended\n", c.logPrefix, vb.Bucket, vb.Vbucket)
				c.delVbucket(vb.Uuid)
				delete(vbChans, vb.Uuid)
			}

		case msg := <-quitch:
			return msg
		}
	}
	return nil
}
Example No. 16
func (s *scanCoordinator) handleUpdateIndexInstMap(cmd Message) {
	s.mu.Lock()
	defer s.mu.Unlock()

	req := cmd.(*MsgUpdateInstMap)
	logging.Tracef("ScanCoordinator::handleUpdateIndexInstMap %v", cmd)
	indexInstMap := req.GetIndexInstMap()
	s.stats.Set(req.GetStatsObject())
	s.indexInstMap = common.CopyIndexInstMap(indexInstMap)

	s.supvCmdch <- &MsgSuccess{}
}
Example No. 17
func send(w http.ResponseWriter, res interface{}) {

	header := w.Header()
	header["Content-Type"] = []string{"application/json"}

	if buf, err := json.Marshal(res); err == nil {
		logging.Tracef("RequestHandler::sendResponse: sending response back to caller. %v", string(buf))
		w.Write(buf)
	} else {
		// note : buf is nil if err != nil
		logging.Debugf("RequestHandler::sendResponse: failed to marshal response back to caller. %s", err)
		sendHttpError(w, "RequestHandler::sendResponse: Unable to marshal response", http.StatusInternalServerError)
	}
}
Example No. 18
// Gather the index-meta response from the HTTP response.
func (b *cbqClient) metaResponse(
	resp *http.Response) (mresp indexMetaResponse, err error) {

	var body []byte
	body, err = ioutil.ReadAll(resp.Body)
	if err == nil {
		if err = json.Unmarshal(body, &mresp); err == nil {
			logging.Tracef("%v received raw response %s", b.logPrefix, string(body))
			if strings.Contains(mresp.Status, "error") {
				err = errors.New(mresp.Errors[0].Msg)
			}
		}
	}
	return mresp, err
}
Example No. 19
func (fdb *fdbSlice) deleteSecIndex(docid []byte, workerId int) {

	//logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Delete Key - %s",
	//	fdb.id, fdb.idxInstId, docid)

	var olditm []byte
	var err error

	if olditm, err = fdb.getBackIndexEntry(docid, workerId); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error locating "+
			"backindex entry for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}

	//if the old item is nil, nothing needs to be done. This is the case for
	//deletes which happened before the index was created.
	if olditm == nil {
		logging.Tracef("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v Received NIL Key for "+
			"Doc Id %v. Skipped.", fdb.id, fdb.idxInstId, docid)
		return
	}

	//delete from main index
	t0 := time.Now()
	if err = fdb.main[workerId].DeleteKV(olditm); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from main index for Doc %s. Key %v. Error %v", fdb.id, fdb.idxInstId,
			docid, olditm, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Since(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(olditm)))

	//delete from the back index
	t0 = time.Now()
	if err = fdb.back[workerId].DeleteKV(docid); err != nil {
		fdb.checkFatalDbError(err)
		logging.Errorf("ForestDBSlice::delete \n\tSliceId %v IndexInstId %v. Error deleting "+
			"entry from back index for Doc %s. Error %v", fdb.id, fdb.idxInstId, docid, err)
		return
	}
	fdb.idxStats.Timings.stKVDelete.Put(time.Since(t0))
	platform.AddInt64(&fdb.delete_bytes, int64(len(docid)))
	fdb.isDirty = true

}
Example No. 20
func main() {
	platform.HideConsole(true)
	defer platform.HideConsole(false)
	common.SeedProcess()

	logging.Infof("Indexer started with command line: %v\n", os.Args)
	flag.Parse()

	logging.SetLogLevel(logging.Level(*logLevel))
	forestdb.Log = &logging.SystemLogger

	// setup cbauth
	if *auth != "" {
		up := strings.Split(*auth, ":")
		logging.Tracef("Initializing cbauth with user %v for cluster %v\n", up[0], *cluster)
		if _, err := cbauth.InternalRetryDefaultInit(*cluster, up[0], up[1]); err != nil {
			logging.Fatalf("Failed to initialize cbauth: %s", err)
		}
	}

	go platform.DumpOnSignal()
	go common.ExitOnStdinClose()

	config := common.SystemConfig
	config.SetValue("indexer.clusterAddr", *cluster)
	config.SetValue("indexer.numVbuckets", *numVbuckets)
	config.SetValue("indexer.enableManager", *enableManager)
	config.SetValue("indexer.adminPort", *adminPort)
	config.SetValue("indexer.scanPort", *scanPort)
	config.SetValue("indexer.httpPort", *httpPort)
	config.SetValue("indexer.streamInitPort", *streamInitPort)
	config.SetValue("indexer.streamCatchupPort", *streamCatchupPort)
	config.SetValue("indexer.streamMaintPort", *streamMaintPort)
	config.SetValue("indexer.storage_dir", *storageDir)

	storage_dir := config["indexer.storage_dir"].String()
	if err := os.MkdirAll(storage_dir, 0755); err != nil {
		common.CrashOnError(err)
	}

	_, msg := indexer.NewIndexer(config)

	if msg.GetMsgType() != indexer.MSG_SUCCESS {
		logging.Warnf("Indexer failed to initialize %v", msg)
	}

	logging.Infof("Indexer exiting normally\n")
}
Example No. 21
//setBucketFilter sets the bucket filter based on the seqno/vbuuid of the
//mutation. The filter is set when StreamBegin is received.
func (r *mutationStreamReader) setBucketFilter(meta *MutationMeta) {

	r.syncLock.Lock()
	defer r.syncLock.Unlock()

	if filter, ok := r.bucketFilterMap[meta.bucket]; ok {
		filter.Seqnos[meta.vbucket] = uint64(meta.seqno)
		filter.Vbuuids[meta.vbucket] = uint64(meta.vbuuid)
		logging.Tracef("MutationStreamReader::setBucketFilter Vbucket %v "+
			"Seqno %v Bucket %v Stream %v", meta.vbucket, meta.seqno, meta.bucket, r.streamId)
	} else {
		logging.Errorf("MutationStreamReader::setBucketFilter Missing bucket "+
			"%v in Filter for Stream %v", meta.bucket, r.streamId)
	}

}
Example No. 22
func (c *GsiScanClient) streamResponse(
	conn net.Conn,
	pkt *transport.TransportPacket,
	callb ResponseHandler) (cont bool, healthy bool, err error) {

	var resp interface{}
	var finish bool

	laddr := conn.LocalAddr()
	timeoutMs := c.readDeadline * time.Millisecond
	conn.SetReadDeadline(time.Now().Add(timeoutMs))
	if resp, err = pkt.Receive(conn); err != nil {
		//resp := &protobuf.ResponseStream{
		//    Err: &protobuf.Error{Error: proto.String(err.Error())},
		//}
		//callb(resp) // callback with error
		cont, healthy = false, false
		if err == io.EOF {
			err = fmt.Errorf("server closed connection (EOF)")
		} else {
			fmsg := "%v connection %q response transport failed `%v`\n"
			logging.Errorf(fmsg, c.logPrefix, laddr, err)
		}

	} else if resp == nil {
		finish = true
		fmsg := "%v connection %q received StreamEndResponse"
		logging.Tracef(fmsg, c.logPrefix, laddr)
		callb(&protobuf.StreamEndResponse{}) // callback most likely return true
		cont, healthy = false, true

	} else {
		streamResp := resp.(*protobuf.ResponseStream)
		if err = streamResp.Error(); err == nil {
			cont = callb(streamResp)
		}
		healthy = true
	}

	var closeErr error
	if !cont && healthy && !finish {
		if closeErr, healthy = c.closeStream(conn, pkt); err == nil {
			err = closeErr
		}
	}
	return
}
Example No. 23
//handleGetMutationQueueLWT calculates LWT for a mutation queue
//for a given stream and bucket
func (m *mutationMgr) handleGetMutationQueueLWT(cmd Message) {

	logging.Tracef("MutationMgr::handleGetMutationQueueLWT %v", cmd)
	bucket := cmd.(*MsgMutMgrGetTimestamp).GetBucket()
	streamId := cmd.(*MsgMutMgrGetTimestamp).GetStreamId()

	m.lock.Lock()
	defer m.lock.Unlock()

	q := m.streamBucketQueueMap[streamId][bucket]
	stats := m.stats.Get()
	go func(config common.Config) {
		flusher := NewFlusher(config, stats)
		ts := flusher.GetQueueLWT(q.queue)
		m.supvCmdch <- &MsgTimestamp{ts: ts}
	}(m.config)
}
Example No. 24
//handleDrainMutationQueue handles drain queue message from
//supervisor. Success is sent on the supervisor Cmd channel
//if the flush can be processed. Once the queue gets drained,
//status is sent on the supervisor Response channel.
func (m *mutationMgr) handleDrainMutationQueue(cmd Message) {

	logging.Tracef("MutationMgr::handleDrainMutationQueue %v", cmd)

	bucket := cmd.(*MsgMutMgrFlushMutationQueue).GetBucket()
	streamId := cmd.(*MsgMutMgrFlushMutationQueue).GetStreamId()
	ts := cmd.(*MsgMutMgrFlushMutationQueue).GetTimestamp()
	changeVec := cmd.(*MsgMutMgrFlushMutationQueue).GetChangeVector()

	m.lock.Lock()
	defer m.lock.Unlock()

	q := m.streamBucketQueueMap[streamId][bucket]
	stats := m.stats.Get()
	go m.drainMutationQueue(q, streamId, bucket, ts, changeVec, stats)
	m.supvCmdch <- &MsgSuccess{}
}
Example No. 25
// updateEndpoints returns only those endpoints that host engines defined
// on this vbucket.
func (vr *VbucketRoutine) updateEndpoints(
	opaque uint16,
	eps map[string]c.RouterEndpoint) map[string]c.RouterEndpoint {

	endpoints := make(map[string]c.RouterEndpoint)
	for _, engine := range vr.engines {
		for _, raddr := range engine.Endpoints() {
			if _, ok := eps[raddr]; !ok {
				fmsg := "%v ##%x endpoint %v not found\n"
				logging.Errorf(fmsg, vr.logPrefix, opaque, raddr)
				continue
			}
			fmsg := "%v ##%x UpdateEndpoint %v to %v\n"
			logging.Tracef(fmsg, vr.logPrefix, opaque, raddr, engine)
			endpoints[raddr] = eps[raddr]
		}
	}
	return endpoints
}
Example No. 26
// receive loop
func (feed *DcpFeed) doReceive(rcvch chan []interface{}, conn *Client) {
	defer close(rcvch)

	var headerBuf [transport.HDR_LEN]byte
	var duration time.Duration
	var start time.Time
	var blocked bool

	epoch := time.Now()
	tick := time.Tick(time.Minute * 5) // log every 5 minutes.
	for {
		pkt := transport.MCRequest{} // always a new instance.
		bytes, err := pkt.Receive(conn.conn, headerBuf[:])
		if err == io.EOF {
			logging.Infof("%v EOF received\n", feed.logPrefix)
			break

		} else if feed.isClosed() {
			logging.Infof("%v doReceive(): connection closed\n", feed.logPrefix)
			break

		} else if err != nil {
			logging.Errorf("%v doReceive(): %v\n", feed.logPrefix, err)
			break
		}
		logging.Tracef("%v packet received %#v", feed.logPrefix, pkt)
		if len(rcvch) == cap(rcvch) {
			start, blocked = time.Now(), true
		}
		rcvch <- []interface{}{&pkt, bytes}
		if blocked {
			duration += time.Since(start)
			blocked = false
			select {
			case <-tick:
				percent := 100 * float64(duration) / float64(time.Since(epoch))
				fmsg := "%v DCP-socket -> projector %f%% blocked"
				logging.Infof(fmsg, feed.logPrefix, percent)
			default:
			}
		}
	}
}
Example No. 27
//handleWorkerMessage handles messages from workers
func (m *mutationMgr) handleWorkerMessage(cmd Message) {

	switch cmd.GetMsgType() {

	case STREAM_READER_STREAM_DROP_DATA,
		STREAM_READER_STREAM_BEGIN,
		STREAM_READER_STREAM_END,
		STREAM_READER_ERROR,
		STREAM_READER_CONN_ERROR,
		STREAM_READER_HWT:
		//send message to supervisor to take a decision
		logging.Tracef("MutationMgr::handleWorkerMessage Received %v from worker", cmd)
		m.supvRespch <- cmd

	default:
		logging.Fatalf("MutationMgr::handleWorkerMessage Received unhandled "+
			"message from worker %v", cmd)
		common.CrashOnError(errors.New("Unknown Message On Worker Channel"))
	}

}
Example No. 28
//handleCreateSnapshot will create the necessary snapshots
//after flush has completed
func (s *storageMgr) handleCreateSnapshot(cmd Message) {

	s.supvCmdch <- &MsgSuccess{}

	logging.Tracef("StorageMgr::handleCreateSnapshot %v", cmd)

	msgFlushDone := cmd.(*MsgMutMgrFlushDone)

	bucket := msgFlushDone.GetBucket()
	tsVbuuid := msgFlushDone.GetTS()
	streamId := msgFlushDone.GetStreamId()

	numVbuckets := s.config["numVbuckets"].Int()
	snapType := tsVbuuid.GetSnapType()
	tsVbuuid.Crc64 = common.HashVbuuid(tsVbuuid.Vbuuids)

	if snapType == common.NO_SNAP {
		logging.Infof("StorageMgr::handleCreateSnapshot Skip Snapshot For %v "+
			"%v SnapType %v", streamId, bucket, snapType)

		s.supvRespch <- &MsgMutMgrFlushDone{mType: STORAGE_SNAP_DONE,
			streamId: streamId,
			bucket:   bucket,
			ts:       tsVbuuid}
		return
	}

	s.muSnap.Lock()
	defer s.muSnap.Unlock()

	//pass copy of maps to worker
	indexSnapMap := copyIndexSnapMap(s.indexSnapMap)
	indexInstMap := common.CopyIndexInstMap(s.indexInstMap)
	indexPartnMap := CopyIndexPartnMap(s.indexPartnMap)
	stats := s.stats.Get()

	go s.createSnapshotWorker(streamId, bucket, tsVbuuid, indexSnapMap,
		numVbuckets, indexInstMap, indexPartnMap, stats)

}
Example No. 29
// Refresh list of indexes and scanner clients.
func (gsi *gsiKeyspace) Refresh() errors.Error {
	l.Tracef("%v gsiKeyspace.Refresh()", gsi.logPrefix)
	indexes, err := gsi.gsiClient.Refresh()
	if err != nil {
		return errors.NewError(err, "GSI Refresh()")
	}
	si_s := make([]*secondaryIndex, 0, len(indexes))
	for _, index := range indexes {
		if index.Definition.Bucket != gsi.keyspace {
			continue
		}
		si, err := newSecondaryIndexFromMetaData(gsi, index)
		if err != nil {
			return err
		}
		si_s = append(si_s, si)
	}
	if err := gsi.setIndexes(si_s); err != nil {
		return err
	}
	return nil
}
Example No. 30
// Send buffer ack
func (feed *DcpFeed) sendBufferAck(sendAck bool, bytes uint32) {
	prefix := feed.logPrefix
	if sendAck {
		feed.toAckBytes += bytes
		if feed.toAckBytes > feed.maxAckBytes {
			totalBytes := feed.toAckBytes
			// reset before transmitting so these bytes aren't acked twice
			feed.toAckBytes = 0
			bufferAck := &transport.MCRequest{
				Opcode: transport.DCP_BUFFERACK,
			}
			bufferAck.Extras = make([]byte, 4)
			binary.BigEndian.PutUint32(bufferAck.Extras[:4], totalBytes)
			feed.stats.TotalBufferAckSent++
			if err := feed.conn.Transmit(bufferAck); err != nil {
				logging.Errorf("%v BUFFERACK.Transmit(): %v", prefix, err)
			} else {
				logging.Tracef("%v buffer-ack %v\n", prefix, totalBytes)
			}
		}
	}
}