Example #1
0
// startBucket connects to bucket `bucketn` on `cluster`, starts a DCP feed
// and pumps every feed event into `rch` as a {bucketname, event} pair.
// It runs until the feed's event channel is closed; panics are recovered
// and logged. The return value is 0 on a normal feed shutdown.
func startBucket(cluster, bucketn string, rch chan []interface{}) int {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("Recovered from panic %v", r)
			logging.Errorf(logging.StackTrace())
		}
	}()

	logging.Infof("Connecting with %q\n", bucketn)
	b, err := common.ConnectBucket(cluster, "default", bucketn)
	mf(err, "bucket")

	dcpConfig := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := b.StartDcpFeed("rawupr", uint32(0), 0xABCD, dcpConfig)
	mf(err, "- upr")

	vbnos := listOfVbnos(options.maxVbno)
	flogs, err := b.GetFailoverLogs(0xABCD, vbnos, dcpConfig)
	mf(err, "- dcp failoverlogs")
	if options.printflogs {
		printFlogs(vbnos, flogs)
	}
	go startDcp(dcpFeed, flogs)

	for {
		e, ok := <-dcpFeed.C
		if ok == false {
			// Feed channel closed upstream: stop pumping. The original
			// only logged the close and kept looping, forwarding
			// zero-value events from the closed channel forever.
			logging.Infof("Closing for bucket %q\n", bucketn)
			return 0
		}
		rch <- []interface{}{bucketn, e}
	}
}
Example #2
0
// panicHandler recovers a panic raised by the underlying stream library,
// normalizes the recovered value into an error, and forwards a fatal
// MsgStreamError to the supervisor response channel.
func (r *mutationStreamReader) panicHandler() {

	rc := recover()
	if rc == nil {
		return // no panic in flight
	}

	logging.Fatalf("MutationStreamReader::panicHandler Received Panic for Stream %v", r.streamId)

	// Normalize the recovered value into an error.
	var err error
	switch x := rc.(type) {
	case string:
		err = errors.New(x)
	case error:
		err = x
	default:
		err = errors.New("Unknown panic")
	}

	logging.Fatalf("StreamReader Panic Err %v", err)
	logging.Fatalf("%s", logging.StackTrace())

	// Propagate the failure to the supervisor.
	r.supvRespch <- &MsgStreamError{
		streamId: r.streamId,
		err: Error{
			code:     ERROR_STREAM_READER_PANIC,
			severity: FATAL,
			category: STREAM_READER,
			cause:    err,
		},
	}
}
// panicHandler recovers a panic raised inside the index manager, converts
// the recovered value into an error, and forwards a fatal MsgError to the
// supervisor response channel.
func (c *clustMgrAgent) panicHandler() {

	rc := recover()
	if rc == nil {
		return // no panic in flight
	}

	// Normalize the recovered value into an error.
	var err error
	switch x := rc.(type) {
	case string:
		err = errors.New(x)
	case error:
		err = x
	default:
		err = errors.New("Unknown panic")
	}

	logging.Fatalf("ClusterMgrAgent Panic Err %v", err)
	logging.Fatalf("%s", logging.StackTrace())

	// Propagate the failure to the supervisor.
	c.supvRespch <- &MsgError{
		err: Error{
			code:     ERROR_INDEX_MANAGER_PANIC,
			severity: FATAL,
			category: CLUSTER_MGR,
			cause:    err,
		},
	}
}
Example #4
0
// Close shuts the pool down: it closes the connections channel, drains any
// pooled connections and closes each underlying conn. A panic during
// shutdown (e.g. closing an already-closed channel) is recovered and
// logged; the returned error is always nil.
func (cp *connectionPool) Close() (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		logging.Errorf("%v Close() crashed: %v\n", cp.logPrefix, r)
		logging.Errorf("%s", logging.StackTrace())
	}()

	close(cp.connections)
	// Drain whatever is still pooled and shut each connection down.
	for pooled := range cp.connections {
		pooled.conn.Close()
	}
	logging.Infof("%v ... stopped\n", cp.logPrefix)
	return
}
Example #5
0
// genServer is the feed's single-threaded event loop: it serializes every
// feed operation received over reqch, and on exit (normal close or panic)
// closes finch, shuts down all node feeds and closes the output channel.
func (feed *DcpFeed) genServer(reqch chan []interface{}, opaque uint16) {
	defer func() { // panic safe
		close(feed.finch)
		if r := recover(); r != nil {
			logging.Errorf("%v ##%x crashed: %v\n", feed.logPrefix, opaque, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		// Tear down every node feed before closing the output stream.
		for _, nodeFeed := range feed.nodeFeeds {
			nodeFeed.dcpFeed.Close()
		}
		feed.nodeFeeds = nil
		close(feed.output)
	}()

loop:
	for {
		// A select over a single channel is redundant (staticcheck S1000);
		// block directly on the request channel instead.
		msg := <-reqch
		cmd := msg[0].(byte)
		switch cmd {
		case ufCmdRequestStream:
			vb, opaque := msg[1].(uint16), msg[2].(uint16)
			flags, vbuuid := msg[3].(uint32), msg[4].(uint64)
			startSeq, endSeq := msg[5].(uint64), msg[6].(uint64)
			snapStart, snapEnd := msg[7].(uint64), msg[8].(uint64)
			err := feed.dcpRequestStream(
				vb, opaque, flags, vbuuid, startSeq, endSeq,
				snapStart, snapEnd)
			respch := msg[9].(chan []interface{})
			respch <- []interface{}{err}

		case ufCmdCloseStream:
			vb, opaqueMSB := msg[1].(uint16), msg[2].(uint16)
			err := feed.dcpCloseStream(vb, opaqueMSB)
			respch := msg[3].(chan []interface{})
			respch <- []interface{}{err}

		case ufCmdGetSeqnos:
			respch := msg[1].(chan []interface{})
			seqnos, err := feed.dcpGetSeqnos()
			respch <- []interface{}{seqnos, err}

		case ufCmdClose:
			respch := msg[1].(chan []interface{})
			respch <- []interface{}{nil}
			break loop
		}
	}
}
Example #6
0
// genServer is the client's gen-server loop. It serializes commands from
// upstream (reqch) — sending vbmaps and key-versions downstream — and exits
// when a transmitter routine reports "quit" on quitch, when sendKeyVersions
// signals quit, or on an explicit close command. On exit (or panic) all
// downstream connections are closed via doClose().
func (c *Client) genServer(reqch chan []interface{}, quitch chan []string) {
	defer func() { // panic safe
		if r := recover(); r != nil {
			logging.Errorf("%v gen-server crashed: %v\n", c.logPrefix, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		c.doClose()
	}()

	// Payload channels for downstream connections; populated/refreshed by
	// sendVbmap. Presumably keyed per vbucket/connection — confirm against
	// sendVbmap's implementation. Only this goroutine touches the map.
	vbChans := make(map[string]chan interface{})

loop:
	for {
		select {
		case msg := <-reqch: // from upstream
			switch msg[0].(byte) {
			case clientCmdSendVbmap:
				// New vbucket map: rebuild the channel map before ack'ing.
				vbmap := msg[1].(*common.VbConnectionMap)
				respch := msg[2].(chan []interface{})
				vbChans = c.sendVbmap(vbmap, vbChans)
				respch <- []interface{}{nil}

			case clientCmdSendKeyVersions:
				vbs := msg[1].([]*common.VbKeyVersions)
				// sendKeyVersions may itself observe a downstream quit;
				// treat that the same as receiving it on quitch.
				quit := c.sendKeyVersions(vbs, vbChans, quitch)
				if quit != nil && quit[0] == "quit" {
					break loop
				}

			case clientCmdGetcontext:
				respch := msg[1].(chan []interface{})
				respch <- []interface{}{vbChans, c.conn2Vbs}

			case clientCmdClose:
				break loop
			}

		case msg := <-quitch: // from downstream
			if msg[0] == "quit" {
				break loop
			}
		}
	}
}
Example #7
0
// Close shuts the queryport daemon down: under the server mutex it closes
// the listener (if still active), clears it, and signals killch. A panic
// during shutdown is recovered and surfaced through the returned error.
func (s *Server) Close() (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		logging.Errorf("%v Close() crashed: %v\n", s.logPrefix, r)
		err = fmt.Errorf("%v", r)
		logging.Errorf("%s", logging.StackTrace())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	if s.lis == nil { // already closed
		return
	}
	s.lis.Close() // close listener daemon
	s.lis = nil
	close(s.killch)
	logging.Infof("%v ... stopped\n", s.logPrefix)
	return
}
Example #8
0
// doClose closes all connections with the downstream host: each payload
// channel is closed along with its connection, then the client's finch is
// closed. recoverClose is a named helper (instead of a defer inside the
// loop) so each iteration's recover fires promptly; a panic while closing
// sets the named return err to common.ErrorClosed via the closure.
func (c *Client) doClose() (err error) {
	recoverClose := func(payloadch chan interface{}, conn net.Conn) {
		defer func() {
			if r := recover(); r != nil {
				logging.Errorf("%v doClose() crashed: %v\n", c.logPrefix, r)
				logging.Errorf("%s", logging.StackTrace())
				// Mutates the enclosing function's named return value.
				err = common.ErrorClosed
			}
		}()
		close(payloadch)
		conn.Close()
	}
	// close connections
	for i, payloadch := range c.connChans {
		recoverClose(payloadch, c.conns[i])
	}
	close(c.finch)
	logging.Infof("%v closed", c.logPrefix)
	return
}
Example #9
0
// listener is the accept loop for the queryport server. Each accepted
// connection is handled on its own goroutine. When Accept fails the loop
// exits and — whether by error or panic — the whole server is shut down
// asynchronously so the reason is notified back to the application.
func (s *Server) listener() {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("%v listener() crashed: %v\n", s.logPrefix, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		go s.Close()
	}()

	for {
		conn, err := s.lis.Accept()
		if err != nil {
			// A network op-error other than "accept" is unexpected:
			// escalate via panic (recovered above) so it gets logged.
			if e, ok := err.(*net.OpError); ok && e.Op != "accept" {
				panic(err)
			}
			return
		}
		go s.handleConnection(conn)
	}
}
Example #10
0
// makeSyncData asks each engine in turn for a sync message and returns the
// first non-nil result; it returns nil when no engine produces one (or
// there are no engines). A panic is logged and swallowed, yielding nil.
func (vr *VbucketRoutine) makeSyncData(seqno uint64) (data interface{}) {
	defer func() {
		if r := recover(); r != nil {
			logging.Fatalf("%v ##%x sync crashed: %v\n", vr.logPrefix, vr.opaque, r)
			logging.Errorf("%s", logging.StackTrace())
		}
	}()

	// Use the first engine capable of producing sync data.
	for _, engine := range vr.engines {
		if d := engine.SyncData(vr.vbno, vr.vbuuid, seqno); d != nil {
			return d
		}
	}
	return
}
Example #11
0
// panicHandler recovers a panic raised by the underlying stream library,
// converts it into an error, shuts the mutation manager down (unless a
// shutdown is already in progress) and notifies the supervisor.
func (r *mutationMgr) panicHandler() {

	rc := recover()
	if rc == nil {
		return // no panic in flight
	}

	// Normalize the recovered value into an error.
	var err error
	switch x := rc.(type) {
	case string:
		err = errors.New(x)
	case error:
		err = x
	default:
		err = errors.New("Unknown panic")
	}

	logging.Fatalf("MutationManager Panic Err %v", err)
	logging.Fatalf("%s", logging.StackTrace())

	// A closed shutdownCh means shutdown was already underway when the
	// panic happened; skip calling shutdown a second time.
	select {
	case <-r.shutdownCh:
	default:
		r.shutdown()
	}

	// Propagate the failure to the supervisor.
	r.supvRespch <- &MsgError{
		err: Error{
			code:     ERROR_MUT_MGR_PANIC,
			severity: FATAL,
			category: MUTATION_MANAGER,
			cause:    err,
		},
	}
}
Example #12
0
// runTransmitter pushes *VbConnectionMap / *VbKeyVersions payloads to the
// other end of conn. VbKeyVersions are batched up to c.bufferSize and also
// flushed on a periodic bufferTimeout tick; a VbConnectionMap is always
// transmitted immediately. The routine exits — always notifying the
// gen-server via quitch — on transmit failure, when payloadch is closed,
// or when finch closes.
func (c *Client) runTransmitter(
	logPrefix string,
	conn net.Conn,
	flags transport.TransportFlag,
	payloadch chan interface{},
	quitch chan []string) {

	laddr := conn.LocalAddr().String()
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf(
				"%v runTransmitter(%q) crashed: %v\n", logPrefix, laddr, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		// Always tell the gen-server that this transmitter is gone.
		quitch <- []string{"quit", laddr}
	}()

	pkt := transport.NewTransportPacket(c.maxPayload, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	// transmit sends one payload over the wire; false means the
	// connection is broken and the loop should terminate.
	transmit := func(payload interface{}) bool {
		if err := pkt.Send(conn, payload); err != nil {
			logging.Errorf("%v transport %q `%v`\n", logPrefix, laddr, err)
			return false
		}
		logging.Tracef("%v transported from %q\n", logPrefix, laddr)
		return true
	}

	timeout := time.Tick(c.bufferTimeout * time.Millisecond)
	vbs := make([]*common.VbKeyVersions, 0, c.bufferSize)

	// resetAcc returns buffered key-versions to their free-list and
	// truncates the batch, keeping the backing array's capacity.
	resetAcc := func() {
		for _, vb := range vbs {
			vb.Free()
		}
		vbs = vbs[:0] // reset buffer
	}

loop:
	for {
		select {
		case payload, ok := <-payloadch:
			if !ok {
				break loop
			}

			switch val := payload.(type) {
			case *common.VbConnectionMap:
				// vbmaps are never batched.
				if transmit(val) == false {
					break loop
				}

			case *common.VbKeyVersions:
				vbs = append(vbs, val)
				if len(vbs) > c.bufferSize {
					if transmit(vbs) == false {
						break loop
					}
					resetAcc()
				}
			}

		case <-timeout:
			// Periodic flush so a partially filled batch is not held
			// indefinitely under low traffic.
			if len(vbs) > 0 && transmit(vbs) == false {
				break loop
			}
			resetAcc()

		case <-c.finch:
			break loop
		}
	}
}
Example #13
0
// run is the endpoint's gen-server loop. It buffers incoming key-versions,
// flushing to the remote either when mutationCount exceeds bufferSize or on
// the periodic flushTimeout tick, and commits "harakiri" (self-shutdown)
// when no mutation arrives within harakiriTm. On exit — error, harakiri,
// close command, or panic — the connection and finch are closed.
func (endpoint *RouterEndpoint) run(ch chan []interface{}) {
	defer func() { // panic safe
		if r := recover(); r != nil {
			logging.Errorf("%v run() crashed: %v\n", endpoint.logPrefix, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		// close the connection
		endpoint.conn.Close()
		// close this endpoint
		close(endpoint.finch)
		logging.Infof("%v ... stopped\n", endpoint.logPrefix)
	}()

	raddr := endpoint.raddr

	flushTimeout := time.Tick(endpoint.bufferTm * time.Millisecond)
	harakiri := time.After(endpoint.harakiriTm * time.Millisecond)
	buffers := newEndpointBuffers(raddr)

	// statistics counters, reported via endpCmdGetStatistics
	messageCount := int64(0)
	flushCount := int64(0)
	mutationCount := int64(0)

	// flushBuffers pushes any queued mutations to the remote and resets
	// the queued-mutation counter; a non-nil error terminates the loop.
	flushBuffers := func() (err error) {
		logging.Tracef("%v sent %v mutations to %q\n",
			endpoint.logPrefix, mutationCount, raddr)
		if mutationCount > 0 {
			flushCount++
			err = buffers.flushBuffers(endpoint.conn, endpoint.pkt)
			if err != nil {
				logging.Errorf("%v flushBuffers() %v\n", endpoint.logPrefix, err)
			}
		}
		mutationCount = 0
		return
	}

loop:
	for {
		select {
		case msg := <-ch:
			switch msg[0].(byte) {
			case endpCmdPing:
				// liveness probe; reply immediately.
				respch := msg[1].(chan []interface{})
				respch <- []interface{}{true}

			case endpCmdSend:
				data, ok := msg[1].(*c.DataportKeyVersions)
				if !ok {
					panic(fmt.Errorf("invalid data type %T\n", msg[1]))
				}

				kv := data.Kv
				buffers.addKeyVersions(data.Bucket, data.Vbno, data.Vbuuid, kv)
				logging.Tracef("%v added %v keyversions <%v:%v:%v> to %q\n",
					endpoint.logPrefix, kv.Length(), data.Vbno, kv.Seqno,
					kv.Commands, buffers.raddr)
				messageCount++ // count cummulative mutations
				// reload harakiri
				mutationCount++ // count queued up mutations.
				if mutationCount > int64(endpoint.bufferSize) {
					if err := flushBuffers(); err != nil {
						break loop
					}
				}
				// activity observed: push the harakiri deadline out.
				harakiri = time.After(endpoint.harakiriTm * time.Millisecond)

			case endpCmdResetConfig:
				prefix := endpoint.logPrefix
				config := msg[1].(c.Config)
				if cv, ok := config["remoteBlock"]; ok {
					endpoint.block = cv.Bool()
				}
				if cv, ok := config["bufferSize"]; ok {
					endpoint.bufferSize = cv.Int()
				}
				if cv, ok := config["bufferTimeout"]; ok {
					endpoint.bufferTm = time.Duration(cv.Int())
					flushTimeout = time.Tick(endpoint.bufferTm * time.Millisecond)
				}
				if cv, ok := config["harakiriTimeout"]; ok {
					endpoint.harakiriTm = time.Duration(cv.Int())
					if harakiri != nil { // load harakiri only when it is active
						harakiri = time.After(endpoint.harakiriTm * time.Millisecond)
						fmsg := "%v reloaded harakiriTm: %v\n"
						logging.Infof(fmsg, prefix, endpoint.harakiriTm)
					}
				}
				respch := msg[2].(chan []interface{})
				respch <- []interface{}{nil}

			case endpCmdGetStatistics:
				respch := msg[1].(chan []interface{})
				stats := endpoint.newStats()
				stats.Set("messageCount", float64(messageCount))
				stats.Set("flushCount", float64(flushCount))
				respch <- []interface{}{map[string]interface{}(stats)}

			case endpCmdClose:
				// final flush (best effort), ack, and exit.
				respch := msg[1].(chan []interface{})
				flushBuffers()
				respch <- []interface{}{nil}
				break loop
			}

		case <-flushTimeout:
			if err := flushBuffers(); err != nil {
				break loop
			}
			// FIXME: Ideally we don't have to reload the harakir here,
			// because _this_ execution path happens only when there is
			// little activity in the data-path. On the other hand,
			// downstream can block for reasons independant of datapath,
			// hence the precaution.
			harakiri = time.After(endpoint.harakiriTm * time.Millisecond)

		case <-harakiri:
			// no activity within harakiriTm: self-terminate.
			logging.Infof("%v committed harakiri\n", endpoint.logPrefix)
			flushBuffers()
			break loop
		}
	}
}
Example #14
0
// run handles the data path for a single vbucket. It serializes commands
// from reqch — sync pulses, engine add/delete, stats, config, DCP events,
// close — and threads the latest seqno through handleEvent. The loop exits
// on DCP_STREAMEND, on vrCmdClose, or on panic; in every case finch is
// closed by the deferred cleanup.
func (vr *VbucketRoutine) run(reqch chan []interface{}, seqno uint64) {
	defer func() { // panic safe
		if r := recover(); r != nil {
			fmsg := "%v ##%x run() crashed: %v\n"
			logging.Fatalf(fmsg, vr.logPrefix, vr.opaque, r)
			logging.Errorf("%v", logging.StackTrace())
		}
		close(vr.finch)
		logging.Infof("%v ##%x ... stopped\n", vr.logPrefix, vr.opaque)
	}()

	// sendStreamEnd publishes a stream-end message (built at the current
	// seqno) to all attached endpoints; failure to build it is logged.
	sendStreamEnd := func() {
		if data := vr.makeStreamEndData(seqno); data == nil {
			fmsg := "%v ##%x StreamEnd NOT PUBLISHED\n"
			logging.Errorf(fmsg, vr.logPrefix, vr.opaque)

		} else { // publish stream-end
			logging.Debugf("%v ##%x StreamEnd\n", vr.logPrefix, vr.opaque)
			vr.broadcast2Endpoints(data)
		}
	}

	// local statistics counters, seeded from (and written back to) stats.
	stats := vr.newStats()
	addEngineCount := stats.Get("addInsts").(float64)
	delEngineCount := stats.Get("delInsts").(float64)
	syncCount := stats.Get("syncs").(float64)
	sshotCount := stats.Get("snapshots").(float64)
	mutationCount := stats.Get("mutations").(float64)

loop:
	for {
		msg := <-reqch
		cmd := msg[0].(byte)
		switch cmd {
		case vrCmdSyncPulse:
			// Only publish a sync when engines are attached.
			if len(vr.engines) > 0 {
				if data := vr.makeSyncData(seqno); data != nil {
					syncCount++
					fmsg := "%v ##%x sync count %v\n"
					logging.Tracef(fmsg, vr.logPrefix, vr.opaque, syncCount)
					vr.broadcast2Endpoints(data)

				} else {
					fmsg := "%v ##%x Sync NOT PUBLISHED\n"
					logging.Errorf(fmsg, vr.logPrefix, vr.opaque)
				}
			}

		case vrCmdAddEngines:
			// NOTE: replaces the entire engine map, not a merge.
			vr.engines = make(map[uint64]*Engine)
			opaque := msg[1].(uint16)
			fmsg := "%v ##%x vrCmdAddEngines\n"
			logging.Tracef(fmsg, vr.logPrefix, opaque)
			if msg[2] != nil {
				fmsg := "%v ##%x AddEngine %v\n"
				for uuid, engine := range msg[2].(map[uint64]*Engine) {
					vr.engines[uuid] = engine
					logging.Tracef(fmsg, vr.logPrefix, opaque, uuid)
				}
				vr.printCtrl(vr.engines)
			}

			if msg[3] != nil {
				endpoints := msg[3].(map[string]c.RouterEndpoint)
				vr.endpoints = vr.updateEndpoints(opaque, endpoints)
				vr.printCtrl(vr.endpoints)
			}
			// reply with the current seqno so the caller can sync up.
			respch := msg[4].(chan []interface{})
			respch <- []interface{}{seqno, nil}
			addEngineCount++

		case vrCmdDeleteEngines:
			opaque := msg[1].(uint16)
			fmsg := "%v ##%x vrCmdDeleteEngines\n"
			logging.Tracef(fmsg, vr.logPrefix, opaque)
			engineKeys := msg[2].([]uint64)
			fmsg = "%v ##%x DelEngine %v\n"
			for _, uuid := range engineKeys {
				delete(vr.engines, uuid)
				logging.Tracef(fmsg, vr.logPrefix, opaque, uuid)
			}
			fmsg = "%v ##%x deleted engines %v\n"
			logging.Tracef(fmsg, vr.logPrefix, opaque, engineKeys)
			respch := msg[3].(chan []interface{})
			respch <- []interface{}{nil}
			delEngineCount++

		case vrCmdGetStatistics:
			logging.Tracef("%v vrCmdStatistics\n", vr.logPrefix)
			respch := msg[1].(chan []interface{})
			stats.Set("addInsts", addEngineCount)
			stats.Set("delInsts", delEngineCount)
			stats.Set("syncs", syncCount)
			stats.Set("snapshots", sshotCount)
			stats.Set("mutations", mutationCount)
			respch <- []interface{}{stats.ToMap()}

		case vrCmdResetConfig:
			// config is currently ignored for vbucket routines; just ack.
			_, respch := msg[1].(c.Config), msg[2].(chan []interface{})
			respch <- []interface{}{nil}

		case vrCmdEvent:
			m := msg[1].(*mc.DcpEvent)
			// An opaque mismatch indicates a stale/misrouted event.
			if m.Opaque != vr.opaque {
				fmsg := "%v ##%x mismatch with vr.##%x %v"
				logging.Fatalf(fmsg, vr.logPrefix, m.Opaque, vr.opaque, m.Opcode)
			}

			// count statistics
			seqno = vr.handleEvent(m, seqno)
			switch m.Opcode {
			case mcd.DCP_SNAPSHOT:
				sshotCount++
			case mcd.DCP_MUTATION, mcd.DCP_DELETION, mcd.DCP_EXPIRATION:
				mutationCount++
			case mcd.DCP_STREAMEND:
				sendStreamEnd()
				break loop
			}

		case vrCmdClose:
			sendStreamEnd()
			logging.Debugf("%v ##%x closed\n", vr.logPrefix, vr.opaque)
			respch := msg[1].(chan []interface{})
			respch <- []interface{}{nil}
			break loop
		}
	}
}
Example #15
0
// runScatter is the kvdata data-path goroutine: it scatters DCP mutations
// from mutch to per-vbucket routines, drives the periodic sync-pulse
// heartbeat, and serializes control commands arriving on kvdata.sbch.
// The loop exits when mutch closes or on kvCmdClose; cleanup publishes
// stream-end, notifies the feed and closes finch.
func (kvdata *KVData) runScatter(
	ts *protobuf.TsVbuuid, mutch <-chan *mc.DcpEvent) {

	// NOTE: panic will bubble up from vbucket-routine to kvdata.
	defer func() {
		if r := recover(); r != nil {
			fmsg := "%v ##%x runScatter() crashed: %v\n"
			logging.Errorf(fmsg, kvdata.logPrefix, kvdata.opaque, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		kvdata.publishStreamEnd()
		kvdata.feed.PostFinKVdata(kvdata.bucket)
		close(kvdata.finch)
		logging.Infof("%v ##%x ... stopped\n", kvdata.logPrefix, kvdata.opaque)
	}()

	// stats
	eventCount, addCount, delCount := int64(0), int64(0), int64(0)
	tsCount := int64(0)
	heartBeat := time.After(kvdata.syncTimeout)
	fmsg := "%v ##%x heartbeat (%v) loaded ...\n"
	logging.Infof(fmsg, kvdata.logPrefix, kvdata.opaque, kvdata.syncTimeout)

loop:
	for {
		select {
		case m, ok := <-mutch:
			if ok == false { // upstream has closed
				break loop
			}
			kvdata.scatterMutation(m, ts)
			eventCount++

		case <-heartBeat:
			// Snapshot the vbucket routines so the async pulse below does
			// not race with map mutation on this goroutine.
			vrs := make([]*VbucketRoutine, 0, len(kvdata.vrs))
			for _, vr := range kvdata.vrs {
				vrs = append(vrs, vr)
			}

			// Disarm the heartbeat; it is re-armed only by the
			// ReloadHeartbeat command (kvCmdReloadHeartBeat), posted by
			// the pulse goroutine once it finishes.
			heartBeat = nil

			// propogate the sync-pulse via separate routine so that
			// the data-path is not blocked.
			go func() {
				// during cleanup, as long as the vbucket-routines are
				// shutdown this routine will eventually exit.
				for _, vr := range vrs {
					vr.SyncPulse()
				}
				if err := kvdata.ReloadHeartbeat(); err != nil {
					fmsg := "%v ##%x ReloadHeartbeat(): %v\n"
					logging.Errorf(fmsg, kvdata.logPrefix, kvdata.opaque, err)
				}
			}()

		case msg := <-kvdata.sbch:
			cmd := msg[0].(byte)
			switch cmd {
			case kvCmdAddEngines:
				// Merge new engines/endpoints, then install the combined
				// set into every vbucket routine, collecting their seqnos.
				opaque := msg[1].(uint16)
				respch := msg[4].(chan []interface{})
				if msg[2] != nil {
					for uuid, engine := range msg[2].(map[uint64]*Engine) {
						if _, ok := kvdata.engines[uuid]; !ok {
							fmsg := "%v ##%x new engine added %v"
							logging.Infof(fmsg, kvdata.logPrefix, opaque, uuid)
						}
						kvdata.engines[uuid] = engine
					}
				}
				if msg[3] != nil {
					rv := msg[3].(map[string]c.RouterEndpoint)
					for raddr, endp := range rv {
						fmsg := "%v ##%x updated endpoint %q"
						logging.Infof(fmsg, kvdata.logPrefix, opaque, raddr)
						kvdata.endpoints[raddr] = endp
					}
				}
				curSeqnos := make(map[uint16]uint64)
				if kvdata.engines != nil || kvdata.endpoints != nil {
					engines, endpoints := kvdata.engines, kvdata.endpoints
					for _, vr := range kvdata.vrs {
						curSeqno, err := vr.AddEngines(opaque, engines, endpoints)
						if err != nil {
							panic(err)
						}
						curSeqnos[vr.vbno] = curSeqno
					}
				}
				addCount++
				respch <- []interface{}{curSeqnos, nil}

			case kvCmdDelEngines:
				opaque := msg[1].(uint16)
				engineKeys := msg[2].([]uint64)
				respch := msg[3].(chan []interface{})
				// Remove from every vbucket routine first, then from the
				// kvdata-level engine map.
				for _, vr := range kvdata.vrs {
					if err := vr.DeleteEngines(opaque, engineKeys); err != nil {
						panic(err)
					}
				}
				for _, engineKey := range engineKeys {
					delete(kvdata.engines, engineKey)
					fmsg := "%v ##%x deleted engine %q"
					logging.Infof(fmsg, kvdata.logPrefix, opaque, engineKey)
				}
				delCount++
				respch <- []interface{}{nil}

			case kvCmdTs:
				// Merge the incoming timestamp into the working one.
				_ /*opaque*/ = msg[1].(uint16)
				ts = ts.Union(msg[2].(*protobuf.TsVbuuid))
				respch := msg[3].(chan []interface{})
				tsCount++
				respch <- []interface{}{nil}

			case kvCmdGetStats:
				respch := msg[1].(chan []interface{})
				stats := kvdata.newStats()
				stats.Set("events", float64(eventCount))
				stats.Set("addInsts", float64(addCount))
				stats.Set("delInsts", float64(delCount))
				stats.Set("tsCount", float64(tsCount))
				// Per-vbucket statistics, keyed by vbucket number.
				statVbuckets := make(map[string]interface{})
				for i, vr := range kvdata.vrs {
					stats, err := vr.GetStatistics()
					if err != nil {
						panic(err)
					}
					statVbuckets[strconv.Itoa(int(i))] = stats
				}
				stats.Set("vbuckets", statVbuckets)
				respch <- []interface{}{map[string]interface{}(stats)}

			case kvCmdResetConfig:
				config, respch := msg[1].(c.Config), msg[2].(chan []interface{})
				// Only reload syncTimeout while the heartbeat is armed;
				// when disarmed (nil) the pulse goroutine will re-arm it.
				if cv, ok := config["syncTimeout"]; ok && heartBeat != nil {
					kvdata.syncTimeout = time.Duration(cv.Int())
					kvdata.syncTimeout *= time.Millisecond
					logging.Infof(
						"%v ##%x heart-beat settings reloaded: %v\n",
						kvdata.logPrefix, kvdata.opaque, kvdata.syncTimeout)
					heartBeat = time.After(kvdata.syncTimeout)
				}
				for _, vr := range kvdata.vrs {
					if err := vr.ResetConfig(config); err != nil {
						panic(err)
					}
				}
				kvdata.config = kvdata.config.Override(config)
				respch <- []interface{}{nil}

			case kvCmdReloadHeartBeat:
				// Re-arm the heartbeat after a pulse cycle completes.
				respch := msg[1].(chan []interface{})
				heartBeat = time.After(kvdata.syncTimeout)
				respch <- []interface{}{nil}

			case kvCmdClose:
				for _, vr := range kvdata.vrs {
					vr.Close()
				}
				respch := msg[1].(chan []interface{})
				respch <- []interface{}{nil}
				break loop
			}
		}
	}
}
Example #16
0
// systemHandler handles an incoming admin request: it decodes the message
// registered for the URL path, forwards it to the server's request channel
// and writes back the encoded reply. Per-path statistics (index 0 requests,
// 1 responses, 2 errors) and in/out byte counters are updated under s.mu
// by the deferred accounting block.
func (s *httpServer) systemHandler(w http.ResponseWriter, r *http.Request) {
	var err error
	var dataIn, dataOut []byte

	logging.Infof("%s Request %q\n", s.logPrefix, r.URL.Path)

	stats := s.statsMessages[r.URL.Path]

	defer func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		if recov := recover(); recov != nil {
			logging.Errorf("%s systemHandler() crashed: %v\n", s.logPrefix, recov)
			logging.Errorf("%s", logging.StackTrace())
			stats[2]++ // error count
		}
		if err != nil {
			logging.Errorf("%s %v\n", s.logPrefix, err)
			stats[2]++ // error count
		}
		stats[1]++ // response count
		if dataIn != nil {
			s.statsInBytes += uint64(len(dataIn))
		}
		if dataOut != nil {
			s.statsOutBytes += uint64(len(dataOut))
		}
		s.statsMessages[r.URL.Path] = stats
	}()

	s.mu.Lock()
	stats[0]++ // request count
	s.mu.Unlock()

	// get request message type.
	msg, ok := s.messages[r.URL.Path]
	if !ok {
		err = ErrorPathNotFound
		http.Error(w, "path not found", http.StatusNotFound)
		return
	}
	// read request. Use a distinct local name so the outer `err` is
	// actually assigned — the original `if err := ...` shadowed it,
	// hiding read failures from the deferred error accounting above.
	dataIn = make([]byte, r.ContentLength)
	if e := requestRead(r.Body, dataIn); e != nil {
		err = fmt.Errorf("%v, %v", ErrorRequest, e)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Get an instance of request type and decode request into that.
	typeOfMsg := reflect.ValueOf(msg).Elem().Type()
	msg = reflect.New(typeOfMsg).Interface().(MessageMarshaller)
	if err = msg.Decode(dataIn); err != nil {
		err = fmt.Errorf("%v, %v", ErrorDecodeRequest, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	waitch := make(chan interface{}, 1)
	// send and wait
	s.reqch <- &httpAdminRequest{srv: s, msg: msg, waitch: waitch}
	val := <-waitch

	switch v := (val).(type) {
	case MessageMarshaller:
		if dataOut, err = v.Encode(); err == nil {
			header := w.Header()
			header["Content-Type"] = []string{v.ContentType()}
			w.Write(dataOut)

		} else {
			err = fmt.Errorf("%v, %v", ErrorEncodeResponse, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}

	case error:
		err = fmt.Errorf("%v, %v", ErrorInternal, v)
		http.Error(w, v.Error(), http.StatusInternalServerError)
	}
}
Example #17
0
// genServer is the DCP feed's event loop: it serializes feed commands from
// reqch, processes packets arriving on rcvch, and warns about streams that
// have seen no events recently. On exit (command, rcvch close, or panic)
// finch is closed and the connection torn down.
func (feed *DcpFeed) genServer(
	opaque uint16,
	reqch chan []interface{}, finch chan bool, rcvch chan []interface{}) {

	defer func() { // panic safe
		if r := recover(); r != nil {
			logging.Errorf("%v ##%x crashed: %v\n", feed.logPrefix, opaque, r)
			logging.Errorf("%s", logging.StackTrace())
		}
		close(feed.finch)
		feed.conn.Close()
		feed.conn = nil
		logging.Infof("%v ##%x ... stopped\n", feed.logPrefix, opaque)
	}()

	prefix := feed.logPrefix
	inactivityTick := time.Tick(1 * 60 * time.Second) // 1 minute

loop:
	for {
		select {
		case <-inactivityTick:
			// Warn about streams that are mid-snapshot but have not seen
			// an event for more than 10 seconds.
			now := time.Now().UnixNano()
			for _, stream := range feed.vbstreams {
				strm_seqno := stream.Seqno
				if stream.Snapend == 0 || strm_seqno == stream.Snapend {
					continue
				}
				delta := (now - stream.LastSeen) / 1000000000 // in Seconds
				if stream.LastSeen != 0 && delta > 10 /*seconds*/ {
					fmsg := "%v ##%x event for vb %v lastSeen %vSec before\n"
					logging.Warnf(
						fmsg, prefix, stream.AppOpaque, stream.Vbucket, delta)
				}
			}

		case msg := <-reqch:
			cmd := msg[0].(byte)
			switch cmd {
			case dfCmdOpen:
				name, sequence := msg[1].(string), msg[2].(uint32)
				bufsize, opaque := msg[3].(uint32), msg[4].(uint16)
				respch := msg[5].(chan []interface{})
				err := feed.doDcpOpen(name, sequence, bufsize, opaque, rcvch)
				respch <- []interface{}{err}

			case dfCmdGetFailoverlog:
				opaque := msg[1].(uint16)
				vblist, respch := msg[2].([]uint16), msg[3].(chan []interface{})
				if len(feed.vbstreams) > 0 {
					// Fixed format verb: was "%v %##x ..." which dropped
					// the conventional "##" opaque prefix.
					fmsg := "%v ##%x active streams in doDcpGetFailoverLog"
					logging.Errorf(fmsg, prefix, opaque)
					respch <- []interface{}{nil, ErrorInvalidFeed}
					// BUG fix: stop here. The original fell through and
					// sent a second response on respch; since the caller
					// receives only once, that second send blocked this
					// gen-server forever.
					break
				}
				flog, err := feed.doDcpGetFailoverLog(opaque, vblist, rcvch)
				respch <- []interface{}{flog, err}

			case dfCmdGetSeqnos:
				respch := msg[1].(chan []interface{})
				seqnos, err := feed.doDcpGetSeqnos(rcvch)
				respch <- []interface{}{seqnos, err}

			case dfCmdRequestStream:
				vbno, opaqueMSB := msg[1].(uint16), msg[2].(uint16)
				flags, vuuid := msg[3].(uint32), msg[4].(uint64)
				startSequence, endSequence := msg[5].(uint64), msg[6].(uint64)
				snapStart, snapEnd := msg[7].(uint64), msg[8].(uint64)
				respch := msg[9].(chan []interface{})
				err := feed.doDcpRequestStream(
					vbno, opaqueMSB, flags, vuuid,
					startSequence, endSequence, snapStart, snapEnd)
				respch <- []interface{}{err}

			case dfCmdCloseStream:
				vbno, opaqueMSB := msg[1].(uint16), msg[2].(uint16)
				respch := msg[3].(chan []interface{})
				err := feed.doDcpCloseStream(vbno, opaqueMSB)
				respch <- []interface{}{err}

			case dfCmdClose:
				feed.sendStreamEnd(feed.outch)
				respch := msg[1].(chan []interface{})
				respch <- []interface{}{nil}
				break loop
			}

		case resp, ok := <-rcvch:
			if !ok {
				// receive channel closed underneath us: publish
				// stream-end and shut down.
				feed.sendStreamEnd(feed.outch)
				break loop
			}
			pkt, bytes := resp[0].(*transport.MCRequest), resp[1].(int)
			switch feed.handlePacket(pkt, bytes) {
			case "exit":
				break loop
			}
		}
	}
}