Example #1
0
// doReceive pumps memcached requests from the hijacked connection of
// uprconn into msgch until a value arrives on killSwitch.  A failed
// Receive is forwarded to the consumer as a msgT carrying the error
// rather than terminating the loop.
func doReceive(
	uprconn *uprConnection, host string, msgch chan msgT, killSwitch chan bool) {

	var header [mcd.HDR_LEN]byte
	var req mcd.MCRequest

	conn := uprconn.conn.Hijack()

	for {
		var out msgT
		if _, rerr := req.Receive(conn, header[:]); rerr != nil {
			out = msgT{uprconn: uprconn, err: rerr}
		} else {
			out = msgT{uprconn: uprconn, pkt: req}
		}
		select {
		case msgch <- out:
		case <-killSwitch:
			return
		}
	}
}
Example #2
0
// Internal goroutine that reads from the socket and writes events to
// the channel.  It owns ch (closed on exit) and closes the underlying
// client connection when the loop terminates for any reason.
func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
	defer close(ch)
	var headerBuf [gomemcached.HDR_LEN]byte
loop:
	for {
		// Read the next request from the server.
		//
		//  (Can't call mc.Receive() because it reads a
		//  _response_ not a request.)
		var pkt gomemcached.MCRequest
		n, err := pkt.Receive(mc.conn, headerBuf[:])
		// Test hook: observe every received packet (including errors).
		if TapRecvHook != nil {
			TapRecvHook(&pkt, n, err)
		}

		if err != nil {
			// io.EOF is a normal shutdown; only record other errors.
			if err != io.EOF {
				feed.Error = err
			}
			break loop
		}

		//log.Printf("** TapFeed received %#v : %q", pkt, pkt.Body)

		if pkt.Opcode == gomemcached.TAP_CONNECT {
			// This is not an event from the server; it's
			// an error response to my connect request.
			feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
			break loop
		}

		event := makeTapEvent(pkt)
		if event != nil {
			// tapEndStream is a synthetic terminator, not delivered to callers.
			if event.Opcode == tapEndStream {
				break loop
			}

			// Deliver the event, unless the feed is being closed.
			select {
			case ch <- *event:
			case <-feed.closer:
				break loop
			}
		}

		// If the request's flags (Extras[2:4], big-endian uint16) ask for
		// an ACK, send one back before reading the next packet.
		if len(pkt.Extras) >= 4 {
			reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
			if reqFlags&gomemcached.TAP_ACK != 0 {
				if _, err := mc.sendAck(&pkt); err != nil {
					feed.Error = err
					break loop
				}
			}
		}
	}
	if err := mc.Close(); err != nil {
		log.Printf("Error closing memcached client:  %v", err)
	}
}
Example #3
0
// transmitRequest serializes req onto o, reporting the byte count and
// any write error.  A nil writer is rejected with errNoConn, and the
// optional TransmitHook observes every attempted transmission.
func transmitRequest(o io.Writer, req *gomemcached.MCRequest) (int, error) {
	if o == nil {
		return 0, errNoConn
	}
	written, terr := req.Transmit(o)
	if hook := TransmitHook; hook != nil {
		hook(req, written, terr)
	}
	return written, terr
}
Example #4
0
// TestTransmitReq checks that transmitRequest rejects a nil writer with
// errNoConn and serializes a SET request into the documented binary
// wire layout (magic, key/extras lengths, vbucket, body length, opaque,
// CAS, key, value).
func TestTransmitReq(t *testing.T) {
	var sink bytes.Buffer
	w := bufio.NewWriter(&sink)

	req := gomemcached.MCRequest{
		Opcode:  gomemcached.SET,
		Cas:     938424885,
		Opaque:  7242,
		VBucket: 824,
		Extras:  []byte{},
		Key:     []byte("somekey"),
		Body:    []byte("somevalue"),
	}

	// Verify nil transmit is OK
	if _, err := transmitRequest(nil, &req); err != errNoConn {
		t.Errorf("Expected errNoConn with no conn, got %v", err)
	}

	if _, err := transmitRequest(w, &req); err != nil {
		t.Fatalf("Error transmitting request: %v", err)
	}
	w.Flush()

	expected := []byte{
		gomemcached.REQ_MAGIC, byte(gomemcached.SET),
		0x0, 0x7, // length of key
		0x0,       // extra length
		0x0,       // reserved
		0x3, 0x38, // vbucket
		0x0, 0x0, 0x0, 0x10, // Length of value
		0x0, 0x0, 0x1c, 0x4a, // opaque
		0x0, 0x0, 0x0, 0x0, 0x37, 0xef, 0x3a, 0x35, // CAS
		's', 'o', 'm', 'e', 'k', 'e', 'y',
		's', 'o', 'm', 'e', 'v', 'a', 'l', 'u', 'e'}

	got := sink.Bytes()
	if len(got) != req.Size() {
		t.Fatalf("Expected %v bytes, got %v", req.Size(),
			len(got))
	}

	if !reflect.DeepEqual(got, expected) {
		t.Fatalf("Expected:\n%#v\n  -- got -- \n%#v",
			expected, got)
	}
}
Example #5
0
// BenchmarkTransmitReqNull measures pure serialization throughput by
// transmitting a representative SET request into ioutil.Discard.
func BenchmarkTransmitReqNull(b *testing.B) {
	req := gomemcached.MCRequest{
		Opcode:  gomemcached.SET,
		Cas:     938424885,
		Opaque:  7242,
		VBucket: 824,
		Extras:  []byte{},
		Key:     []byte("somekey"),
		Body:    []byte("somevalue"),
	}

	// Report throughput in bytes/op based on the encoded size.
	b.SetBytes(int64(req.Size()))

	for n := 0; n < b.N; n++ {
		if _, err := transmitRequest(ioutil.Discard, &req); err != nil {
			b.Fatalf("Error transmitting request: %v", err)
		}
	}
}
Example #6
0
// TestTransmitReqWithExtMeta verifies the wire format of a SET request
// carrying extended metadata: the metadata length is encoded as a
// big-endian uint16 in Extras[28:30], and the metadata bytes trail the
// value on the wire (reflected in the total body length field).
func TestTransmitReqWithExtMeta(t *testing.T) {
	// test data for extended metadata
	ExtMetaStr := "extmeta"

	b := bytes.NewBuffer([]byte{})
	buf := bufio.NewWriter(b)

	req := gomemcached.MCRequest{
		Opcode:  gomemcached.SET,
		Cas:     938424885,
		Opaque:  7242,
		VBucket: 824,
		Key:     []byte("somekey"),
		Body:    []byte("somevalue"),
		ExtMeta: []byte(ExtMetaStr),
	}

	// Record the length of the extended metadata in the last two bytes
	// of Extras.  The destination slice is only 2 bytes wide and the
	// field is a uint16, so PutUint16 must be used here — the previous
	// PutUint32(…, len(ExtMetaStr)) neither compiled (len yields int)
	// nor fit (PutUint32 needs 4 bytes and would panic).
	req.Extras = make([]byte, 30)
	binary.BigEndian.PutUint16(req.Extras[28:30], uint16(len(ExtMetaStr)))

	// Verify nil transmit is OK
	_, err := transmitRequest(nil, &req)
	if err != errNoConn {
		t.Errorf("Expected errNoConn with no conn, got %v", err)
	}

	_, err = transmitRequest(buf, &req)
	if err != nil {
		t.Fatalf("Error transmitting request: %v", err)
	}

	buf.Flush()

	expected := []byte{
		gomemcached.REQ_MAGIC, byte(gomemcached.SET),
		0x0, 0x7, // length of key
		0x1e,      // extra length = 30 = 0x1e
		0x0,       // reserved
		0x3, 0x38, // vbucket
		0x0, 0x0, 0x0, 0x35, // Length of value = 7(key) + 9(value) + 30(extras) + 7(extmeta) = 53 = 0x35
		0x0, 0x0, 0x1c, 0x4a, // opaque
		0x0, 0x0, 0x0, 0x0, 0x37, 0xef, 0x3a, 0x35, // CAS
		's', 'o', 'm', 'e', 'k', 'e', 'y',
		's', 'o', 'm', 'e', 'v', 'a', 'l', 'u', 'e',
		'e', 'x', 't', 'm', 'e', 't', 'a'}

	if len(b.Bytes()) != req.Size() {
		t.Fatalf("Expected %v bytes, got %v", req.Size(),
			len(b.Bytes()))
	}

	if !reflect.DeepEqual(b.Bytes(), expected) {
		t.Fatalf("Expected:\n%#v\n  -- got -- \n%#v",
			expected, b.Bytes())
	}
}
Example #7
0
// BenchmarkTransmitReqLarge measures serialization of a request with a
// 24 KiB body, writing through a freshly sized bufio.Writer into a
// reusable in-memory buffer each iteration.
func BenchmarkTransmitReqLarge(b *testing.B) {
	var sink bytes.Buffer

	req := gomemcached.MCRequest{
		Opcode:  gomemcached.SET,
		Cas:     938424885,
		Opaque:  7242,
		VBucket: 824,
		Extras:  []byte{},
		Key:     []byte("somekey"),
		Body:    make([]byte, 24*1024),
	}

	// Report throughput in bytes/op based on the encoded size.
	b.SetBytes(int64(req.Size()))

	for n := 0; n < b.N; n++ {
		sink.Reset()
		w := bufio.NewWriterSize(&sink, req.Size()*2)
		if _, err := transmitRequest(w, &req); err != nil {
			b.Fatalf("Error transmitting request: %v", err)
		}
	}
}
Example #8
0
// Connect once to the server and work the UPR stream.  If anything
// goes wrong, return our level of progress in order to let our caller
// control any potential retries.
//
// Progress values: -1 means shutdown requested (do not retry), 0 means
// no/partial progress (caller may retry).  The connection is torn down
// via the deferred client.Close() plus the cleanup() closure, which
// waits for the receiver goroutine before closing sendCh.
func (d *bucketDataSource) worker(server string, workerCh chan []uint16) int {
	atomic.AddUint64(&d.stats.TotWorkerBody, 1)

	if !d.isRunning() {
		return -1
	}

	atomic.AddUint64(&d.stats.TotWorkerConnect, 1)
	// Allow tests/callers to inject a connect function; default to memcached.
	connect := d.options.Connect
	if connect == nil {
		connect = memcached.Connect
	}

	client, err := connect("tcp", server)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerConnectErr, 1)
		d.receiver.OnError(fmt.Errorf("worker connect, server: %s, err: %v",
			server, err))
		return 0
	}
	defer client.Close()
	atomic.AddUint64(&d.stats.TotWorkerConnectOk, 1)

	// Optional SASL auth.  Admin (SASL-handler) credentials additionally
	// require an explicit SelectBucket after a successful auth.
	if d.auth != nil {
		var user, pswd string
		var adminCred bool
		if auth, ok := d.auth.(couchbase.AuthWithSaslHandler); ok {
			user, pswd = auth.GetSaslCredentials()
			adminCred = true
		} else {
			user, pswd, _ = d.auth.GetCredentials()
		}
		if user != "" {
			atomic.AddUint64(&d.stats.TotWorkerAuth, 1)
			res, err := client.Auth(user, pswd)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerAuthErr, 1)
				d.receiver.OnError(fmt.Errorf("worker auth, server: %s, user: %s, err: %v",
					server, user, err))
				return 0
			}
			if res.Status != gomemcached.SUCCESS {
				atomic.AddUint64(&d.stats.TotWorkerAuthFail, 1)
				d.receiver.OnError(&AuthFailError{ServerURL: server, User: user})
				return 0
			}
			if adminCred {
				atomic.AddUint64(&d.stats.TotWorkerAuthOk, 1)
				_, err = client.SelectBucket(d.bucketName)
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerSelBktFail, 1)
					d.receiver.OnError(fmt.Errorf("worker select bucket err: %v", err))
					return 0
				}
				atomic.AddUint64(&d.stats.TotWorkerSelBktOk, 1)
			}
		}
	}

	// Every UPR connection needs a (unique-ish) name.
	uprOpenName := d.options.Name
	if uprOpenName == "" {
		uprOpenName = fmt.Sprintf("cbdatasource-%x", rand.Int63())
	}

	err = UPROpen(client, uprOpenName, d.options.FeedBufferSizeBytes)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerUPROpenErr, 1)
		d.receiver.OnError(err)
		return 0
	}
	atomic.AddUint64(&d.stats.TotWorkerUPROpenOk, 1)

	// Threshold (in bytes) at which the receiver sends a buffer-ack.
	ackBytes :=
		uint32(d.options.FeedBufferAckThreshold * float32(d.options.FeedBufferSizeBytes))

	sendCh := make(chan *gomemcached.MCRequest, 1)
	sendEndCh := make(chan struct{})
	recvEndCh := make(chan struct{})

	// cleanup defers closing sendCh until the receiver goroutine has
	// exited, so the receiver never sends on a closed channel.
	cleanup := func(progress int, err error) int {
		if err != nil {
			d.receiver.OnError(err)
		}
		go func() {
			<-recvEndCh
			close(sendCh)
		}()
		return progress
	}

	currVBuckets := make(map[uint16]*VBucketState)
	currVBucketsMutex := sync.Mutex{} // Protects currVBuckets.

	go func() { // Sender goroutine.
		defer close(sendEndCh)

		atomic.AddUint64(&d.stats.TotWorkerTransmitStart, 1)
		for msg := range sendCh {
			atomic.AddUint64(&d.stats.TotWorkerTransmit, 1)
			err := client.Transmit(msg)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerTransmitErr, 1)
				d.receiver.OnError(fmt.Errorf("client.Transmit, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerTransmitOk, 1)
		}
		atomic.AddUint64(&d.stats.TotWorkerTransmitDone, 1)
	}()

	go func() { // Receiver goroutine.
		defer close(recvEndCh)

		atomic.AddUint64(&d.stats.TotWorkerReceiveStart, 1)

		var hdr [gomemcached.HDR_LEN]byte
		var pkt gomemcached.MCRequest
		var res gomemcached.MCResponse

		// Track received bytes in case we need to buffer-ack.
		recvBytesTotal := uint32(0)

		// Take over the raw connection; from here on this goroutine
		// owns all reads from it.
		conn := client.Hijack()

		for {
			// TODO: memory allocation here.
			atomic.AddUint64(&d.stats.TotWorkerReceive, 1)
			_, err := pkt.Receive(conn, hdr[:])
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerReceiveErr, 1)
				d.receiver.OnError(fmt.Errorf("pkt.Receive, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerReceiveOk, 1)

			// Data-change packets (mutation/deletion/expiration) go to
			// the receiver callbacks; everything else is treated as a
			// response and funneled through handleRecv below.
			if pkt.Opcode == gomemcached.UPR_MUTATION ||
				pkt.Opcode == gomemcached.UPR_DELETION ||
				pkt.Opcode == gomemcached.UPR_EXPIRATION {
				atomic.AddUint64(&d.stats.TotUPRDataChange, 1)

				vbucketID := pkt.VBucket

				currVBucketsMutex.Lock()

				vbucketState := currVBuckets[vbucketID]
				if vbucketState == nil || vbucketState.State != "running" {
					currVBucketsMutex.Unlock()
					atomic.AddUint64(&d.stats.TotUPRDataChangeStateErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange,"+
						" wrong vbucketState: %#v, err: %v", vbucketState, err))
					return
				}

				if !vbucketState.SnapSaved {
					// NOTE: Following the ep-engine's approach, we
					// wait to persist SnapStart/SnapEnd until we see
					// the first mutation/deletion in the new snapshot
					// range.  That reduces a race window where if we
					// kill and restart this process right now after a
					// setVBucketMetaData() and before the next,
					// first-mutation-in-snapshot, then a restarted
					// stream-req using this just-saved
					// SnapStart/SnapEnd might have a lastSeq number <
					// SnapStart, where Couchbase Server will respond
					// to the stream-req with an ERANGE error code.
					v, _, err := d.getVBucketMetaData(vbucketID)
					if err != nil || v == nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" getVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					v.SnapStart = vbucketState.SnapStart
					v.SnapEnd = vbucketState.SnapEnd

					err = d.setVBucketMetaData(vbucketID, v)
					if err != nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" getVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					vbucketState.SnapSaved = true
				}

				currVBucketsMutex.Unlock()

				// By-sequence number is the first 8 bytes of Extras.
				seq := binary.BigEndian.Uint64(pkt.Extras[:8])

				if pkt.Opcode == gomemcached.UPR_MUTATION {
					atomic.AddUint64(&d.stats.TotUPRDataChangeMutation, 1)
					err = d.receiver.DataUpdate(vbucketID, pkt.Key, seq, &pkt)
				} else {
					if pkt.Opcode == gomemcached.UPR_DELETION {
						atomic.AddUint64(&d.stats.TotUPRDataChangeDeletion, 1)
					} else {
						atomic.AddUint64(&d.stats.TotUPRDataChangeExpiration, 1)
					}
					err = d.receiver.DataDelete(vbucketID, pkt.Key, seq, &pkt)
				}

				if err != nil {
					atomic.AddUint64(&d.stats.TotUPRDataChangeErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange, err: %v", err))
					return
				}

				atomic.AddUint64(&d.stats.TotUPRDataChangeOk, 1)
			} else {
				// Repackage the request fields as a response (the
				// server encodes the status in the VBucket field) and
				// reuse the single res value to avoid allocation.
				res.Opcode = pkt.Opcode
				res.Opaque = pkt.Opaque
				res.Status = gomemcached.Status(pkt.VBucket)
				res.Extras = pkt.Extras
				res.Cas = pkt.Cas
				res.Key = pkt.Key
				res.Body = pkt.Body

				atomic.AddUint64(&d.stats.TotWorkerHandleRecv, 1)
				currVBucketsMutex.Lock()
				err := d.handleRecv(sendCh, currVBuckets, &res)
				currVBucketsMutex.Unlock()
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerHandleRecvErr, 1)
					d.receiver.OnError(fmt.Errorf("error: HandleRecv, err: %v", err))
					return
				}
				atomic.AddUint64(&d.stats.TotWorkerHandleRecvOk, 1)
			}

			// Flow control: once enough bytes have been consumed, tell
			// the server via UPR_BUFFERACK so it can send more.
			recvBytesTotal +=
				uint32(gomemcached.HDR_LEN) +
					uint32(len(pkt.Key)+len(pkt.Extras)+len(pkt.Body))
			if ackBytes > 0 && recvBytesTotal > ackBytes {
				atomic.AddUint64(&d.stats.TotUPRBufferAck, 1)
				ack := &gomemcached.MCRequest{Opcode: gomemcached.UPR_BUFFERACK}
				ack.Extras = make([]byte, 4) // TODO: Memory mgmt.
				binary.BigEndian.PutUint32(ack.Extras, uint32(recvBytesTotal))
				sendCh <- ack
				recvBytesTotal = 0
			}
		}
	}()

	atomic.AddUint64(&d.stats.TotWorkerBodyKick, 1)
	d.Kick("new-worker")

	// Main loop: react to either goroutine ending, or to refreshed
	// vbucket assignments arriving on workerCh.
	for {
		select {
		case <-sendEndCh:
			atomic.AddUint64(&d.stats.TotWorkerSendEndCh, 1)
			return cleanup(0, nil)

		case <-recvEndCh:
			// If we lost a connection, then maybe a node was rebalanced out,
			// or failed over, so ask for a cluster refresh just in case.
			d.Kick("recvEndCh")

			atomic.AddUint64(&d.stats.TotWorkerRecvEndCh, 1)
			return cleanup(0, nil)

		case wantVBucketIDs, alive := <-workerCh:
			atomic.AddUint64(&d.stats.TotRefreshWorker, 1)

			if !alive {
				atomic.AddUint64(&d.stats.TotRefreshWorkerDone, 1)
				return cleanup(-1, nil) // We've been asked to shutdown.
			}

			currVBucketsMutex.Lock()
			err := d.refreshWorker(sendCh, currVBuckets, wantVBucketIDs)
			currVBucketsMutex.Unlock()
			if err != nil {
				return cleanup(0, err)
			}

			atomic.AddUint64(&d.stats.TotRefreshWorkerOk, 1)
		}
	}

	return cleanup(-1, nil) // Unreached.
}
Example #9
0
// runFeed is the feed's receive loop: it reads UPR packets off the
// hijacked connection, converts them to UprEvents on ch, answers
// NOOPs, and sends buffer-acks via transmitCh.  It owns ch (closed on
// exit) and signals completion on feed.transmitCl.
func (feed *UprFeed) runFeed(ch chan *UprEvent) {
	defer close(ch)
	var headerBuf [gomemcached.HDR_LEN]byte
	var pkt gomemcached.MCRequest
	var event *UprEvent

	// Take over the raw connection; this loop owns all reads from it.
	mc := feed.conn.Hijack()
	uprStats := &feed.stats

loop:
	for {
		sendAck := false
		bytes, err := pkt.Receive(mc, headerBuf[:])
		if err != nil {
			ul.LogError("", "", "Error in receive %s", err.Error())
			feed.Error = err
			// send all the stream close messages to the client
			feed.doStreamClose(ch)
			break loop
		} else {
			event = nil
			// The server encodes response status in the request's
			// VBucket field; repackage for the response-style handlers.
			res := &gomemcached.MCResponse{
				Opcode: pkt.Opcode,
				Cas:    pkt.Cas,
				Opaque: pkt.Opaque,
				Status: gomemcached.Status(pkt.VBucket),
				Extras: pkt.Extras,
				Key:    pkt.Key,
				Body:   pkt.Body,
			}

			// The vbucket id is carried in the packet's opaque.
			vb := vbOpaque(pkt.Opaque)
			// NOTE(review): this overwrites rather than accumulates;
			// the field name suggests a running total — confirm intent.
			uprStats.TotalBytes = uint64(bytes)

			feed.mu.RLock()
			stream := feed.vbstreams[vb]
			feed.mu.RUnlock()

			switch pkt.Opcode {
			case gomemcached.UPR_STREAMREQ:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				status, rb, flog, err := handleStreamRequest(res)
				if status == gomemcached.ROLLBACK {
					event = makeUprEvent(pkt, stream)
					// rollback stream
					msg := "UPR_STREAMREQ with rollback %d for vb %d Failed: %v"
					ul.LogError("", "", msg, rb, vb, err)
					// delete the stream from the vbmap for the feed
					feed.mu.Lock()
					delete(feed.vbstreams, vb)
					feed.mu.Unlock()

				} else if status == gomemcached.SUCCESS {
					event = makeUprEvent(pkt, stream)
					event.Seqno = stream.StartSeq
					event.FailoverLog = flog
					stream.connected = true
					ul.LogInfo("", "", "UPR_STREAMREQ for vb %d successful", vb)

				} else if err != nil {
					msg := "UPR_STREAMREQ for vbucket %d erro %s"
					ul.LogError("", "", msg, vb, err.Error())
					// Surface the failure to the consumer as an event.
					event = &UprEvent{
						Opcode:  gomemcached.UPR_STREAMREQ,
						Status:  status,
						VBucket: vb,
						Error:   err,
					}
				}

			case gomemcached.UPR_MUTATION,
				gomemcached.UPR_DELETION,
				gomemcached.UPR_EXPIRATION:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				event = makeUprEvent(pkt, stream)
				uprStats.TotalMutation++
				sendAck = true

			case gomemcached.UPR_STREAMEND:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				//stream has ended
				event = makeUprEvent(pkt, stream)
				ul.LogInfo("", "", "Stream Ended for vb %d", vb)
				sendAck = true

				feed.mu.Lock()
				delete(feed.vbstreams, vb)
				feed.mu.Unlock()

			case gomemcached.UPR_SNAPSHOT:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				// snapshot marker: extras carry start seq, end seq, and
				// snapshot type as big-endian fields.
				event = makeUprEvent(pkt, stream)
				event.SnapstartSeq = binary.BigEndian.Uint64(pkt.Extras[0:8])
				event.SnapendSeq = binary.BigEndian.Uint64(pkt.Extras[8:16])
				event.SnapshotType = binary.BigEndian.Uint32(pkt.Extras[16:20])
				uprStats.TotalSnapShot++
				sendAck = true

			case gomemcached.UPR_FLUSH:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				// special processing for flush ?
				event = makeUprEvent(pkt, stream)

			case gomemcached.UPR_CLOSESTREAM:
				if stream == nil {
					ul.LogError("", "", "Stream not found for vb %d: %#v", vb, pkt)
					break loop
				}
				event = makeUprEvent(pkt, stream)
				event.Opcode = gomemcached.UPR_STREAMEND // opcode re-write !!
				msg := "Stream Closed for vb %d StreamEnd simulated"
				ul.LogInfo("", "", msg, vb)
				sendAck = true

				feed.mu.Lock()
				delete(feed.vbstreams, vb)
				feed.mu.Unlock()

			case gomemcached.UPR_ADDSTREAM:
				ul.LogWarn("", "", "Opcode %v not implemented", pkt.Opcode)

			case gomemcached.UPR_CONTROL, gomemcached.UPR_BUFFERACK:
				if res.Status != gomemcached.SUCCESS {
					msg := "Opcode %v received status %d"
					ul.LogWarn("", "", msg, pkt.Opcode.String(), res.Status)
				}

			case gomemcached.UPR_NOOP:
				// send a NOOP back
				noop := &gomemcached.MCRequest{
					Opcode: gomemcached.UPR_NOOP,
				}
				feed.transmitCh <- noop

			default:
				msg := "Recived an unknown response for vbucket %d"
				ul.LogError("", "", msg, vb)
			}
		}

		if event != nil {
			// Deliver the event, unless the feed is being closed.
			select {
			case ch <- event:
			case <-feed.closer:
				break loop
			}

			feed.mu.RLock()
			l := len(feed.vbstreams)
			feed.mu.RUnlock()

			// Last stream closed: nothing more will arrive, stop the loop.
			if event.Opcode == gomemcached.UPR_CLOSESTREAM && l == 0 {
				ul.LogInfo("", "", "No more streams")
				break loop
			}

		}

		// Flow control: let SendBufferAck decide (based on sendAck and
		// the bytes just consumed) whether to ack the server.
		needToSend, sendSize := feed.SendBufferAck(sendAck, uint32(bytes))
		if needToSend {
			bufferAck := &gomemcached.MCRequest{
				Opcode: gomemcached.UPR_BUFFERACK,
			}
			bufferAck.Extras = make([]byte, 4)
			binary.BigEndian.PutUint32(bufferAck.Extras[:4], uint32(sendSize))
			feed.transmitCh <- bufferAck
			uprStats.TotalBufferAckSent++
		}
	}

	// Tell the transmit goroutine we are done.
	feed.transmitCl <- true
}