// sendAck replies to a server-initiated request by echoing its opcode
// and opaque with a SUCCESS status, so the server can match the ack to
// the message it sent.
func (mc *Client) sendAck(pkt *gomemcached.MCRequest) (int, error) {
	res := gomemcached.MCResponse{
		Opcode: pkt.Opcode,
		Opaque: pkt.Opaque,
		Status: gomemcached.SUCCESS,
	}
	return res.Transmit(mc.conn)
}
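// The ack above simply echoes the request's Opcode and Opaque with a
// SUCCESS status; the Opaque is what lets the server correlate the ack
// with the message it originally sent. Below is a minimal, standalone
// sketch of that same echo pattern, with a bytes.Buffer standing in for
// the connection. UPR_NOOP is used only as an assumed example of a
// server-initiated message, and the gomemcached import path may differ
// in your tree.
package main

import (
	"bytes"
	"fmt"

	"github.com/couchbase/gomemcached"
)

func main() {
	// Pretend the server sent us a no-op carrying this opaque value.
	pkt := &gomemcached.MCRequest{
		Opcode: gomemcached.UPR_NOOP,
		Opaque: 0xcafe,
	}

	// Echo opcode and opaque back with SUCCESS, as sendAck does.
	res := gomemcached.MCResponse{
		Opcode: pkt.Opcode,
		Opaque: pkt.Opaque,
		Status: gomemcached.SUCCESS,
	}

	var conn bytes.Buffer // Stand-in for mc.conn.
	n, err := res.Transmit(&conn)
	fmt.Printf("wrote %d ack bytes, err=%v\n", n, err)
}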
// Connect once to the server and work the UPR stream. If anything
// goes wrong, return our level of progress in order to let our caller
// control any potential retries.
func (d *bucketDataSource) worker(server string, workerCh chan []uint16) int {
	atomic.AddUint64(&d.stats.TotWorkerBody, 1)

	if !d.isRunning() {
		return -1
	}

	atomic.AddUint64(&d.stats.TotWorkerConnect, 1)
	connect := d.options.Connect
	if connect == nil {
		connect = memcached.Connect
	}

	client, err := connect("tcp", server)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerConnectErr, 1)
		d.receiver.OnError(fmt.Errorf("worker connect, server: %s, err: %v",
			server, err))
		return 0
	}
	defer client.Close()
	atomic.AddUint64(&d.stats.TotWorkerConnectOk, 1)

	// Authenticate if credentials were provided; with SASL/admin
	// credentials, also select the bucket.
	if d.auth != nil {
		var user, pswd string
		var adminCred bool
		if auth, ok := d.auth.(couchbase.AuthWithSaslHandler); ok {
			user, pswd = auth.GetSaslCredentials()
			adminCred = true
		} else {
			user, pswd, _ = d.auth.GetCredentials()
		}
		if user != "" {
			atomic.AddUint64(&d.stats.TotWorkerAuth, 1)
			res, err := client.Auth(user, pswd)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerAuthErr, 1)
				d.receiver.OnError(fmt.Errorf("worker auth, server: %s,"+
					" user: %s, err: %v", server, user, err))
				return 0
			}
			if res.Status != gomemcached.SUCCESS {
				atomic.AddUint64(&d.stats.TotWorkerAuthFail, 1)
				d.receiver.OnError(&AuthFailError{ServerURL: server, User: user})
				return 0
			}
			if adminCred {
				atomic.AddUint64(&d.stats.TotWorkerAuthOk, 1)
				_, err = client.SelectBucket(d.bucketName)
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerSelBktFail, 1)
					d.receiver.OnError(fmt.Errorf("worker select bucket err: %v", err))
					return 0
				}
				atomic.AddUint64(&d.stats.TotWorkerSelBktOk, 1)
			}
		}
	}

	uprOpenName := d.options.Name
	if uprOpenName == "" {
		uprOpenName = fmt.Sprintf("cbdatasource-%x", rand.Int63())
	}

	err = UPROpen(client, uprOpenName, d.options.FeedBufferSizeBytes)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerUPROpenErr, 1)
		d.receiver.OnError(err)
		return 0
	}
	atomic.AddUint64(&d.stats.TotWorkerUPROpenOk, 1)

	// Number of received bytes after which we send a UPR buffer-ack.
	ackBytes := uint32(d.options.FeedBufferAckThreshold *
		float32(d.options.FeedBufferSizeBytes))

	sendCh := make(chan *gomemcached.MCRequest, 1)
	sendEndCh := make(chan struct{})
	recvEndCh := make(chan struct{})

	// cleanup reports our progress and arranges for sendCh to be closed
	// once the receiver goroutine has exited, so the sender goroutine
	// can drain and finish.
	cleanup := func(progress int, err error) int {
		if err != nil {
			d.receiver.OnError(err)
		}
		go func() {
			<-recvEndCh
			close(sendCh)
		}()
		return progress
	}

	currVBuckets := make(map[uint16]*VBucketState)
	currVBucketsMutex := sync.Mutex{} // Protects currVBuckets.

	go func() { // Sender goroutine.
		defer close(sendEndCh)

		atomic.AddUint64(&d.stats.TotWorkerTransmitStart, 1)
		for msg := range sendCh {
			atomic.AddUint64(&d.stats.TotWorkerTransmit, 1)
			err := client.Transmit(msg)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerTransmitErr, 1)
				d.receiver.OnError(fmt.Errorf("client.Transmit, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerTransmitOk, 1)
		}
		atomic.AddUint64(&d.stats.TotWorkerTransmitDone, 1)
	}()

	go func() { // Receiver goroutine.
		defer close(recvEndCh)

		atomic.AddUint64(&d.stats.TotWorkerReceiveStart, 1)

		var hdr [gomemcached.HDR_LEN]byte
		var pkt gomemcached.MCRequest
		var res gomemcached.MCResponse

		// Track received bytes in case we need to buffer-ack.
		recvBytesTotal := uint32(0)

		conn := client.Hijack()

		for {
			// TODO: memory allocation here.
			atomic.AddUint64(&d.stats.TotWorkerReceive, 1)
			_, err := pkt.Receive(conn, hdr[:])
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerReceiveErr, 1)
				d.receiver.OnError(fmt.Errorf("pkt.Receive, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerReceiveOk, 1)

			if pkt.Opcode == gomemcached.UPR_MUTATION ||
				pkt.Opcode == gomemcached.UPR_DELETION ||
				pkt.Opcode == gomemcached.UPR_EXPIRATION {
				atomic.AddUint64(&d.stats.TotUPRDataChange, 1)

				vbucketID := pkt.VBucket

				currVBucketsMutex.Lock()
				vbucketState := currVBuckets[vbucketID]

				if vbucketState == nil || vbucketState.State != "running" {
					currVBucketsMutex.Unlock()
					atomic.AddUint64(&d.stats.TotUPRDataChangeStateErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange,"+
						" wrong vbucketState: %#v, err: %v", vbucketState, err))
					return
				}

				if !vbucketState.SnapSaved {
					// NOTE: Following the ep-engine's approach, we
					// wait to persist SnapStart/SnapEnd until we see
					// the first mutation/deletion in the new snapshot
					// range. That reduces a race window where if we
					// kill and restart this process right now after a
					// setVBucketMetaData() and before the next,
					// first-mutation-in-snapshot, then a restarted
					// stream-req using this just-saved
					// SnapStart/SnapEnd might have a lastSeq number <
					// SnapStart, where Couchbase Server will respond
					// to the stream-req with an ERANGE error code.
					v, _, err := d.getVBucketMetaData(vbucketID)
					if err != nil || v == nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" getVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					v.SnapStart = vbucketState.SnapStart
					v.SnapEnd = vbucketState.SnapEnd

					err = d.setVBucketMetaData(vbucketID, v)
					if err != nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" setVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					vbucketState.SnapSaved = true
				}

				currVBucketsMutex.Unlock()

				seq := binary.BigEndian.Uint64(pkt.Extras[:8])

				if pkt.Opcode == gomemcached.UPR_MUTATION {
					atomic.AddUint64(&d.stats.TotUPRDataChangeMutation, 1)
					err = d.receiver.DataUpdate(vbucketID, pkt.Key, seq, &pkt)
				} else {
					if pkt.Opcode == gomemcached.UPR_DELETION {
						atomic.AddUint64(&d.stats.TotUPRDataChangeDeletion, 1)
					} else {
						atomic.AddUint64(&d.stats.TotUPRDataChangeExpiration, 1)
					}
					err = d.receiver.DataDelete(vbucketID, pkt.Key, seq, &pkt)
				}
				if err != nil {
					atomic.AddUint64(&d.stats.TotUPRDataChangeErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange, err: %v", err))
					return
				}

				atomic.AddUint64(&d.stats.TotUPRDataChangeOk, 1)
			} else {
				res.Opcode = pkt.Opcode
				res.Opaque = pkt.Opaque
				res.Status = gomemcached.Status(pkt.VBucket)
				res.Extras = pkt.Extras
				res.Cas = pkt.Cas
				res.Key = pkt.Key
				res.Body = pkt.Body

				atomic.AddUint64(&d.stats.TotWorkerHandleRecv, 1)
				currVBucketsMutex.Lock()
				err := d.handleRecv(sendCh, currVBuckets, &res)
				currVBucketsMutex.Unlock()
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerHandleRecvErr, 1)
					d.receiver.OnError(fmt.Errorf("error: HandleRecv, err: %v", err))
					return
				}
				atomic.AddUint64(&d.stats.TotWorkerHandleRecvOk, 1)
			}

			recvBytesTotal += uint32(gomemcached.HDR_LEN) +
				uint32(len(pkt.Key)+len(pkt.Extras)+len(pkt.Body))
			if ackBytes > 0 && recvBytesTotal > ackBytes {
				atomic.AddUint64(&d.stats.TotUPRBufferAck, 1)
				ack := &gomemcached.MCRequest{Opcode: gomemcached.UPR_BUFFERACK}
				ack.Extras = make([]byte, 4) // TODO: Memory mgmt.
				binary.BigEndian.PutUint32(ack.Extras, recvBytesTotal)
				sendCh <- ack
				recvBytesTotal = 0
			}
		}
	}()

	atomic.AddUint64(&d.stats.TotWorkerBodyKick, 1)
	d.Kick("new-worker")

	for {
		select {
		case <-sendEndCh:
			atomic.AddUint64(&d.stats.TotWorkerSendEndCh, 1)
			return cleanup(0, nil)

		case <-recvEndCh:
			// If we lost a connection, then maybe a node was rebalanced out,
			// or failed over, so ask for a cluster refresh just in case.
			d.Kick("recvEndCh")

			atomic.AddUint64(&d.stats.TotWorkerRecvEndCh, 1)
			return cleanup(0, nil)

		case wantVBucketIDs, alive := <-workerCh:
			atomic.AddUint64(&d.stats.TotRefreshWorker, 1)

			if !alive {
				atomic.AddUint64(&d.stats.TotRefreshWorkerDone, 1)
				return cleanup(-1, nil) // We've been asked to shutdown.
			}

			currVBucketsMutex.Lock()
			err := d.refreshWorker(sendCh, currVBuckets, wantVBucketIDs)
			currVBucketsMutex.Unlock()
			if err != nil {
				return cleanup(0, err)
			}

			atomic.AddUint64(&d.stats.TotRefreshWorkerOk, 1)
		}
	}

	return cleanup(-1, nil) // Unreached.
}
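// The worker's int result is its progress report to the caller: -1 means
// the feed is no longer running or was asked to shut down, while 0 means
// the attempt ended without durable progress (connect, auth, UPR_OPEN, or
// stream error), leaving retry policy to the caller. The sketch below is
// a hypothetical caller-side retry loop built on that contract; the
// backoff constants and the runWorker/fake-worker names are assumptions
// for illustration, not part of this package.
package main

import (
	"fmt"
	"time"
)

// retryLoop keeps invoking runWorker until it reports shutdown (< 0),
// backing off while no progress is made and resetting on progress.
func retryLoop(runWorker func() int) {
	sleep := 100 * time.Millisecond // Assumed starting backoff.
	for {
		progress := runWorker()
		if progress < 0 {
			return // Shutdown requested: stop retrying.
		}
		if progress == 0 {
			time.Sleep(sleep) // No progress: wait before reconnecting.
			if sleep < 10*time.Second {
				sleep *= 2 // Assumed factor and cap; tune as needed.
			}
		} else {
			sleep = 100 * time.Millisecond // Progress: reset the backoff.
		}
	}
}

func main() {
	attempts := 0
	retryLoop(func() int {
		attempts++
		fmt.Println("worker attempt", attempts)
		if attempts >= 3 {
			return -1 // Pretend we were asked to shut down.
		}
		return 0 // Pretend the connection failed with no progress.
	})
}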
// notFound is the fallback handler for requests with no matching
// handler; it responds with an UNKNOWN_COMMAND status.
func notFound(req *gomemcached.MCRequest, s *storage) *gomemcached.MCResponse {
	var response gomemcached.MCResponse
	response.Status = gomemcached.UNKNOWN_COMMAND
	return &response
}
// transmitResponse writes res in memcached wire format to the given writer.
func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) {
	return res.Transmit(o)
}
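// notFound and transmitResponse are the two ends of a simple request
// dispatch: unknown opcodes fall back to notFound, and whatever response
// a handler produces is pushed onto the wire by transmitResponse. The
// sketch below wires them together around an assumed handler registry,
// with a bytes.Buffer standing in for the client connection; storage,
// notFound, and transmitResponse are re-declared here only so the sketch
// compiles on its own, and the handlers map is an assumption for
// illustration.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/couchbase/gomemcached"
)

// storage is a placeholder for the server's storage type.
type storage struct{}

type handler func(*gomemcached.MCRequest, *storage) *gomemcached.MCResponse

func notFound(req *gomemcached.MCRequest, s *storage) *gomemcached.MCResponse {
	var response gomemcached.MCResponse
	response.Status = gomemcached.UNKNOWN_COMMAND
	return &response
}

func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) {
	return res.Transmit(o)
}

func main() {
	// Assumed opcode registry; left empty so the request below falls
	// through to notFound.
	handlers := map[gomemcached.CommandCode]handler{}

	req := &gomemcached.MCRequest{Opcode: gomemcached.GET, Key: []byte("key")}

	h, ok := handlers[req.Opcode]
	if !ok {
		h = notFound // No registered handler: answer UNKNOWN_COMMAND.
	}

	res := h(req, &storage{})
	res.Opcode = req.Opcode // Echo opcode and opaque so the client can match.
	res.Opaque = req.Opaque

	var conn bytes.Buffer // Stand-in for the client connection.
	n, err := transmitResponse(&conn, res)
	fmt.Printf("wrote %d response bytes, err=%v\n", n, err)
}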