func main() {
	flag.Parse()

	log.Printf("Connecting to %s/%s", *prot, *dest)
	client, err := memcached.Connect(*prot, *dest)
	if err != nil {
		log.Fatalf("Error connecting: %v", err)
	}

	if *u != "" {
		resp, err := client.Auth(*u, *p)
		if err != nil {
			log.Fatalf("auth error: %v", err)
		}
		log.Printf("Auth response = %v", resp)
	}

	args := memcached.DefaultTapArguments()
	args.Backfill = uint64(*back)
	args.Dump = *dump
	args.SupportAck = *ack
	args.KeysOnly = *keysOnly
	args.Checkpoint = *checkpoint

	feed, err := client.StartTapFeed(args)
	if err != nil {
		log.Fatalf("Error starting tap feed: %v", err)
	}

	for op := range feed.C {
		if *raw {
			log.Printf("Received %#v\n", op)
		} else {
			log.Printf("Received %s\n", op.String())
			if len(op.Value) > 0 && len(op.Value) < 500 {
				log.Printf("\tValue: %s", op.Value)
			}
		}
	}
	log.Printf("Tap feed closed; err = %v.", feed.Error)
}
// Connect once to the server and work the UPR stream. If anything
// goes wrong, return our level of progress in order to let our caller
// control any potential retries.
func (d *bucketDataSource) worker(server string, workerCh chan []uint16) int {
	atomic.AddUint64(&d.stats.TotWorkerBody, 1)

	if !d.isRunning() {
		return -1
	}

	atomic.AddUint64(&d.stats.TotWorkerConnect, 1)
	connect := d.options.Connect
	if connect == nil {
		connect = memcached.Connect
	}

	client, err := connect("tcp", server)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerConnectErr, 1)
		d.receiver.OnError(fmt.Errorf("worker connect, server: %s, err: %v",
			server, err))
		return 0
	}
	defer client.Close()
	atomic.AddUint64(&d.stats.TotWorkerConnectOk, 1)

	if d.auth != nil {
		var user, pswd string
		var adminCred bool
		if auth, ok := d.auth.(couchbase.AuthWithSaslHandler); ok {
			user, pswd = auth.GetSaslCredentials()
			adminCred = true
		} else {
			user, pswd, _ = d.auth.GetCredentials()
		}
		if user != "" {
			atomic.AddUint64(&d.stats.TotWorkerAuth, 1)
			res, err := client.Auth(user, pswd)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerAuthErr, 1)
				d.receiver.OnError(fmt.Errorf("worker auth, server: %s, user: %s, err: %v",
					server, user, err))
				return 0
			}
			if res.Status != gomemcached.SUCCESS {
				atomic.AddUint64(&d.stats.TotWorkerAuthFail, 1)
				d.receiver.OnError(&AuthFailError{ServerURL: server, User: user})
				return 0
			}
			if adminCred {
				atomic.AddUint64(&d.stats.TotWorkerAuthOk, 1)
				_, err = client.SelectBucket(d.bucketName)
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerSelBktFail, 1)
					d.receiver.OnError(fmt.Errorf("worker select bucket err: %v", err))
					return 0
				}
				atomic.AddUint64(&d.stats.TotWorkerSelBktOk, 1)
			}
		}
	}

	uprOpenName := d.options.Name
	if uprOpenName == "" {
		uprOpenName = fmt.Sprintf("cbdatasource-%x", rand.Int63())
	}

	err = UPROpen(client, uprOpenName, d.options.FeedBufferSizeBytes)
	if err != nil {
		atomic.AddUint64(&d.stats.TotWorkerUPROpenErr, 1)
		d.receiver.OnError(err)
		return 0
	}
	atomic.AddUint64(&d.stats.TotWorkerUPROpenOk, 1)

	ackBytes :=
		uint32(d.options.FeedBufferAckThreshold * float32(d.options.FeedBufferSizeBytes))

	sendCh := make(chan *gomemcached.MCRequest, 1)
	sendEndCh := make(chan struct{})
	recvEndCh := make(chan struct{})

	cleanup := func(progress int, err error) int {
		if err != nil {
			d.receiver.OnError(err)
		}
		go func() {
			<-recvEndCh
			close(sendCh)
		}()
		return progress
	}

	currVBuckets := make(map[uint16]*VBucketState)
	currVBucketsMutex := sync.Mutex{} // Protects currVBuckets.

	go func() { // Sender goroutine.
		defer close(sendEndCh)

		atomic.AddUint64(&d.stats.TotWorkerTransmitStart, 1)
		for msg := range sendCh {
			atomic.AddUint64(&d.stats.TotWorkerTransmit, 1)
			err := client.Transmit(msg)
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerTransmitErr, 1)
				d.receiver.OnError(fmt.Errorf("client.Transmit, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerTransmitOk, 1)
		}
		atomic.AddUint64(&d.stats.TotWorkerTransmitDone, 1)
	}()

	go func() { // Receiver goroutine.
		defer close(recvEndCh)

		atomic.AddUint64(&d.stats.TotWorkerReceiveStart, 1)

		var hdr [gomemcached.HDR_LEN]byte
		var pkt gomemcached.MCRequest
		var res gomemcached.MCResponse

		// Track received bytes in case we need to buffer-ack.
		recvBytesTotal := uint32(0)

		conn := client.Hijack()

		for {
			// TODO: memory allocation here.
			atomic.AddUint64(&d.stats.TotWorkerReceive, 1)
			_, err := pkt.Receive(conn, hdr[:])
			if err != nil {
				atomic.AddUint64(&d.stats.TotWorkerReceiveErr, 1)
				d.receiver.OnError(fmt.Errorf("pkt.Receive, err: %v", err))
				return
			}
			atomic.AddUint64(&d.stats.TotWorkerReceiveOk, 1)

			if pkt.Opcode == gomemcached.UPR_MUTATION ||
				pkt.Opcode == gomemcached.UPR_DELETION ||
				pkt.Opcode == gomemcached.UPR_EXPIRATION {
				atomic.AddUint64(&d.stats.TotUPRDataChange, 1)

				vbucketID := pkt.VBucket

				currVBucketsMutex.Lock()
				vbucketState := currVBuckets[vbucketID]

				if vbucketState == nil || vbucketState.State != "running" {
					currVBucketsMutex.Unlock()
					atomic.AddUint64(&d.stats.TotUPRDataChangeStateErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange,"+
						" wrong vbucketState: %#v, err: %v", vbucketState, err))
					return
				}

				if !vbucketState.SnapSaved {
					// NOTE: Following the ep-engine's approach, we
					// wait to persist SnapStart/SnapEnd until we see
					// the first mutation/deletion in the new snapshot
					// range. That reduces a race window where if we
					// kill and restart this process right now after a
					// setVBucketMetaData() and before the next,
					// first-mutation-in-snapshot, then a restarted
					// stream-req using this just-saved
					// SnapStart/SnapEnd might have a lastSeq number <
					// SnapStart, where Couchbase Server will respond
					// to the stream-req with an ERANGE error code.
					v, _, err := d.getVBucketMetaData(vbucketID)
					if err != nil || v == nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" getVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					v.SnapStart = vbucketState.SnapStart
					v.SnapEnd = vbucketState.SnapEnd

					err = d.setVBucketMetaData(vbucketID, v)
					if err != nil {
						currVBucketsMutex.Unlock()
						d.receiver.OnError(fmt.Errorf("error: DataChange,"+
							" setVBucketMetaData, vbucketID: %d, err: %v",
							vbucketID, err))
						return
					}

					vbucketState.SnapSaved = true
				}
				currVBucketsMutex.Unlock()

				seq := binary.BigEndian.Uint64(pkt.Extras[:8])

				if pkt.Opcode == gomemcached.UPR_MUTATION {
					atomic.AddUint64(&d.stats.TotUPRDataChangeMutation, 1)
					err = d.receiver.DataUpdate(vbucketID, pkt.Key, seq, &pkt)
				} else {
					if pkt.Opcode == gomemcached.UPR_DELETION {
						atomic.AddUint64(&d.stats.TotUPRDataChangeDeletion, 1)
					} else {
						atomic.AddUint64(&d.stats.TotUPRDataChangeExpiration, 1)
					}
					err = d.receiver.DataDelete(vbucketID, pkt.Key, seq, &pkt)
				}

				if err != nil {
					atomic.AddUint64(&d.stats.TotUPRDataChangeErr, 1)
					d.receiver.OnError(fmt.Errorf("error: DataChange, err: %v", err))
					return
				}

				atomic.AddUint64(&d.stats.TotUPRDataChangeOk, 1)
			} else {
				res.Opcode = pkt.Opcode
				res.Opaque = pkt.Opaque
				res.Status = gomemcached.Status(pkt.VBucket)
				res.Extras = pkt.Extras
				res.Cas = pkt.Cas
				res.Key = pkt.Key
				res.Body = pkt.Body

				atomic.AddUint64(&d.stats.TotWorkerHandleRecv, 1)
				currVBucketsMutex.Lock()
				err := d.handleRecv(sendCh, currVBuckets, &res)
				currVBucketsMutex.Unlock()
				if err != nil {
					atomic.AddUint64(&d.stats.TotWorkerHandleRecvErr, 1)
					d.receiver.OnError(fmt.Errorf("error: HandleRecv, err: %v", err))
					return
				}
				atomic.AddUint64(&d.stats.TotWorkerHandleRecvOk, 1)
			}

			recvBytesTotal +=
				uint32(gomemcached.HDR_LEN) +
					uint32(len(pkt.Key)+len(pkt.Extras)+len(pkt.Body))
			if ackBytes > 0 && recvBytesTotal > ackBytes {
				atomic.AddUint64(&d.stats.TotUPRBufferAck, 1)
				ack := &gomemcached.MCRequest{Opcode: gomemcached.UPR_BUFFERACK}
				ack.Extras = make([]byte, 4) // TODO: Memory mgmt.
				binary.BigEndian.PutUint32(ack.Extras, uint32(recvBytesTotal))
				sendCh <- ack
				recvBytesTotal = 0
			}
		}
	}()

	atomic.AddUint64(&d.stats.TotWorkerBodyKick, 1)
	d.Kick("new-worker")

	for {
		select {
		case <-sendEndCh:
			atomic.AddUint64(&d.stats.TotWorkerSendEndCh, 1)
			return cleanup(0, nil)

		case <-recvEndCh:
			// If we lost a connection, then maybe a node was rebalanced out,
			// or failed over, so ask for a cluster refresh just in case.
			d.Kick("recvEndCh")

			atomic.AddUint64(&d.stats.TotWorkerRecvEndCh, 1)
			return cleanup(0, nil)

		case wantVBucketIDs, alive := <-workerCh:
			atomic.AddUint64(&d.stats.TotRefreshWorker, 1)

			if !alive {
				atomic.AddUint64(&d.stats.TotRefreshWorkerDone, 1)
				return cleanup(-1, nil) // We've been asked to shutdown.
			}

			currVBucketsMutex.Lock()
			err := d.refreshWorker(sendCh, currVBuckets, wantVBucketIDs)
			currVBucketsMutex.Unlock()
			if err != nil {
				return cleanup(0, err)
			}

			atomic.AddUint64(&d.stats.TotRefreshWorkerOk, 1)
		}
	}

	return cleanup(-1, nil) // Unreached.
}
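// Illustrative sketch only (not part of the original code): how a caller
// might drive worker() given its progress-based return contract, where a
// negative result means "shut down, do not retry" and 0 means "made no
// lasting progress, the caller may retry". The runWorkerLoop name and the
// fixed one-second backoff are assumptions for this sketch; the real
// caller can apply whatever retry/backoff policy it prefers. Requires the
// standard "time" package.
func runWorkerLoop(d *bucketDataSource, server string, workerCh chan []uint16) {
	for {
		progress := d.worker(server, workerCh)
		if progress < 0 {
			return // Shutdown requested (or the data source is no longer running).
		}
		// progress == 0: connect/auth/stream setup failed or the feed ended;
		// pause briefly, then retry against the same server.
		time.Sleep(1 * time.Second)
	}
}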
func main() {
	flag.Parse()

	log.Printf("Connecting to %s/%s", *prot, *dest)
	client, err := memcached.Connect(*prot, *dest)
	if err != nil {
		log.Fatalf("Error connecting: %v", err)
	}

	if *u != "" {
		resp, err := client.Auth(*u, *p)
		if err != nil {
			log.Fatalf("auth error: %v", err)
		}
		log.Printf("Auth response = %v", resp)
	}

	// Get failover logs for some vbuckets.
	vbuckets := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	failovermap, err := client.UprGetFailoverLog(vbuckets)
	if err != nil {
		log.Fatalf("Failed to get failover log %v", err)
	}
	for vb, flog := range failovermap {
		log.Printf("Failover log for vb %d is %v", vb, flog)
	}

	client, err = memcached.Connect(*prot, *dest)
	if err != nil {
		log.Fatalf("Error connecting: %v", err)
	}

	if *u != "" {
		resp, err := client.Auth(*u, *p)
		if err != nil {
			log.Fatalf("auth error: %v", err)
		}
		log.Printf("Auth response = %v", resp)
	}

	uf, err := client.NewUprFeed()
	if err != nil {
		log.Fatalf("Error connecting: %v", err)
	}

	err = uf.UprOpen("example", 0, 400)
	if err != nil {
		log.Fatalf("Error in UPR Open: %v", err)
	}

	//time.Sleep(10 * time.Second)

	for i := 0; i < 64; i++ {
		err := uf.UprRequestStream(uint16(i), 0, 0, 0, 0, 0xFFFFFFFFFFFFFFFF, 0, 0)
		if err != nil {
			log.Fatalf("Request stream for vb %d Failed %v", i, err)
		}
	}

	err = uf.UprRequestStream(uint16(100), 0, 0, 0, 0, 0, 0, 0)
	if err != nil {
		log.Fatalf("Request stream for vb 100 Failed %v", err)
	}

	err = uf.StartFeed()
	if err != nil {
		log.Fatalf("Error starting upr feed: %v", err)
	}

	for op := range uf.C {
		if op.String() == "UPR_SNAPSHOT" {
			log.Printf("Received Snapshot marker for Vbucket %d. Start Sequence %d End Sequence %d",
				op.VBucket, op.SnapstartSeq, op.SnapendSeq)
		} else if op.String() == "UPR_MUTATION" {
			log.Printf("Received %s Key %s, Sequence %d, Cas %d\n",
				op.String(), op.Key, op.Seqno, op.Cas)
			if len(op.Value) > 0 && len(op.Value) < 500 {
				log.Printf("\tValue: %s", op.Value)
			}
		} else if op.String() == "UPR_STREAMEND" {
			log.Printf("Received stream end event for vbucket %d", op.VBucket)
		} else if op.String() == "UPR_DELETION" {
			log.Printf("Received deletion for vbucket %d Seq No %d Rev Seq No %d",
				op.VBucket, op.Seqno, op.RevSeqno)
		}

		if op.Status != 0 {
			log.Printf("Got an Error for vbucket %d, Error %s", op.VBucket, op.Error.Error())
		}

		log.Printf("Receiving %v", op.String())
	}

	log.Printf("Upr feed closed; err = %v.", uf.Error)
}