func startBucket(cluster, bucketn string, rch chan []interface{}) int {
	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("Recovered from panic %v", r)
			logging.Errorf("%v", logging.StackTrace())
		}
	}()

	logging.Infof("Connecting with %q\n", bucketn)
	b, err := common.ConnectBucket(cluster, "default", bucketn)
	mf(err, "bucket")

	dcpConfig := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := b.StartDcpFeed("rawupr", uint32(0), 0xABCD, dcpConfig)
	mf(err, "- upr")

	vbnos := listOfVbnos(options.maxVbno)
	flogs, err := b.GetFailoverLogs(0xABCD, vbnos, dcpConfig)
	mf(err, "- dcp failoverlogs")

	if options.printflogs {
		printFlogs(vbnos, flogs)
	}

	go startDcp(dcpFeed, flogs)

	for {
		e, ok := <-dcpFeed.C
		if !ok {
			logging.Infof("Closing for bucket %q\n", bucketn)
			return 0 // feed channel closed; stop forwarding events
		}
		rch <- []interface{}{bucketn, e}
	}
}
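// Sketch (not part of the original source): one way to drive startBucket is
// to drain the rch channel in the caller. The cluster address, bucket name
// and channel size below are illustrative assumptions.
//
//	rch := make(chan []interface{}, 10000)
//	go startBucket("localhost:9000", "default", rch)
//	for msg := range rch {
//		bucketn, e := msg[0].(string), msg[1]
//		logging.Infof("event from %q: %v\n", bucketn, e)
//	}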
func main() {
	logging.SetLogLevel(logging.Error)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, args, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v", err)
		os.Exit(0)
	} else if cmdOptions.Help {
		usage(fset)
		os.Exit(0)
	} else if len(args) < 1 {
		logging.Fatalf("%v", "specify a command")
	}

	b, err := c.ConnectBucket(cmdOptions.Server, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	maxvb, err := c.MaxVbuckets(b)
	if err != nil {
		log.Fatal(err)
	}

	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		log.Fatal(err)
	}

	switch args[0] {
	case "sanity":
		err = doSanityTests(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb14786":
		err = doMB14786(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb13339":
		err = doMB13339(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "benchmark":
		doBenchmark(cmdOptions.Server, "localhost:8101")

	case "consistency":
		doConsistency(cmdOptions.Server, maxvb, client)
	}
	client.Close()
}
func numVbuckets(cluster, bucketn string) (numVb int) {
	b, err := common.ConnectBucket(cluster, "default" /*pooln*/, bucketn)
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	if numVb, err = common.MaxVbuckets(b); err != nil {
		log.Fatal(err)
	}
	return numVb
}
// doFailoverLog handles a failover-log request for the given vbuckets.
// - return couchbase SDK error, if any, in the response.
func (p *Projector) doFailoverLog(
	request *protobuf.FailoverLogRequest, opaque uint16) ap.MessageMarshaller {

	response := &protobuf.FailoverLogResponse{}

	pooln := request.GetPool()
	bucketn := request.GetBucket()
	vbuckets := request.GetVbnos()

	// log this request.
	prefix := p.logPrefix
	fmsg := "%v ##%x doFailoverLog() {%q, %q, %v}\n"
	logging.Infof(fmsg, prefix, opaque, pooln, bucketn, vbuckets)
	defer logging.Infof("%v ##%x doFailoverLog() returns ...\n", prefix, opaque)

	bucket, err := c.ConnectBucket(p.clusterAddr, pooln, bucketn)
	if err != nil {
		logging.Errorf("%v ##%x ConnectBucket(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	defer bucket.Close()

	protoFlogs := make([]*protobuf.FailoverLog, 0, len(vbuckets))
	vbnos := c.Vbno32to16(vbuckets)

	dcpConfig := map[string]interface{}{
		"genChanSize":  p.config["projector.dcp.genChanSize"].Int(),
		"dataChanSize": p.config["projector.dcp.dataChanSize"].Int(),
	}
	flogs, err := bucket.GetFailoverLogs(opaque, vbnos, dcpConfig)
	if err == nil {
		for vbno, flog := range flogs {
			vbuuids := make([]uint64, 0, len(flog))
			seqnos := make([]uint64, 0, len(flog))
			for _, x := range flog {
				vbuuids = append(vbuuids, x[0])
				seqnos = append(seqnos, x[1])
			}
			protoFlog := &protobuf.FailoverLog{
				Vbno:    proto.Uint32(uint32(vbno)),
				Vbuuids: vbuuids,
				Seqnos:  seqnos,
			}
			protoFlogs = append(protoFlogs, protoFlog)
		}
	} else {
		logging.Errorf("%v ##%x GetFailoverLogs(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	response.Logs = protoFlogs
	return response
}
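// Sketch (not from the original source): a caller that receives the
// marshalled response can walk the failover logs roughly as below, assuming
// the usual protobuf-generated getters on FailoverLogResponse.
//
//	for _, flog := range response.GetLogs() {
//		vbno := flog.GetVbno()
//		for i, vbuuid := range flog.GetVbuuids() {
//			seqno := flog.GetSeqnos()[i]
//			logging.Infof("vb %d: vbuuid %x seqno %d\n", vbno, vbuuid, seqno)
//		}
//	}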
func bucketTs(cluster, bucketn string, numVb int) {
	b, err := common.ConnectBucket(cluster, "default" /*pooln*/, bucketn)
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	start := time.Now()
	for i := 0; i < options.trials; i++ {
		if _, _, err = common.BucketTs(b, numVb); err != nil {
			log.Fatal(err)
		}
	}
	durtn := time.Since(start) / time.Duration(options.trials)
	log.Printf("bucketTs: %v\n", durtn)
}
// doVbmapRequest handles a vbucket-map request for the given kv-nodes.
// - return couchbase SDK error, if any, in the response.
func (p *Projector) doVbmapRequest(
	request *protobuf.VbmapRequest, opaque uint16) ap.MessageMarshaller {

	response := &protobuf.VbmapResponse{}

	pooln := request.GetPool()
	bucketn := request.GetBucket()
	kvaddrs := request.GetKvaddrs()

	// log this request.
	prefix := p.logPrefix
	fmsg := "%v ##%x doVbmapRequest() {%q, %q, %v}\n"
	logging.Infof(fmsg, prefix, opaque, pooln, bucketn, kvaddrs)
	defer logging.Infof("%v ##%x doVbmapRequest() returns ...\n", prefix, opaque)

	// get vbmap from bucket connection.
	bucket, err := c.ConnectBucket(p.clusterAddr, pooln, bucketn)
	if err != nil {
		logging.Errorf("%v ##%x ConnectBucket(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}
	defer bucket.Close()

	bucket.Refresh()
	m, err := bucket.GetVBmap(kvaddrs)
	if err != nil {
		logging.Errorf("%v ##%x GetVBmap(): %v\n", prefix, opaque, err)
		response.Err = protobuf.NewError(err)
		return response
	}

	// compose response
	response.Kvaddrs = make([]string, 0, len(kvaddrs))
	response.Kvvbnos = make([]*protobuf.Vbuckets, 0, len(kvaddrs))
	for kvaddr, vbnos := range m {
		response.Kvaddrs = append(response.Kvaddrs, kvaddr)
		response.Kvvbnos = append(
			response.Kvvbnos,
			&protobuf.Vbuckets{Vbnos: c.Vbno16to32(vbnos)})
	}
	return response
}
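// Sketch (not from the original source): the vbucket map in the response is
// encoded as two parallel slices, Kvaddrs and Kvvbnos, so a consumer would
// typically zip them back together; the getter names assume the usual
// protobuf-generated accessors.
//
//	kvaddrs, kvvbnos := response.GetKvaddrs(), response.GetKvvbnos()
//	for i, kvaddr := range kvaddrs {
//		vbnos := c.Vbno32to16(kvvbnos[i].GetVbnos())
//		logging.Infof("%v hosts vbuckets %v\n", kvaddr, vbnos)
//	}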
// BucketTs will return the current vbucket-timestamp using the STATS
// command.
func (c *GsiClient) BucketTs(bucketn string) (*TsConsistency, error) {
	b, err := common.ConnectBucket(c.cluster, "default" /*pooln*/, bucketn)
	if err != nil {
		return nil, err
	}
	defer b.Close()

	if c.maxvb == -1 {
		if c.maxvb, err = common.MaxVbuckets(b); err != nil {
			return nil, err
		}
	}

	seqnos, vbuuids, err := common.BucketTs(b, c.maxvb)
	if err != nil {
		return nil, err
	}
	vbnos := make([]uint16, c.maxvb)
	for i := range vbnos {
		vbnos[i] = uint16(i)
	}
	return NewTsConsistency(vbnos, seqnos, vbuuids), nil
}
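// Sketch (not from the original source): BucketTs can be used to build a
// query-consistency timestamp before issuing a scan; the bucket name and
// error handling here are illustrative.
//
//	ts, err := client.BucketTs("beer-sample")
//	if err != nil {
//		log.Fatal(err)
//	}
//	// pass ts as the consistency timestamp of a subsequent lookup or
//	// range scan on the index.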
func startBucket(cluster, bucketn string, ch chan *couchbase.UprFeed) int {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("%s:\n%s\n", r, debug.Stack())
			common.StackTrace(string(debug.Stack()))
		}
	}()

	common.Infof("Connecting with %q\n", bucketn)
	b, err := common.ConnectBucket(cluster, "default", bucketn)
	mf(err, "bucket")

	uprFeed, err := b.StartUprFeed("rawupr", uint32(0))
	mf(err, "- upr")

	vbnos := listOfVbnos(options.maxVbno)
	flogs, err := b.GetFailoverLogs(vbnos)
	mf(err, "- upr failoverlogs")

	if options.printflogs {
		printFlogs(vbnos, flogs)
	}

	ch <- uprFeed
	go startUpr(uprFeed, flogs)

	for {
		e, ok := <-uprFeed.C
		if !ok {
			common.Infof("Closing for bucket %q\n", bucketn)
			return 0 // feed channel closed; stop forwarding events
		}
		// rch is assumed to be a package-level mutation channel shared by
		// all bucket feeds.
		rch <- []interface{}{bucketn, e}
	}
}
func doConsistency(
	cluster string, maxvb int, client *qclient.GsiClient) (err error) {

	b, err := common.ConnectBucket(cluster, "default", "beer-sample")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	vbnos := make([]uint16, maxvb)
	for i := range vbnos {
		vbnos[i] = uint16(i)
	}

	// Drop index
	args := []string{
		"-type", "drop", "-bucket", "beer-sample", "-index", "index-city",
	}
	cmd, _, _, _ := querycmd.ParseArgs(args)
	querycmd.HandleCommand(client, cmd, true, os.Stdout)

	// Create index
	args = []string{
		"-type", "create", "-bucket", "beer-sample", "-index", "index-city",
		"-fields", "city",
	}
	cmd, _, _, err = querycmd.ParseArgs(args)
	if err != nil {
		log.Fatal(err)
	}
	querycmd.HandleCommand(client, cmd, true, os.Stdout)

	// Wait for index to become active.
	index, ok := querycmd.GetIndex(client, "beer-sample", "index-city")
	if !ok {
		log.Fatalf("cannot get definition ID")
	}
	defnID := uint64(index.Definition.DefnId)
	_, err = querycmd.WaitUntilIndexState(
		client, []uint64{defnID}, common.INDEX_STATE_ACTIVE,
		100 /*period*/, 20000 /*timeout*/)
	if err != nil {
		log.Fatal(err)
	}

	synch := make(chan bool, 1)

	// Get the latest seqnos, vbuuids and the vbucket that contains `docid`.
	seqnos, vbuuids, vbno, vbuuid, seqno := setValueConst(b, maxvb, constDocValue1)

	equal := common.SecondaryKey(querycmd.Arg2Key(constEqualLookup1))
	equals := []common.SecondaryKey{equal}
	anyConsistency(client, defnID, equals)

	// query-consistency without any new mutations.
	ts := qclient.NewTsConsistency(vbnos, seqnos, vbuuids)
	queryConsistency(client, defnID, ts, equals, synch)
	<-synch

	// query-consistency with a new mutation.
	equal = common.SecondaryKey(querycmd.Arg2Key(constEqualLookup2))
	equals = []common.SecondaryKey{equal}
	seqno++
	ts = ts.Override(vbno, seqno, vbuuid)
	queryConsistency(client, defnID, ts, equals, synch)
	time.Sleep(2 * time.Second)
	setValueConst(b, maxvb, constDocValue2)
	<-synch

	// query-consistency with a new mutation.
	equal = common.SecondaryKey(querycmd.Arg2Key(constEqualLookup3))
	equals = []common.SecondaryKey{equal}
	seqno++
	ts = qclient.NewTsConsistency(
		[]uint16{vbno}, []uint64{seqno}, []uint64{vbuuid})
	queryConsistency(client, defnID, ts, equals, synch)
	time.Sleep(2 * time.Second)
	setValueConst(b, maxvb, constDocValue3)
	<-synch

	// session-consistency without any new mutations.
	sessionConsistency(client, defnID, equals, synch)
	<-synch

	// session-consistency with a new mutation.
	setValueConst(b, maxvb, constDocValue4)
	equal = common.SecondaryKey(querycmd.Arg2Key(constEqualLookup4))
	equals = []common.SecondaryKey{equal}
	sessionConsistency(client, defnID, equals, synch)
	<-synch

	// session-consistency with a new mutation.
	equal = common.SecondaryKey(querycmd.Arg2Key(constEqualLookup5))
	equals = []common.SecondaryKey{equal}
	setValueConst(b, maxvb, constDocValue5)
	sessionConsistency(client, defnID, equals, synch)
	<-synch

	return nil
}
func startFeed(cluster, name string) {
	bucket, err := c.ConnectBucket(cluster, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer bucket.Close()

	options.maxVbs, err = c.MaxVbuckets(bucket)
	if err != nil {
		log.Fatal(err)
	}
	options.vbuckets = make([]uint16, 0, options.maxVbs)
	for i := 0; i < options.maxVbs; i++ {
		options.vbuckets = append(options.vbuckets, uint16(i))
	}

	// get dcp feed for this bucket.
	config := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := bucket.StartDcpFeed(name, uint32(0), 0xABCD, config)
	if err != nil {
		log.Fatal(err)
	}

	go func() {
		// start vbucket streams
		for _, vbno := range options.vbuckets {
			flags, vbuuid := uint32(0), uint64(0)
			start, end := uint64(0), uint64(0xFFFFFFFFFFFFFFFF)
			snapStart, snapEnd := uint64(0), uint64(0)
			err := dcpFeed.DcpRequestStream(
				vbno, vbno /*opaque*/, flags, vbuuid,
				start, end, snapStart, snapEnd)
			if err != nil {
				log.Fatal(err)
			}
			// FIXME/TODO: the sleep below avoids back-to-back dispatch of
			// StreamRequest to DCP, which seems to cause some problems.
			time.Sleep(1 * time.Millisecond)
		}
	}()

	tick := time.Tick(time.Second)
	countEvents := 0
	commands := make(map[byte]int)
	for {
		select {
		case e, ok := <-dcpFeed.C:
			if !ok {
				log.Fatal("dcpFeed channel has closed")
			}
			if _, ok := commands[byte(e.Opcode)]; !ok {
				commands[byte(e.Opcode)] = 0
			}
			commands[byte(e.Opcode)]++
			countEvents++

		case <-tick:
			log.Println("events received countEvents", countEvents)
			log.Println("commands received", commands)
		}
	}
}
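// Sketch (not from the original source): startFeed blocks forever, so a
// minimal driver just parses a cluster address and calls it; the flag name
// and feed name below are illustrative assumptions.
//
//	func main() {
//		cluster := flag.String("cluster", "localhost:9000", "cluster address")
//		flag.Parse()
//		startFeed(*cluster, "rawdcp")
//	}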