Example #1
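This example is the main entry point of a query command-line tool: it parses the command-line arguments, connects to the default bucket, creates a GSI client, and dispatches to one of the sub-commands (sanity, mb14786, mb13339, benchmark, consistency).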
func main() {
	logging.SetLogLevel(logging.Error)
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdOptions, args, fset, err := querycmd.ParseArgs(os.Args[1:])
	if err != nil {
		logging.Fatalf("%v", err)
		os.Exit(1)
	} else if cmdOptions.Help {
		usage(fset)
		os.Exit(0)
	} else if len(args) < 1 {
		logging.Fatalf("%v", "specify a command")
		os.Exit(1)
	}

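	// Connect to the default bucket and determine how many vbuckets it has.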
	b, err := c.ConnectBucket(cmdOptions.Server, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()
	maxvb, err := c.MaxVbuckets(b)
	if err != nil {
		log.Fatal(err)
	}

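	// Create a GSI queryport client from the default queryport.client configuration.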
	config := c.SystemConfig.SectionConfig("queryport.client.", true)
	client, err := qclient.NewGsiClient(cmdOptions.Server, config)
	if err != nil {
		log.Fatal(err)
	}

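	// Dispatch to the requested sub-command.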
	switch args[0] {
	case "sanity":
		err = doSanityTests(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb14786":
		err = doMB14786(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "mb13339":
		err = doMB13339(cmdOptions.Server, client)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error occurred %v\n", err)
		}

	case "benchmark":
		doBenchmark(cmdOptions.Server, "localhost:8101")

	case "consistency":
		doConsistency(cmdOptions.Server, maxvb, client)
	}
	client.Close()
}
Example #2
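A small helper that connects to the given bucket and returns its vbucket count, terminating the process if either step fails.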
func numVbuckets(cluster, bucketn string) (numVb int) {
	b, err := common.ConnectBucket(cluster, "default" /*pooln*/, bucketn)
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()
	if numVb, err = common.MaxVbuckets(b); err != nil {
		log.Fatal(err)
	}
	return numVb
}
Example #3
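GsiClient.BucketTs connects to the bucket, caches its vbucket count on first use, and returns a TsConsistency covering every vbucket with the sequence numbers and vbuuids reported by the STATS command.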
// BucketTs returns the current vbucket-timestamp using the STATS
// command.
func (c *GsiClient) BucketTs(bucketn string) (*TsConsistency, error) {
	b, err := common.ConnectBucket(c.cluster, "default" /*pooln*/, bucketn)
	if err != nil {
		return nil, err
	}
	defer b.Close()

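	// Lazily discover and cache the bucket's vbucket count on first use.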
	if c.maxvb == -1 {
		if c.maxvb, err = common.MaxVbuckets(b); err != nil {
			return nil, err
		}
	}
	seqnos, vbuuids, err := common.BucketTs(b, c.maxvb)
	if err != nil {
		return nil, err
	}
	vbnos := make([]uint16, c.maxvb)
	for i := range vbnos {
		vbnos[i] = uint16(i)
	}
	return NewTsConsistency(vbnos, seqnos, vbuuids), nil
}
Example #4
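startFeed connects to the default bucket, opens a DCP feed, requests a stream for every vbucket, and then counts incoming events per opcode, logging the totals once per second.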
func startFeed(cluster, name string) {
	bucket, err := c.ConnectBucket(cluster, "default", "default")
	if err != nil {
		log.Fatal(err)
	}
	defer bucket.Close()
	options.maxVbs, err = c.MaxVbuckets(bucket)
	if err != nil {
		log.Fatal(err)
	}
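	// Build the list of all vbucket numbers [0, maxVbs).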
	options.vbuckets = make([]uint16, 0, options.maxVbs)
	for i := 0; i < options.maxVbs; i++ {
		options.vbuckets = append(options.vbuckets, uint16(i))
	}

	// get dcp feed for this bucket.
	config := map[string]interface{}{
		"genChanSize":  10000,
		"dataChanSize": 10000,
	}
	dcpFeed, err := bucket.StartDcpFeed(name, uint32(0), 0xABCD, config)
	if err != nil {
		log.Fatal(err)
	}
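	// Request a DCP stream for every vbucket in the background.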
	go func() {
		// start vbucket streams
		for _, vbno := range options.vbuckets {
			flags, vbuuid := uint32(0), uint64(0)
			start, end := uint64(0), uint64(0xFFFFFFFFFFFFFFFF)
			snapStart, snapEnd := uint64(0), uint64(0)
			err := dcpFeed.DcpRequestStream(
				vbno, vbno /*opaque*/, flags, vbuuid, start, end,
				snapStart, snapEnd)
			if err != nil {
				log.Fatal(err)
			}
			// FIXME/TODO: the sleep below avoids back-to-back dispatch of
			// StreamRequest to DCP, which seems to cause problems.
			time.Sleep(1 * time.Millisecond)
		}
	}()
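	// Drain the feed: count events per opcode and log the totals every second.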
	tick := time.Tick(time.Second)
	countEvents := 0
	commands := make(map[byte]int)
	for {
		select {
		case e, ok := <-dcpFeed.C:
			if !ok {
				log.Fatal("dcpFeed channel has closed")
			}
			if _, ok := commands[byte(e.Opcode)]; !ok {
				commands[byte(e.Opcode)] = 0
			}
			commands[byte(e.Opcode)]++
			countEvents++

		case <-tick:
			log.Println("events received countEvents", countEvents)
			log.Println("commands received", commands)
		}
	}
}