Example #1
func (c *timerTestProjectorClient) sendSync(instances []*protobuf.Instance) {

	logging.Infof("timerTestProjectorClient.sendSync() ")

	if len(instances) != 1 {
		util.TT.Fatal("timerTestProjectorClient.sendSync(): More than one index instance sent to fake projector")
	}

	for _, inst := range instances {
		if inst.GetIndexInstance().GetDefinition().GetDefnID() == uint64(406) {

			p := util.NewFakeProjector(manager.COORD_MAINT_STREAM_PORT)
			go p.Run(c.donech)

			payloads := make([]*common.VbKeyVersions, 0, 2000)

			// send StreamBegin for all vbuckets
			for i := 0; i < manager.NUM_VB; i++ {
				payload := common.NewVbKeyVersions("Default", uint16(i) /* vb */, 1, 10)
				kv := common.NewKeyVersions(1, []byte("document-name"), 1)
				kv.AddStreamBegin()
				kv.AddSync()
				payload.AddKeyVersions(kv)
				payloads = append(payloads, payload)
			}

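			// add one more Sync-only message for vb 10 at seqno 100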
			payload := common.NewVbKeyVersions("Default", 10, 1, 10)
			kv := common.NewKeyVersions(100, []byte("document-name"), 1)
			kv.AddSync()
			payload.AddKeyVersions(kv)
			payloads = append(payloads, payload)

			// send payload
			logging.Infof("****** runTimerTestReceiver() sending the first sync message")
			if err := p.Client.SendKeyVersions(payloads, true); err != nil {
				util.TT.Fatal(err)
			}

			payloads = make([]*common.VbKeyVersions, 0, 200)

			payload = common.NewVbKeyVersions("Default", 10, 1, 10)
			kv = common.NewKeyVersions(406, []byte("document-name"), 1)
			kv.AddSync()
			payload.AddKeyVersions(kv)
			payloads = append(payloads, payload)

			// send payload
			logging.Infof("****** runTimerTestReceiver() sending the second sync message")
			if err := p.Client.SendKeyVersions(payloads, true); err != nil {
				util.TT.Fatal(err)
			}
		}
	}
}
Example #2
func (c *streamEndTestProjectorClient) sendSync(timestamps []*protobuf.TsVbuuid) {

	logging.Infof("streamEndTestProjectorClient.sendSync() ")

	if len(timestamps) != 1 {
		util.TT.Fatal("streamEndTestProjectorClient.sendSync(): More than one timestamp sent to fake projector. Num = %v", len(timestamps))
	}

	seqno, _, _, _, err := timestamps[0].Get(uint16(10))
	if err != nil {
		util.TT.Fatal(err)
	}

	p := util.NewFakeProjector(manager.COORD_MAINT_STREAM_PORT)
	go p.Run(c.donech)

	payloads := make([]*common.VbKeyVersions, 0, 2000)

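	// re-emit vb 10 at the seqno taken from the timestamp: StreamBegin followed by Sync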
	payload := common.NewVbKeyVersions("Default", 10 /* vb */, 1, 10)
	kv := common.NewKeyVersions(seqno, []byte("document-name"), 1)
	kv.AddStreamBegin()
	kv.AddSync()
	payload.AddKeyVersions(kv)
	payloads = append(payloads, payload)

	// send payload
	if err := p.Client.SendKeyVersions(payloads, true); err != nil {
		util.TT.Fatal(err)
	}
}
Example #3
// SnapshotData implements the Evaluator{} interface.
func (ie *IndexEvaluator) SnapshotData(
	m *mc.DcpEvent, vbno uint16, vbuuid, seqno uint64) (data interface{}) {

	bucket := ie.Bucket()
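	// single-command KeyVersions (no docid) carrying the DCP snapshot marker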
	kv := c.NewKeyVersions(seqno, nil, 1)
	kv.AddSnapshot(m.SnapshotType, m.SnapstartSeq, m.SnapendSeq)
	return &c.DataportKeyVersions{bucket, vbno, vbuuid, kv}
}
Example #4
func kvDeletions() *common.KeyVersions {
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := common.NewKeyVersions(seqno, docid, maxCount)
	kv.AddDeletion(1, []byte("varanasi"))
	kv.AddDeletion(2, []byte("pune"))
	kv.AddDeletion(3, []byte("mahe"))
	return kv
}
Example #5
func kvUpserts() *common.KeyVersions {
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := common.NewKeyVersions(seqno, docid, maxCount)
	kv.AddUpsert(1, []byte("bangalore"), []byte("varanasi"))
	kv.AddUpsert(2, []byte("delhi"), []byte("pune"))
	kv.AddUpsert(3, []byte("jaipur"), []byte("mahe"))
	return kv
}
Example #6
// StreamEndData implements the Evaluator{} interface.
func (ie *IndexEvaluator) StreamEndData(
	vbno uint16, vbuuid, seqno uint64) (data interface{}) {

	bucket := ie.Bucket()
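	// single-command KeyVersions (no docid) signalling StreamEnd for this vbucket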
	kv := c.NewKeyVersions(seqno, nil, 1)
	kv.AddStreamEnd()
	return &c.DataportKeyVersions{bucket, vbno, vbuuid, kv}
}
Example #7
func TestNewStreamEnd(t *testing.T) {
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := common.NewKeyVersions(seqno, docid, maxCount)
	kv.AddStreamEnd()
	// add it to VbKeyVersions
	vbno, vbuuid, nMuts := uint16(10), uint64(1000), 10
	vb := common.NewVbKeyVersions("default", vbno, vbuuid, nMuts)
	vb.AddKeyVersions(kv)
	testKeyVersions(t, vb)
}
Example #8
func TestAddDropData(t *testing.T) {
	seqno, docid, maxCount := uint64(10), []byte(nil), 1
	kv := common.NewKeyVersions(seqno, docid, maxCount)
	kv.AddDropData()
	// add it to VbKeyVersions
	vbno, vbuuid, nMuts := uint16(10), uint64(1000), 10
	vb := common.NewVbKeyVersions("default", vbno, vbuuid, nMuts)
	vb.AddKeyVersions(kv)
	testKeyVersions(t, vb)
}
Example #9
func BenchmarkAddSyncDecode(b *testing.B) {
	benchmarkMutationDecode(b, func() *common.VbKeyVersions {
		seqno, docid, maxCount := uint64(10), []byte(nil), 1
		kv := common.NewKeyVersions(seqno, docid, maxCount)
		kv.AddSync()
		vbno, vbuuid, nMuts := uint16(10), uint64(1000), 10
		vb := common.NewVbKeyVersions("default", vbno, vbuuid, nMuts)
		vb.AddKeyVersions(kv)
		return vb
	})
}
Example #10
func BenchmarkAddStreamEndDecode(b *testing.B) {
	benchmarkMutationDecode(b, func() *common.VbKeyVersions {
		seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
		kv := common.NewKeyVersions(seqno, docid, maxCount)
		kv.AddStreamEnd()
		// add it to VbKeyVersions
		vbno, vbuuid, nMuts := uint16(10), uint64(1000), 10
		vb := common.NewVbKeyVersions("default", vbno, vbuuid, nMuts)
		vb.AddKeyVersions(kv)
		return vb
	})
}
Example #11
func TestStreamBegin(t *testing.T) {
	maxBuckets, maxvbuckets, mutChanSize := 2, 8, 1000
	logging.SetLogLevel(logging.Silent)

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(addr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start client
	flags := transport.TransportFlag(0).SetProtobuf()
	prefix = "projector.dataport.client."
	config = c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	client, _ := NewClient(
		"cluster", "backfill", addr, flags, maxvbuckets, config)
	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps
	for i := 0; i < maxBuckets; i++ {
		if err := client.SendVbmap(vbmaps[i]); err != nil {
			t.Fatal(err)
		}
	}

	// test a live StreamBegin
	bucket, vbno, vbuuid := "default0", uint16(maxvbuckets), uint64(1111)
	uuid := c.StreamID(bucket, vbno)
	vals, err := client.Getcontext()
	if err != nil {
		t.Fatal(err)
	}
	vbChans := vals[0].(map[string]chan interface{})
	if _, ok := vbChans[uuid]; ok {
		t.Fatal("duplicate id")
	}
	vb := c.NewVbKeyVersions(bucket, vbno, vbuuid, 1)
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := c.NewKeyVersions(seqno, docid, maxCount)
	kv.AddStreamBegin()
	vb.AddKeyVersions(kv)
	err = client.SendKeyVersions([]*c.VbKeyVersions{vb}, true)
	client.Getcontext() // syncup
	if err != nil {
		t.Fatal(err)
	} else if _, ok := vbChans[uuid]; !ok {
		fmt.Printf("%v %v\n", len(vbChans), uuid)
		t.Fatal("failed StreamBegin")
	}
	client.Close()
	daemon.Close()
}
Example #12
func constructVbKeyVersions(bucket string, seqno, nVbs, nMuts, nIndexes int) []*c.VbKeyVersions {
	vbs := make([]*c.VbKeyVersions, 0, nVbs)

	for i := 0; i < nVbs; i++ { // for N vbuckets
		vbno, vbuuid := uint16(i), uint64(i*10)
		vb := c.NewVbKeyVersions(bucket, vbno, vbuuid, nMuts)
		for j := 0; j < nMuts; j++ {
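			// each mutation carries one Upsert command per index: (new-key, old-key) pairs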
			kv := c.NewKeyVersions(uint64(seqno+j), []byte("Bourne"), nIndexes)
			for k := 0; k < nIndexes; k++ {
				key := fmt.Sprintf("bangalore%v", k)
				oldkey := fmt.Sprintf("varanasi%v", k)
				kv.AddUpsert(uint64(k), []byte(key), []byte(oldkey))
			}
			vb.AddKeyVersions(kv)
		}
		vbs = append(vbs, vb)
	}
	return vbs
}
Example #13
func dataKeyVersions(bucket string, seqno, nVbs, nMuts, nIndexes int) []*c.DataportKeyVersions {
	dkvs := make([]*c.DataportKeyVersions, 0)
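	// unlike constructVbKeyVersions, each KeyVersions is wrapped in its own DataportKeyVersions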
	for i := 0; i < nVbs; i++ { // for N vbuckets
		vbno, vbuuid := uint16(i), uint64(i*10)
		for j := 0; j < nMuts; j++ {
			kv := c.NewKeyVersions(uint64(seqno+j), []byte("Bourne"), nIndexes)
			for k := 0; k < nIndexes; k++ {
				key := fmt.Sprintf("bangalore%v", k)
				oldkey := fmt.Sprintf("varanasi%v", k)
				kv.AddUpsert(uint64(k), []byte(key), []byte(oldkey))
			}
			dkv := &c.DataportKeyVersions{
				Bucket: bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			dkvs = append(dkvs, dkv)
		}
	}
	return dkvs
}
Example #14
func (c *deleteTestProjectorClient) sendSync() {

	logging.Infof("deleteTestProjectorClient.sendSync() server %v", c.server)

	p := util.NewFakeProjector(manager.COORD_MAINT_STREAM_PORT)
	go p.Run(c.donech)

	// create a slice of VbKeyVersions payloads
	payloads := make([]*common.VbKeyVersions, 0, 4000)

	delete_test_once.Do(func() {
		logging.Infof("deleteTestProjectorClient.sendSync() sending streamBegin %v", c.server)

		// send StreamBegin for all vbuckets
		for i := 0; i < manager.NUM_VB; i++ {
			if i != 10 && i != 11 {
				payload := common.NewVbKeyVersions("Default", uint16(i) /* vb */, 1, 10)
				kv := common.NewKeyVersions(1, []byte("document-name"), 1)
				kv.AddStreamBegin()
				kv.AddSync()
				payload.AddKeyVersions(kv)
				payloads = append(payloads, payload)
			}
		}

		for i := 0; i < manager.NUM_VB; i++ {
			if i != 12 && i != 13 {
				payload := common.NewVbKeyVersions("Defaultxx", uint16(i) /* vb */, 1, 10)
				kv := common.NewKeyVersions(1, []byte("document-name"), 1)
				kv.AddStreamBegin()
				kv.AddSync()
				payload.AddKeyVersions(kv)
				payloads = append(payloads, payload)
			}
		}
	})

	// bucket <Default>, node <127.0.0.1>  -> vb 10, seqno 401
	// bucket <Default>, node <127.0.0.2>  -> vb 11, seqno 402
	// bucket <Defaultxx>, node <127.0.0.1> -> vb 12, seqno 403
	// bucket <Defaultxx>, node <127.0.0.2> -> vb 13, seqno 404
	for _, inst := range delete_test_status {
		seqno := 0
		vb := 0
		bucket := inst.GetIndexInstance().GetDefinition().GetBucket()
		if bucket == "Default" {
			if c.server == "127.0.0.1" {
				seqno = 401
				vb = 10
			} else if c.server == "127.0.0.2" {
				seqno = 402
				vb = 11
			}
		} else if bucket == "Defaultxx" {
			if c.server == "127.0.0.1" {
				seqno = 403
				vb = 12
			} else if c.server == "127.0.0.2" {
				seqno = 404
				vb = 13
			}
		}

		logging.Infof("deleteTestProjectorClient.sendSync() for node %v and bucket %v vbucket %v seqno %d",
			c.server, bucket, vb, seqno)

		// Create Sync Message
		payload := common.NewVbKeyVersions(bucket, uint16(vb), 1, 10)
		kv := common.NewKeyVersions(uint64(seqno), []byte("document-name"), 1)
		kv.AddStreamBegin()
		kv.AddSync()
		payload.AddKeyVersions(kv)
		payloads = append(payloads, payload)
	}

	// Send payload
	if len(payloads) != 0 {
		logging.Infof("deleteTestProjectorClient.sendSync() sending payloads to stream manager for %s", c.server)
		err := p.Client.SendKeyVersions(payloads, true)
		if err != nil {
			util.TT.Fatal(err)
		}
	}
}
Example #15
func TestLoopback(t *testing.T) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 32, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start endpoint
	config = c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
	}

	count, seqno := 200, 1
	for i := 1; i <= count; i += 2 {
		nVbs, nMuts, nIndexes := maxvbuckets, 5, 5
		dkvs := dataKeyVersions("default0", seqno, nVbs, nMuts, nIndexes)
		dkvs = append(dkvs, dataKeyVersions("default1", seqno, nVbs, nMuts, nIndexes)...)
		for _, dkv := range dkvs {
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
		seqno += nMuts

		// gather
		pvbs := make([]*protobuf.VbKeyVersions, 0)
	loop:
		for {
			select {
			case msg := <-appch:
				//t.Logf("%T %v\n", msg, msg)
				if pvbsSub, ok := msg.([]*protobuf.VbKeyVersions); !ok {
					t.Fatalf("unexpected type in loopback %T", msg)
				} else {
					pvbs = append(pvbs, pvbsSub...)
				}
			case <-time.After(10 * time.Millisecond):
				break loop
			}
		}
		commands := make(map[byte]int)
		for _, vb := range protobuf2VbKeyVersions(pvbs) {
			for _, kv := range vb.Kvs {
				for _, cmd := range kv.Commands {
					if _, ok := commands[byte(cmd)]; !ok {
						commands[byte(cmd)] = 0
					}
					commands[byte(cmd)]++
				}
			}
		}
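		// expected per iteration: 2 buckets * 32 vbuckets * 5 mutations * 5 indexes = 1600 Upserts;
		// StreamBegins, when present, total 2 buckets * 32 vbuckets = 64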
		if streamBegins, ok := commands[c.StreamBegin]; ok && streamBegins != 64 {
			t.Fatalf("unexpected response %v", streamBegins)
		}
		if commands[c.Upsert] != 1600 {
			t.Fatalf("unexpected response %v", commands[c.Upsert])
		}
	}

	endp.Close()
	daemon.Close()
}
Example #16
func TestTimeout(t *testing.T) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 4, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	dconfig := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, dconfig, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start endpoint
	config := c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
		}
	}

	go func() { // this routine will keep one connection alive
		for i := 0; ; i++ {
			vbmap := vbmaps[0] // keep sending sync for first vbucket alone
			idx := i % len(vbmap.Vbuckets)
			vbno, vbuuid := vbmap.Vbuckets[idx], vbmap.Vbuuids[idx]
			// send sync messages
			kv := c.NewKeyVersions(10, nil, 1)
			kv.AddSync()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				t.Fatal(err)
			}
			<-time.After(
				time.Duration(dconfig["tcpReadDeadline"].Int()) * time.Millisecond)
		}
	}()

	wait := true
	for wait {
		select {
		case msg := <-appch:
			switch ce := msg.(type) {
			case []*protobuf.VbKeyVersions:
			case ConnectionError:
				ref := maxvbuckets
				t.Logf("%T %v \n", ce, ce)
				if len(ce) != 2 {
					t.Fatal("mismatch in ConnectionError")
				}
				refBuckets := map[string]bool{"default0": true, "default1": true}
				for bucket, vbnos := range ce {
					delete(refBuckets, bucket)
					if len(vbnos) != ref {
						t.Fatalf("mismatch in ConnectionError %v %v", vbnos, ref)
					}
				}
				if len(refBuckets) > 0 {
					t.Fatalf("mismatch in ConnectionError %v", refBuckets)
				}
				wait = false

			default:
				t.Fatalf("expected connection error %T", msg)
			}
		}
	}

	<-time.After(100 * time.Millisecond)
	endp.Close()

	<-time.After(100 * time.Millisecond)
	daemon.Close()
}
Example #17
// TransformRoute implements the Evaluator{} interface.
func (ie *IndexEvaluator) TransformRoute(
	vbuuid uint64, m *mc.DcpEvent, data map[string]interface{}) (err error) {

	defer func() { // panic safe
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()

	var npkey /*new-partition*/, opkey /*old-partition*/, nkey, okey []byte
	instn := ie.instance

	meta := dcpEvent2Meta(m)
	where, err := ie.wherePredicate(m.Value, meta)
	if err != nil {
		return err
	}

	if where && len(m.Value) > 0 { // project new secondary key
		if npkey, err = ie.partitionKey(m.Value, meta); err != nil {
			return err
		}
		if nkey, err = ie.evaluate(m.Key, m.Value, meta); err != nil {
			return err
		}
	}
	if len(m.OldValue) > 0 { // project old secondary key
		if opkey, err = ie.partitionKey(m.OldValue, meta); err != nil {
			return err
		}
		if okey, err = ie.evaluate(m.Key, m.OldValue, meta); err != nil {
			return err
		}
	}

	vbno, seqno := m.VBucket, m.Seqno
	uuid := instn.GetInstId()

	bucket := ie.Bucket()

	logging.Tracef("inst: %v where: %v (pkey: %v) key: %v\n",
		uuid, where, string(npkey), string(nkey))
	switch m.Opcode {
	case mcd.DCP_MUTATION:
		// FIXME: TODO: the where clause is not yet used to optimize out
		// messages that do not pass the predicate. For that we need a
		// guarantee that the where clause is defined only on immutable fields.
		//if where { // WHERE predicate
		// NOTE: Upsert shall be targeted to indexer node hosting the
		// key.
		raddrs := instn.UpsertEndpoints(m, npkey, nkey, okey)
		for _, raddr := range raddrs {
			dkv, ok := data[raddr].(*c.DataportKeyVersions)
			if !ok {
				kv := c.NewKeyVersions(seqno, m.Key, 4)
				kv.AddUpsert(uuid, nkey, okey)
				dkv = &c.DataportKeyVersions{bucket, vbno, vbuuid, kv}
			} else {
				dkv.Kv.AddUpsert(uuid, nkey, okey)
			}
			data[raddr] = dkv
		}
		//}
		// NOTE: UpsertDeletion shall be broadcasted if old-key is not
		// available.
		raddrs = instn.UpsertDeletionEndpoints(m, opkey, nkey, okey)
		for _, raddr := range raddrs {
			dkv, ok := data[raddr].(*c.DataportKeyVersions)
			if !ok {
				kv := c.NewKeyVersions(seqno, m.Key, 4)
				kv.AddUpsertDeletion(uuid, okey)
				dkv = &c.DataportKeyVersions{bucket, vbno, vbuuid, kv}
			} else {
				dkv.Kv.AddUpsertDeletion(uuid, okey)
			}
			data[raddr] = dkv
		}

	case mcd.DCP_DELETION, mcd.DCP_EXPIRATION:
		// Delete shall be broadcasted if old-key is not available.
		raddrs := instn.DeletionEndpoints(m, opkey, okey)
		for _, raddr := range raddrs {
			dkv, ok := data[raddr].(*c.DataportKeyVersions)
			if !ok {
				kv := c.NewKeyVersions(seqno, m.Key, 4)
				kv.AddDeletion(uuid, okey)
				dkv = &c.DataportKeyVersions{bucket, vbno, vbuuid, kv}
			} else {
				dkv.Kv.AddDeletion(uuid, okey)
			}
			data[raddr] = dkv
		}
	}
	return nil
}
Example #18
func BenchmarkLoopback(b *testing.B) {
	logging.SetLogLevel(logging.Silent)

	raddr := "localhost:8888"
	maxBuckets, maxvbuckets, mutChanSize := 2, 32, 100

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(raddr, maxvbuckets, config, appch)
	if err != nil {
		b.Fatal(err)
	}

	// start endpoint
	config = c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	endp, err := NewRouterEndpoint("clust", "topic", raddr, maxvbuckets, config)
	if err != nil {
		b.Fatal(err)
	}

	vbmaps := makeVbmaps(maxvbuckets, maxBuckets)

	// send StreamBegin
	for _, vbmap := range vbmaps {
		for i := 0; i < len(vbmap.Vbuckets); i++ { // for N vbuckets
			vbno, vbuuid := vbmap.Vbuckets[i], vbmap.Vbuuids[i]
			kv := c.NewKeyVersions(uint64(0), []byte("Bourne"), 1)
			kv.AddStreamBegin()
			dkv := &c.DataportKeyVersions{
				Bucket: vbmap.Bucket, Vbno: vbno, Vbuuid: vbuuid, Kv: kv,
			}
			if err := endp.Send(dkv); err != nil {
				b.Fatal(err)
			}
		}
	}

	go func() {
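		// producer: keep pumping mutations on both buckets so the benchmark loop never starves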
		nVbs, nMuts, nIndexes, seqno := maxvbuckets, 5, 5, 1
		for {
			dkvs := dataKeyVersions("default0", seqno, nVbs, nMuts, nIndexes)
			dkvs = append(dkvs, dataKeyVersions("default1", seqno, nVbs, nMuts, nIndexes)...)
			for _, dkv := range dkvs {
				endp.Send(dkv)
			}
			seqno += nMuts
		}
	}()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		select {
		case msg := <-appch:
			if _, ok := msg.([]*protobuf.VbKeyVersions); !ok {
				b.Fatalf("unexpected type in loopback %T", msg)
			}
		}
	}

	endp.Close()
	daemon.Close()
}