func TestPktKeyVersions(t *testing.T) {
	seqno, nVbs, nMuts, nIndexes := 1, 20, 5, 5
	vbsRef := constructVbKeyVersions("default", seqno, nVbs, nMuts, nIndexes)
	tc := newTestConnection()
	tc.reset()

	flags := transport.TransportFlag(0).SetProtobuf()
	pkt := transport.NewTransportPacket(1000*1024, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	if err := pkt.Send(tc, vbsRef); err != nil { // send reference
		t.Fatal(err)
	}
	if payload, err := pkt.Receive(tc); err != nil { // receive reference
		t.Fatal(err)
	} else { // compare both
		val := payload.([]*protobuf.VbKeyVersions)
		vbs := protobuf2VbKeyVersions(val)
		if len(vbsRef) != len(vbs) {
			t.Fatal("Mismatch in length")
		}
		for i, vb := range vbs {
			if vb.Equal(vbsRef[i]) == false {
				t.Fatal("Mismatch in VbKeyVersions")
			}
		}
	}
}
// doReceive receives requests from the remote peer; when this function
// returns, the connection is expected to be closed.
func (s *Server) doReceive(conn net.Conn, rcvch chan<- interface{}) {
	raddr := conn.RemoteAddr()

	// transport buffer for receiving
	flags := transport.TransportFlag(0).SetProtobuf()
	rpkt := transport.NewTransportPacket(s.maxPayload, flags)
	rpkt.SetDecoder(transport.EncodingProtobuf, protobuf.ProtobufDecode)

	logging.Infof("%v connection %q doReceive() ...\n", s.logPrefix, raddr)

loop:
	for {
		// TODO: Fix read timeout correctly
		// timeoutMs := s.readDeadline * time.Millisecond
		// conn.SetReadDeadline(time.Now().Add(timeoutMs))

		req, err := rpkt.Receive(conn)
		// TODO: handle close-connection and don't print error message.
		if err != nil {
			if err == io.EOF {
				logging.Tracef("%v connection %q exited %v\n", s.logPrefix, raddr, err)
			} else {
				logging.Errorf("%v connection %q exited %v\n", s.logPrefix, raddr, err)
			}
			break loop
		}
		select {
		case rcvch <- req:
		case <-s.killch:
			break loop
		}
	}
	close(rcvch)
}
func EncodeAndWrite(conn net.Conn, buf []byte, r interface{}) (err error) {
	var data []byte
	data, err = ProtobufEncodeInBuf(r, buf[transport.MaxSendBufSize:][:0])
	if err != nil {
		return
	}
	flags := transport.TransportFlag(0).SetProtobuf()
	err = transport.Send(conn, buf, flags, data)
	return
}
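// exampleEncodeAndWrite is a hypothetical caller, sketched here only to show
// how EncodeAndWrite might be fed. It assumes the buffer needs head-room of
// transport.MaxSendBufSize bytes for the frame header (as the slicing above
// suggests); `req` and `maxPayload` are placeholders for any
// protobuf-encodable request and the agreed payload limit.
func exampleEncodeAndWrite(conn net.Conn, req interface{}, maxPayload int) error {
	buf := make([]byte, transport.MaxSendBufSize+maxPayload)
	return EncodeAndWrite(conn, buf, req)
}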
func (cp *connectionPool) defaultMkConn(host string) (*connection, error) {
	logging.Infof("%v open new connection ...\n", cp.logPrefix)
	conn, err := net.Dial("tcp", host)
	if err != nil {
		return nil, err
	}
	flags := transport.TransportFlag(0).SetProtobuf()
	pkt := transport.NewTransportPacket(cp.maxPayload, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobuf.ProtobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobuf.ProtobufDecode)
	return &connection{conn, pkt}, nil
}
func TestStreamBegin(t *testing.T) {
	maxBuckets, maxvbuckets, mutChanSize := 2, 8, 1000
	logging.SetLogLevel(logging.Silent)

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(addr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start client
	flags := transport.TransportFlag(0).SetProtobuf()
	prefix = "projector.dataport.client."
	config = c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	client, err := NewClient(
		"cluster", "backfill", addr, flags, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	}
	vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps
	for i := 0; i < maxBuckets; i++ {
		if err := client.SendVbmap(vbmaps[i]); err != nil {
			t.Fatal(err)
		}
	}

	// test a live StreamBegin
	bucket, vbno, vbuuid := "default0", uint16(maxvbuckets), uint64(1111)
	uuid := c.StreamID(bucket, vbno)
	vals, err := client.Getcontext()
	if err != nil {
		t.Fatal(err)
	}
	vbChans := vals[0].(map[string]chan interface{})
	if _, ok := vbChans[uuid]; ok {
		t.Fatal("duplicate id")
	}
	vb := c.NewVbKeyVersions(bucket, vbno, vbuuid, 1)
	seqno, docid, maxCount := uint64(10), []byte("document-name"), 10
	kv := c.NewKeyVersions(seqno, docid, maxCount)
	kv.AddStreamBegin()
	vb.AddKeyVersions(kv)
	err = client.SendKeyVersions([]*c.VbKeyVersions{vb}, true)
	client.Getcontext() // syncup
	if err != nil {
		t.Fatal(err)
	} else if _, ok := vbChans[uuid]; !ok {
		fmt.Printf("%v %v\n", len(vbChans), uuid)
		t.Fatal("failed StreamBegin")
	}
	client.Close()
	daemon.Close()
}
func BenchmarkSendVbKeyVersions(b *testing.B) {
	seqno, nVbs, nMuts, nIndexes := 1, 20, 5, 5
	vbs := constructVbKeyVersions("default", seqno, nVbs, nMuts, nIndexes)
	tc := newTestConnection()

	flags := transport.TransportFlag(0).SetProtobuf()
	pkt := transport.NewTransportPacket(1000*1024, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tc.reset()
		pkt.Send(tc, vbs)
	}
}
func BenchmarkSendVbmap(b *testing.B) {
	vbmap := &c.VbConnectionMap{
		Bucket:   "default",
		Vbuckets: []uint16{1, 2, 3, 4},
		Vbuuids:  []uint64{10, 20, 30, 40},
	}
	tc := newTestConnection()

	flags := transport.TransportFlag(0).SetProtobuf()
	pkt := transport.NewTransportPacket(1000*1024, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tc.reset()
		pkt.Send(tc, vbmap)
	}
}
func NewFakeProjector(port string) *fakeProjector {
	p := new(fakeProjector)
	addr := net.JoinHostPort("127.0.0.1", port)
	prefix := "projector.dataport."
	config := common.SystemConfig.SectionConfig(prefix, true)
	maxvbs := common.SystemConfig["maxVbuckets"].Int()
	flag := transport.TransportFlag(0).SetProtobuf()
	config.Set("mutationChanSize", common.ConfigValue{
		10000,
		"channel size of projector-dataport-client's data path routine",
		10000,
	})
	config.Set("parConnections", common.ConfigValue{
		1,
		"number of parallel connections to open with remote",
		1,
	})
	var err error
	p.Client, err = dataport.NewClient(
		"unit-test", "mutation topic", addr, flag, maxvbs, config)
	if err != nil {
		TT.Fatal(err)
	}
	return p
}
func TestClient(t *testing.T) {
	maxBuckets, maxvbuckets, mutChanSize := 2, 8, 1000
	logging.SetLogLevel(logging.Silent)

	// start server
	appch := make(chan interface{}, mutChanSize)
	prefix := "indexer.dataport."
	config := c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	daemon, err := NewServer(addr, maxvbuckets, config, appch)
	if err != nil {
		t.Fatal(err)
	}

	// start client and test the number of connections.
	flags := transport.TransportFlag(0).SetProtobuf()
	prefix = "projector.dataport.client."
	config = c.SystemConfig.SectionConfig(prefix, true /*trim*/)
	maxconns := config["parConnections"].Int()
	client, err := NewClient(
		"cluster", "backfill", addr, flags, maxvbuckets, config)
	if err != nil {
		t.Fatal(err)
	} else if len(client.conns) != maxconns {
		t.Fatal("failed dataport client connections")
	} else if len(client.conns) != len(client.connChans) {
		t.Fatal("failed dataport client connection channels")
	} else if len(client.conns) != len(client.conn2Vbs) {
		t.Fatal("failed dataport client connection-to-vbucket map")
	} else {
		vbmaps := makeVbmaps(maxvbuckets, maxBuckets) // vbmaps
		for i := 0; i < maxBuckets; i++ {
			if err := client.SendVbmap(vbmaps[i]); err != nil {
				t.Fatal(err)
			}
		}
		validateClientInstance(client, maxvbuckets, maxconns, maxBuckets, t)
	}
	client.Close()
	daemon.Close()
}
func TestPktVbmap(t *testing.T) {
	vbmapRef := &c.VbConnectionMap{
		Bucket:   "default",
		Vbuckets: []uint16{1, 2, 3, 4},
		Vbuuids:  []uint64{10, 20, 30, 40},
	}
	tc := newTestConnection()
	tc.reset()

	flags := transport.TransportFlag(0).SetProtobuf()
	pkt := transport.NewTransportPacket(1000*1024, flags)
	pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	if err := pkt.Send(tc, vbmapRef); err != nil { // send reference
		t.Fatal(err)
	}
	if payload, err := pkt.Receive(tc); err != nil { // receive reference
		t.Fatal(err)
	} else { // compare both
		vbmap := protobuf2Vbmap(payload.(*protobuf.VbConnectionMap))
		if vbmap.Equal(vbmapRef) == false {
			t.Fatal("Mismatch in VbConnectionMap")
		}
	}
}
// NewRouterEndpoint instantiates a new RouterEndpoint
// routine and returns its reference.
func NewRouterEndpoint(
	cluster, topic, raddr string, maxvbs int,
	config c.Config) (*RouterEndpoint, error) {

	conn, err := net.Dial("tcp", raddr)
	if err != nil {
		return nil, err
	}

	endpoint := &RouterEndpoint{
		topic:      topic,
		raddr:      raddr,
		finch:      make(chan bool),
		timestamp:  time.Now().UnixNano(),
		keyChSize:  config["keyChanSize"].Int(),
		block:      config["remoteBlock"].Bool(),
		bufferSize: config["bufferSize"].Int(),
		bufferTm:   time.Duration(config["bufferTimeout"].Int()),
		harakiriTm: time.Duration(config["harakiriTimeout"].Int()),
	}
	endpoint.ch = make(chan []interface{}, endpoint.keyChSize)
	endpoint.conn = conn

	// TODO: add configuration params for transport flags.
	flags := transport.TransportFlag(0).SetProtobuf()
	maxPayload := config["maxPayload"].Int()
	endpoint.pkt = transport.NewTransportPacket(maxPayload, flags)
	endpoint.pkt.SetEncoder(transport.EncodingProtobuf, protobufEncode)
	endpoint.pkt.SetDecoder(transport.EncodingProtobuf, protobufDecode)

	endpoint.logPrefix = fmt.Sprintf(
		"ENDP[<-(%v,%4x)<-%v #%v]",
		endpoint.raddr, uint16(endpoint.timestamp), cluster, topic)

	go endpoint.run(endpoint.ch)
	logging.Infof("%v started ...\n", endpoint.logPrefix)
	return endpoint, nil
}
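// exampleNewRouterEndpoint is a hypothetical caller, sketched only to show how
// the constructor above might be wired up. The cluster, topic, address and
// config-section name are placeholders; the section is assumed to carry the
// keys referenced above (keyChanSize, remoteBlock, bufferSize, bufferTimeout,
// harakiriTimeout, maxPayload).
func exampleNewRouterEndpoint(maxvbs int) (*RouterEndpoint, error) {
	config := c.SystemConfig.SectionConfig("projector.dataport.", true /*trim*/)
	return NewRouterEndpoint("cluster", "backfill", "localhost:9999", maxvbs, config)
}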