Example #1
func benchmarkLatency(b *testing.B, block int) {
	// Override the overlay configuration
	swapConfigs()
	defer swapConfigs()

	b.SetBytes(int64(block))
	key, _ := x509.ParsePKCS1PrivateKey(privKeyDer)

	// Generate a batch of messages to send around
	head := proto.Header{[]byte{0x99, 0x98, 0x97, 0x96}, []byte{0x00, 0x01}, []byte{0x02, 0x03}}
	msgs := make([]proto.Message, b.N)
	for i := 0; i < b.N; i++ {
		msgs[i].Head = head
		msgs[i].Data = make([]byte, block)
		io.ReadFull(rand.Reader, msgs[i].Data)
		msgs[i].Encrypt()
	}
	// Create the sender node
	send := pastry.New(appId, key, new(nopCallback))
	send.Boot(nil)
	defer send.Shutdown()

	// Create the receiver app to sequence messages and the associated overlay node
	recvApp := &sequencer{send, nil, msgs, b.N, make(chan struct{})}
	recv := pastry.New(appId, key, recvApp)
	recvApp.dest = recv.NodeID
	recv.Boot(nil)
	defer recv.Shutdown()

	// Reset timer and start message passing
	b.ResetTimer()
	recvApp.Deliver(nil, nil)
	<-recvApp.quit
	b.StopTimer()
}
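The sequencer callback assumed by benchmarkLatency is not shown in this example. Below is a minimal sketch of how it could look in the same test package, assuming the pastry Callback interface exposes Deliver(msg *proto.Message, key *big.Int) and Forward(msg *proto.Message, key *big.Int) bool, and that NodeID is a *big.Int; all of this is inferred from how the examples here use it, not confirmed by the source.

// Hypothetical sketch of the sequencer callback (not part of the source):
// each delivery triggers the next send, so the timed loop above measures
// per-message latency with one message in flight at a time.
type sequencer struct {
	over *pastry.Overlay // Overlay handle used to push the next message
	dest *big.Int        // Destination node identifier (assumed *big.Int)
	msgs []proto.Message // Pre-generated message batch
	left int             // Messages still to send (sends are sequential, so no lock)
	quit chan struct{}   // Closed once the whole batch has been delivered
}

func (s *sequencer) Deliver(msg *proto.Message, key *big.Int) {
	if s.left == 0 {
		close(s.quit)
		return
	}
	s.left--
	s.over.Send(s.dest, &s.msgs[s.left])
}

func (s *sequencer) Forward(msg *proto.Message, key *big.Int) bool {
	return true
}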
Example #2
func TestMaintenance(t *testing.T) {
	// Override the overlay configuration
	swapConfigs()
	defer swapConfigs()

	originals := 3
	additions := 2

	// Make sure there are enough ports to use
	olds := config.BootPorts
	defer func() { config.BootPorts = olds }()

	for i := 0; i < originals+additions; i++ {
		config.BootPorts = append(config.BootPorts, 65520+i)
	}
	// Parse encryption key
	key, _ := x509.ParsePKCS1PrivateKey(privKeyDer)

	// Start a handful of nodes and ensure a valid routing state
	nodes := []*pastry.Overlay{}
	for i := 0; i < originals; i++ {
		nodes = append(nodes, pastry.New(appId, key, new(nopCallback)))
		if _, err := nodes[i].Boot(nil); err != nil {
			t.Fatalf("failed to boot nodes: %v.", err)
		}
		defer nodes[i].Shutdown()
	}
	// Wait a while for state updates to propagate and check the routing table
	time.Sleep(100 * time.Millisecond)
	checkRoutes(t, nodes)

	// Start some additional nodes and ensure the routing state is still valid
	for i := 0; i < additions; i++ {
		nodes = append(nodes, pastry.New(appId, key, new(nopCallback)))
		if _, err := nodes[len(nodes)-1].Boot(nil); err != nil {
			t.Fatalf("failed to boot nodes: %v.", err)
		}
	}
	// Wait a while for state updates to propagate and check the routing table
	time.Sleep(100 * time.Millisecond)
	checkRoutes(t, nodes)

	// Terminate some nodes and ensure the routing state is still valid
	for i := 0; i < additions; i++ {
		nodes[originals+i].Shutdown()
	}
	nodes = nodes[:originals]

	// Wait a while for state updates to propagate and check the routing table
	time.Sleep(100 * time.Millisecond)
	checkRoutes(t, nodes)
}
Example #3
func benchmarkThroughput(b *testing.B, block int) {
	// Override the overlay configuration
	swapConfigs()
	defer swapConfigs()

	b.SetBytes(int64(block))
	key, _ := x509.ParsePKCS1PrivateKey(privKeyDer)

	// Generate a batch of messages to send around
	head := proto.Header{[]byte{0x99, 0x98, 0x97, 0x96}, []byte{0x00, 0x01}, []byte{0x02, 0x03}}
	msgs := make([]proto.Message, b.N)
	for i := 0; i < b.N; i++ {
		msgs[i].Head = head
		msgs[i].Data = make([]byte, block)
		io.ReadFull(rand.Reader, msgs[i].Data)
		msgs[i].Encrypt()
	}
	// Create two overlay nodes to communicate
	send := pastry.New(appId, key, new(nopCallback))
	send.Boot(nil)
	defer send.Shutdown()

	wait := &waiter{
		left: int32(b.N),
		quit: make(chan struct{}),
	}
	recv := pastry.New(appId, key, wait)
	recv.Boot(nil)
	defer recv.Shutdown()

	// Create the sender to push the messages
	sender := func() {
		for i := 0; i < len(msgs); i++ {
			send.Send(recv.NodeID, &msgs[i])
		}
	}

	// Reset timer and start message passing
	b.ResetTimer()
	go sender()
	<-wait.quit
	b.StopTimer()
}
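The waiter callback is not defined in this example either. A plausible sketch follows, assuming the same Callback interface as above and that deliveries can arrive concurrently (hence the atomic counter from sync/atomic); the field layout matches the keyed literal used in the benchmark.

// Hypothetical sketch of the waiter callback (not part of the source):
// counts down the expected number of deliveries and signals completion.
type waiter struct {
	left int32         // Deliveries still expected (decremented atomically)
	quit chan struct{} // Closed when the count reaches zero
}

func (w *waiter) Deliver(msg *proto.Message, key *big.Int) {
	if atomic.AddInt32(&w.left, -1) == 0 {
		close(w.quit)
	}
}

func (w *waiter) Forward(msg *proto.Message, key *big.Int) bool {
	return true
}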
Example #4
// New creates a new scribe overlay.
func New(overId string, key *rsa.PrivateKey, app Callback) *Overlay {
	// Create and initialize the overlay
	o := &Overlay{
		app:    app,
		topics: make(map[string]*topic.Topic),
		names:  make(map[string]string),
	}
	o.pastry = pastry.New(overId, key, o)
	o.heart = heart.New(config.ScribeBeatPeriod, config.ScribeKillCount, o)
	return o
}
Example #5
func TestHandshake(t *testing.T) {
	// Override the overlay configuration
	swapConfigs()
	defer swapConfigs()

	// Load the valid and invalid private keys
	key, _ := x509.ParsePKCS1PrivateKey(privKeyDer)
	bad, _ := x509.ParsePKCS1PrivateKey(privKeyDerBad)

	// Start first overlay node
	alice := pastry.New(appId, key, new(nopCallback))
	if _, err := alice.Boot(nil); err != nil {
		t.Fatalf("failed to boot alice: %v.", err)
	}
	defer func() {
		if err := alice.Shutdown(); err != nil {
			t.Fatalf("failed to shutdown alice: %v.", err)
		}
	}()
	// Start second overlay node
	bob := pastry.New(appId, key, new(nopCallback))
	if _, err := bob.Boot(nil); err != nil {
		t.Fatalf("failed to boot bob: %v.", err)
	}
	defer func() {
		if err := bob.Shutdown(); err != nil {
			t.Fatalf("failed to shutdown bob: %v.", err)
		}
	}()
	// Verify that they found each other
	if size := len(alice.LivePeers); size != 1 {
		t.Fatalf("invalid pool size for alice: have %v, want %v.", size, 1)
	} else if _, ok := alice.LivePeers[bob.NodeID.String()]; !ok {
		t.Fatalf("bob (%v) missing from the pool of alice: %v.", bob.NodeID, alice.LivePeers)
	}
	if size := len(bob.LivePeers); size != 1 {
		t.Fatalf("invalid pool size for bob: have %v, want %v.", size, 1)
	} else if _, ok := bob.LivePeers[alice.NodeID.String()]; !ok {
		t.Fatalf("alice (%v) missing from the pool of bob: %v.", alice.NodeID, bob.LivePeers)
	}

	// Start a second application
	eve := pastry.New(appIdBad, key, new(nopCallback))
	if _, err := eve.Boot(nil); err != nil {
		t.Fatalf("failed to boot eve: %v.", err)
	}
	// Ensure that eve hasn't been added (app filtering)
	if len(eve.LivePeers) != 0 {
		t.Fatalf("invalid pool contents for eve: %v.", eve.LivePeers)
	}
	if _, ok := alice.LivePeers[eve.NodeID.String()]; ok {
		t.Fatalf("eve (%v) found in the pool of alice: %v.", eve.NodeID, alice.LivePeers)
	}
	if _, ok := bob.LivePeers[eve.NodeID.String()]; ok {
		t.Fatalf("eve (%v) found in the pool of bob: %v.", eve.NodeID, bob.LivePeers)
	}
	if err := eve.Shutdown(); err != nil {
		t.Fatalf("failed to shutdown eve: %v.", err)
	}

	// Start a malicious node impersonating the app, but with an invalid key
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stderr)

	mallory := pastry.New(appId, bad, new(nopCallback))
	if _, err := mallory.Boot(nil); err != nil {
		t.Fatalf("failed to boot mallory: %v.", err)
	}
	defer func() {
		if err := mallory.Shutdown(); err != nil {
			t.Fatalf("failed to shutdown mallory: %v.", err)
		}
	}()
	// Ensure that mallory hasn't been added (invalid security key)
	if len(mallory.LivePeers) != 0 {
		t.Fatalf("invalid pool contents for mallory: %v.", mallory.LivePeers)
	}
	if _, ok := alice.LivePeers[mallory.NodeID.String()]; ok {
		t.Fatalf("mallory (%v) found in the pool of alice: %v.", mallory.NodeID, alice.LivePeers)
	}
	if _, ok := bob.LivePeers[mallory.NodeID.String()]; ok {
		t.Fatalf("mallory (%v) found in the pool of bob: %v.", mallory.NodeID, bob.LivePeers)
	}
}
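Most of these examples pass new(nopCallback) where no application logic is needed. A no-op implementation could look like the sketch below, under the same assumed Callback interface (Deliver/Forward signatures inferred from the other examples, not confirmed by the source).

// Hypothetical no-op callback (not part of the source): satisfies the
// assumed pastry Callback interface without reacting to any event.
type nopCallback struct{}

func (c *nopCallback) Deliver(msg *proto.Message, key *big.Int) {}

func (c *nopCallback) Forward(msg *proto.Message, key *big.Int) bool {
	return true
}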
Example #6
func TestRouting(t *testing.T) {
	// Override the overlay configuration
	swapConfigs()
	defer swapConfigs()

	originals := 4
	additions := 1

	// Make sure there are enough ports to use
	olds := config.BootPorts
	defer func() { config.BootPorts = olds }()
	for i := 0; i < originals+additions; i++ {
		config.BootPorts = append(config.BootPorts, 65500+i)
	}
	// Parse encryption key
	key, _ := x509.ParsePKCS1PrivateKey(privKeyDer)

	// Create the callbacks to listen for incoming messages
	apps := []*collector{}
	for i := 0; i < originals; i++ {
		apps = append(apps, &collector{delivs: []*proto.Message{}})
	}
	// Start a handful of nodes and ensure a valid routing state
	nodes := []*pastry.Overlay{}
	for i := 0; i < originals; i++ {
		nodes = append(nodes, pastry.New(appId, key, apps[i]))
		if _, err := nodes[i].Boot(nil); err != nil {
			t.Fatalf("failed to boot original node: %v.", err)
		}
		defer nodes[i].Shutdown()
	}
	time.Sleep(time.Second)

	// Create the messages to pass around
	meta := []byte{0x99, 0x98, 0x97, 0x96}
	msgs := make([][]proto.Message, originals)
	for i := 0; i < originals; i++ {
		msgs[i] = make([]proto.Message, originals)
		for j := 0; j < originals; j++ {
			msgs[i][j] = proto.Message{
				Head: proto.Header{
					Meta: meta,
				},
				Data: []byte(nodes[i].NodeID.String() + nodes[j].NodeID.String()),
			}
			msgs[i][j].Encrypt()
		}
	}
	// Check that each node can route to everybody
	for i, src := range nodes {
		for j, dst := range nodes {
			src.Send(dst.NodeID, &msgs[i][j])
			time.Sleep(250 * time.Millisecond) // Makes the delivery order verifiable
		}
	}
	// Sleep a bit and verify
	time.Sleep(time.Second)
	for i := 0; i < originals; i++ {
		apps[i].lock.RLock()
		if len(apps[i].delivs) != originals {
			t.Fatalf("app #%v: message count mismatch: have %v, want %v.", i, len(apps[i].delivs), originals)
		} else {
			for j := 0; j < originals; j++ {
				apps[j].lock.RLock()
				// Check the contents (reduced check, not every field is verified)
				if !bytes.Equal(meta, apps[j].delivs[i].Head.Meta.([]byte)) {
					t.Fatalf("send/receive meta mismatch: have %v, want %v.", apps[j].delivs[i].Head.Meta, meta)
				}
				if !bytes.Equal(msgs[i][j].Data, apps[j].delivs[i].Data) {
					t.Fatalf("send/receive data mismatch: have %v, want %v.", apps[j].delivs[i].Data, msgs[i][j].Data)
				}
				apps[j].lock.RUnlock()
			}
		}
		apps[i].lock.RUnlock()
	}
	// Clear out all the collectors
	for i := 0; i < len(apps); i++ {
		apps[i].lock.Lock()
		apps[i].delivs = apps[i].delivs[:0]
		apps[i].lock.Unlock()
	}
	// Start a load of parallel transfers
	sent := make([][]int, originals)
	quit := make([]chan chan struct{}, originals)
	for i := 0; i < originals; i++ {
		sent[i] = make([]int, originals)
		quit[i] = make(chan chan struct{})
	}
	for i := 0; i < originals; i++ {
		go func(idx int, quit chan chan struct{}) {
			for {
				// Send a message to all original nodes
				for j, dst := range nodes[0:originals] {
					// Create the message to pass around
					msg := &proto.Message{
						Head: proto.Header{
							Meta: meta,
						},
						Data: []byte(nodes[idx].NodeID.String() + dst.NodeID.String()),
					}
					msg.Encrypt()

					// Send the message and increment the counter
					nodes[idx].Send(dst.NodeID, msg)
					sent[idx][j]++
					select {
					case q := <-quit:
						q <- struct{}{}
						return
					default:
						// Repeat
					}
				}
			}
		}(i, quit[i])
	}
	// Boot a few more nodes and shut them down afterwards
	pend := new(sync.WaitGroup)
	for i := 0; i < additions; i++ {
		pend.Add(1)
		go func() {
			defer pend.Done()

			temp := pastry.New(appId, key, new(nopCallback))
			if _, err := temp.Boot(nil); err != nil {
				// Errorf instead of Fatalf: Fatalf must not be called from a non-test goroutine
				t.Errorf("failed to boot additional node: %v.", err)
				return
			}
			if err := temp.Shutdown(); err != nil {
				t.Errorf("failed to terminate additional node: %v.", err)
			}
		}()
	}
	pend.Wait()

	// Terminate all the senders
	for i := 0; i < len(quit); i++ {
		q := make(chan struct{})
		quit[i] <- q
		<-q
	}
	time.Sleep(time.Second)

	// Verify send/receive count
	for i := 0; i < originals; i++ {
		count := 0
		for j := 0; j < originals; j++ {
			count += sent[j][i]
		}
		apps[i].lock.RLock()
		if len(apps[i].delivs) != count {
			t.Fatalf("send/receive count mismatch: have %v, want %v.", len(apps[i].delivs), count)
		}
		apps[i].lock.RUnlock()
	}
}
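The collector callback used by TestRouting is likewise assumed rather than shown. A sketch that matches how the test reads it (a sync.RWMutex-guarded slice of delivered messages), under the same assumed Callback interface:

// Hypothetical sketch of the collector callback (not part of the source):
// records every delivered message behind a read/write lock so the test
// can inspect the batch afterwards.
type collector struct {
	lock   sync.RWMutex
	delivs []*proto.Message
}

func (c *collector) Deliver(msg *proto.Message, key *big.Int) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.delivs = append(c.delivs, msg)
}

func (c *collector) Forward(msg *proto.Message, key *big.Int) bool {
	return true
}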