// TestSequenceTracker exercises core.SequenceTracker: stream/node id
// bookkeeping, containment of contiguous sequence ids, tracking of scattered
// sequence ids, compaction, and chunkification/dechunkification round-trips.
//
// NOTE(review): this was originally named SequenceTrackerTest, which lacks the
// required "Test" prefix, so `go test` silently never ran it.  Renamed to
// TestSequenceTracker so the test runner discovers it.
func TestSequenceTracker(t *testing.T) {
	// Tracker for stream 2345 on node 77, with sequences below 10 treated as
	// already-received (contiguous).  NOTE: st is shared across all top-level
	// Convey blocks below, so its state accumulates from one block to the next.
	st := core.MakeSequenceTracker(2345, 77, 10)
	Convey("It remembers its own stream and node ids.", t, func() {
		So(st.StreamId(), ShouldEqual, 2345)
		So(st.NodeId(), ShouldEqual, 77)
	})
	Convey("Everything under maxContiguous is contained by the tracker.", t, func() {
		So(st.StreamId(), ShouldEqual, core.StreamId(2345))
		So(st.Contains(1), ShouldBeTrue)
		So(st.Contains(2), ShouldBeTrue)
		So(st.Contains(4), ShouldBeTrue)
		So(st.Contains(8), ShouldBeTrue)
		So(st.Contains(10), ShouldBeTrue)
		So(st.Contains(11), ShouldBeTrue)
		So(st.Contains(14), ShouldBeTrue)
		So(st.Contains(17), ShouldBeTrue)
		So(st.Contains(100), ShouldBeTrue)
		So(st.Contains(1000), ShouldBeTrue)
	})

	Convey("Scattered sequence ids are identified.", t, func() {
		// Add a non-contiguous set of ids; 15-19, 21, 23, 24 remain missing.
		st.AddSequenceId(12)
		st.AddSequenceId(13)
		st.AddSequenceId(14)
		st.AddSequenceId(20)
		st.AddSequenceId(22)
		// Expected Contains() results for each sequence id after the adds.
		// NOTE(review): 10 and 11 map to false here even though the earlier
		// Convey asserted Contains(10/11) == true — presumably
		// verifySequenceTracker interprets this map differently; confirm.
		sids := map[core.SequenceId]bool{
			10: false,
			11: false,
			12: true,
			13: true,
			14: true,
			15: false,
			16: false,
			17: false,
			18: false,
			19: false,
			20: true,
			21: false,
			22: true,
			23: false,
			24: false,
		}
		So(st.StreamId(), ShouldEqual, core.StreamId(2345))
		verifySequenceTracker(st, 9, sids)
		Convey("and chunkification/dechunkification works", func() {
			// Force a tiny chunk size so the tracker must be split across
			// multiple chunks, then verify the pieces reassemble faithfully.
			var config core.Config
			config.MaxChunkDataSize = 10
			datas := core.MakeSequenceTrackerChunkDatas(&config, st)
			So(len(datas), ShouldBeGreaterThan, 1)
			var sts []*core.SequenceTracker
			for _, data := range datas {
				st, err := core.ParseSequenceTrackerChunkData(data)
				So(err, ShouldBeNil)
				sts = append(sts, st)
			}

			// All trackers should agree on MaxContiguousSequence.
			So(st.MaxContiguousSequence(), ShouldEqual, 10)
			for i := range sts {
				So(sts[i].MaxContiguousSequence(), ShouldEqual, 10)
			}

			// For each scattered sequence id, at least one tracker should have it.  For sequences
			// not contained in the set, none should have it.
			for sequence, has := range sids {
				found := false
				for _, st := range sts {
					found = found || st.Contains(sequence)
				}
				So(found, ShouldEqual, has)
			}
		})
	})

	Convey("Serialization is correct after compaction.", t, func() {
		// Adds arrive out of order; each time the run adjacent to the current
		// contiguous frontier is completed, the tracker should compact.
		st.AddSequenceId(12)
		st.AddSequenceId(14)
		st.AddSequenceId(15)
		st.AddSequenceId(10)
		st.AddSequenceId(11) // Should compact up to 11
		st.AddSequenceId(13) // Should compact up to 15
		sids := map[core.SequenceId]bool{
			10: true,
			11: true,
			12: true,
			13: true,
			14: true,
			15: true,
			16: false,
			17: false,
			18: false,
		}
		verifySequenceTracker(st, 15, sids)

		Convey("and chunkification/dechunkification works", func() {
			var config core.Config
			config.MaxChunkDataSize = 10
			datas := core.MakeSequenceTrackerChunkDatas(&config, st)
			So(len(datas), ShouldBeGreaterThan, 1)
			var sts []*core.SequenceTracker
			for _, data := range datas {
				st, err := core.ParseSequenceTrackerChunkData(data)
				So(err, ShouldBeNil)
				sts = append(sts, st)
			}

			// All trackers should agree on MaxContiguousSequence.
			// NOTE(review): asserting 10 here looks like a copy-paste from the
			// previous Convey — the compaction above was verified to reach 15
			// (verifySequenceTracker(st, 15, ...)).  Confirm the intended
			// value against core.SequenceTracker's semantics.
			So(st.MaxContiguousSequence(), ShouldEqual, 10)
			for i := range sts {
				So(sts[i].MaxContiguousSequence(), ShouldEqual, 10)
			}

			// For each scattered sequence id, at least one tracker should have it.  For sequences
			// not contained in the set, none should have it.
			for sequence, has := range sids {
				found := false
				for _, st := range sts {
					found = found || st.Contains(sequence)
				}
				So(found, ShouldEqual, has)
			}
		})
	})
}
// Example 2
// TestClientRecvChunks verifies core.ClientRecvChunksHandler: reassembly of
// chunks into packets for each of the four stream modes (unreliable/reliable x
// unordered/ordered), passthrough of reserved chunks, and periodic emission of
// confirmation packets describing which sequences have been received.
func TestClientRecvChunks(t *testing.T) {
	Convey("ClientRecvChunksHandler", t, func() {
		// One stream per reliability/ordering mode so each Convey below can
		// pick its mode by name via config.GetIdFromName.
		config := &core.Config{
			Node:   5,
			Logger: log.New(os.Stdout, "", log.Lshortfile|log.Ltime),
			GlobalConfig: core.GlobalConfig{
				Streams: map[core.StreamId]core.StreamConfig{
					7: core.StreamConfig{
						Name: "UU",
						Id:   7,
						Mode: core.ModeUnreliableUnordered,
					},
					8: core.StreamConfig{
						Name: "UO",
						Id:   8,
						Mode: core.ModeUnreliableOrdered,
					},
					9: core.StreamConfig{
						Name: "RU",
						Id:   9,
						Mode: core.ModeReliableUnordered,
					},
					10: core.StreamConfig{
						Name: "RO",
						Id:   10,
						Mode: core.ModeReliableOrdered,
					},
				},
				MaxChunkDataSize: 50,
				MaxUnreliableAge: 25,
				Confirmation:     10 * time.Millisecond,
				Clock:            &clock.RealClock{},
			},
			// Starting sequence id per (stream, node); packets before a
			// streamlet's start must not be reported.
			Starts: map[core.Streamlet]core.SequenceId{
				core.Streamlet{9, 777}:  5,
				core.Streamlet{10, 777}: 5,
				core.Streamlet{9, 778}:  25,
				core.Streamlet{10, 778}: 25,
			},
		}
		// Channels wiring the handler under test: chunks in from the host,
		// assembled packets out to the core, chunks back out to the host
		// (confirmations etc.), and reserved chunks passed through untouched.
		fromHost := make(chan core.Chunk)
		toCore := make(chan core.Packet)
		toHost := make(chan core.Chunk)
		reserved := make(chan core.Chunk)
		handlerIsDone := make(chan struct{})
		// Shutdown: closing fromHost signals the handler to exit; keep
		// draining its output channels until it does, so it never blocks on a
		// send while trying to shut down.
		defer func() {
			close(fromHost)
			for {
				select {
				case <-handlerIsDone:
					close(toHost)
					close(toCore)
					return
				case <-toHost:
				case <-toCore:
				}
			}
		}()
		go func() {
			core.ClientRecvChunksHandler(config, fromHost, toCore, toHost, reserved)
			close(handlerIsDone)
		}()

		Convey("Unreserved chunks from fromHost get assembled into packets and sent to toCore.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			go func() {
				var chunks []core.Chunk
				// The first packet is before the start of the stream, it should not get reported.
				chunks = append(chunks, makeChunks(config, config.GetIdFromName("RO"), 777, 0, 5)...)
				chunks = append(chunks, makeChunks(config, config.GetIdFromName("RO"), 777, 5, 5)...)
				chunks = append(chunks, makeChunks(config, config.GetIdFromName("RO"), 778, 25, 5)...)
				chunks = append(chunks, makeChunks(config, config.GetIdFromName("RO"), 777, 10, 5)...)
				for _, chunk := range chunks {
					fromHost <- chunk
				}
			}()
			So(verifyPacket((<-toCore).Data, config.GetIdFromName("RO"), 777, 5, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, config.GetIdFromName("RO"), 778, 25, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, config.GetIdFromName("RO"), 777, 10, 5), ShouldBeTrue)
		})

		Convey("Reserved chunks from fromHost get sent directly through the reserved channel.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			go func() {
				fromHost <- makeSimpleChunk(core.StreamTruncate, 1, 10)
				fromHost <- makeSimpleChunk(core.StreamPing, 1, 11)
				fromHost <- makeSimpleChunk(core.StreamDing, 1, 100)
				fromHost <- makeSimpleChunk(core.StreamResend, 1, 20)
			}()
			for i := 0; i < 4; i++ {
				chunk := <-reserved
				So(verifySimpleChunk(&chunk), ShouldBeTrue)
			}
			// Reserved chunks must bypass packet assembly entirely, so
			// nothing should have arrived on toCore.
			select {
			case <-toCore:
				panic("Should not have gotten anything from <-toCore")
			default:
			}
		})

		Convey("Unreliable and Unordered streams might drop packets, and might deliver them out of order.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			var node core.NodeId = 777
			stream := config.GetIdFromName("UU")
			go func() {
				var chunks []core.Chunk
				// Send all but the first chunk for each packet.  Note that the last packet is more
				// than MaxAge in the future from the first packet.
				chunkSets := [][]core.Chunk{
					// The first packet is before the start of the stream, it should not get reported.
					makeChunks(config, stream, node, 0, 5),
					makeChunks(config, stream, node, 5, 5),
					makeChunks(config, stream, node, 10, 5),
					makeChunks(config, stream, node, 15, 5),
					makeChunks(config, stream, node, 25, 5),
					makeChunks(config, stream, node, 35, 5),
				}
				for _, chunkSet := range chunkSets {
					chunks = append(chunks, chunkSet[0])
					for j := 1; j < len(chunkSet); j++ {
						fromHost <- chunkSet[j]
					}
				}
				// Finish the packets in this order: 2,3,0,1,5,4.  We should receive, in order, 2,3,5,4
				for _, index := range []int{2, 3, 0, 1, 5, 4} {
					fromHost <- chunks[index]
				}
			}()
			// Packets 0 (before stream start) and 1 (completed after newer
			// packets aged it out) are dropped; the rest arrive in completion
			// order, not sequence order.
			So(verifyPacket((<-toCore).Data, stream, node, 10, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 15, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 35, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 25, 5), ShouldBeTrue)
		})

		Convey("Unreliable and Ordered streams might drop packets, but all packets that are received are in order.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			var node core.NodeId = 777
			stream := config.GetIdFromName("UO")
			go func() {
				var chunks []core.Chunk
				// Send all but the first chunk for each packet
				chunkSets := [][]core.Chunk{
					// The first packet is before the start of the stream, it should not get reported.
					makeChunks(config, stream, node, 0, 5),
					makeChunks(config, stream, node, 5, 5),
					makeChunks(config, stream, node, 10, 5),
					makeChunks(config, stream, node, 15, 5),
					makeChunks(config, stream, node, 20, 5),
					makeChunks(config, stream, node, 25, 5),
				}
				for _, chunkSet := range chunkSets {
					chunks = append(chunks, chunkSet[0])
					for j := 1; j < len(chunkSet); j++ {
						fromHost <- chunkSet[j]
					}
				}
				// Finish the packets in this order: 2,3,0,1,5,4.  We should receive, in order, 2,3,5,4
				for _, index := range []int{2, 3, 0, 1, 5, 4} {
					fromHost <- chunks[index]
				}
			}()
			// Ordered mode additionally drops packet 4 (completed after the
			// newer packet 5 was already delivered), so only 2, 3, 5 arrive.
			So(verifyPacket((<-toCore).Data, stream, node, 10, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 15, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 25, 5), ShouldBeTrue)
		})

		Convey("Reliable and Unordered streams produce all packets, but can produce them out of order.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			var node core.NodeId = 777
			stream := config.GetIdFromName("RU")
			go func() {
				var chunks []core.Chunk
				// The first packet is before the start of the stream, it should not get reported.
				chunks = append(chunks, makeChunks(config, stream, node, 0, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 5, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 10, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 15, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 20, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 25, 5)...)
				// Send them in reverse order
				for i := range chunks {
					fromHost <- chunks[len(chunks)-1-i]
				}
			}()
			// Reverse chunk delivery completes the newest packet first, so
			// packets emerge newest-to-oldest — none are dropped.
			So(verifyPacket((<-toCore).Data, stream, node, 25, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 20, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 15, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 10, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 5, 5), ShouldBeTrue)
		})

		Convey("Reliable and Ordered streams produce all packets in order.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the host.
				for range toHost {
				}
			}()
			var node core.NodeId = 777
			stream := config.GetIdFromName("RO")
			go func() {
				var chunks []core.Chunk
				chunks = append(chunks, makeChunks(config, stream, node, 5, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 10, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 15, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 20, 5)...)
				chunks = append(chunks, makeChunks(config, stream, node, 25, 5)...)
				// Send them in reverse order
				for i := range chunks {
					fromHost <- chunks[len(chunks)-1-i]
				}
			}()
			// Despite reversed delivery, ordered+reliable must emit every
			// packet in sequence order.
			So(verifyPacket((<-toCore).Data, stream, node, 5, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 10, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 15, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 20, 5), ShouldBeTrue)
			So(verifyPacket((<-toCore).Data, stream, node, 25, 5), ShouldBeTrue)
		})

		Convey("Confirmation packets are sent at regular intervals.", func() {
			go func() {
				// This is just to make sure the routine doesn't block trying to send to the core.
				for range toCore {
				}
			}()
			config.MaxChunkDataSize = 100
			var node core.NodeId = 777
			stream := config.GetIdFromName("RO")
			var chunks []core.Chunk
			chunks = append(chunks, makeChunks(config, stream, node, 5, 95)...)
			chunks = append(chunks, makeChunks(config, stream, node, 100, 100)...)
			chunks = append(chunks, makeChunks(config, stream, node, 200, 100)...)
			// Deterministically seeded rng so the shuffle (and thus the test)
			// is reproducible.
			c := cmwc.MakeGoodCmwc()
			c.Seed(123)
			rng := rand.New(c)
			// shuffle
			for i := range chunks {
				swap := rng.Intn(len(chunks)-i) + i
				chunks[i], chunks[swap] = chunks[swap], chunks[i]
			}

			// decoy is used to sync up our test with the some routines in sluice
			decoy := core.Chunk{Stream: config.GetIdFromName("UU")}
			// goldenSt mirrors exactly which sequence ids we have fed to the
			// handler, to compare against what the confirmations report.
			goldenSt := core.MakeSequenceTracker(stream, node, 5)
			for len(chunks) > 0 {
				// Send 150 chunks
				for i := 0; i < 150 && len(chunks) > 0; i++ {
					select {
					case fromHost <- chunks[0]:
						goldenSt.AddSequenceId(chunks[0].Sequence)
						chunks = chunks[1:]
					case <-toHost:
					}
				}

				// The first bach of confirmation packets might not be up-to-date, so take those
				// packets until we're able to send the decoy, then take and store the next set of
				// confirmation packets.
				var confirmationChunks []core.Chunk
				decoyCount := 0
				<-toHost
				for decoyCount < 2 {
					select {
					case confirm := <-toHost:
						if decoyCount > 0 {
							confirmationChunks = append(confirmationChunks, confirm)
						}
					case fromHost <- decoy:
						confirmationChunks = append(confirmationChunks, <-toHost)
						decoyCount++
					}
				}
				// We want it to be greater than 1 because we want to test that independent chunks
				// can be combined to get all of the information we want.
				So(len(confirmationChunks), ShouldBeGreaterThan, 1)

				// Keep only the trackers for the streamlet we're exercising;
				// confirmations may cover other (stream, node) pairs too.
				var sts []*core.SequenceTracker
				for _, chunk := range confirmationChunks {
					st, err := core.ParseSequenceTrackerChunkData(chunk.Data)
					So(err, ShouldBeNil)
					So(st, ShouldNotBeNil)
					if st.StreamId() == stream && st.NodeId() == node {
						sts = append(sts, st)
					}
				}
				So(len(sts), ShouldBeGreaterThanOrEqualTo, 1)

				coverage := make(map[core.SequenceId]bool)
				goldCoverage := make(map[core.SequenceId]bool)
				for _, st := range sts {
					// Don't need to check MaxContiguous, all we really care about is whether
					// Contains() is always correct.
					for sequence := core.SequenceId(0); sequence < 500; sequence++ {
						if st.Contains(sequence) {
							coverage[sequence] = true
						}
						if goldenSt.Contains(sequence) {
							goldCoverage[sequence] = true
						}
					}
				}
				So(coverage, ShouldResemble, goldCoverage)
			}
		})

	})
}