Example 1
func TestDonateHard(t *testing.T) {
	//common.InitDefaultLogging(true)
	var (
		start                = ip("10.0.1.0")
		size  address.Offset = 48
	)

	// Fill a fresh space
	spaceset := makeSpace(start, size)
	for i := address.Offset(0); i < size; i++ {
		ok, _ := spaceset.Allocate(address.NewRange(start, size))
		require.True(t, ok, "Failed to get IP!")
	}

	require.Equal(t, address.Count(0), spaceset.NumFreeAddresses())

	// Now free all but the last address
	// this will force us to split the free list
	for i := address.Offset(0); i < size-1; i++ {
		require.NoError(t, spaceset.Free(address.Add(start, i)))
	}

	// Now split
	newRange, ok := spaceset.Donate(address.NewRange(start, size))
	require.True(t, ok, "GiveUpSpace result")
	require.Equal(t, address.NewRange(ip("10.0.1.16"), 16), newRange, "Wrong space")
	require.Equal(t, address.Count(31), spaceset.NumFreeAddresses())

	// Space set should now have three ranges: one owned and two free
	expected := &Space{
		ours: add(nil, ip("10.0.1.47"), ip("10.0.1.48")),
		free: add(add(nil, ip("10.0.1.0"), ip("10.0.1.16")), ip("10.0.1.32"), ip("10.0.1.47")),
	}
	require.Equal(t, expected, spaceset)
}
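These tests rely on helpers such as ip and makeSpace that the snippets do not show. Below is a minimal sketch of what they might look like, inferred from how they are called here and from the New/Add/address.ParseIP calls in Example 17; the real helpers in the test package may differ.

// Hypothetical test helpers, inferred from usage in these examples;
// not the project's actual code.
func ip(s string) address.Address {
	addr, _ := address.ParseIP(s) // tests pass valid literals, so the error is ignored
	return addr
}

func makeSpace(start address.Address, size address.Offset) *Space {
	s := New()
	s.Add(start, size) // one contiguous free range of the requested size
	return s
}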
Example 2
// Returns the distance between two tokens on this ring, dealing
// with ranges which cross the origin
func (r *Ring) distance(start, end address.Address) address.Offset {
	if end > start {
		return address.Offset(end - start)
	}

	return address.Offset((r.End - start) + (end - r.Start))
}
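As an illustration of the two branches, here is a sketch assuming a ring whose Start and End fields cover [10, 20); illustrative only, since the Ring type has more fields than the two used here.

// Illustrative only: assumes a Ring literal with just Start and End is
// enough to exercise distance.
func exampleDistance() {
	r := &Ring{Start: address.Address(10), End: address.Address(20)}
	_ = r.distance(12, 18) // same side of the origin: 18 - 12 = 6
	_ = r.distance(18, 12) // crosses the origin: (20 - 18) + (12 - 10) = 4
}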
Example 3
// ClaimForPeers claims the entire ring for the array of peers passed
// in.  Only works for empty rings.
func (r *Ring) ClaimForPeers(peers []mesh.PeerName) {
	common.Assert(r.Empty())
	defer r.assertInvariants()
	defer r.updateExportedVariables()

	totalSize := r.distance(r.Start, r.End)
	share := totalSize/address.Offset(len(peers)) + 1
	remainder := totalSize % address.Offset(len(peers))
	pos := r.Start

	for i, peer := range peers {
		if address.Offset(i) == remainder {
			share--
			if share == 0 {
				break
			}
		}

		if e, found := r.Entries.get(pos); found {
			e.update(peer, share)
		} else {
			r.Entries.insert(entry{Token: pos, Peer: peer, Free: share})
		}

		pos += address.Address(share)
	}

	common.Assert(pos == r.End)

	r.Seeds = peers
}
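The share/remainder arithmetic gives each of the first remainder peers one extra address. A plain-integer sketch (not the Ring type) of the same loop, splitting 10 addresses across 3 peers:

// Sketch of the division logic above with plain ints: 10 addresses, 3 peers.
func shareExample() []int {
	totalSize, peers := 10, 3
	share := totalSize/peers + 1   // 4
	remainder := totalSize % peers // 1
	var sizes []int
	for i := 0; i < peers; i++ {
		if i == remainder {
			share-- // after the first 'remainder' peers, drop back to the base share
			if share == 0 {
				break
			}
		}
		sizes = append(sizes, share)
	}
	return sizes // [4 3 3], which sums to totalSize, matching the pos == r.End assertion
}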
Example 4
func TestDonateSimple(t *testing.T) {
	const (
		testAddr1 = "10.0.1.0"
		testAddr2 = "10.0.1.32"
		size      = 48
	)

	var (
		ipAddr1 = ip(testAddr1)
	)

	ps1 := makeSpace(ipAddr1, size)

	// A fully free space set should split in two and give up the second half
	r, ok := ps1.Donate(address.NewRange(ip(testAddr1), size))
	numGivenUp := r.Size()
	require.True(t, ok, "Donate result")
	require.Equal(t, "10.0.1.24", r.Start.String(), "Invalid start")
	require.Equal(t, address.Offset(size/2), numGivenUp)
	require.Equal(t, address.Offset(size/2), ps1.NumFreeAddresses())

	// Now check we can give the rest up.
	count := 0 // count to avoid infinite loop
	for ; count < 1000; count++ {
		r, ok := ps1.Donate(address.NewRange(ip(testAddr1), size))
		if !ok {
			break
		}
		numGivenUp += r.Size()
	}
	require.Equal(t, address.Offset(0), ps1.NumFreeAddresses())
	require.Equal(t, address.Offset(size), numGivenUp)
}
Example 5
func TestSpaceFree(t *testing.T) {
	const (
		testAddr1   = "10.0.3.4"
		testAddrx   = "10.0.3.19"
		testAddry   = "10.0.9.19"
		containerID = "deadbeef"
	)

	entireRange := address.NewRange(ip(testAddr1), 20)
	space := makeSpace(ip(testAddr1), 20)

	// Check we are prepared to give up the entire space
	r := space.biggestFreeRange(entireRange)
	require.True(t, r.Start == ip(testAddr1) && r.Size() == 20, "Wrong space")

	for i := 0; i < 20; i++ {
		ok, _ := space.Allocate(entireRange)
		require.True(t, ok, "Failed to get address")
	}

	// Check we are full
	ok, _ := space.Allocate(entireRange)
	require.True(t, !ok, "Should have failed to get address")
	r, ok = space.Donate(entireRange)
	require.True(t, r.Size() == 0, "Wrong space")

	// Free in the middle
	require.NoError(t, space.Free(ip("10.0.3.13")))
	r = space.biggestFreeRange(entireRange)
	require.True(t, r.Start == ip("10.0.3.13") && r.Size() == 1, "Wrong space")

	// Free one at the end
	require.NoError(t, space.Free(ip("10.0.3.23")))
	r = space.biggestFreeRange(entireRange)
	require.True(t, r.Start == ip("10.0.3.23") && r.Size() == 1, "Wrong space")

	// Now free a few at the end
	require.NoError(t, space.Free(ip("10.0.3.22")))
	require.NoError(t, space.Free(ip("10.0.3.21")))

	require.Equal(t, address.Offset(4), space.NumFreeAddresses())

	// Now get the biggest free space; should be 3.21
	r = space.biggestFreeRange(entireRange)
	require.True(t, r.Start == ip("10.0.3.21") && r.Size() == 3, "Wrong space")

	// Now free a few in the middle
	require.NoError(t, space.Free(ip("10.0.3.12")))
	require.NoError(t, space.Free(ip("10.0.3.11")))
	require.NoError(t, space.Free(ip("10.0.3.10")))

	require.Equal(t, address.Offset(7), space.NumFreeAddresses())

	// Now get the biggest free space; should be 3.10
	r = space.biggestFreeRange(entireRange)
	require.True(t, r.Start == ip("10.0.3.10") && r.Size() == 4, "Wrong space")

	require.Equal(t, []address.Range{{Start: ip("10.0.3.4"), End: ip("10.0.3.24")}}, space.OwnedRanges())
}
Example 6
File: ring.go Project: n054/weave
// subdivide subdivides the [from,to) CIDR for the given peers into
// CIDR-aligned subranges.
func (r *Ring) subdivide(from, to address.Address, peers []mesh.PeerName) {
	share := address.Length(to, from)
	if share == 0 {
		return
	}
	if share == 1 || len(peers) == 1 {
		r.Entries.insert(entry{Token: from, Peer: peers[0], Free: share})
		return
	}
	mid := address.Add(from, address.Offset(share/2))
	r.subdivide(from, mid, peers[:len(peers)/2])
	r.subdivide(address.Add(mid, address.Offset(share%2)), to, peers[len(peers)/2:])
}
Example 7
func TestAllocFree(t *testing.T) {
	const (
		container1 = "abcdef"
		container2 = "baddf00d"
		container3 = "b01df00d"
		universe   = "10.0.3.0/26"
		subnet1    = "10.0.3.0/28"
		subnet2    = "10.0.3.32/28"
		testAddr1  = "10.0.3.1"
		testAddr2  = "10.0.3.33"
		spaceSize  = 62 // 64 IP addresses in /26, minus .0 and .63
	)

	alloc, subnet := makeAllocatorWithMockGossip(t, "01:00:00:01:00:00", universe, 1)
	defer alloc.Stop()
	_, cidr1, _ := address.ParseCIDR(subnet1)
	_, cidr2, _ := address.ParseCIDR(subnet2)

	alloc.claimRingForTesting()
	addr1, err := alloc.Allocate(container1, cidr1.HostRange(), returnFalse)
	require.NoError(t, err)
	require.Equal(t, testAddr1, addr1.String(), "address")

	addr2, err := alloc.Allocate(container1, cidr2.HostRange(), returnFalse)
	require.NoError(t, err)
	require.Equal(t, testAddr2, addr2.String(), "address")

	// Ask for another address for a different container and check it's different
	addr1b, _ := alloc.Allocate(container2, cidr1.HostRange(), returnFalse)
	if addr1b.String() == testAddr1 {
		t.Fatalf("Expected different address but got %s", addr1b.String())
	}

	// Ask for the first container again and we should get the same addresses again
	addr1a, _ := alloc.Allocate(container1, cidr1.HostRange(), returnFalse)
	require.Equal(t, testAddr1, addr1a.String(), "address")
	addr2a, _ := alloc.Allocate(container1, cidr2.HostRange(), returnFalse)
	require.Equal(t, testAddr2, addr2a.String(), "address")

	// Now delete the first container, and we should get its addresses back
	require.NoError(t, alloc.Delete(container1))
	addr3, _ := alloc.Allocate(container3, cidr1.HostRange(), returnFalse)
	require.Equal(t, testAddr1, addr3.String(), "address")
	addr4, _ := alloc.Allocate(container3, cidr2.HostRange(), returnFalse)
	require.Equal(t, testAddr2, addr4.String(), "address")

	alloc.ContainerDied(container2)

	// Resurrect
	addr1c, err := alloc.Allocate(container2, cidr1.HostRange(), returnFalse)
	require.NoError(t, err)
	require.Equal(t, addr1b, addr1c, "address")

	alloc.ContainerDied(container3)
	alloc.Encode() // sync up
	// Move the clock forward and clear out the dead container
	alloc.actionChan <- func() { alloc.now = func() time.Time { return time.Now().Add(containerDiedTimeout * 2) } }
	alloc.actionChan <- func() { alloc.removeDeadContainers() }
	require.Equal(t, address.Offset(spaceSize-1), alloc.NumFreeAddresses(subnet))
}
Example 8
// Helper to avoid writing 'NumFreeAddressesInRange(start, end)'
// dozens of times in tests
func (s *Space) NumFreeAddresses() address.Offset {
	res := address.Offset(0)
	for i := 0; i < len(s.free); i += 2 {
		res += address.Subtract(s.free[i+1], s.free[i])
	}
	return res
}
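The loop above treats free as a flat slice of alternating range boundaries (start, end, start, end, ...), the same representation built up by add and subtract in Example 17. A small sketch, assuming the Space fields shown in Example 1:

// Illustrative only: two free ranges, [100,200) and [700,800), stored as
// alternating boundaries in the free slice.
func exampleFreeCount() address.Offset {
	s := &Space{free: []address.Address{100, 200, 700, 800}}
	return s.NumFreeAddresses() // (200-100) + (800-700) = 200
}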
Example 9
func TestTransfer(t *testing.T) {
	const (
		cidr = "10.0.1.7/22"
	)
	allocs, router, subnet := makeNetworkOfAllocators(3, cidr)
	alloc1 := allocs[0]
	alloc2 := allocs[1]
	alloc3 := allocs[2] // This will be 'master' and get the first range

	_, err := alloc2.Allocate("foo", subnet, nil)
	require.True(t, err == nil, "Failed to get address")

	_, err = alloc3.Allocate("bar", subnet, nil)
	require.True(t, err == nil, "Failed to get address")

	router.GossipBroadcast(alloc2.Gossip())
	router.Flush()
	router.GossipBroadcast(alloc3.Gossip())
	router.Flush()
	router.RemovePeer(alloc2.ourName)
	router.RemovePeer(alloc3.ourName)
	alloc2.Stop()
	alloc3.Stop()
	router.Flush()
	require.NoError(t, alloc1.AdminTakeoverRanges(alloc2.ourName.String()))
	require.NoError(t, alloc1.AdminTakeoverRanges(alloc3.ourName.String()))
	router.Flush()

	require.Equal(t, address.Offset(1022), alloc1.NumFreeAddresses(subnet))

	_, err = alloc1.Allocate("foo", subnet, nil)
	require.True(t, err == nil, "Failed to get address")
	alloc1.Stop()
}
Example 10
func (s *Space) NumFreeAddressesInRange(r address.Range) address.Offset {
	res := address.Offset(0)
	s.walkFree(r, func(chunk address.Range) bool {
		res += chunk.Size()
		return false
	})
	return res
}
Example 11
func (s *Space) biggestFreeRange(r address.Range) (biggest address.Range) {
	biggestSize := address.Offset(0)
	s.walkFree(r, func(chunk address.Range) bool {
		if size := chunk.Size(); size >= biggestSize {
			biggest = chunk
			biggestSize = size
		}
		return false
	})
	return
}
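walkFree itself is not shown in these snippets. Judging from its two callers (this function and NumFreeAddressesInRange above), it appears to visit each free chunk that overlaps the requested range, clipped to that range, and to stop early when the callback returns true. A guess at its shape under those assumptions; the real implementation may differ.

// Hypothetical sketch of walkFree, inferred from its callers; not the actual code.
func (s *Space) walkFreeSketch(r address.Range, f func(address.Range) bool) {
	for i := 0; i < len(s.free); i += 2 {
		chunk := address.Range{Start: s.free[i], End: s.free[i+1]}
		if chunk.Start < r.Start { // clip the chunk to the requested range
			chunk.Start = r.Start
		}
		if chunk.End > r.End {
			chunk.End = r.End
		}
		if chunk.Start >= chunk.End {
			continue // no overlap with r
		}
		if f(chunk) { // a callback returning true stops the walk early
			return
		}
	}
}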
Example 12
func TestSpaceAllocate(t *testing.T) {
	const (
		testAddr1   = "10.0.3.4"
		testAddr2   = "10.0.3.5"
		testAddrx   = "10.0.3.19"
		testAddry   = "10.0.9.19"
		containerID = "deadbeef"
		size        = 20
	)
	var (
		start = ip(testAddr1)
	)

	space1 := makeSpace(start, size)
	require.Equal(t, address.Offset(20), space1.NumFreeAddresses())
	space1.assertInvariants()

	_, addr1 := space1.Allocate(address.NewRange(start, size))
	require.Equal(t, testAddr1, addr1.String(), "address")
	require.Equal(t, address.Offset(19), space1.NumFreeAddresses())
	space1.assertInvariants()

	_, addr2 := space1.Allocate(address.NewRange(start, size))
	require.False(t, addr2.String() == testAddr1, "address")
	require.Equal(t, address.Offset(18), space1.NumFreeAddresses())
	require.Equal(t, address.Offset(13), space1.NumFreeAddressesInRange(address.Range{Start: ip(testAddr1), End: ip(testAddrx)}))
	require.Equal(t, address.Offset(18), space1.NumFreeAddressesInRange(address.Range{Start: ip(testAddr1), End: ip(testAddry)}))
	space1.assertInvariants()

	space1.Free(addr2)
	space1.assertInvariants()

	wt.AssertErrorInterface(t, (*error)(nil), space1.Free(addr2), "double free")
	wt.AssertErrorInterface(t, (*error)(nil), space1.Free(ip(testAddrx)), "address not allocated")
	wt.AssertErrorInterface(t, (*error)(nil), space1.Free(ip(testAddry)), "free out of range")

	space1.assertInvariants()
}
Example 13
func TestAllocFree(t *testing.T) {
	const (
		container1 = "abcdef"
		container2 = "baddf00d"
		container3 = "b01df00d"
		universe   = "10.0.3.0/26"
		subnet1    = "10.0.3.0/28"
		subnet2    = "10.0.3.32/28"
		testAddr1  = "10.0.3.1"
		testAddr2  = "10.0.3.33"
		spaceSize  = 62 // 64 IP addresses in /26, minus .0 and .63
	)

	alloc, subnet := makeAllocatorWithMockGossip(t, "01:00:00:01:00:00", universe, 1)
	defer alloc.Stop()
	_, cidr1, _ := address.ParseCIDR(subnet1)
	_, cidr2, _ := address.ParseCIDR(subnet2)

	alloc.claimRingForTesting()
	addr1, err := alloc.Allocate(container1, cidr1.HostRange(), nil)
	require.NoError(t, err)
	require.Equal(t, testAddr1, addr1.String(), "address")

	addr2, err := alloc.Allocate(container1, cidr2.HostRange(), nil)
	require.NoError(t, err)
	require.Equal(t, testAddr2, addr2.String(), "address")

	// Ask for another address for a different container and check it's different
	addr1b, _ := alloc.Allocate(container2, cidr1.HostRange(), nil)
	if addr1b.String() == testAddr1 {
		t.Fatalf("Expected different address but got %s", addr1b.String())
	}

	// Ask for the first container again and we should get the same addresses again
	addr1a, _ := alloc.Allocate(container1, cidr1.HostRange(), nil)
	require.Equal(t, testAddr1, addr1a.String(), "address")
	addr2a, _ := alloc.Allocate(container1, cidr2.HostRange(), nil)
	require.Equal(t, testAddr2, addr2a.String(), "address")

	// Now delete the first container, and we should get its addresses back
	require.NoError(t, alloc.Delete(container1))
	addr3, _ := alloc.Allocate(container3, cidr1.HostRange(), nil)
	require.Equal(t, testAddr1, addr3.String(), "address")
	addr4, _ := alloc.Allocate(container3, cidr2.HostRange(), nil)
	require.Equal(t, testAddr2, addr4.String(), "address")

	alloc.ContainerDied(container2)
	alloc.ContainerDied(container3)
	require.Equal(t, address.Offset(spaceSize), alloc.NumFreeAddresses(subnet))
}
Example 14
File: ring.go Project: n054/weave
// ClaimForPeers claims the entire ring for the array of peers passed
// in.  Only works for empty rings. Each claimed range is CIDR-aligned.
func (r *Ring) ClaimForPeers(peers []mesh.PeerName) {
	common.Assert(r.Empty())

	defer r.trackUpdates()()
	defer r.assertInvariants()
	defer r.updateExportedVariables()
	defer func() {
		e := r.Entries[len(r.Entries)-1]
		common.Assert(address.Add(e.Token, address.Offset(e.Free)) == r.End)
	}()

	r.subdivide(r.Start, r.End, peers)
	r.Seeds = peers
}
Example 15
func TestFuzzRingHard(t *testing.T) {
	//common.SetLogLevel("debug")
	var (
		numPeers   = 100
		iterations = 3000
		peers      []mesh.PeerName
		rings      []*Ring
		nextPeerID = 0
	)

	addPeer := func() {
		peer := makePeerName(nextPeerID)
		common.Log.Debugf("%s: Adding peer", peer)
		nextPeerID++
		peers = append(peers, peer)
		rings = append(rings, New(start, end, peer))
	}

	for i := 0; i < numPeers; i++ {
		addPeer()
	}

	rings[0].ClaimItAll()

	randomPeer := func(exclude int) (int, mesh.PeerName, *Ring) {
		var peerIndex int
		if exclude >= 0 {
			peerIndex = rand.Intn(len(peers) - 1)
			if peerIndex == exclude {
				peerIndex++
			}
		} else {
			peerIndex = rand.Intn(len(peers))
		}
		return peerIndex, peers[peerIndex], rings[peerIndex]
	}

	// Keep a map of index -> ranges, as these are a little expensive to
	// calculate for every ring on every iteration.
	var theRanges = make(map[int][]address.Range)
	theRanges[0] = rings[0].OwnedRanges()

	addOrRmPeer := func() {
		if len(peers) < numPeers {
			addPeer()
			return
		}

		// Pick one peer to remove, and a different one to transfer to
		peerIndex, peername, _ := randomPeer(-1)
		_, otherPeername, otherRing := randomPeer(peerIndex)

		// We need to be in a ~converged ring to rmpeer
		for _, ring := range rings {
			require.NoError(t, otherRing.Merge(*ring))
		}

		common.Log.Debugf("%s: transferring from peer %s", otherPeername, peername)
		otherRing.Transfer(peername, otherPeername)

		// Remove peer from our state
		peers = append(peers[:peerIndex], peers[peerIndex+1:]...)
		rings = append(rings[:peerIndex], rings[peerIndex+1:]...)
		theRanges = make(map[int][]address.Range)

		// And now tell everyone about the transfer - rmpeer is
		// not partition safe
		for i, ring := range rings {
			require.NoError(t, ring.Merge(*otherRing))
			theRanges[i] = ring.OwnedRanges()
		}
	}

	doGrantOrGossip := func() {
		var ringsWithRanges = make([]int, 0, len(rings))
		for index, ranges := range theRanges {
			if len(ranges) > 0 {
				ringsWithRanges = append(ringsWithRanges, index)
			}
		}

		if len(ringsWithRanges) > 0 {
			// Produce a random split in a random owned range, given to a random peer
			indexWithRanges := ringsWithRanges[rand.Intn(len(ringsWithRanges))]
			ownedRanges := theRanges[indexWithRanges]
			ring := rings[indexWithRanges]

			rangeToSplit := ownedRanges[rand.Intn(len(ownedRanges))]
			size := address.Subtract(rangeToSplit.End, rangeToSplit.Start)
			ipInRange := address.Add(rangeToSplit.Start, address.Offset(rand.Intn(int(size))))
			_, peerToGiveTo, _ := randomPeer(-1)
			common.Log.Debugf("%s: Granting [%v, %v) to %s", ring.Peer, ipInRange, rangeToSplit.End, peerToGiveTo)
			ring.GrantRangeToHost(ipInRange, rangeToSplit.End, peerToGiveTo)

			// Now 'gossip' this to a random host (note: could be the same host as above)
			otherIndex, _, otherRing := randomPeer(-1)
			common.Log.Debugf("%s: 'Gossiping' to %s", ring.Peer, otherRing.Peer)
			require.NoError(t, otherRing.Merge(*ring))

			theRanges[indexWithRanges] = ring.OwnedRanges()
			theRanges[otherIndex] = otherRing.OwnedRanges()
			return
		}

		// No rings think they own anything (as gossip might be behind)
		// We're going to pick a random host (which has entries) and gossip
		// it to a random host (which may or may not have entries).
		var ringsWithEntries = make([]*Ring, 0, len(rings))
		for _, ring := range rings {
			if len(ring.Entries) > 0 {
				ringsWithEntries = append(ringsWithEntries, ring)
			}
		}
		ring1 := ringsWithEntries[rand.Intn(len(ringsWithEntries))]
		ring2index, _, ring2 := randomPeer(-1)
		common.Log.Debugf("%s: 'Gossiping' to %s", ring1.Peer, ring2.Peer)
		require.NoError(t, ring2.Merge(*ring1))
		theRanges[ring2index] = ring2.OwnedRanges()
	}

	for i := 0; i < iterations; i++ {
		// about 1 in 10 times, rmpeer or add host
		n := rand.Intn(10)
		switch {
		case n < 1:
			addOrRmPeer()
		default:
			doGrantOrGossip()
		}
	}
}
Example 16
func TestAllocatorFuzz(t *testing.T) {
	const (
		firstpass    = 1000
		secondpass   = 10000
		nodes        = 5
		maxAddresses = 1000
		concurrency  = 5
		cidr         = "10.0.4.0/22"
	)
	allocs, router, subnet := makeNetworkOfAllocators(nodes, cidr)
	defer stopNetworkOfAllocators(allocs, router)

	// Test state
	// For each IP issued we store the allocator
	// that issued it and the name of the container
	// it was issued to.
	type result struct {
		name  string
		alloc int32
		block bool
	}
	stateLock := sync.Mutex{}
	state := make(map[string]result)
	// Keep a list of addresses issued, so we
	// can pick random ones
	var addrs []string
	numPending := 0

	rand.Seed(0)

	// Remove item from list by swapping it with last
	// and reducing slice length by 1
	rm := func(xs []string, i int32) []string {
		ls := len(xs) - 1
		xs[i] = xs[ls]
		return xs[:ls]
	}

	bumpPending := func() bool {
		stateLock.Lock()
		if len(addrs)+numPending >= maxAddresses {
			stateLock.Unlock()
			return false
		}
		numPending++
		stateLock.Unlock()
		return true
	}

	noteAllocation := func(allocIndex int32, name string, addr address.Address) {
		//common.Log.Infof("Allocate: got address %s for name %s", addr, name)
		addrStr := addr.String()

		stateLock.Lock()
		defer stateLock.Unlock()

		if res, existing := state[addrStr]; existing {
			panic(fmt.Sprintf("Dup found for address %s - %s and %s", addrStr,
				name, res.name))
		}

		state[addrStr] = result{name, allocIndex, false}
		addrs = append(addrs, addrStr)
		numPending--
	}

	// Do an Allocate and check the address
	// is unique. Needs a unique container
	// name.
	allocate := func(name string) {
		if !bumpPending() {
			return
		}

		allocIndex := rand.Int31n(nodes)
		alloc := allocs[allocIndex]
		//common.Log.Infof("Allocate: asking allocator %d", allocIndex)
		addr, err := alloc.SimplyAllocate(name, subnet)

		if err != nil {
			panic("Could not allocate addr")
		}

		noteAllocation(allocIndex, name, addr)
	}

	// Free a random address.
	free := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		// Delete an existing allocation
		// Pick random addr
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		addrs = rm(addrs, addrIndex)
		delete(state, addr)
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Log.Infof("Freeing %s (%s) on allocator %d", res.name, addr, res.alloc)

		oldAddr, err := address.ParseIP(addr)
		if err != nil {
			panic(err)
		}
		require.NoError(t, alloc.Free(res.name, oldAddr))
	}

	// Do an Allocate on an existing container & allocator
	// and check we get the right answer.
	allocateAgain := func() {
		stateLock.Lock()
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		res.block = true
		state[addr] = res
		stateLock.Unlock()
		alloc := allocs[res.alloc]

		//common.Log.Infof("Asking for %s (%s) on allocator %d again", res.name, addr, res.alloc)

		newAddr, _ := alloc.SimplyAllocate(res.name, subnet)
		oldAddr, _ := address.ParseIP(addr)
		if newAddr != oldAddr {
			panic(fmt.Sprintf("Got different address for repeat request for %s: %s != %s", res.name, newAddr, oldAddr))
		}

		stateLock.Lock()
		res.block = false
		state[addr] = res
		stateLock.Unlock()
	}

	// Claim a random address for a unique container name - may not succeed
	claim := func(name string) {
		if !bumpPending() {
			return
		}
		allocIndex := rand.Int31n(nodes)
		addressIndex := rand.Int31n(int32(subnet.Size()))
		alloc := allocs[allocIndex]
		addr := address.Add(subnet.Addr, address.Offset(addressIndex))
		err := alloc.SimplyClaim(name, address.MakeCIDR(subnet, addr))
		if err == nil {
			noteAllocation(allocIndex, name, addr)
		}
	}

	// Run function _f_ _iterations_ times, spread across
	// _concurrency_ goroutines
	doConcurrentIterations := func(iterations int, f func(int)) {
		iterationsPerThread := iterations / concurrency

		wg := sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				for k := 0; k < iterationsPerThread; k++ {
					f((j * iterationsPerThread) + k)
				}
			}(i)
		}
		wg.Wait()
	}

	// First pass, just allocate a bunch of ips
	doConcurrentIterations(firstpass, func(iteration int) {
		name := fmt.Sprintf("first%d", iteration)
		allocate(name)
	})

	// Second pass: randomly ask for more allocations,
	// free existing ones, or ask for an existing
	// allocation again.
	doConcurrentIterations(secondpass, func(iteration int) {
		r := rand.Float32()
		switch {
		case 0.0 <= r && r < 0.4:
			// Ask for a new allocation
			name := fmt.Sprintf("second%d", iteration)
			allocate(name)

		case (0.4 <= r && r < 0.8):
			// free a random addr
			free()

		case 0.8 <= r && r < 0.95:
			// ask for an existing name again, check we get same ip
			allocateAgain()

		case 0.95 <= r && r < 1.0:
			name := fmt.Sprintf("second%d", iteration)
			claim(name)
		}
	})
}
Example 17
func TestLowlevel(t *testing.T) {
	a := []address.Address{}
	a = add(a, 100, 200)
	require.Equal(t, []address.Address{100, 200}, a)
	require.True(t, !contains(a, 99), "")
	require.True(t, contains(a, 100), "")
	require.True(t, contains(a, 199), "")
	require.True(t, !contains(a, 200), "")
	a = add(a, 700, 800)
	require.Equal(t, []address.Address{100, 200, 700, 800}, a)
	a = add(a, 300, 400)
	require.Equal(t, []address.Address{100, 200, 300, 400, 700, 800}, a)
	a = add(a, 400, 500)
	require.Equal(t, []address.Address{100, 200, 300, 500, 700, 800}, a)
	a = add(a, 600, 700)
	require.Equal(t, []address.Address{100, 200, 300, 500, 600, 800}, a)
	a = add(a, 500, 600)
	require.Equal(t, []address.Address{100, 200, 300, 800}, a)
	a = subtract(a, 500, 600)
	require.Equal(t, []address.Address{100, 200, 300, 500, 600, 800}, a)
	a = subtract(a, 600, 700)
	require.Equal(t, []address.Address{100, 200, 300, 500, 700, 800}, a)
	a = subtract(a, 400, 500)
	require.Equal(t, []address.Address{100, 200, 300, 400, 700, 800}, a)
	a = subtract(a, 300, 400)
	require.Equal(t, []address.Address{100, 200, 700, 800}, a)
	a = subtract(a, 700, 800)
	require.Equal(t, []address.Address{100, 200}, a)
	a = subtract(a, 100, 200)
	require.Equal(t, []address.Address{}, a)

	s := New()
	require.Equal(t, address.Offset(0), s.NumFreeAddresses())
	ok, got := s.Allocate(address.NewRange(0, 1000))
	require.False(t, ok, "allocate in empty space should fail")

	s.Add(100, 100)
	require.Equal(t, address.Offset(100), s.NumFreeAddresses())
	ok, got = s.Allocate(address.NewRange(0, 1000))
	require.True(t, ok && got == 100, "allocate")
	require.Equal(t, address.Offset(99), s.NumFreeAddresses())
	require.NoError(t, s.Claim(150))
	require.Equal(t, address.Offset(98), s.NumFreeAddresses())
	require.NoError(t, s.Free(100))
	require.Equal(t, address.Offset(99), s.NumFreeAddresses())
	wt.AssertErrorInterface(t, (*error)(nil), s.Free(0), "free not allocated")
	wt.AssertErrorInterface(t, (*error)(nil), s.Free(100), "double free")
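	// Free is now [100,150) and [151,200), 99 addresses in total; the Donate
	// below presumably gives up the top half of the biggest chunk, [125,150).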

	r, ok := s.Donate(address.NewRange(0, 1000))
	require.True(t, ok && r.Start == 125 && r.Size() == 25, "donate")

	// test Donate when addresses are scarce
	s = New()
	r, ok = s.Donate(address.NewRange(0, 1000))
	require.True(t, !ok, "donate on empty space should fail")
	s.Add(0, 3)
	require.NoError(t, s.Claim(0))
	require.NoError(t, s.Claim(2))
	r, ok = s.Donate(address.NewRange(0, 1000))
	require.True(t, ok && r.Start == 1 && r.End == 2, "donate")
	r, ok = s.Donate(address.NewRange(0, 1000))
	require.True(t, !ok, "donate should fail")
}