Code example #1
File: allocator_test.go  Project: n054/weave
func TestAllocatorClaim(t *testing.T) {
	const (
		container1 = "abcdef"
		container3 = "b01df00d"
		universe   = "10.0.3.0/24"
		testAddr1  = "10.0.3.2/24"
		testAddr2  = "10.0.4.2/24"
	)

	allocs, router, subnet := makeNetworkOfAllocators(2, universe)
	defer stopNetworkOfAllocators(allocs, router)
	alloc := allocs[1]
	addr1, _ := address.ParseCIDR(testAddr1)

	// First claim should trigger "dunno, I'm going to wait"
	err := alloc.SimplyClaim(container3, addr1)
	require.NoError(t, err)

	alloc.Prime()
	// Do an allocate on the other peer, which we will try to claim later
	addrx, err := allocs[0].Allocate(container1, subnet, true, returnFalse)
	require.NoError(t, err)
	router.Flush()

	// Now try the claim again
	err = alloc.SimplyClaim(container3, addr1)
	require.NoError(t, err)
	// Check we get this address back if we try an allocate
	addr3, _ := alloc.SimplyAllocate(container3, subnet)
	require.Equal(t, testAddr1, address.MakeCIDR(subnet, addr3).String(), "address")
	// one more claim should still work
	err = alloc.SimplyClaim(container3, addr1)
	require.NoError(t, err)
	// claim for a different container should fail
	err = alloc.SimplyClaim(container1, addr1)
	require.Error(t, err)
	// claiming the address allocated on the other peer should fail
	err = alloc.SimplyClaim(container1, address.MakeCIDR(subnet, addrx))
	require.Error(t, err, "claiming address allocated on other peer should fail")
	// A claim for an address outside our universe should succeed
	addr2, _ := address.ParseCIDR(testAddr2)
	err = alloc.SimplyClaim(container1, addr2)
	require.NoError(t, err)
}
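
Note: the SimplyAllocate and SimplyClaim calls used throughout these tests are test-only convenience wrappers, not part of the Allocator's public API. Judging from the direct Allocate call above (which passes isContainer=true and the returnFalse cancellation callback), they plausibly look like the sketch below; the exact signatures are an assumption, not confirmed from the source.

// Hypothetical sketch of the test helpers used above.
func (alloc *Allocator) SimplyAllocate(ident string, cidr address.CIDR) (address.Address, error) {
	// returnFalse is assumed to be a hasBeenCancelled callback that
	// always reports "not cancelled", so the call runs to completion.
	return alloc.Allocate(ident, cidr, true, returnFalse)
}

func (alloc *Allocator) SimplyClaim(ident string, cidr address.CIDR) error {
	return alloc.Claim(ident, cidr, true, returnFalse)
}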
Code example #2
File: allocate.go  Project: n054/weave
// Try returns true if the request is completed, false if pending
func (g *allocate) Try(alloc *Allocator) bool {
	if g.hasBeenCancelled() {
		g.Cancel()
		return true
	}

	if addrs := alloc.ownedInRange(g.ident, g.r.Range()); len(addrs) > 0 {
		// If we had heard that this container died, resurrect it
		delete(alloc.dead, g.ident) // delete is no-op if key not in map
		g.resultChan <- allocateResult{addrs[0].Addr, nil}
		return true
	}

	if !alloc.universe.Range().Overlaps(g.r.Range()) {
		g.resultChan <- allocateResult{err: fmt.Errorf("range %s out of bounds: %s", g.r, alloc.universe)}
		return true
	}

	alloc.establishRing()

	if ok, addr := alloc.space.Allocate(g.r.HostRange()); ok {
		// If caller hasn't supplied a unique ID, file it under the IP address
		// which lets the caller then release the address using DELETE /ip/address
		if g.ident == "_" {
			g.ident = addr.String()
		}
		alloc.debugln("Allocated", addr, "for", g.ident, "in", g.r)
		alloc.addOwned(g.ident, address.MakeCIDR(g.r, addr), g.isContainer)
		g.resultChan <- allocateResult{addr, nil}
		return true
	}

	// out of space
	donors := alloc.ring.ChoosePeersToAskForSpace(g.r.Addr, g.r.Range().End)
	for _, donor := range donors {
		if err := alloc.sendSpaceRequest(donor, g.r.Range()); err != nil {
			alloc.debugln("Problem asking peer", donor, "for space:", err)
		} else {
			alloc.debugln("Decided to ask peer", donor, "for space in range", g.r)
			break
		}
	}

	return false
}
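
Try's boolean contract (true means the request finished, successfully or with an error sent down resultChan; false means it is still pending, e.g. waiting for a donor peer to hand over space) implies the caller keeps unfinished operations queued and re-drives them whenever ring or space state changes. A minimal sketch of such a retry loop follows; the operation interface and tryPendingOps helper are illustrative assumptions rather than weave's actual plumbing.

// operation is a hypothetical interface matching Try's contract.
type operation interface {
	Try(alloc *Allocator) bool // true = completed, false = still pending
}

// tryPendingOps re-drives every queued operation and keeps only the
// ones that are still pending; call it whenever state changes.
func tryPendingOps(alloc *Allocator, pending []operation) []operation {
	remaining := pending[:0]
	for _, op := range pending {
		if !op.Try(alloc) {
			remaining = append(remaining, op)
		}
	}
	return remaining
}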
Code example #3
File: allocator_test.go  Project: n054/weave
func TestAllocatorFuzz(t *testing.T) {
	const (
		firstpass    = 1000
		secondpass   = 10000
		nodes        = 5
		maxAddresses = 1000
		concurrency  = 5
		cidr         = "10.0.4.0/22"
	)
	allocs, router, subnet := makeNetworkOfAllocators(nodes, cidr)
	defer stopNetworkOfAllocators(allocs, router)

	// Test state
	// For each IP issued we store the allocator
	// that issued it and the name of the container
	// it was issued to.
	type result struct {
		name  string
		alloc int32
		block bool
	}
	stateLock := sync.Mutex{}
	state := make(map[string]result)
	// Keep a list of addresses issued, so we
	// can pick random ones
	var addrs []string
	numPending := 0

	rand.Seed(0)

	// Remove item from list by swapping it with last
	// and reducing slice length by 1
	rm := func(xs []string, i int32) []string {
		ls := len(xs) - 1
		xs[i] = xs[ls]
		return xs[:ls]
	}

	bumpPending := func() bool {
		stateLock.Lock()
		if len(addrs)+numPending >= maxAddresses {
			stateLock.Unlock()
			return false
		}
		numPending++
		stateLock.Unlock()
		return true
	}

	noteAllocation := func(allocIndex int32, name string, addr address.Address) {
		//common.Log.Infof("Allocate: got address %s for name %s", addr, name)
		addrStr := addr.String()

		stateLock.Lock()
		defer stateLock.Unlock()

		if res, existing := state[addrStr]; existing {
			panic(fmt.Sprintf("Dup found for address %s - %s and %s", addrStr,
				name, res.name))
		}

		state[addrStr] = result{name, allocIndex, false}
		addrs = append(addrs, addrStr)
		numPending--
	}

	// Do an Allocate and check the address
	// is unique. Needs a unique container
	// name.
	allocate := func(name string) {
		if !bumpPending() {
			return
		}

		allocIndex := rand.Int31n(nodes)
		alloc := allocs[allocIndex]
		//common.Log.Infof("Allocate: asking allocator %d", allocIndex)
		addr, err := alloc.SimplyAllocate(name, subnet)

		if err != nil {
			panic(fmt.Sprintf("Could not allocate addr"))
		}

		noteAllocation(allocIndex, name, addr)
	}

	// Free a random address.
	free := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		// Delete an existing allocation
		// Pick random addr
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		addrs = rm(addrs, addrIndex)
		delete(state, addr)
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Log.Infof("Freeing %s (%s) on allocator %d", res.name, addr, res.alloc)

		oldAddr, err := address.ParseIP(addr)
		if err != nil {
			panic(err)
		}
		require.NoError(t, alloc.Free(res.name, oldAddr))
	}

	// Do an Allocate on an existing container & allocator
	// and check we get the same address back.
	allocateAgain := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		res.block = true
		state[addr] = res
		stateLock.Unlock()
		alloc := allocs[res.alloc]

		//common.Log.Infof("Asking for %s (%s) on allocator %d again", res.name, addr, res.alloc)

		newAddr, _ := alloc.SimplyAllocate(res.name, subnet)
		oldAddr, _ := address.ParseIP(addr)
		if newAddr != oldAddr {
			panic(fmt.Sprintf("Got different address for repeat request for %s: %s != %s", res.name, newAddr, oldAddr))
		}

		stateLock.Lock()
		res.block = false
		state[addr] = res
		stateLock.Unlock()
	}

	// Claim a random address for a unique container name - may not succeed
	claim := func(name string) {
		if !bumpPending() {
			return
		}
		allocIndex := rand.Int31n(nodes)
		addressIndex := rand.Int31n(int32(subnet.Size()))
		alloc := allocs[allocIndex]
		addr := address.Add(subnet.Addr, address.Offset(addressIndex))
		err := alloc.SimplyClaim(name, address.MakeCIDR(subnet, addr))
		if err == nil {
			noteAllocation(allocIndex, name, addr)
		}
	}

	// Run function _f_ _iterations_ times, spread
	// across _concurrency_ goroutines
	doConcurrentIterations := func(iterations int, f func(int)) {
		iterationsPerThread := iterations / concurrency

		wg := sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				for k := 0; k < iterationsPerThread; k++ {
					f((j * iterationsPerThread) + k)
				}
			}(i)
		}
		wg.Wait()
	}

	// First pass: just allocate a bunch of IPs
	doConcurrentIterations(firstpass, func(iteration int) {
		name := fmt.Sprintf("first%d", iteration)
		allocate(name)
	})

	// Second pass: randomly ask for more allocations,
	// free existing ones, or ask for an existing
	// allocation again.
	doConcurrentIterations(secondpass, func(iteration int) {
		r := rand.Float32()
		switch {
		case 0.0 <= r && r < 0.4:
			// Ask for a new allocation
			name := fmt.Sprintf("second%d", iteration)
			allocate(name)

		case (0.4 <= r && r < 0.8):
			// free a random addr
			free()

		case 0.8 <= r && r < 0.95:
			// ask for an existing name again, check we get same ip
			allocateAgain()

		case 0.95 <= r && r < 1.0:
			name := fmt.Sprintf("second%d", iteration)
			claim(name)
		}
	})
}
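
The rm helper above deletes from the address list in O(1) by swapping the victim with the last element and shrinking the slice, trading away element order, which is acceptable here because addresses are only ever picked at random. A self-contained illustration of the idiom:

package main

import "fmt"

// removeSwap deletes xs[i] in O(1) by moving the last element into
// its slot and shrinking the slice; element order is not preserved.
func removeSwap(xs []string, i int) []string {
	last := len(xs) - 1
	xs[i] = xs[last]
	return xs[:last]
}

func main() {
	addrs := []string{"10.0.4.1", "10.0.4.2", "10.0.4.3", "10.0.4.4"}
	addrs = removeSwap(addrs, 1)
	fmt.Println(addrs) // [10.0.4.1 10.0.4.4 10.0.4.3]
}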
Code example #4
File: allocator_test.go  Project: n054/weave
func TestAllocFree(t *testing.T) {
	const (
		container1 = "abcdef"
		container2 = "baddf00d"
		container3 = "b01df00d"
		universe   = "10.0.3.0/26"
		subnet1    = "10.0.3.0/28"
		subnet2    = "10.0.3.32/28"
		testAddr1  = "10.0.3.1"
		testAddr2  = "10.0.3.33"
		spaceSize  = 62 // 64 IP addresses in /26, minus .0 and .63
	)

	alloc, subnet := makeAllocatorWithMockGossip(t, "01:00:00:01:00:00", universe, 1)
	defer alloc.Stop()
	cidr1, _ := address.ParseCIDR(subnet1)
	cidr2, _ := address.ParseCIDR(subnet2)

	alloc.claimRingForTesting()
	addr1, err := alloc.SimplyAllocate(container1, cidr1)
	require.NoError(t, err)
	require.Equal(t, testAddr1, addr1.String(), "address")

	addr2, err := alloc.SimplyAllocate(container1, cidr2)
	require.NoError(t, err)
	require.Equal(t, testAddr2, addr2.String(), "address")

	addrs, err := alloc.Lookup(container1, subnet.Range())
	require.NoError(t, err)
	require.Equal(t, []address.CIDR{address.MakeCIDR(cidr1, addr1), address.MakeCIDR(cidr2, addr2)}, addrs)

	// Ask for another address for a different container and check it's different
	addr1b, _ := alloc.SimplyAllocate(container2, cidr1)
	if addr1b.String() == testAddr1 {
		t.Fatalf("Expected different address but got %s", addr1b.String())
	}

	// Ask for the first container again and we should get the same addresses again
	addr1a, _ := alloc.SimplyAllocate(container1, cidr1)
	require.Equal(t, testAddr1, addr1a.String(), "address")
	addr2a, _ := alloc.SimplyAllocate(container1, cidr2)
	require.Equal(t, testAddr2, addr2a.String(), "address")

	// Now delete the first container, and we should get its addresses back
	require.NoError(t, alloc.Delete(container1))
	addr3, _ := alloc.SimplyAllocate(container3, cidr1)
	require.Equal(t, testAddr1, addr3.String(), "address")
	addr4, _ := alloc.SimplyAllocate(container3, cidr2)
	require.Equal(t, testAddr2, addr4.String(), "address")

	alloc.ContainerDied(container2)

	// Resurrect
	addr1c, err := alloc.SimplyAllocate(container2, cidr1)
	require.NoError(t, err)
	require.Equal(t, addr1b, addr1c, "address")

	alloc.ContainerDied(container3)
	alloc.Encode() // sync up
	// Move the clock forward and clear out the dead container
	alloc.actionChan <- func() { alloc.now = func() time.Time { return time.Now().Add(containerDiedTimeout * 2) } }
	alloc.actionChan <- func() { alloc.removeDeadContainers() }
	require.Equal(t, address.Count(spaceSize+1), alloc.NumFreeAddresses(subnet.Range()))
}
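
The final assertions rely on an injectable clock: the allocator reads time through its now function field, so the test can jump past containerDiedTimeout without sleeping. A minimal, self-contained sketch of the pattern follows; the reaper type here is hypothetical, not weave's.

package main

import (
	"fmt"
	"time"
)

// reaper tracks when entries died and reads time through an
// injectable clock, letting tests fast-forward without sleeping.
type reaper struct {
	now  func() time.Time // replaced in tests
	dead map[string]time.Time
	ttl  time.Duration
}

func (r *reaper) removeExpired() {
	for id, diedAt := range r.dead {
		if r.now().Sub(diedAt) > r.ttl {
			delete(r.dead, id)
		}
	}
}

func main() {
	r := &reaper{
		now:  time.Now,
		ttl:  time.Minute,
		dead: map[string]time.Time{"c1": time.Now()},
	}
	// Fast-forward the clock instead of sleeping:
	r.now = func() time.Time { return time.Now().Add(2 * time.Minute) }
	r.removeExpired()
	fmt.Println(len(r.dead)) // prints 0
}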