// subdivide subdivides the [from,to) CIDR for the given peers into
// CIDR-aligned subranges.
func (r *Ring) subdivide(from, to address.Address, peers []mesh.PeerName) {
	share := address.Length(to, from)
	if share == 0 {
		return
	}
	if share == 1 || len(peers) == 1 {
		r.Entries.insert(entry{Token: from, Peer: peers[0], Free: share})
		return
	}
	mid := address.Add(from, address.Offset(share/2))
	r.subdivide(from, mid, peers[:len(peers)/2])
	r.subdivide(address.Add(mid, address.Offset(share%2)), to, peers[len(peers)/2:])
}
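// Hypothetical illustration (not part of the original source): the same
// halving recursion as subdivide above, modelled on plain integers so the
// split is easy to trace without the address and mesh packages. With a
// power-of-two span of 16 addresses and peers a, b, c, peer a ends up with
// [0,8), b with [8,12) and c with [12,16).
func TestSubdivideModelSketch(t *testing.T) {
	var split func(from, to uint32, peers []string, out map[string][]uint32)
	split = func(from, to uint32, peers []string, out map[string][]uint32) {
		share := to - from
		if share == 0 {
			return
		}
		if share == 1 || len(peers) == 1 {
			// Leaf case: record the [from,to) range for the single peer
			out[peers[0]] = append(out[peers[0]], from, to)
			return
		}
		// Split the span and the peer slice in half and recurse
		mid := from + share/2
		split(from, mid, peers[:len(peers)/2], out)
		split(mid+share%2, to, peers[len(peers)/2:], out)
	}

	out := map[string][]uint32{}
	split(0, 16, []string{"a", "b", "c"}, out)
	require.Equal(t, map[string][]uint32{
		"a": {0, 8},
		"b": {8, 12},
		"c": {12, 16},
	}, out)
}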
func TestDonateHard(t *testing.T) {
	//common.InitDefaultLogging(true)
	var (
		start                = ip("10.0.1.0")
		size  address.Offset = 48
	)

	// Fill a fresh space
	spaceset := makeSpace(start, size)
	for i := address.Offset(0); i < size; i++ {
		ok, _ := spaceset.Allocate(address.NewRange(start, size))
		require.True(t, ok, "Failed to get IP!")
	}

	require.Equal(t, address.Count(0), spaceset.NumFreeAddresses())

	// Now free all but the last address
	// this will force us to split the free list
	for i := address.Offset(0); i < size-1; i++ {
		require.NoError(t, spaceset.Free(address.Add(start, i)))
	}

	// Now split
	newRange, ok := spaceset.Donate(address.NewRange(start, size))
	require.True(t, ok, "GiveUpSpace result")
	require.Equal(t, address.NewRange(ip("10.0.1.16"), 16), newRange, "Wrong space")
	require.Equal(t, address.Count(31), spaceset.NumFreeAddresses())

	// Space set should now have 3 spaces
	expected := &Space{
		ours: add(nil, ip("10.0.1.47"), ip("10.0.1.48")),
		free: add(add(nil, ip("10.0.1.0"), ip("10.0.1.16")), ip("10.0.1.32"), ip("10.0.1.47")),
	}
	require.Equal(t, expected, spaceset)
}
// NewAllocator creates and initialises a new Allocator
func NewAllocator(ourName router.PeerName, ourUID router.PeerUID, ourNickname string, universe address.Range, quorum uint) *Allocator {
	return &Allocator{
		ourName:   ourName,
		universe:  universe,
		ring:      ring.New(universe.Start, address.Add(universe.Start, universe.Size()), ourName),
		owned:     make(map[string][]address.Address),
		paxos:     paxos.NewNode(ourName, ourUID, quorum),
		nicknames: map[router.PeerName]string{ourName: ourNickname},
		now:       time.Now,
	}
}
// ClaimForPeers claims the entire ring for the array of peers passed
// in. Only works for empty rings. Each claimed range is CIDR-aligned.
func (r *Ring) ClaimForPeers(peers []mesh.PeerName) {
	common.Assert(r.Empty())
	defer r.trackUpdates()()
	defer r.assertInvariants()
	defer r.updateExportedVariables()

	defer func() {
		e := r.Entries[len(r.Entries)-1]
		common.Assert(address.Add(e.Token, address.Offset(e.Free)) == r.End)
	}()

	r.subdivide(r.Start, r.End, peers)
	r.Seeds = peers
}
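// Hypothetical illustration (not part of the original source): an in-package
// test sketch of ClaimForPeers, assuming the makePeerName helper and the
// start/end fixtures used by TestFuzzRingHard below are in scope. It claims
// an empty ring for three peers and checks that every peer ends up owning at
// least one entry.
func TestClaimForPeersSketch(t *testing.T) {
	peers := []mesh.PeerName{makePeerName(0), makePeerName(1), makePeerName(2)}
	r := New(start, end, peers[0])
	r.ClaimForPeers(peers)

	// Count how many entries each peer received
	owned := map[mesh.PeerName]int{}
	for _, e := range r.Entries {
		owned[e.Peer]++
	}
	for _, p := range peers {
		require.True(t, owned[p] > 0, "each peer should own at least one entry")
	}
}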
// Donate returns the upper half of the biggest free range lying within r,
// removing it from this space; the bool result is false if there is nothing
// to donate.
func (s *Space) Donate(r address.Range) (address.Range, bool) {
	biggest := s.biggestFreeRange(r)
	if biggest.Size() == 0 {
		return address.Range{}, false
	}

	// Donate half of that biggest free range. Note size/2 rounds down, so
	// the resulting donation size rounds up, and in particular can't be empty.
	biggest.Start = address.Add(biggest.Start, biggest.Size()/2)

	s.ours = subtract(s.ours, biggest.Start, biggest.End)
	s.free = subtract(s.free, biggest.Start, biggest.End)

	return biggest, true
}
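// Hypothetical illustration (not part of the original source): the rounding
// described in the Donate comment above, shown on plain integers. Keeping
// size/2 (rounded down) means the donated upper half rounds up, so the
// donation is never empty for a non-empty range.
func TestDonateRoundingSketch(t *testing.T) {
	for _, size := range []uint32{1, 2, 5, 48} {
		kept := size / 2       // lower half, rounds down
		donated := size - kept // upper half, rounds up
		require.True(t, donated >= 1, "donation must not be empty")
		require.True(t, donated >= kept, "donation takes the larger half when size is odd")
	}
}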
func TestAllocatorFuzz(t *testing.T) {
	const (
		firstpass    = 1000
		secondpass   = 10000
		nodes        = 5
		maxAddresses = 1000
		concurrency  = 5
		cidr         = "10.0.4.0/22"
	)
	allocs, router, subnet := makeNetworkOfAllocators(nodes, cidr)
	defer stopNetworkOfAllocators(allocs, router)

	// Test state
	// For each IP issued we store the allocator
	// that issued it and the name of the container
	// it was issued to.
	type result struct {
		name  string
		alloc int32
		block bool
	}
	stateLock := sync.Mutex{}
	state := make(map[string]result)
	// Keep a list of addresses issued, so we
	// can pick random ones
	var addrs []string
	numPending := 0

	rand.Seed(0)

	// Remove item from list by swapping it with last
	// and reducing slice length by 1
	rm := func(xs []string, i int32) []string {
		ls := len(xs) - 1
		xs[i] = xs[ls]
		return xs[:ls]
	}

	bumpPending := func() bool {
		stateLock.Lock()
		if len(addrs)+numPending >= maxAddresses {
			stateLock.Unlock()
			return false
		}
		numPending++
		stateLock.Unlock()
		return true
	}

	noteAllocation := func(allocIndex int32, name string, addr address.Address) {
		//common.Log.Infof("Allocate: got address %s for name %s", addr, name)
		addrStr := addr.String()

		stateLock.Lock()
		defer stateLock.Unlock()

		if res, existing := state[addrStr]; existing {
			panic(fmt.Sprintf("Dup found for address %s - %s and %s", addrStr, name, res.name))
		}
		state[addrStr] = result{name, allocIndex, false}
		addrs = append(addrs, addrStr)
		numPending--
	}

	// Do an Allocate and check the address
	// is unique. Needs a unique container
	// name.
	allocate := func(name string) {
		if !bumpPending() {
			return
		}

		allocIndex := rand.Int31n(nodes)
		alloc := allocs[allocIndex]
		//common.Log.Infof("Allocate: asking allocator %d", allocIndex)
		addr, err := alloc.SimplyAllocate(name, subnet)
		if err != nil {
			panic("Could not allocate addr")
		}

		noteAllocation(allocIndex, name, addr)
	}

	// Free a random address.
	free := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		// Delete an existing allocation
		// Pick random addr
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		addrs = rm(addrs, addrIndex)
		delete(state, addr)
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Log.Infof("Freeing %s (%s) on allocator %d", res.name, addr, res.alloc)

		oldAddr, err := address.ParseIP(addr)
		if err != nil {
			panic(err)
		}
		require.NoError(t, alloc.Free(res.name, oldAddr))
	}

	// Do an Allocate on an existing container & allocator
	// and check we get the right answer.
	allocateAgain := func() {
		stateLock.Lock()
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		res.block = true
		state[addr] = res
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Log.Infof("Asking for %s (%s) on allocator %d again", res.name, addr, res.alloc)

		newAddr, _ := alloc.SimplyAllocate(res.name, subnet)

		oldAddr, _ := address.ParseIP(addr)
		if newAddr != oldAddr {
			panic(fmt.Sprintf("Got different address for repeat request for %s: %s != %s", res.name, newAddr, oldAddr))
		}

		stateLock.Lock()
		res.block = false
		state[addr] = res
		stateLock.Unlock()
	}

	// Claim a random address for a unique container name - may not succeed
	claim := func(name string) {
		if !bumpPending() {
			return
		}
		allocIndex := rand.Int31n(nodes)
		addressIndex := rand.Int31n(int32(subnet.Size()))
		alloc := allocs[allocIndex]
		addr := address.Add(subnet.Addr, address.Offset(addressIndex))
		err := alloc.SimplyClaim(name, address.MakeCIDR(subnet, addr))
		if err == nil {
			noteAllocation(allocIndex, name, addr)
		}
	}

	// Run function _f_ _iterations_ times, in _concurrency_
	// number of goroutines
	doConcurrentIterations := func(iterations int, f func(int)) {
		iterationsPerThread := iterations / concurrency

		wg := sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()

				for k := 0; k < iterationsPerThread; k++ {
					f((j * iterationsPerThread) + k)
				}
			}(i)
		}
		wg.Wait()
	}

	// First pass, just allocate a bunch of ips
	doConcurrentIterations(firstpass, func(iteration int) {
		name := fmt.Sprintf("first%d", iteration)
		allocate(name)
	})

	// Second pass: randomly ask for more allocations,
	// remove existing ones, or ask for an allocation
	// again.
	doConcurrentIterations(secondpass, func(iteration int) {
		r := rand.Float32()
		switch {
		case 0.0 <= r && r < 0.4:
			// Ask for a new allocation
			name := fmt.Sprintf("second%d", iteration)
			allocate(name)

		case (0.4 <= r && r < 0.8):
			// free a random addr
			free()

		case 0.8 <= r && r < 0.95:
			// ask for an existing name again, check we get same ip
			allocateAgain()

		case 0.95 <= r && r < 1.0:
			name := fmt.Sprintf("second%d", iteration)
			claim(name)
		}
	})
}
func TestFuzzRingHard(t *testing.T) {
	//common.SetLogLevel("debug")
	var (
		numPeers   = 100
		iterations = 3000
		peers      []mesh.PeerName
		rings      []*Ring
		nextPeerID = 0
	)

	addPeer := func() {
		peer := makePeerName(nextPeerID)
		common.Log.Debugf("%s: Adding peer", peer)
		nextPeerID++
		peers = append(peers, peer)
		rings = append(rings, New(start, end, peer))
	}

	for i := 0; i < numPeers; i++ {
		addPeer()
	}

	rings[0].ClaimItAll()

	randomPeer := func(exclude int) (int, mesh.PeerName, *Ring) {
		var peerIndex int
		if exclude >= 0 {
			peerIndex = rand.Intn(len(peers) - 1)
			if peerIndex == exclude {
				peerIndex++
			}
		} else {
			peerIndex = rand.Intn(len(peers))
		}
		return peerIndex, peers[peerIndex], rings[peerIndex]
	}

	// Keep a map of index -> ranges, as these are a little expensive to
	// calculate for every ring on every iteration.
	var theRanges = make(map[int][]address.Range)
	theRanges[0] = rings[0].OwnedRanges()

	addOrRmPeer := func() {
		if len(peers) < numPeers {
			addPeer()
			return
		}

		// Pick one peer to remove, and a different one to transfer to
		peerIndex, peername, _ := randomPeer(-1)
		_, otherPeername, otherRing := randomPeer(peerIndex)

		// We need to be in a ~converged ring to rmpeer
		for _, ring := range rings {
			require.NoError(t, otherRing.Merge(*ring))
		}

		common.Log.Debugf("%s: transferring from peer %s", otherPeername, peername)
		otherRing.Transfer(peername, otherPeername)

		// Remove peer from our state
		peers = append(peers[:peerIndex], peers[peerIndex+1:]...)
		rings = append(rings[:peerIndex], rings[peerIndex+1:]...)
		theRanges = make(map[int][]address.Range)

		// And now tell everyone about the transfer - rmpeer is
		// not partition safe
		for i, ring := range rings {
			require.NoError(t, ring.Merge(*otherRing))
			theRanges[i] = ring.OwnedRanges()
		}
	}

	doGrantOrGossip := func() {
		var ringsWithRanges = make([]int, 0, len(rings))
		for index, ranges := range theRanges {
			if len(ranges) > 0 {
				ringsWithRanges = append(ringsWithRanges, index)
			}
		}

		if len(ringsWithRanges) > 0 {
			// Produce a random split in a random owned range, given to a random peer
			indexWithRanges := ringsWithRanges[rand.Intn(len(ringsWithRanges))]
			ownedRanges := theRanges[indexWithRanges]
			ring := rings[indexWithRanges]

			rangeToSplit := ownedRanges[rand.Intn(len(ownedRanges))]
			size := address.Subtract(rangeToSplit.End, rangeToSplit.Start)
			ipInRange := address.Add(rangeToSplit.Start, address.Offset(rand.Intn(int(size))))
			_, peerToGiveTo, _ := randomPeer(-1)
			common.Log.Debugf("%s: Granting [%v, %v) to %s", ring.Peer, ipInRange, rangeToSplit.End, peerToGiveTo)
			ring.GrantRangeToHost(ipInRange, rangeToSplit.End, peerToGiveTo)

			// Now 'gossip' this to a random host (note, could be the same host as above)
			otherIndex, _, otherRing := randomPeer(-1)
			common.Log.Debugf("%s: 'Gossiping' to %s", ring.Peer, otherRing.Peer)
			require.NoError(t, otherRing.Merge(*ring))

			theRanges[indexWithRanges] = ring.OwnedRanges()
			theRanges[otherIndex] = otherRing.OwnedRanges()
			return
		}

		// No rings think they own anything (as gossip might be behind)
		// We're going to pick a random host (which has entries) and gossip
		// it to a random host (which may or may not have entries).
		var ringsWithEntries = make([]*Ring, 0, len(rings))
		for _, ring := range rings {
			if len(ring.Entries) > 0 {
				ringsWithEntries = append(ringsWithEntries, ring)
			}
		}
		ring1 := ringsWithEntries[rand.Intn(len(ringsWithEntries))]
		ring2index, _, ring2 := randomPeer(-1)
		common.Log.Debugf("%s: 'Gossiping' to %s", ring1.Peer, ring2.Peer)
		require.NoError(t, ring2.Merge(*ring1))
		theRanges[ring2index] = ring2.OwnedRanges()
	}

	for i := 0; i < iterations; i++ {
		// about 1 in 10 times, rmpeer or add host
		n := rand.Intn(10)
		switch {
		case n < 1:
			addOrRmPeer()
		default:
			doGrantOrGossip()
		}
	}
}
// Add marks the addresses in [start, start+size) as free in this space.
func (s *Space) Add(start address.Address, size address.Offset) {
	s.free = add(s.free, start, address.Add(start, size))
}
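// Hypothetical illustration (not part of the original source): a minimal
// in-package sketch of Add, reusing the ip() test helper from the space
// tests above and assuming a zero-value Space is usable directly, as in the
// expected value of TestDonateHard. Add registers [start, start+size) as
// free, after which Allocate can hand addresses out of it.
func TestSpaceAddSketch(t *testing.T) {
	var (
		start                = ip("10.0.3.0")
		size  address.Offset = 4
	)
	s := &Space{}
	s.Add(start, size)
	require.Equal(t, address.Count(4), s.NumFreeAddresses())

	// Allocate one address from the freshly added range, then return it
	ok, addr := s.Allocate(address.NewRange(start, size))
	require.True(t, ok, "allocate from a freshly added range")
	require.Equal(t, address.Count(3), s.NumFreeAddresses())
	require.NoError(t, s.Free(addr))
	require.Equal(t, address.Count(4), s.NumFreeAddresses())
}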