Example #1
func TestAllocatorClaim(t *testing.T) {
	const (
		container1 = "abcdef"
		container2 = "baddf00d"
		container3 = "b01df00d"
		universe   = "10.0.3.0/30"
		testAddr1  = "10.0.3.1" // first address allocated should be .1 because .0 is network addr
	)

	alloc, subnet := makeAllocatorWithMockGossip(t, "01:00:00:01:00:00", universe, 1)
	defer alloc.Stop()

	alloc.claimRingForTesting()
	addr1, _ := address.ParseIP(testAddr1)

	err := alloc.Claim(container3, addr1, nil)
	require.NoError(t, err)
	// Check we get this address back if we try to allocate
	addr3, _ := alloc.Allocate(container3, subnet, nil)
	require.Equal(t, testAddr1, addr3.String(), "address")
}
Example #2
// ip is a test shorthand that parses s and discards any parse error;
// acceptable for hard-coded literals in tests.
func ip(s string) address.Address {
	addr, _ := address.ParseIP(s)
	return addr
}
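
A short usage sketch for the shorthand above; the package name, the testify import, and the claimFixed helper are assumptions for illustration, not taken from the project:

package ipam

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// claimFixed is a hypothetical test helper: the ip() shorthand keeps the
// hard-coded literal readable, at the cost of ignoring parse errors.
func claimFixed(t *testing.T, alloc *Allocator) {
	require.NoError(t, alloc.Claim("abcdef", ip("10.0.3.1"), nil))
}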
Example #3
File: http.go Project: gnomix/weave
// HandleHTTP wires up the IPAM allocator's HTTP endpoints to the provided mux router.
func (alloc *Allocator) HandleHTTP(router *mux.Router, defaultSubnet address.CIDR, dockerCli *docker.Client) {
	router.Methods("PUT").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Claim(ident, ip, closedChan); err != nil {
			badRequest(w, fmt.Errorf("Unable to claim: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("GET").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		cidr := vars["ip"] + "/" + vars["prefixlen"]
		_, subnet, err := address.ParseCIDR(cidr)
		if err != nil {
			badRequest(w, err)
			return
		}
		addr, err := alloc.Lookup(vars["id"], subnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, "%s/%d", addr, subnet.PrefixLen)
	})

	router.Methods("GET").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		addr, err := alloc.Lookup(mux.Vars(r)["id"], defaultSubnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, "%s/%d", addr, defaultSubnet.PrefixLen)
	})

	router.Methods("POST").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		vars := mux.Vars(r)
		ident := vars["id"]
		cidrStr := vars["ip"] + "/" + vars["prefixlen"]
		subnetAddr, cidr, err := address.ParseCIDR(cidrStr)
		if err != nil {
			badRequest(w, err)
			return
		}
		if cidr.Start != subnetAddr {
			badRequest(w, fmt.Errorf("Invalid subnet %s - bits after network prefix are not all zero", cidrStr))
			return
		}
		addr, err := alloc.Allocate(ident, cidr.HostRange(), closedChan)
		if err != nil {
			badRequest(w, fmt.Errorf("Unable to allocate: %s", err))
			return
		}
		if dockerCli != nil && dockerCli.IsContainerNotRunning(ident) {
			common.Log.Infof("[allocator] '%s' is not running: freeing %s", ident, addr)
			alloc.Free(ident, addr)
			return
		}

		fmt.Fprintf(w, "%s/%d", addr, cidr.PrefixLen)
	})

	router.Methods("POST").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		ident := mux.Vars(r)["id"]
		newAddr, err := alloc.Allocate(ident, defaultSubnet.HostRange(), closedChan)
		if err != nil {
			badRequest(w, err)
			return
		}
		if dockerCli != nil && dockerCli.IsContainerNotRunning(ident) {
			common.Log.Infof("[allocator] '%s' is not running: freeing %s", ident, newAddr)
			alloc.Free(ident, newAddr)
			return
		}

		fmt.Fprintf(w, "%s/%d", newAddr, defaultSubnet.PrefixLen)
	})

	router.Methods("DELETE").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Free(ident, ip); err != nil {
			badRequest(w, fmt.Errorf("Unable to free: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.Delete(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		alloc.Shutdown()
		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.AdminTakeoverRanges(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})
}
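
A minimal sketch of how HandleHTTP might be wired into an HTTP server, assuming the allocator lives in an ipam package and the weave address import path shown below; the listen port and default subnet are illustrative values only:

package ipam

import (
	"net/http"

	"github.com/gorilla/mux"

	"github.com/weaveworks/weave/net/address" // import path assumed
)

// serveIPAM is a sketch only: build a router, register the allocator's
// HTTP endpoints, and serve them. Passing a nil docker client skips the
// "container not running" cleanup path seen in the handlers above.
func serveIPAM(alloc *Allocator) error {
	_, defaultSubnet, err := address.ParseCIDR("10.32.0.0/12")
	if err != nil {
		return err
	}
	router := mux.NewRouter()
	alloc.HandleHTTP(router, defaultSubnet, nil)
	return http.ListenAndServe(":6784", router)
}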
Example #4
func TestAllocatorFuzz(t *testing.T) {
	common.InitDefaultLogging(false)
	const (
		firstpass    = 1000
		secondpass   = 20000
		nodes        = 10
		maxAddresses = 1000
		concurrency  = 30
		cidr         = "10.0.1.7/22"
	)
	allocs, _, subnet := makeNetworkOfAllocators(nodes, cidr)
	defer stopNetworkOfAllocators(allocs)

	// Test state
	// For each IP issued we store the allocator
	// that issued it and the name of the container
	// it was issued to.
	type result struct {
		name  string
		alloc int32
		block bool
	}
	stateLock := sync.Mutex{}
	state := make(map[string]result)
	// Keep a list of addresses issued, so we
	// can pick random ones
	var addrs []string
	numPending := 0

	rand.Seed(0)

	// Remove item from list by swapping it with last
	// and reducing slice length by 1
	rm := func(xs []string, i int32) []string {
		ls := len(xs) - 1
		xs[i] = xs[ls]
		return xs[:ls]
	}

	// Do an Allocate and check the address
	// is unique.  Needs a unique container
	// name.
	allocate := func(name string) {
		stateLock.Lock()
		if len(addrs)+numPending >= maxAddresses {
			stateLock.Unlock()
			return
		}
		numPending++
		stateLock.Unlock()

		allocIndex := rand.Int31n(nodes)
		alloc := allocs[allocIndex]
		//common.Info.Printf("Allocate: asking allocator %d", allocIndex)
		addr, err := alloc.Allocate(name, subnet, nil)

		if err != nil {
			panic(fmt.Sprintf("Could not allocate addr"))
		}

		//common.Info.Printf("Allocate: got address %s for name %s", addr, name)
		addrStr := addr.String()

		stateLock.Lock()
		defer stateLock.Unlock()

		if res, existing := state[addrStr]; existing {
			panic(fmt.Sprintf("Dup found for address %s - %s and %s", addrStr,
				name, res.name))
		}

		state[addrStr] = result{name, allocIndex, false}
		addrs = append(addrs, addrStr)
		numPending--
	}

	// Free a random address.
	free := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		// Delete an existing allocation
		// Pick random addr
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		addrs = rm(addrs, addrIndex)
		delete(state, addr)
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Info.Printf("Freeing %s (%s) on allocator %d", res.name, addr, res.alloc)

		oldAddr, err := address.ParseIP(addr)
		if err != nil {
			panic(err)
		}
		require.NoError(t, alloc.Free(res.name, oldAddr))
	}

	// Do an Allocate on an existing container & allocator
	// and check we get the right answer.
	allocateAgain := func() {
		stateLock.Lock()
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		res.block = true
		state[addr] = res
		stateLock.Unlock()
		alloc := allocs[res.alloc]

		//common.Info.Printf("Asking for %s (%s) on allocator %d again", res.name, addr, res.alloc)

		newAddr, _ := alloc.Allocate(res.name, subnet, nil)
		oldAddr, _ := address.ParseIP(addr)
		if newAddr != oldAddr {
			panic(fmt.Sprintf("Got different address for repeat request for %s: %s != %s", res.name, newAddr, oldAddr))
		}

		stateLock.Lock()
		res.block = false
		state[addr] = res
		stateLock.Unlock()
	}

	// Run function _f_ _iterations_ times, spread across
	// _concurrency_ goroutines
	doConcurrentIterations := func(iterations int, f func(int)) {
		iterationsPerThread := iterations / concurrency

		wg := sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				for k := 0; k < iterationsPerThread; k++ {
					f((j * iterationsPerThread) + k)
				}
			}(i)
		}
		wg.Wait()
	}

	// First pass, just allocate a bunch of ips
	doConcurrentIterations(firstpass, func(iteration int) {
		name := fmt.Sprintf("first%d", iteration)
		allocate(name)
	})

	// Second pass: randomly ask for more allocations,
	// free existing ones, or ask for an existing
	// allocation again.
	doConcurrentIterations(secondpass, func(iteration int) {
		r := rand.Float32()
		switch {
		case 0.0 <= r && r < 0.4:
			// Ask for a new allocation
			name := fmt.Sprintf("second%d", iteration)
			allocate(name)

		case (0.4 <= r && r < 0.8):
			// free a random addr
			free()

		case 0.8 <= r && r < 1.0:
			// ask for an existing name again, check we get same ip
			allocateAgain()
		}
	})
}