Example #1
File: dns.go, Project: n054/weave
func (h *handler) handleReverse(w dns.ResponseWriter, req *dns.Msg) {
	h.ns.debugf("reverse request: %+v", *req)
	if len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {
		h.nameError(w, req)
		return
	}

	ipStr := strings.TrimSuffix(req.Question[0].Name, "."+reverseDNSdomain)
	ip, err := address.ParseIP(ipStr)
	if err != nil {
		h.nameError(w, req)
		return
	}

	hostname, err := h.ns.ReverseLookup(ip.Reverse())
	if err != nil {
		h.handleRecursive(w, req)
		return
	}

	header := dns.RR_Header{
		Name:   req.Question[0].Name,
		Rrtype: dns.TypePTR,
		Class:  dns.ClassINET,
		Ttl:    h.ttl,
	}
	answers := []dns.RR{&dns.PTR{
		Hdr: header,
		Ptr: hostname,
	}}

	h.respond(w, h.makeResponse(req, answers))
}
Example #2
func TestAllocatorClaim(t *testing.T) {
	const (
		container1 = "abcdef"
		container3 = "b01df00d"
		universe   = "10.0.3.0/24"
		testAddr1  = "10.0.3.2"
		testAddr2  = "10.0.4.2"
	)

	allocs, _, subnet := makeNetworkOfAllocators(2, universe)
	alloc := allocs[1]
	defer alloc.Stop()
	addr1, _ := address.ParseIP(testAddr1)

	// First claim should trigger "dunno, I'm going to wait"
	err := alloc.Claim(container3, addr1)
	require.NoError(t, err)

	// Do one allocate to ensure paxos is all done
	alloc.Allocate("unused", subnet, nil)
	// Do an allocate on the other peer, which we will try to claim later
	addrx, err := allocs[0].Allocate(container1, subnet, nil)
	require.NoError(t, err)

	// Now try the claim again
	err = alloc.Claim(container3, addr1)
	require.NoError(t, err)
	// Check we get this address back if we try an allocate
	addr3, _ := alloc.Allocate(container3, subnet, nil)
	require.Equal(t, testAddr1, addr3.String(), "address")
	// one more claim should still work
	err = alloc.Claim(container3, addr1)
	require.NoError(t, err)
	// claim for a different container should fail
	err = alloc.Claim(container1, addr1)
	require.Error(t, err)
	// claiming the address allocated on the other peer should fail
	err = alloc.Claim(container1, addrx)
	require.Error(t, err, "claiming address allocated on other peer should fail")
	// Check an address outside of our universe
	addr2, _ := address.ParseIP(testAddr2)
	err = alloc.Claim(container1, addr2)
	require.NoError(t, err)
}
Example #3
func (d *DNSServer) handleReverse(client *dns.Client, defaultMaxResponseSize int) func(dns.ResponseWriter, *dns.Msg) {
	return func(w dns.ResponseWriter, req *dns.Msg) {
		d.ns.debugf("reverse request: %+v", *req)
		if len(req.Question) != 1 || req.Question[0].Qtype != dns.TypePTR {
			d.errorResponse(req, dns.RcodeNameError, w)
			return
		}

		ipStr := strings.TrimSuffix(req.Question[0].Name, "."+reverseDNSdomain)
		ip, err := address.ParseIP(ipStr)
		if err != nil {
			d.errorResponse(req, dns.RcodeNameError, w)
			return
		}

		hostname, err := d.ns.ReverseLookup(ip.Reverse())
		if err != nil {
			d.handleRecursive(client, defaultMaxResponseSize)(w, req)
			return
		}

		response := dns.Msg{}
		response.RecursionAvailable = true
		response.Authoritative = true
		response.SetReply(req)

		header := dns.RR_Header{
			Name:   req.Question[0].Name,
			Rrtype: dns.TypePTR,
			Class:  dns.ClassINET,
			Ttl:    d.ttl,
		}

		response.Answer = []dns.RR{&dns.PTR{
			Hdr: header,
			Ptr: hostname,
		}}

		maxResponseSize := getMaxResponseSize(req, defaultMaxResponseSize)
		truncateResponse(&response, maxResponseSize)

		d.ns.debugf("response: %+v", response)
		if err := w.WriteMsg(&response); err != nil {
			d.ns.infof("error responding: %v", err)
		}
	}
}
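Both reverse handlers have the func(dns.ResponseWriter, *dns.Msg) shape that github.com/miekg/dns expects. As a rough illustration only (not part of the project), wiring the closure-returning variant into a server could look like the following, assuming a *DNSServer value d as above and placeholder values for the client, response size and listen address:

mux := dns.NewServeMux()
// Route reverse (PTR) queries for the in-addr.arpa. zone to the handler
// built above; other zones would get their own handlers.
mux.HandleFunc("in-addr.arpa.", d.handleReverse(&dns.Client{Net: "udp"}, 512))
server := &dns.Server{Addr: ":53", Net: "udp", Handler: mux}
if err := server.ListenAndServe(); err != nil {
	// startup failed; log or return the error as appropriate
}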
Example #4
func TestAllocatorClaim(t *testing.T) {
	const (
		container1 = "abcdef"
		container2 = "baddf00d"
		container3 = "b01df00d"
		universe   = "10.0.3.0/30"
		testAddr1  = "10.0.3.1" // first address allocated should be .1 because .0 is network addr
	)

	alloc, subnet := makeAllocatorWithMockGossip(t, "01:00:00:01:00:00", universe, 1)
	defer alloc.Stop()

	alloc.claimRingForTesting()
	addr1, _ := address.ParseIP(testAddr1)

	err := alloc.Claim(container3, addr1)
	require.NoError(t, err)
	// Check we get this address back if we try an allocate
	addr3, _ := alloc.Allocate(container3, subnet, nil)
	require.Equal(t, testAddr1, addr3.String(), "address")
}
Example #5
func TestAllocatorFuzz(t *testing.T) {
	const (
		firstpass    = 1000
		secondpass   = 10000
		nodes        = 5
		maxAddresses = 1000
		concurrency  = 5
		cidr         = "10.0.4.0/22"
	)
	allocs, router, subnet := makeNetworkOfAllocators(nodes, cidr)
	defer stopNetworkOfAllocators(allocs, router)

	// Test state
	// For each IP issued we store the allocator
	// that issued it and the name of the container
	// it was issued to.
	type result struct {
		name  string
		alloc int32
		block bool
	}
	stateLock := sync.Mutex{}
	state := make(map[string]result)
	// Keep a list of addresses issued, so we
	// can pick random ones
	var addrs []string
	numPending := 0

	rand.Seed(0)

	// Remove item from list by swapping it with last
	// and reducing slice length by 1
	rm := func(xs []string, i int32) []string {
		ls := len(xs) - 1
		xs[i] = xs[ls]
		return xs[:ls]
	}

	bumpPending := func() bool {
		stateLock.Lock()
		if len(addrs)+numPending >= maxAddresses {
			stateLock.Unlock()
			return false
		}
		numPending++
		stateLock.Unlock()
		return true
	}

	noteAllocation := func(allocIndex int32, name string, addr address.Address) {
		//common.Log.Infof("Allocate: got address %s for name %s", addr, name)
		addrStr := addr.String()

		stateLock.Lock()
		defer stateLock.Unlock()

		if res, existing := state[addrStr]; existing {
			panic(fmt.Sprintf("Dup found for address %s - %s and %s", addrStr,
				name, res.name))
		}

		state[addrStr] = result{name, allocIndex, false}
		addrs = append(addrs, addrStr)
		numPending--
	}

	// Do an Allocate and check the address
	// is unique. Needs a unique container
	// name.
	allocate := func(name string) {
		if !bumpPending() {
			return
		}

		allocIndex := rand.Int31n(nodes)
		alloc := allocs[allocIndex]
		//common.Log.Infof("Allocate: asking allocator %d", allocIndex)
		addr, err := alloc.SimplyAllocate(name, subnet)

		if err != nil {
			panic("Could not allocate addr")
		}

		noteAllocation(allocIndex, name, addr)
	}

	// Free a random address.
	free := func() {
		stateLock.Lock()
		if len(addrs) == 0 {
			stateLock.Unlock()
			return
		}
		// Delete an existing allocation
		// Pick random addr
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		addrs = rm(addrs, addrIndex)
		delete(state, addr)
		stateLock.Unlock()

		alloc := allocs[res.alloc]
		//common.Log.Infof("Freeing %s (%s) on allocator %d", res.name, addr, res.alloc)

		oldAddr, err := address.ParseIP(addr)
		if err != nil {
			panic(err)
		}
		require.NoError(t, alloc.Free(res.name, oldAddr))
	}

	// Do an Allocate on an existing container & allocator
	// and check we get the right answer.
	allocateAgain := func() {
		stateLock.Lock()
		addrIndex := rand.Int31n(int32(len(addrs)))
		addr := addrs[addrIndex]
		res := state[addr]
		if res.block {
			stateLock.Unlock()
			return
		}
		res.block = true
		state[addr] = res
		stateLock.Unlock()
		alloc := allocs[res.alloc]

		//common.Log.Infof("Asking for %s (%s) on allocator %d again", res.name, addr, res.alloc)

		newAddr, _ := alloc.SimplyAllocate(res.name, subnet)
		oldAddr, _ := address.ParseIP(addr)
		if newAddr != oldAddr {
			panic(fmt.Sprintf("Got different address for repeat request for %s: %s != %s", res.name, newAddr, oldAddr))
		}

		stateLock.Lock()
		res.block = false
		state[addr] = res
		stateLock.Unlock()
	}

	// Claim a random address for a unique container name - may not succeed
	claim := func(name string) {
		if !bumpPending() {
			return
		}
		allocIndex := rand.Int31n(nodes)
		addressIndex := rand.Int31n(int32(subnet.Size()))
		alloc := allocs[allocIndex]
		addr := address.Add(subnet.Addr, address.Offset(addressIndex))
		err := alloc.SimplyClaim(name, address.MakeCIDR(subnet, addr))
		if err == nil {
			noteAllocation(allocIndex, name, addr)
		}
	}

	// Run function _f_ _iterations_ times, in _concurrency_
	// number of goroutines
	doConcurrentIterations := func(iterations int, f func(int)) {
		iterationsPerThread := iterations / concurrency

		wg := sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				for k := 0; k < iterationsPerThread; k++ {
					f((j * iterationsPerThread) + k)
				}
			}(i)
		}
		wg.Wait()
	}

	// First pass, just allocate a bunch of ips
	doConcurrentIterations(firstpass, func(iteration int) {
		name := fmt.Sprintf("first%d", iteration)
		allocate(name)
	})

	// Second pass, random ask for more allocations,
	// or remove existing ones, or ask for allocation
	// again.
	doConcurrentIterations(secondpass, func(iteration int) {
		r := rand.Float32()
		switch {
		case 0.0 <= r && r < 0.4:
			// Ask for a new allocation
			name := fmt.Sprintf("second%d", iteration)
			allocate(name)

		case (0.4 <= r && r < 0.8):
			// free a random addr
			free()

		case 0.8 <= r && r < 0.95:
			// ask for an existing name again, check we get same ip
			allocateAgain()

		case 0.95 <= r && r < 1.0:
			name := fmt.Sprintf("second%d", iteration)
			claim(name)
		}
	})
}
Example #6
// HandleHTTP wires up the IPAM HTTP endpoints to the provided mux.
func (alloc *Allocator) HandleHTTP(router *mux.Router, defaultSubnet address.CIDR, dockerCli *docker.Client) {
	router.Methods("PUT").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		noErrorOnUnknown := r.FormValue("noErrorOnUnknown") == "true"
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Claim(ident, ip, noErrorOnUnknown); err != nil {
			badRequest(w, fmt.Errorf("Unable to claim: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("GET").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		if subnet, ok := parseCIDR(w, vars["ip"]+"/"+vars["prefixlen"]); ok {
			addr, err := alloc.Lookup(vars["id"], subnet.HostRange())
			if err != nil {
				http.NotFound(w, r)
				return
			}
			fmt.Fprintf(w, "%s/%d", addr, subnet.PrefixLen)
		}
	})

	router.Methods("GET").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		addr, err := alloc.Lookup(mux.Vars(r)["id"], defaultSubnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, "%s/%d", addr, defaultSubnet.PrefixLen)
	})

	router.Methods("POST").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		if subnet, ok := parseCIDR(w, vars["ip"]+"/"+vars["prefixlen"]); ok {
			alloc.handleHTTPAllocate(dockerCli, w, vars["id"], r.FormValue("check-alive") == "true", subnet)
		}
	})

	router.Methods("POST").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		alloc.handleHTTPAllocate(dockerCli, w, vars["id"], r.FormValue("check-alive") == "true", defaultSubnet)
	})

	router.Methods("DELETE").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Free(ident, ip); err != nil {
			badRequest(w, fmt.Errorf("Unable to free: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.Delete(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		alloc.Shutdown()
		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.AdminTakeoverRanges(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})
}
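For orientation, a handler set like this is mounted on a plain HTTP server through the gorilla/mux router it receives. A minimal sketch, not taken from the project; the listen address and the nil Docker client are assumptions (the allocator is assumed to tolerate a nil client):

router := mux.NewRouter()
// defaultSubnet is the address.CIDR used when a request does not name a subnet.
alloc.HandleHTTP(router, defaultSubnet, nil)
// *mux.Router implements http.Handler, so it can be served directly.
if err := http.ListenAndServe(":6784", router); err != nil {
	// server failed to start or stopped; handle the error here
}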
Example #7
func (n *Nameserver) HandleHTTP(router *mux.Router) {
	router.Methods("GET").Path("/domain").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, n.domain)
	})

	router.Methods("PUT").Path("/name/{container}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			vars      = mux.Vars(r)
			container = vars["container"]
			ipStr     = vars["ip"]
			hostname  = dns.Fqdn(r.FormValue("fqdn"))
			ip, err   = address.ParseIP(ipStr)
		)
		if err != nil {
			n.badRequest(w, err)
			return
		}

		if err := n.AddEntry(hostname, container, n.ourName, ip); err != nil {
			n.badRequest(w, fmt.Errorf("Unable to add entry: %v", err))
			return
		}

		if r.FormValue("check-alive") == "true" && n.docker.IsContainerNotRunning(container) {
			n.infof("container '%s' is not running: removing", container)
			if err := n.Delete(hostname, container, ipStr, ip); err != nil {
				n.infof("failed to remove: %v", err)
			}
		}

		w.WriteHeader(204)
	})

	deleteHandler := func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)

		hostname := r.FormValue("fqdn")
		if hostname == "" {
			hostname = "*"
		} else {
			hostname = dns.Fqdn(hostname)
		}

		container, ok := vars["container"]
		if !ok {
			container = "*"
		}

		ipStr, ok := vars["ip"]
		ip, err := address.ParseIP(ipStr)
		if ok && err != nil {
			n.badRequest(w, err)
			return
		} else if !ok {
			ipStr = "*"
		}

		if err := n.Delete(hostname, container, ipStr, ip); err != nil {
			n.badRequest(w, fmt.Errorf("Unable to delete entries: %v", err))
			return
		}
		w.WriteHeader(204)
	}
	router.Methods("DELETE").Path("/name/{container}/{ip}").HandlerFunc(deleteHandler)
	router.Methods("DELETE").Path("/name/{container}").HandlerFunc(deleteHandler)
	router.Methods("DELETE").Path("/name").HandlerFunc(deleteHandler)

	router.Methods("GET").Path("/name").Headers("Accept", "application/json").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n.RLock()
		defer n.RUnlock()
		if err := json.NewEncoder(w).Encode(n.entries); err != nil {
			n.badRequest(w, fmt.Errorf("Error marshalling response: %v", err))
		}
	})
}
Example #8
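// ip parses s with address.ParseIP and ignores any error, so callers are
// expected to pass well-formed literals; a malformed one yields the zero address.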
func ip(s string) address.Address {
	addr, _ := address.ParseIP(s)
	return addr
}
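Since the helper above swallows the parse error, a bad literal silently becomes the zero address. A stricter variant is sketched below; mustParseIP is a hypothetical name, not part of the project, and it assumes the standard testing package alongside the address package used throughout these examples:

// mustParseIP parses s and fails the test immediately if the literal is
// malformed, instead of returning the zero address.
func mustParseIP(tb testing.TB, s string) address.Address {
	tb.Helper()
	addr, err := address.ParseIP(s)
	if err != nil {
		tb.Fatalf("ParseIP(%q): %v", s, err)
	}
	return addr
}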
Example #9
// HandleHTTP wires up the IPAM HTTP endpoints to the provided mux.
func (alloc *Allocator) HandleHTTP(router *mux.Router, defaultSubnet address.CIDR, dockerCli *docker.Client) {
	router.Methods("PUT").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Claim(ident, ip, closedChan); err != nil {
			badRequest(w, fmt.Errorf("Unable to claim: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("GET").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		cidr := vars["ip"] + "/" + vars["prefixlen"]
		_, subnet, err := address.ParseCIDR(cidr)
		if err != nil {
			badRequest(w, err)
			return
		}
		addr, err := alloc.Lookup(vars["id"], subnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, "%s/%d", addr, subnet.PrefixLen)
	})

	router.Methods("GET").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		addr, err := alloc.Lookup(mux.Vars(r)["id"], defaultSubnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, "%s/%d", addr, defaultSubnet.PrefixLen)
	})

	router.Methods("POST").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		vars := mux.Vars(r)
		ident := vars["id"]
		cidrStr := vars["ip"] + "/" + vars["prefixlen"]
		subnetAddr, cidr, err := address.ParseCIDR(cidrStr)
		if err != nil {
			badRequest(w, err)
			return
		}
		if cidr.Start != subnetAddr {
			badRequest(w, fmt.Errorf("Invalid subnet %s - bits after network prefix are not all zero", cidrStr))
			return
		}
		addr, err := alloc.Allocate(ident, cidr.HostRange(), closedChan)
		if err != nil {
			badRequest(w, fmt.Errorf("Unable to allocate: %s", err))
			return
		}
		if r.FormValue("check-alive") == "true" && dockerCli != nil && dockerCli.IsContainerNotRunning(ident) {
			common.Log.Infof("[allocator] '%s' is not running: freeing %s", ident, addr)
			alloc.Free(ident, addr)
			return
		}

		fmt.Fprintf(w, "%s/%d", addr, cidr.PrefixLen)
	})

	router.Methods("POST").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		closedChan := w.(http.CloseNotifier).CloseNotify()
		ident := mux.Vars(r)["id"]
		newAddr, err := alloc.Allocate(ident, defaultSubnet.HostRange(), closedChan)
		if err != nil {
			badRequest(w, err)
			return
		}
		if r.FormValue("check-alive") == "true" && dockerCli != nil && dockerCli.IsContainerNotRunning(ident) {
			common.Log.Infof("[allocator] '%s' is not running: freeing %s", ident, newAddr)
			alloc.Free(ident, newAddr)
			return
		}

		fmt.Fprintf(w, "%s/%d", newAddr, defaultSubnet.PrefixLen)
	})

	router.Methods("DELETE").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Free(ident, ip); err != nil {
			badRequest(w, fmt.Errorf("Unable to free: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.Delete(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		alloc.Shutdown()
		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.AdminTakeoverRanges(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})
}
Example #10
// HandleHTTP wires up the IPAM HTTP endpoints to the provided mux.
func (alloc *Allocator) HandleHTTP(router *mux.Router, defaultSubnet address.CIDR, tracker string, dockerCli *docker.Client) {
	router.Methods("GET").Path("/ipinfo/defaultsubnet").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "%s", defaultSubnet)
	})

	router.Methods("PUT").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		if cidr, ok := parseCIDR(w, vars["ip"]+"/"+vars["prefixlen"], false); ok {
			ident := vars["id"]
			checkAlive := r.FormValue("check-alive") == "true"
			noErrorOnUnknown := r.FormValue("noErrorOnUnknown") == "true"
			alloc.handleHTTPClaim(dockerCli, w, ident, cidr, checkAlive, noErrorOnUnknown)
		}
	})

	router.Methods("GET").Path("/ring").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		alloc.Prime()
	})

	router.Methods("GET").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		if subnet, ok := parseCIDR(w, vars["ip"]+"/"+vars["prefixlen"], true); ok {
			cidrs, err := alloc.Lookup(vars["id"], subnet.HostRange())
			if err != nil {
				http.NotFound(w, r)
				return
			}
			writeAddresses(w, cidrs)
		}
	})

	router.Methods("GET").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		addrs, err := alloc.Lookup(mux.Vars(r)["id"], defaultSubnet.HostRange())
		if err != nil {
			http.NotFound(w, r)
			return
		}
		writeAddresses(w, addrs)
	})

	router.Methods("POST").Path("/ip/{id}/{ip}/{prefixlen}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		if subnet, ok := parseCIDR(w, vars["ip"]+"/"+vars["prefixlen"], true); ok {
			alloc.handleHTTPAllocate(dockerCli, w, vars["id"], r.FormValue("check-alive") == "true", subnet)
		}
	})

	router.Methods("POST").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		alloc.handleHTTPAllocate(dockerCli, w, vars["id"], r.FormValue("check-alive") == "true", defaultSubnet)
	})

	router.Methods("DELETE").Path("/ip/{id}/{ip}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		ident := vars["id"]
		ipStr := vars["ip"]
		if ip, err := address.ParseIP(ipStr); err != nil {
			badRequest(w, err)
			return
		} else if err := alloc.Free(ident, ip); err != nil {
			badRequest(w, fmt.Errorf("Unable to free: %s", err))
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/ip/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		if err := alloc.Delete(ident); err != nil {
			badRequest(w, err)
			return
		}

		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		alloc.Shutdown()
		w.WriteHeader(204)
	})

	router.Methods("DELETE").Path("/peer/{id}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ident := mux.Vars(r)["id"]
		transferred := alloc.AdminTakeoverRanges(ident)
		fmt.Fprintf(w, "%d IPs taken over from %s\n", transferred, ident)
	})

	router.Methods("GET").Path("/ipinfo/tracker").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, tracker)
	})
}