// TestRepairEmpty verifies that an IP held only in a stale snapshot (and not
// referenced by any service) is released after a repair pass.
func TestRepairEmpty(t *testing.T) {
	_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
	previous := ipallocator.NewCIDRRange(cidr)
	previous.Allocate(net.ParseIP("192.168.1.10"))

	var dst api.RangeAllocation
	err := previous.Snapshot(&dst)
	if err != nil {
		t.Fatal(err)
	}

	registry := registrytest.NewServiceRegistry()
	ipregistry := &mockRangeRegistry{
		item: &api.RangeAllocation{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
			},
			Range: dst.Range,
			Data:  dst.Data,
		},
	}
	r := NewRepair(0, registry, cidr, ipregistry)
	if err := r.RunOnce(); err != nil {
		t.Fatal(err)
	}
	after := ipallocator.NewCIDRRange(cidr)
	if err := after.Restore(cidr, ipregistry.updated.Data); err != nil {
		t.Fatal(err)
	}
	if after.Has(net.ParseIP("192.168.1.10")) {
		t.Errorf("unexpected ipallocator state: %#v", after)
	}
}
// TestRepairWithExisting verifies that cluster IPs referenced by existing
// services are re-registered, while out-of-range, empty, duplicate, and
// headless entries are skipped.
func TestRepairWithExisting(t *testing.T) {
	_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
	previous := ipallocator.NewCIDRRange(cidr)

	var dst api.RangeAllocation
	err := previous.Snapshot(&dst)
	if err != nil {
		t.Fatal(err)
	}

	registry := registrytest.NewServiceRegistry()
	registry.List = api.ServiceList{
		Items: []api.Service{
			{
				Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
			},
			{
				Spec: api.ServiceSpec{ClusterIP: "192.168.1.100"},
			},
			{ // outside CIDR, will be dropped
				Spec: api.ServiceSpec{ClusterIP: "192.168.0.1"},
			},
			{ // empty, ignored
				Spec: api.ServiceSpec{ClusterIP: ""},
			},
			{ // duplicate, dropped
				Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"},
			},
			{ // headless
				Spec: api.ServiceSpec{ClusterIP: "None"},
			},
		},
	}

	ipregistry := &mockRangeRegistry{
		item: &api.RangeAllocation{
			ObjectMeta: api.ObjectMeta{
				ResourceVersion: "1",
			},
			Range: dst.Range,
			Data:  dst.Data,
		},
	}
	r := NewRepair(0, registry, cidr, ipregistry)
	if err := r.RunOnce(); err != nil {
		t.Fatal(err)
	}
	after := ipallocator.NewCIDRRange(cidr)
	if err := after.Restore(cidr, ipregistry.updated.Data); err != nil {
		t.Fatal(err)
	}
	if !after.Has(net.ParseIP("192.168.1.1")) || !after.Has(net.ParseIP("192.168.1.100")) {
		t.Errorf("unexpected ipallocator state: %#v", after)
	}
	if after.Free() != 252 {
		t.Errorf("unexpected ipallocator state: %#v", after)
	}
}
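Both repair tests above rely on a mockRangeRegistry fake that is not shown in this excerpt. A minimal sketch of such a fake, assuming it only needs to hand back a canned RangeAllocation from Get and record the value passed to CreateOrUpdate in the updated field the assertions read, could look like this:

// Hypothetical sketch of the fake range registry used by the tests above.
// It records the allocation passed to CreateOrUpdate so assertions can
// inspect ipregistry.updated after RunOnce.
type mockRangeRegistry struct {
	getCalled bool
	item      *api.RangeAllocation
	err       error

	updateCalled bool
	updated      *api.RangeAllocation
	updateErr    error
}

func (r *mockRangeRegistry) Get() (*api.RangeAllocation, error) {
	r.getCalled = true
	return r.item, r.err
}

func (r *mockRangeRegistry) CreateOrUpdate(alloc *api.RangeAllocation) error {
	r.updateCalled = true
	r.updated = alloc
	return r.updateErr
}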
// NewTestREST builds a service REST storage backed by in-memory registries,
// a test CIDR for cluster IPs, and a port allocator for node ports.
func NewTestREST(t *testing.T, endpoints *api.EndpointsList) (*REST, *registrytest.ServiceRegistry) {
	registry := registrytest.NewServiceRegistry()
	endpointRegistry := &registrytest.EndpointRegistry{
		Endpoints: endpoints,
	}
	r := ipallocator.NewCIDRRange(makeIPNet(t))

	portRange := util.PortRange{Base: 30000, Size: 1000}
	portAllocator := portallocator.NewPortAllocator(portRange)

	storage := NewStorage(registry, endpointRegistry, r, portAllocator, nil)

	return storage, registry
}
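The makeIPNet helper referenced above is not part of this excerpt. One plausible shape for it, assuming the tests only need some small fixed CIDR (the specific range here is an arbitrary placeholder), is:

// Hypothetical helper assumed by NewTestREST: parse a fixed test CIDR and
// fail the test if parsing ever breaks.
func makeIPNet(t *testing.T) *net.IPNet {
	_, ipnet, err := net.ParseCIDR("1.2.3.0/24")
	if err != nil {
		t.Error(err)
	}
	return ipnet
}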
// RunOnce verifies the state of the cluster IP allocations and returns an
// error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no IP is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had IPs allocated but not yet been created.
	// See #8295

	// If the etcd server is not running we should wait for some time and fail only then. This is
	// particularly important when we start apiserver and etcd at the same time.
	var latest *api.RangeAllocation
	var err error
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		latest, err = c.alloc.Get()
		return err == nil, err
	})
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	r := ipallocator.NewCIDRRange(c.network)
	for _, svc := range list.Items {
		if !api.IsServiceIPSet(&svc) {
			continue
		}
		ip := net.ParseIP(svc.Spec.ClusterIP)
		if ip == nil {
			// cluster IP is broken, reallocate
			util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.ClusterIP, svc.Name, svc.Namespace))
			continue
		}
		switch err := r.Allocate(ip); err {
		case nil:
		case ipallocator.ErrAllocated:
			// TODO: send event
			// cluster IP is broken, reallocate
			util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
		case ipallocator.ErrNotInRange:
			// TODO: send event
			// cluster IP is broken, reallocate
			util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
		case ipallocator.ErrFull:
			// TODO: send event
			return fmt.Errorf("the service CIDR %v is full; you must widen the CIDR in order to create new services", r)
		default:
			return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
		}
	}

	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}
	return nil
}
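RunOnce performs a single reconciliation pass. A caller that wants continuous repair could drive it on an interval; the loop below is an illustrative sketch (the interval, stop channel, and function name are assumptions, not the actual apiserver wiring):

// Illustrative only: run the repair pass periodically, logging failures via
// util.HandleError rather than aborting, so a transient etcd hiccup does not
// stop reconciliation.
func runRepairLoop(c *Repair, interval time.Duration, stopCh <-chan struct{}) {
	for {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
		select {
		case <-stopCh:
			return
		case <-time.After(interval):
		}
	}
}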