func addVLANs(c *Cluster, p *pb.Cluster) { for _, v := range p.Vlan { h := protoToHost(v.Host) c.AddVLAN(&seesaw.VLAN{ ID: uint16(*v.VlanId), Host: h, BackendCount: make(map[seesaw.AF]uint), VIPCount: make(map[seesaw.AF]uint), }) } // Determine number of backend and VIP addresses in each VLAN. backends := make(map[seesaw.IP]bool) vips := make(map[seesaw.IP]bool) for _, vs := range p.Vserver { host := protoToHost(vs.GetEntryAddress()) if host.IPv4Addr != nil { vips[seesaw.NewIP(host.IPv4Addr)] = true } if host.IPv6Addr != nil { vips[seesaw.NewIP(host.IPv6Addr)] = true } for _, backend := range vs.Backend { host := protoToHost(backend.GetHost()) if host.IPv4Addr != nil { backends[seesaw.NewIP(host.IPv4Addr)] = true } if host.IPv6Addr != nil { backends[seesaw.NewIP(host.IPv6Addr)] = true } } } for _, v := range c.VLANs { for bip := range backends { if v.IPv4Net().Contains(bip.IP()) { v.BackendCount[seesaw.IPv4]++ } if v.IPv6Net().Contains(bip.IP()) { v.BackendCount[seesaw.IPv6]++ } } for vip := range vips { if v.IPv4Net().Contains(vip.IP()) { v.VIPCount[seesaw.IPv4]++ } if v.IPv6Net().Contains(vip.IP()) { v.VIPCount[seesaw.IPv6]++ } } } }
// expandFWMServices returns a list of services that have been expanded from the // vserver configuration for a firewall mark based vserver. func (v *vserver) expandFWMServices() map[serviceKey]*service { svcs := make(map[serviceKey]*service) for _, af := range seesaw.AFs() { var ip net.IP switch af { case seesaw.IPv4: ip = v.config.Host.IPv4Addr case seesaw.IPv6: ip = v.config.Host.IPv6Addr } if ip == nil { continue } // Persistence, etc., is stored in the VserverEntry. For FWM services, these // values must be the same for all VserverEntries, so just use the first // one. var ventry *config.VserverEntry for _, entry := range v.config.Entries { ventry = entry break } if v.fwm[af] == 0 { mark, err := v.engine.fwmAlloc.get() if err != nil { log.Fatalf("%v: failed to get mark: %v", v, err) } v.fwm[af] = mark } svc := &service{ serviceKey: serviceKey{ af: af, fwm: v.fwm[af], }, ip: seesaw.NewIP(ip), ventry: ventry, vserver: v, stats: &seesaw.ServiceStats{}, } svc.ipvsSvc = svc.ipvsService() svcs[svc.serviceKey] = svc } return svcs }
// expandServices returns a list of services that have been expanded from the // vserver configuration. func (v *vserver) expandServices() map[serviceKey]*service { if v.config.UseFWM { return v.expandFWMServices() } svcs := make(map[serviceKey]*service) for _, af := range seesaw.AFs() { var ip net.IP switch af { case seesaw.IPv4: ip = v.config.Host.IPv4Addr case seesaw.IPv6: ip = v.config.Host.IPv6Addr } if ip == nil { continue } for _, entry := range v.config.Entries { svc := &service{ serviceKey: serviceKey{ af: af, proto: entry.Proto, port: entry.Port, }, ip: seesaw.NewIP(ip), ventry: entry, vserver: v, dests: make(map[destinationKey]*destination, 0), stats: &seesaw.ServiceStats{}, } svc.ipvsSvc = svc.ipvsService() svcs[svc.serviceKey] = svc } } return svcs }
// TestWatermarks exercises high/low watermark handling for a vserver:
// a service should come up only once the fraction of healthy backends
// reaches the high watermark, and go down once it drops below the low
// watermark. It also checks the edge condition where high == low, and a
// five-backend configuration where the tipping points are one and two
// healthy backends respectively.
func TestWatermarks(t *testing.T) {
	vserver := newTestVserver(nil)
	// Copy vserverConfig and give every entry a high watermark of 0.51 and a
	// low watermark of 0.49, so with two backends both must be healthy to
	// come up (1/2 = 0.5 < 0.51) and losing one takes the service down.
	vsConfig := vserverConfig
	entriesCopy := make(map[string]*config.VserverEntry)
	vsConfig.Entries = entriesCopy
	for k, vse := range vserverConfig.Entries {
		vseCopy := *vse
		vseCopy.HighWatermark = 0.51
		vseCopy.LowWatermark = 0.49
		vsConfig.Entries[k] = &vseCopy
	}
	vserver.handleConfigUpdate(&vsConfig)

	// Both backends must be healthy for the service to come up.
	// Mark only backend1's checks (both address families) healthy first.
	ipv4 := seesaw.NewIP(backend1.IPv4Addr)
	ipv6 := seesaw.NewIP(backend1.IPv6Addr)
	for _, c := range vserver.checks {
		if c.key.backendIP.Equal(ipv4) || c.key.backendIP.Equal(ipv6) {
			n := &checkNotification{key: c.key, status: statusHealthy}
			vserver.handleCheckNotification(n)
		}
	}
	// Only one of two backends is healthy (0.5 < 0.51): still down.
	for _, err := range checkAllDown(vserver) {
		t.Error(err)
	}
	// Now mark every check healthy; the service should come up.
	for _, c := range vserver.checks {
		n := &checkNotification{key: c.key, status: statusHealthy}
		vserver.handleCheckNotification(n)
	}
	for _, err := range checkAllUp(vserver) {
		t.Error(err)
	}

	// One backend goes down, one stays up, service should still be up.
	// (0.5 is still above the low watermark of 0.49.)
	for _, c := range vserver.checks {
		if c.key.backendIP.Equal(ipv4) || c.key.backendIP.Equal(ipv6) {
			n := &checkNotification{key: c.key, status: statusUnhealthy}
			vserver.handleCheckNotification(n)
		}
	}
	for _, err := range checkAllUp(vserver) {
		t.Error(err)
	}
	// Remaining dests go unhealthy, service should go down.
	for _, c := range vserver.checks {
		if !c.key.backendIP.Equal(ipv4) && !c.key.backendIP.Equal(ipv6) {
			n := &checkNotification{key: c.key, status: statusUnhealthy}
			vserver.handleCheckNotification(n)
		}
	}
	for _, err := range checkAllDown(vserver) {
		t.Error(err)
	}

	// Check edge condition: high/low watermark = 50% and one backend becomes
	// healthy => service should come up.
	vserver = newTestVserver(nil)
	for k, vse := range vserverConfig.Entries {
		vseCopy := *vse
		vseCopy.LowWatermark = 0.5
		vseCopy.HighWatermark = vseCopy.LowWatermark
		vsConfig.Entries[k] = &vseCopy
	}
	vserver.handleConfigUpdate(&vsConfig)
	// One healthy backend out of two exactly meets the 0.5 high watermark.
	for _, c := range vserver.checks {
		if c.key.backendIP.Equal(ipv4) || c.key.backendIP.Equal(ipv6) {
			n := &checkNotification{key: c.key, status: statusHealthy}
			vserver.handleCheckNotification(n)
		}
	}
	for _, err := range checkAllUp(vserver) {
		t.Error(err)
	}

	// More than two backends...
	// Five backends with watermarks at 0.4: up requires >= 2 healthy
	// (2/5 = 0.4), down happens once only 1 remains (1/5 = 0.2 < 0.4).
	vserver = newTestVserver(nil)
	vsConfig = vserverConfig
	vsConfig.Entries = entriesCopy
	for k, vse := range vserverConfig.Entries {
		vseCopy := *vse
		vseCopy.LowWatermark = 0.4
		vseCopy.HighWatermark = vseCopy.LowWatermark
		vsConfig.Entries[k] = &vseCopy
	}
	vsConfig.Backends = map[string]*seesaw.Backend{
		backend1.Hostname: backend1,
		backend2.Hostname: backend2,
		backend3.Hostname: backend3,
		backend4.Hostname: backend4,
		backend5.Hostname: backend5,
	}
	vserver.handleConfigUpdate(&vsConfig)
	for _, c := range vserver.checks {
		n := &checkNotification{key: c.key, status: statusHealthy}
		vserver.handleCheckNotification(n)
	}
	for _, err := range checkAllUp(vserver) {
		t.Error(err)
	}
	t.Logf("All backends healthy, vserver has been brought up")

	// Now take them down one at a time, the tipping point should be
	// when we only have one remaining backend.
	// NOTE(review): map iteration order is random, but since every backend is
	// equivalent here only the running count i matters.
	i := len(vsConfig.Backends)
	for _, b := range vsConfig.Backends {
		t.Logf("Taking down %s (%d)", b.Hostname, i)
		// Fail every check (both address families) for this backend.
		for _, c := range vserver.checks {
			if b.IPv4Addr.Equal(c.key.backendIP.IP()) || b.IPv6Addr.Equal(c.key.backendIP.IP()) {
				n := &checkNotification{key: c.key, status: statusUnhealthy}
				vserver.handleCheckNotification(n)
			}
		}
		i--
		var errs []error
		if i <= 1 {
			t.Logf("Expecting vserver to be down")
			errs = checkAllDown(vserver)
		} else {
			t.Logf("Expecting vserver to be up")
			errs = checkAllUp(vserver)
		}
		for _, err := range errs {
			t.Error(err)
		}
	}

	// Now bring them up again one at a time, the tipping point should be
	// when we have at least two healthy backends.
	i = 1
	for _, b := range vsConfig.Backends {
		t.Logf("Bringing up %s (%d)", b.Hostname, i)
		for _, c := range vserver.checks {
			if b.IPv4Addr.Equal(c.key.backendIP.IP()) || b.IPv6Addr.Equal(c.key.backendIP.IP()) {
				n := &checkNotification{key: c.key, status: statusHealthy}
				vserver.handleCheckNotification(n)
			}
		}
		var errs []error
		if i <= 1 {
			t.Logf("Expecting vserver to be down")
			errs = checkAllDown(vserver)
		} else {
			t.Logf("Expecting vserver to be up")
			errs = checkAllUp(vserver)
		}
		i++
		for _, err := range errs {
			t.Error(err)
		}
	}
}
// newDestinationKey returns a new destinationKey. func newDestinationKey(ip net.IP) destinationKey { return destinationKey{seesaw.NewIP(ip)} }
"HTTP/16767_0": { Name: "HTTP/16767_0", Mode: seesaw.HCModeDSR, Type: seesaw.HCTypeHTTPS, Port: 16767, Interval: time.Duration(10 * time.Second), // protobuf default Timeout: time.Duration(5 * time.Second), // protobuf default TLSVerify: false, Send: "/healthz", Receive: "Ok", Code: 200, }, }, map[string]*seesaw.VIP{ "192.168.255.1 (Anycast)": { seesaw.NewIP(net.ParseIP("192.168.255.1")), seesaw.AnycastVIP, }, }, true, false, nil, }, "dns.resolver@au-syd": { "dns.resolver@au-syd", seesaw.Host{ Hostname: "dns-vip1.example.com.", IPv4Addr: net.ParseIP("192.168.36.1").To4(), IPv4Mask: net.CIDRMask(26, 32), IPv6Addr: net.ParseIP("2015:cafe:36::a800:1ff:ffee:dd01"), IPv6Mask: net.CIDRMask(64, 128),