func getNewCaches(localStates peer.CRStatesThreadsafe, monitorConfigTS TrafficMonitorConfigMapThreadsafe) map[enum.CacheName]struct{} {
	monitorConfig := monitorConfigTS.Get()
	caches := map[enum.CacheName]struct{}{}
	for cacheName := range localStates.GetCaches() {
		// ONLINE and OFFLINE caches are not polled.
		// TODO add a function IsPolled() which can be called by this and the monitorConfig func which sets the polling, to prevent updating in one place breaking the other.
		if ts, ok := monitorConfig.TrafficServer[string(cacheName)]; !ok || ts.Status == "ONLINE" || ts.Status == "OFFLINE" {
			continue
		}
		caches[cacheName] = struct{}{}
	}
	return caches
}
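// isPolled is a minimal sketch of the IsPolled() helper the TODO above suggests:
// one shared predicate for "is this server actively health-polled", so the rule
// cannot drift between getNewCaches and monitorConfigListen. This helper is
// illustrative only; it is not part of the existing codebase.
func isPolled(status string) bool {
	// ONLINE and OFFLINE servers are reported as-is and never polled.
	return status != "ONLINE" && status != "OFFLINE"
}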
// TODO timing, and determine if the case, or its internal `for`, should be put in a goroutine
// TODO determine if subscribers take action on change, and change to mutexed objects if not.
func monitorConfigListen(monitorConfigTS TrafficMonitorConfigMapThreadsafe, monitorConfigPollChan <-chan to.TrafficMonitorConfigMap, localStates peer.CRStatesThreadsafe, statUrlSubscriber chan<- poller.HttpPollerConfig, healthUrlSubscriber chan<- poller.HttpPollerConfig, peerUrlSubscriber chan<- poller.HttpPollerConfig, cfg config.Config, staticAppData StaticAppData) {
	for {
		select {
		case monitorConfig := <-monitorConfigPollChan:
			monitorConfigTS.Set(monitorConfig)
			healthUrls := map[string]string{}
			statUrls := map[string]string{}
			peerUrls := map[string]string{}
			caches := map[string]string{}

			for _, srv := range monitorConfig.TrafficServer {
				caches[srv.HostName] = srv.Status
				cacheName := enum.CacheName(srv.HostName)
				if srv.Status == "ONLINE" {
					localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: true})
					continue
				}
				if srv.Status == "OFFLINE" {
					localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: false})
					continue
				}
				// seed states with available = false until our polling cycle picks up a result
				if _, exists := localStates.Get().Caches[cacheName]; !exists {
					localStates.SetCache(cacheName, peer.IsAvailable{IsAvailable: false})
				}
				url := monitorConfig.Profile[srv.Profile].Parameters.HealthPollingURL
				r := strings.NewReplacer(
					"${hostname}", srv.FQDN,
					"${interface_name}", srv.InterfaceName,
					"application=system", "application=plugin.remap",
					"application=", "application=plugin.remap",
				)
				url = r.Replace(url)
				healthUrls[srv.HostName] = url
				r = strings.NewReplacer("application=plugin.remap", "application=")
				url = r.Replace(url)
				statUrls[srv.HostName] = url
			}

			for _, srv := range monitorConfig.TrafficMonitor {
				if srv.HostName == staticAppData.Hostname {
					continue
				}
				if srv.Status != "ONLINE" {
					continue
				}
				// TODO: the URL should be config driven. -jse
				url := fmt.Sprintf("http://%s:%d/publish/CrStates?raw", srv.IP, srv.Port)
				peerUrls[srv.HostName] = url
			}

			statUrlSubscriber <- poller.HttpPollerConfig{Urls: statUrls, Interval: cfg.CacheStatPollingInterval}
			healthUrlSubscriber <- poller.HttpPollerConfig{Urls: healthUrls, Interval: cfg.CacheHealthPollingInterval}
			peerUrlSubscriber <- poller.HttpPollerConfig{Urls: peerUrls, Interval: cfg.PeerPollingInterval}

			for cacheName := range localStates.GetCaches() {
				if _, exists := monitorConfig.TrafficServer[string(cacheName)]; !exists {
					log.Warnf("Removing %s from localStates", cacheName)
					localStates.DeleteCache(cacheName)
				}
			}

			// TODO because there are multiple writers to localStates.DeliveryService, there is a race condition, where MonitorConfig (this func) and HealthResultManager could write at the same time, and the HealthResultManager could overwrite a delivery service addition or deletion here. Probably the simplest and most performant fix would be a lock-free algorithm using atomic compare-and-swaps.
			for _, ds := range monitorConfig.DeliveryService {
				// since caches default to unavailable, also default DS false
				if _, exists := localStates.Get().Deliveryservice[enum.DeliveryServiceName(ds.XMLID)]; !exists {
					// important to initialize DisabledLocations, so JSON is `[]` not `null`
					localStates.SetDeliveryService(enum.DeliveryServiceName(ds.XMLID), peer.Deliveryservice{IsAvailable: false, DisabledLocations: []enum.CacheName{}})
				}
			}
			for ds := range localStates.Get().Deliveryservice {
				if _, exists := monitorConfig.DeliveryService[string(ds)]; !exists {
					localStates.DeleteDeliveryService(ds)
				}
			}
		}
	}
}
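// A minimal sketch of the lock-free fix suggested in the race-condition TODO
// above, assuming a hypothetical CRStatesThreadsafe backed by an atomic pointer
// with Load and CompareAndSwap (e.g. sync/atomic's Pointer in Go 1.19+). Each
// writer copies the current snapshot, applies its one change, and publishes only
// if no other writer got there first; otherwise it retries. Names and fields
// here are illustrative, not the existing API:
//
//	func (t *CRStatesThreadsafe) setDeliveryServiceCAS(name enum.DeliveryServiceName, ds peer.Deliveryservice) {
//		for {
//			old := t.states.Load()          // snapshot the current states
//			next := old.Copy()              // copy-on-write: never mutate a published snapshot
//			next.Deliveryservice[name] = ds // apply this writer's single change
//			if t.states.CompareAndSwap(old, next) {
//				return // published; no concurrent writer interfered
//			}
//			// another writer won the race; retry against the new snapshot
//		}
//	}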