Code Example #1
// TODO remove change subscribers, give Threadsafes directly to the things that need them. If they only set vars, and don't actually do work on change.
func opsConfigManagerListen(opsConfig OpsConfigThreadsafe, opsConfigChannel <-chan handler.OpsConfig, dr chan<- http_server.DataRequest, toSession towrap.ITrafficOpsSession, toData todata.TODataThreadsafe, opsConfigChangeSubscribers []chan<- handler.OpsConfig, toChangeSubscribers []chan<- towrap.ITrafficOpsSession) {
	httpServer := http_server.Server{}

	errorCount := 0 // TODO make threadsafe and a pointer to errorcount in the main manager?
	for {
		select {
		case newOpsConfig := <-opsConfigChannel:
			var err error
			opsConfig.Set(newOpsConfig)

			listenAddress := ":80" // default

			if newOpsConfig.HttpListener != "" {
				listenAddress = newOpsConfig.HttpListener
			}

			handleErr := func(err error) {
				errorCount++
				log.Errorf("OpsConfigManager: %v\n", err)
			}

			err = httpServer.Run(dr, listenAddress)
			if err != nil {
				handleErr(fmt.Errorf("MonitorConfigPoller: error creating HTTP server: %s\n", err))
				continue
			}

			realToSession, err := to.Login(newOpsConfig.Url, newOpsConfig.Username, newOpsConfig.Password, newOpsConfig.Insecure)
			if err != nil {
				handleErr(fmt.Errorf("MonitorConfigPoller: error instantiating Session with traffic_ops: %s\n", err))
				continue
			}
			toSession = towrap.NewTrafficOpsSessionThreadsafe(realToSession)

			if err := toData.Fetch(toSession, newOpsConfig.CdnName); err != nil {
				handleErr(fmt.Errorf("Error getting Traffic Ops data: %v\n", err))
				continue
			}

			// These must be in a goroutine, because the monitorConfigPoller tick sends to a channel this select listens for. Thus, if we block on sends to the monitorConfigPoller, we have a livelock race condition.
			// More generically, we're using goroutines as an infinite chan buffer, to avoid potential livelocks
			for _, subscriber := range opsConfigChangeSubscribers {
				subscriber := subscriber // re-bind the loop variable: before Go 1.22, every closure would otherwise share (and likely send to) the final subscriber
				go func() {
					subscriber <- newOpsConfig // this is needed for cdnName
				}()
			}
			for _, subscriber := range toChangeSubscribers {
				subscriber := subscriber // re-bind for the same reason
				go func() {
					subscriber <- toSession
				}()
			}
		}
	}
}
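The fan-out at the end of opsConfigManagerListen is worth isolating. Below is a minimal, self-contained sketch (hypothetical names, not part of the code above) of the same "goroutine as an infinite channel buffer" pattern, including the loop-variable re-binding the fix above relies on:

package main

import "fmt"

// fanOut delivers msg to every subscriber without ever blocking the
// caller: each send gets its own goroutine, so a slow or busy subscriber
// cannot livelock the loop that called fanOut.
func fanOut(subscribers []chan<- string, msg string) {
	for _, subscriber := range subscribers {
		subscriber := subscriber // re-bind so each goroutine sends to its own channel (pre-Go 1.22)
		go func() {
			subscriber <- msg
		}()
	}
}

func main() {
	a, b := make(chan string), make(chan string)
	fanOut([]chan<- string{a, b}, "new config")
	fmt.Println(<-a, <-b)
}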
Code Example #2
func writeSummaryStats(config StartupConfig, statsSummary traffic_ops.StatsSummary) {
	to, err := traffic_ops.Login(config.ToURL, config.ToUser, config.ToPasswd, true)
	if err != nil {
		newErr := fmt.Errorf("Could not store summary stats! Error logging in to %v: %v", config.ToURL, err)
		log.Error(newErr)
		return
	}
	err = to.AddSummaryStats(statsSummary)
	if err != nil {
		log.Error(err)
	}
}
Code Example #3
func writeSummaryStats(config *StartupConfig, statsSummary traffic_ops.StatsSummary) error {
	tm, err := traffic_ops.Login(config.ToUrl, config.ToUser, config.ToPasswd, true)
	if err != nil {
		msg := fmt.Sprintf("Could not store summary stats! Error logging in to %v: %v", config.ToUrl, err)
		log.Error(msg)
		return err
	}
	err = tm.AddSummaryStats(statsSummary)
	if err != nil {
		return err
	}
	return nil
}
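A hypothetical call site for this variant: because the error is returned instead of swallowed (contrast Code Example #2), the retry or alerting policy lives with the caller.

if err := writeSummaryStats(&config, statsSummary); err != nil {
	log.Errorf("writing summary stats to %v: %v", config.ToUrl, err)
}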
Code Example #4
func TestLoginUnauthorized(t *testing.T) {
	server := testHelper.InvalidHTTPServer(http.StatusUnauthorized)
	defer server.Close()

	testHelper.Context(t, "Given the need to test an unsuccessful login to Traffic Ops")

	_, err := client.Login(server.URL, "test", "password", true)
	if err == nil {
		testHelper.Error(t, "Should not be able to login")
	} else {
		testHelper.Success(t, "Should not be able to login")
	}
}
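testHelper.InvalidHTTPServer itself is not shown in these examples. A minimal sketch of such a helper, assuming all it needs to do is answer every request with a fixed status code, built on the standard net/http/httptest package:

import (
	"net/http"
	"net/http/httptest"
)

// invalidHTTPServer answers every request with the given status code,
// e.g. http.StatusUnauthorized for the failed-login test above.
// Callers must Close() the returned server, as the test does.
func invalidHTTPServer(status int) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(status)
	}))
}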
Code Example #5
func getToData(config *StartupConfig, init bool) (TrafOpsData, error) {
	var trafOpsData TrafOpsData
	tm, err := traffic_ops.Login(config.ToUrl, config.ToUser, config.ToPasswd, true)
	if err != nil {
		msg := fmt.Sprintf("Error logging in to %v: %v", config.ToUrl, err)
		if init {
			panic(msg)
		} else {
			log.Error(msg)
			return trafOpsData, err
		}
	}

	servers, err := tm.Servers()
	if err != nil {
		msg := fmt.Sprintf("Error getting server list from %v: %v ", config.ToUrl, err)
		if init {
			panic(msg)
		} else {
			log.Error(msg)
			return trafOpsData, err
		}
	}
	for _, server := range servers {
		if server.Type == "INFLUXDB" && server.Status == "ONLINE" {
			fqdn := server.HostName + "." + server.DomainName
			port, err := strconv.ParseInt(server.TcpPort, 10, 32)
			if err != nil {
				port = 8086 //default port
			}
			trafOpsData.InfluxDbProps = append(trafOpsData.InfluxDbProps, InfluxDbProps{Fqdn: fqdn, Port: port})
		}
	}
	lastSummaryTime, err := tm.SummaryStatsLastUpdated("daily_maxgbps")
	if err != nil {
		errHndlr(err, ERROR)
	}
	trafOpsData.LastSummaryTime = lastSummaryTime
	return trafOpsData, nil
}
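The ParseInt fallback above recurs throughout these examples. A small hypothetical helper that names the pattern:

// portOrDefault parses s as a TCP port, falling back to def (8086, the
// standard InfluxDB port, in the callers above) when the field is empty
// or malformed.
func portOrDefault(s string, def int64) int64 {
	port, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		return def
	}
	return port
}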
Code Example #6
func TestLogin(t *testing.T) {
	resp := client.Result{
		Alerts: []client.Alert{
			client.Alert{
				Level: "success",
				Text:  "Successfully logged in.",
			},
		},
	}

	server := testHelper.ValidHTTPServer(resp)

	testHelper.Context(t, "Given the need to test a successful login to Traffic Ops")

	session, err := client.Login(server.URL, "test", "password", true)
	if err != nil {
		testHelper.Error(t, "Should be able to login")
	} else {
		testHelper.Success(t, "Should be able to login")
	}

	if session.UserName != "test" {
		testHelper.Error(t, "Should get back \"test\" for \"UserName\", got %s", session.UserName)
	} else {
		testHelper.Success(t, "Should get back \"test\" for \"UserName\"")
	}

	if session.Password != "password" {
		testHelper.Error(t, "Should get back \"password\" for \"Password\", got %s", session.Password)
	} else {
		testHelper.Success(t, "Should get back \"password\" for \"Password\"")
	}

	if session.URL != server.URL {
		testHelper.Error(t, "Should get back \"%s\" for \"URL\", got %s", server.URL, session.URL)
	} else {
		testHelper.Success(t, "Should get back \"%s\" for \"URL\"", server.URL)
	}
}
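As in Code Example #4, the test helper is not shown. A sketch of a ValidHTTPServer-style constructor, assuming it only needs to serve the given value as JSON for client.Login to parse:

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
)

// validHTTPServer answers every request with resp encoded as JSON.
func validHTTPServer(resp interface{}) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	}))
}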
Code Example #7
func getToData(config StartupConfig, init bool, configChan chan RunningConfig) {
	var runningConfig RunningConfig
	to, err := traffic_ops.Login(config.ToURL, config.ToUser, config.ToPasswd, true)
	if err != nil {
		msg := fmt.Sprintf("Error logging in to %v: %v", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	servers, err := to.Servers()
	if err != nil {
		msg := fmt.Sprintf("Error getting server list from %v: %v ", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	runningConfig.CacheGroupMap = make(map[string]string)
	for _, server := range servers {
		runningConfig.CacheGroupMap[server.HostName] = server.Location
		if server.Type == "INFLUXDB" && server.Status == "ONLINE" {
			fqdn := server.HostName + "." + server.DomainName
			port, err := strconv.ParseInt(server.TcpPort, 10, 32)
			if err != nil {
				port = 8086 //default port
			}
			runningConfig.InfluxDBProps = append(runningConfig.InfluxDBProps, struct {
				Fqdn string
				Port int64
			}{fqdn, port})
		}
	}

	cacheStatPath := "/publish/CacheStats?hc=1&stats="
	dsStatPath := "/publish/DsStats?hc=1&wildcard=1&stats="
	parameters, err := to.Parameters("TRAFFIC_STATS")
	if err != nil {
		msg := fmt.Sprintf("Error getting parameter list from %v: %v", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	for _, param := range parameters {
		if param.Name == "DsStats" {
			statName := param.Value
			dsStatPath += "," + statName
		} else if param.Name == "CacheStats" {
			cacheStatPath += "," + param.Value
		}
	}
	cacheStatPath = strings.Replace(cacheStatPath, "=,", "=", 1)
	dsStatPath = strings.Replace(dsStatPath, "=,", "=", 1)

	runningConfig.HealthUrls = make(map[string]map[string]string)
	for _, server := range servers {
		if server.Type == "RASCAL" && server.Status == config.StatusToMon {
			cdnName := server.CdnName
			if cdnName == "" {
				log.Error("Unable to find CDN name for " + server.HostName + ".. skipping")
				continue
			}

			if runningConfig.HealthUrls[cdnName] == nil {
				runningConfig.HealthUrls[cdnName] = make(map[string]string)
			}
			url := "http://" + server.IpAddress + cacheStatPath
			runningConfig.HealthUrls[cdnName]["CacheStats"] = url
			url = "http://" + server.IpAddress + dsStatPath
			runningConfig.HealthUrls[cdnName]["DsStats"] = url
		}
	}

	lastSummaryTimeStr, err := to.SummaryStatsLastUpdated("daily_maxgbps")
	if err != nil {
		errHndlr(err, ERROR)
	} else {
		lastSummaryTime, err := time.Parse("2006-01-02 15:04:05", lastSummaryTimeStr)
		if err != nil {
			errHndlr(err, ERROR)
		} else {
			runningConfig.LastSummaryTime = lastSummaryTime
		}
	}

	configChan <- runningConfig
}
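Because this variant reports its result over configChan rather than as a return value, a caller runs it in a goroutine and blocks on the channel. A hypothetical call site:

configChan := make(chan RunningConfig)
go getToData(config, true, configChan)
runningConfig := <-configChan // blocks until Traffic Ops has answered; with init=true a failure panics instead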
Code Example #8
func getToData(config *StartupConfig, init bool) (RunningConfig, error) {
	var runningConfig RunningConfig
	tm, err := traffic_ops.Login(config.ToUrl, config.ToUser, config.ToPasswd, true)
	if err != nil {
		msg := fmt.Sprintf("Error logging in to %v: %v", config.ToUrl, err)
		if init {
			panic(msg)
		} else {
			log.Error(msg)
			return runningConfig, err
		}
	}

	servers, err := tm.Servers()
	if err != nil {
		msg := fmt.Sprintf("Error getting server list from %v: %v ", config.ToUrl, err)
		if init {
			panic(msg)
		} else {
			log.Error(msg)
			return runningConfig, err
		}
	}
	runningConfig.CacheGroupMap = make(map[string]string)
	influxDbProps := make([]InfluxDbProps, 0)
	for _, server := range servers {
		runningConfig.CacheGroupMap[server.HostName] = server.Location
		if server.Type == "INFLUXDB" && server.Status == "ONLINE" {
			fqdn := server.HostName + "." + server.DomainName
			port, err := strconv.ParseInt(server.TcpPort, 10, 32)
			if err != nil {
				port = 8086 //default port
			}
			influxDbProps = append(influxDbProps, InfluxDbProps{Fqdn: fqdn, Port: port})
		}
	}
	runningConfig.InfluxDbProps = influxDbProps

	cacheStatPath := "/publish/CacheStats?hc=1&stats="
	dsStatPath := "/publish/DsStats?hc=1&wildcard=1&stats="
	parameters, err := tm.Parameters("TRAFFIC_STATS")
	if err != nil {
		msg := fmt.Sprintf("Error getting parameter list from %v: %v", config.ToUrl, err)
		if init {
			panic(msg)
		} else {
			log.Error(msg)
			return runningConfig, err
		}
	}
	for _, param := range parameters {
		if param.Name == "DsStats" {
			statName := param.Value
			dsStatPath += "," + statName
		} else if param.Name == "CacheStats" {
			cacheStatPath += "," + param.Value
		}
	}
	cacheStatPath = strings.Replace(cacheStatPath, "=,", "=", 1)
	dsStatPath = strings.Replace(dsStatPath, "=,", "=", 1)

	runningConfig.HealthUrls = make(map[string]map[string]string)
	for _, server := range servers {
		if server.Type == "RASCAL" && server.Status == config.StatusToMon {
			cdnName := ""
			parameters, _ := tm.Parameters(server.Profile)
			for _, param := range parameters {
				if param.Name == "CDN_name" && param.ConfigFile == "rascal-config.txt" {
					cdnName = param.Value
					break
				}
			}

			if cdnName == "" {
				log.Error("Unable to find CDN name for " + server.HostName + ".. skipping")
				continue
			}

			if runningConfig.HealthUrls[cdnName] == nil {
				runningConfig.HealthUrls[cdnName] = make(map[string]string)
			}
			url := "http://" + server.IpAddress + cacheStatPath
			runningConfig.HealthUrls[cdnName]["CacheStats"] = url
			url = "http://" + server.IpAddress + dsStatPath
			runningConfig.HealthUrls[cdnName]["DsStats"] = url
		}
	}
	return runningConfig, nil
}
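The strings.Replace call that cleans up the stat paths is easy to misread: the naive appends leave a stray comma directly after "stats=", and only that first comma should go. A standalone demonstration with illustrative stat names:

package main

import (
	"fmt"
	"strings"
)

func main() {
	path := "/publish/CacheStats?hc=1&stats="
	for _, stat := range []string{"bandwidth", "maxKbps"} {
		path += "," + stat // naive append: "stats=,bandwidth,maxKbps"
	}
	path = strings.Replace(path, "=,", "=", 1) // n=1: only the first "=,"
	fmt.Println(path)                          // /publish/CacheStats?hc=1&stats=bandwidth,maxKbps
}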
Code Example #9
// Note the OpsConfigManager is in charge of the httpServer, because ops config changes trigger server changes. If other things needed to trigger server restarts, the server could be put in its own goroutine with signal channels
func StartOpsConfigManager(
	opsConfigFile string,
	toSession towrap.ITrafficOpsSession,
	toData todata.TODataThreadsafe,
	opsConfigChangeSubscribers []chan<- handler.OpsConfig,
	toChangeSubscribers []chan<- towrap.ITrafficOpsSession,
	localStates peer.CRStatesThreadsafe,
	peerStates peer.CRStatesPeersThreadsafe,
	combinedStates peer.CRStatesThreadsafe,
	statHistory StatHistoryThreadsafe,
	lastStats LastStatsThreadsafe,
	dsStats DSStatsThreadsafe,
	events EventsThreadsafe,
	staticAppData StaticAppData,
	healthPollInterval time.Duration,
	lastHealthDurations DurationMapThreadsafe,
	fetchCount UintThreadsafe,
	healthIteration UintThreadsafe,
	errorCount UintThreadsafe,
	localCacheStatus CacheAvailableStatusThreadsafe,
	unpolledCaches UnpolledCachesThreadsafe,
) OpsConfigThreadsafe {

	opsConfigFileChannel := make(chan interface{})
	opsConfigFilePoller := poller.FilePoller{
		File:          opsConfigFile,
		ResultChannel: opsConfigFileChannel,
	}

	opsConfigChannel := make(chan handler.OpsConfig)
	opsConfigFileHandler := handler.OpsConfigFileHandler{
		ResultChannel:    opsConfigFilePoller.ResultChannel,
		OpsConfigChannel: opsConfigChannel,
	}

	go opsConfigFileHandler.Listen()
	go opsConfigFilePoller.Poll()

	opsConfig := NewOpsConfigThreadsafe()

	// TODO remove change subscribers, give Threadsafes directly to the things that need them. If they only set vars, and don't actually do work on change.
	go func() {
		httpServer := http_server.Server{}

		for {
			select {
			case newOpsConfig := <-opsConfigChannel:
				var err error
				opsConfig.Set(newOpsConfig)

				listenAddress := ":80" // default

				if newOpsConfig.HttpListener != "" {
					listenAddress = newOpsConfig.HttpListener
				}

				handleErr := func(err error) {
					errorCount.Inc()
					log.Errorf("OpsConfigManager: %v\n", err)
				}

				err = httpServer.Run(func(req http_server.DataRequest) ([]byte, int) {
					return DataRequest(
						req,
						opsConfig,
						toSession,
						localStates,
						peerStates,
						combinedStates,
						statHistory,
						dsStats,
						events,
						staticAppData,
						healthPollInterval,
						lastHealthDurations,
						fetchCount,
						healthIteration,
						errorCount,
						toData,
						localCacheStatus,
						lastStats,
						unpolledCaches,
					)
				}, listenAddress)
				if err != nil {
					handleErr(fmt.Errorf("MonitorConfigPoller: error creating HTTP server: %s\n", err))
					continue
				}

				realToSession, err := to.Login(newOpsConfig.Url, newOpsConfig.Username, newOpsConfig.Password, newOpsConfig.Insecure)
				if err != nil {
					handleErr(fmt.Errorf("MonitorConfigPoller: error instantiating Session with traffic_ops: %s\n", err))
					continue
				}
				toSession.Set(realToSession)

				if err := toData.Fetch(toSession, newOpsConfig.CdnName); err != nil {
					handleErr(fmt.Errorf("Error getting Traffic Ops data: %v\n", err))
					continue
				}

				// These must be in a goroutine, because the monitorConfigPoller tick sends to a channel this select listens for. Thus, if we block on sends to the monitorConfigPoller, we have a livelock race condition.
				// More generically, we're using goroutines as an infinite chan buffer, to avoid potential livelocks
				for _, subscriber := range opsConfigChangeSubscribers {
					subscriber := subscriber // re-bind the loop variable: before Go 1.22, every closure would otherwise share (and likely send to) the final subscriber
					go func() {
						subscriber <- newOpsConfig // this is needed for cdnName
					}()
				}
				for _, subscriber := range toChangeSubscribers {
					subscriber := subscriber // re-bind for the same reason
					go func() {
						subscriber <- toSession
					}()
				}
			}
		}
	}()

	return opsConfig
}
Code Example #10
//
// Kicks off the pollers and handlers
//
func Start(opsConfigFile string, staticAppData StaticAppData) {
	var toSession *traffic_ops.Session

	fetchSuccessCounter := gmx.NewCounter("fetchSuccess")
	fetchFailCounter := gmx.NewCounter("fetchFail")
	fetchPendingGauge := gmx.NewGauge("fetchPending")

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}

	sharedClient := http.Client{
		Timeout:   defaultHttpTimeout,
		Transport: tr,
	}

	cacheHealthConfigChannel := make(chan poller.HttpPollerConfig)
	cacheHealthChannel := make(chan cache.Result)
	cacheHealthTick := make(chan uint64)
	cacheHealthPoller := poller.HttpPoller{
		TickChan:      cacheHealthTick,
		ConfigChannel: cacheHealthConfigChannel,
		Config: poller.HttpPollerConfig{
			Interval: defaultCacheHealthPollingInterval,
		},
		Fetcher: fetcher.HttpFetcher{
			Handler: cache.Handler{ResultChannel: cacheHealthChannel},
			Client:  sharedClient,
			Success: fetchSuccessCounter,
			Fail:    fetchFailCounter,
			Pending: fetchPendingGauge,
		},
	}

	cacheStatConfigChannel := make(chan poller.HttpPollerConfig)
	cacheStatChannel := make(chan cache.Result)
	cacheStatPoller := poller.HttpPoller{
		ConfigChannel: cacheStatConfigChannel,
		Config: poller.HttpPollerConfig{
			Interval: defaultCacheStatPollingInterval,
		},
		Fetcher: fetcher.HttpFetcher{
			Handler: cache.Handler{ResultChannel: cacheStatChannel},
			Client:  sharedClient,
			Success: fetchSuccessCounter,
			Fail:    fetchFailCounter,
			Pending: fetchPendingGauge,
		},
	}

	sessionChannel := make(chan *traffic_ops.Session)
	monitorConfigChannel := make(chan traffic_ops.TrafficMonitorConfigMap)
	monitorOpsConfigChannel := make(chan handler.OpsConfig)
	monitorConfigPoller := poller.MonitorConfigPoller{
		Interval:         defaultMonitorConfigPollingInterval,
		SessionChannel:   sessionChannel,
		ConfigChannel:    monitorConfigChannel,
		OpsConfigChannel: monitorOpsConfigChannel,
	}

	opsConfigFileChannel := make(chan interface{})
	opsConfigFilePoller := poller.FilePoller{
		File:          opsConfigFile,
		ResultChannel: opsConfigFileChannel,
	}

	opsConfigChannel := make(chan handler.OpsConfig)
	opsConfigFileHandler := handler.OpsConfigFileHandler{
		ResultChannel:    opsConfigFilePoller.ResultChannel,
		OpsConfigChannel: opsConfigChannel,
	}

	peerConfigChannel := make(chan poller.HttpPollerConfig)
	peerChannel := make(chan peer.Result)
	peerPoller := poller.HttpPoller{
		ConfigChannel: peerConfigChannel,
		Config: poller.HttpPollerConfig{
			Interval: defaultPeerPollingInterval,
		},
		Fetcher: fetcher.HttpFetcher{
			Handler: peer.Handler{ResultChannel: peerChannel},
			Client:  sharedClient,
			Success: fetchSuccessCounter,
			Fail:    fetchFailCounter,
			Pending: fetchPendingGauge,
		},
	}

	go opsConfigFileHandler.Listen()
	go opsConfigFilePoller.Poll()
	go monitorConfigPoller.Poll()
	go cacheHealthPoller.Poll()
	go cacheStatPoller.Poll()
	go peerPoller.Poll()

	dr := make(chan http_server.DataRequest)

	healthHistory := make(map[string][]interface{})
	statHistory := make(map[string][]interface{})

	var opsConfig handler.OpsConfig
	var monitorConfig traffic_ops.TrafficMonitorConfigMap
	localStates := peer.Crstates{Caches: make(map[string]peer.IsAvailable), Deliveryservice: make(map[string]peer.Deliveryservice)}    // this is the local state as discovered by this traffic_monitor
	peerStates := make(map[string]peer.Crstates)                                                                                       // each peer's last state is saved in this map
	combinedStates := peer.Crstates{Caches: make(map[string]peer.IsAvailable), Deliveryservice: make(map[string]peer.Deliveryservice)} // this is the result of combining the localStates and all the peerStates using the var ??

	deliveryServiceServers := map[string][]string{}
	serverTypes := map[string]string{}

	// TODO put stat data in a struct, for brevity
	lastHealthEndTimes := map[string]time.Time{}
	lastHealthDurations := map[string]time.Duration{}
	fetchCount := uint64(0) // note this is the number of individual caches fetched from, not the number of times all the caches were polled.
	healthIteration := uint64(0)
	errorCount := uint64(0)
	events := []Event{}
	eventIndex := uint64(0)
	for {
		select {
		case req := <-dr:
			// NOTE: this function never returns, so a deferred close here would never run;
			// instead req.C is closed explicitly on every exit path of this case.

			var body []byte
			var err error

			switch req.T {
			case http_server.TR_CONFIG:
				if toSession != nil && opsConfig.CdnName != "" {
					body, err = toSession.CRConfigRaw(opsConfig.CdnName)
				}
			case http_server.TR_STATE_DERIVED:
				body, err = peer.CrStatesMarshall(combinedStates)
			case http_server.TR_STATE_SELF:
				body, err = peer.CrStatesMarshall(localStates)
			case http_server.CACHE_STATS:
				// TODO: add support for ?hc=N query param, stats=, wildcard, individual caches
				// add pp and date to the json:
				/*
					pp: "0=[my-ats-edge-cache-1], hc=[1]",
					date: "Thu Oct 09 20:28:36 UTC 2014"
				*/
				params := req.Parameters
				hc := 1
				if _, exists := params["hc"]; exists {
					v, err := strconv.Atoi(params["hc"][0])
					if err == nil {
						hc = v
					}
				}
				body, err = cache.StatsMarshall(statHistory, hc)
			case http_server.DS_STATS:
				body = []byte("TODO implement")
			case http_server.EVENT_LOG:
				body, err = json.Marshal(JSONEvents{Events: events})
			case http_server.PEER_STATES:
				body = []byte("TODO implement")
			case http_server.STAT_SUMMARY:
				body = []byte("TODO implement")
			case http_server.STATS:
				body, err = getStats(staticAppData, cacheHealthPoller.Config.Interval, lastHealthDurations, fetchCount, healthIteration, errorCount)
				if err != nil {
					// TODO send error to client
					errorCount++
					log.Printf("ERROR getting stats %v\n", err)
					close(req.C)
					continue
				}
			case http_server.CONFIG_DOC:
				opsConfigCopy := opsConfig
				// if the password is blank, leave it blank, so callers can see it's missing.
				if opsConfigCopy.Password != "" {
					opsConfigCopy.Password = "******"
				}
				body, err = json.Marshal(opsConfigCopy)
			default:
				body = []byte("TODO error message")
			}
			req.C <- body
			close(req.C)
		case oc := <-opsConfigFileHandler.OpsConfigChannel:
			var err error
			opsConfig = oc

			listenAddress := ":80" // default

			if opsConfig.HttpListener != "" {
				listenAddress = opsConfig.HttpListener
			}

			err = http_server.Run(dr, listenAddress)
			if err != nil {
				errorCount++
				log.Printf("MonitorConfigPoller: error creating HTTP server: %s\n", err)
				continue
			}

			toSession, err = traffic_ops.Login(opsConfig.Url, opsConfig.Username, opsConfig.Password, opsConfig.Insecure)
			if err != nil {
				errorCount++
				log.Printf("MonitorConfigPoller: error instantiating Session with traffic_ops: %s\n", err)
				continue
			}

			deliveryServiceServers, err = getDeliveryServiceServers(toSession, opsConfig.CdnName)
			if err != nil {
				errorCount++
				log.Printf("Error getting delivery service servers from Traffic Ops: %v\n", err)
				continue
			}

			serverTypes, err = getServerTypes(toSession, opsConfig.CdnName)
			if err != nil {
				errorCount++
				log.Printf("Error getting server types from Traffic Ops: %v\n", err)
				continue
			}

			// This must be in a goroutine, because the monitorConfigPoller tick sends to a channel this select listens for. Thus, if we block on sends to the monitorConfigPoller, we have a livelock race condition.
			go func() {
				monitorConfigPoller.OpsConfigChannel <- opsConfig // this is needed for cdnName
				monitorConfigPoller.SessionChannel <- toSession
			}()
		case monitorConfig = <-monitorConfigPoller.ConfigChannel:
			healthUrls := map[string]string{}
			statUrls := map[string]string{}
			peerUrls := map[string]string{}
			caches := map[string]string{}

			for _, srv := range monitorConfig.TrafficServer {
				caches[srv.HostName] = srv.Status

				if srv.Status == "ONLINE" {
					localStates.Caches[srv.HostName] = peer.IsAvailable{IsAvailable: true}
					continue
				}
				if srv.Status == "OFFLINE" {
					localStates.Caches[srv.HostName] = peer.IsAvailable{IsAvailable: false}
					continue
				}
				// seed states with available = false until our polling cycle picks up a result
				if _, exists := localStates.Caches[srv.HostName]; !exists {
					localStates.Caches[srv.HostName] = peer.IsAvailable{IsAvailable: false}
				}

				url := monitorConfig.Profile[srv.Profile].Parameters.HealthPollingURL
				r := strings.NewReplacer(
					"${hostname}", srv.FQDN,
					"${interface_name}", srv.InterfaceName,
					"application=system", "application=plugin.remap",
					"application=", "application=plugin.remap",
				)
				url = r.Replace(url)
				healthUrls[srv.HostName] = url
				r = strings.NewReplacer("application=plugin.remap", "application=")
				url = r.Replace(url)
				statUrls[srv.HostName] = url
			}

			for _, srv := range monitorConfig.TrafficMonitor {
				if srv.Status != "ONLINE" {
					continue
				}
				// TODO: the URL should be config driven. -jse
				url := fmt.Sprintf("http://%s:%d/publish/CrStates?raw", srv.IP, srv.Port)
				peerUrls[srv.HostName] = url
			}

			cacheStatPoller.ConfigChannel <- poller.HttpPollerConfig{Urls: statUrls, Interval: defaultCacheStatPollingInterval}
			cacheHealthPoller.ConfigChannel <- poller.HttpPollerConfig{Urls: healthUrls, Interval: defaultCacheHealthPollingInterval}
			peerPoller.ConfigChannel <- poller.HttpPollerConfig{Urls: peerUrls, Interval: defaultPeerPollingInterval}

			for k := range localStates.Caches {
				_, exists := monitorConfig.TrafficServer[k]

				if !exists {
					fmt.Printf("Warning: removing %s from localStates", k)
					delete(localStates.Caches, k)
				}
			}

			addStateDeliveryServices(monitorConfig, localStates.Deliveryservice)
		case i := <-cacheHealthTick:
			healthIteration = i
		case healthResult := <-cacheHealthChannel:
			fetchCount++
			var prevResult cache.Result
			if len(healthHistory[healthResult.Id]) != 0 {
				prevResult = healthHistory[healthResult.Id][len(healthHistory[healthResult.Id])-1].(cache.Result)
			}
			health.GetVitals(&healthResult, &prevResult, &monitorConfig)
			healthHistory[healthResult.Id] = pruneHistory(append(healthHistory[healthResult.Id], healthResult), defaultMaxHistory)
			isAvailable, whyAvailable := health.EvalCache(healthResult, &monitorConfig)
			if localStates.Caches[healthResult.Id].IsAvailable != isAvailable {
				fmt.Println("Changing state for", healthResult.Id, " was:", prevResult.Available, " is now:", isAvailable, " because:", whyAvailable, " errors:", healthResult.Errors)
				e := Event{
					Index:       eventIndex,
					Time:        time.Now().Unix(),
					Description: whyAvailable,
					Name:        healthResult.Id,
					Hostname:    healthResult.Id,
					Type:        serverTypes[healthResult.Id],
					Available:   isAvailable,
				}
				events = append([]Event{e}, events...)
				if len(events) > maxEvents {
					events = events[:maxEvents-1]
				}
				eventIndex++
			}
			localStates.Caches[healthResult.Id] = peer.IsAvailable{IsAvailable: isAvailable}
			calculateDeliveryServiceState(deliveryServiceServers, localStates.Caches, localStates.Deliveryservice)

			if lastHealthStart, ok := lastHealthEndTimes[healthResult.Id]; ok {
				lastHealthDurations[healthResult.Id] = time.Since(lastHealthStart)
			}
			lastHealthEndTimes[healthResult.Id] = time.Now()

			// if _, ok := queryIntervalStart[pollI]; !ok {
			// 	log.Printf("ERROR poll start index not found")
			// 	continue
			// }
			// lastQueryIntervalTime = time.Since(queryIntervalStart[pollI])
		case stats := <-cacheStatChannel:
			statHistory[stats.Id] = pruneHistory(append(statHistory[stats.Id], stats), defaultMaxHistory)
		case crStatesResult := <-peerChannel:
			peerStates[crStatesResult.Id] = crStatesResult.PeerStats
			combinedStates = combineCrStates(peerStates, localStates)
		}
	}
}
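The health-URL rewriting in the monitorConfig case above leans on strings.NewReplacer. A standalone sketch with illustrative values (the template and hostnames are hypothetical), showing the template expansion and the later health-to-stat rewrite:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// hypothetical polling-URL template of the shape the code above expects
	tmpl := "http://${hostname}/_astats?application=&inf.name=${interface_name}"
	r := strings.NewReplacer(
		"${hostname}", "edge-1.example.net",
		"${interface_name}", "eth0",
		"application=system", "application=plugin.remap",
		"application=", "application=plugin.remap",
	)
	healthURL := r.Replace(tmpl)
	fmt.Println(healthURL) // http://edge-1.example.net/_astats?application=plugin.remap&inf.name=eth0

	statURL := strings.NewReplacer("application=plugin.remap", "application=").Replace(healthURL)
	fmt.Println(statURL) // http://edge-1.example.net/_astats?application=&inf.name=eth0
}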
Code Example #11
func getToData(config StartupConfig, init bool, configChan chan RunningConfig) {
	var runningConfig RunningConfig
	to, err := traffic_ops.Login(config.ToURL, config.ToUser, config.ToPasswd, true)
	if err != nil {
		msg := fmt.Sprintf("Error logging in to %v: %v", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	servers, err := to.Servers()
	if err != nil {
		msg := fmt.Sprintf("Error getting server list from %v: %v ", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	runningConfig.CacheMap = make(map[string]traffic_ops.Server)
	for _, server := range servers {
		runningConfig.CacheMap[server.HostName] = server
	}

	cacheStatPath := "/publish/CacheStats?hc=1&stats="
	dsStatPath := "/publish/DsStats?hc=1&wildcard=1&stats="
	parameters, err := to.Parameters("TRAFFIC_STATS")
	if err != nil {
		msg := fmt.Sprintf("Error getting parameter list from %v: %v", config.ToURL, err)
		if init {
			panic(msg)
		}
		log.Error(msg)
		return
	}

	for _, param := range parameters {
		if param.Name == "DsStats" {
			statName := param.Value
			dsStatPath += "," + statName
		} else if param.Name == "CacheStats" {
			cacheStatPath += "," + param.Value
		}
	}
	cacheStatPath = strings.Replace(cacheStatPath, "=,", "=", 1)
	dsStatPath = strings.Replace(dsStatPath, "=,", "=", 1)

	runningConfig.HealthUrls = make(map[string]map[string]string)
	for _, server := range servers {
		if server.Type == "RASCAL" && server.Status != config.StatusToMon {
			log.Debugf("Skipping %s%s.  Looking for status %s but got status %s", server.HostName, server.DomainName, config.StatusToMon, server.Status)
			continue
		}

		if server.Type == "RASCAL" && server.Status == config.StatusToMon {
			cdnName := server.CDNName
			if cdnName == "" {
				log.Error("Unable to find CDN name for " + server.HostName + ".. skipping")
				continue
			}

			if runningConfig.HealthUrls[cdnName] == nil {
				runningConfig.HealthUrls[cdnName] = make(map[string]string)
			}
			url := "http://" + server.IPAddress + cacheStatPath
			runningConfig.HealthUrls[cdnName]["CacheStats"] = url
			url = "http://" + server.IPAddress + dsStatPath
			runningConfig.HealthUrls[cdnName]["DsStats"] = url
		}
	}

	lastSummaryTimeStr, err := to.SummaryStatsLastUpdated("daily_maxgbps")
	if err != nil {
		errHndlr(err, ERROR)
	} else {
		lastSummaryTime, err := time.Parse("2006-01-02 15:04:05", lastSummaryTimeStr)
		if err != nil {
			errHndlr(err, ERROR)
		} else {
			runningConfig.LastSummaryTime = lastSummaryTime
		}
	}

	configChan <- runningConfig
}
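One detail in the last block that trips up readers new to Go: "2006-01-02 15:04:05" is not a hard-coded date but Go's reference time. A time.Parse layout is written as that reference moment (Mon Jan 2 15:04:05 MST 2006) rendered in the format to be parsed. A minimal demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	lastSummaryTime, err := time.Parse("2006-01-02 15:04:05", "2015-11-30 08:15:00")
	if err != nil {
		panic(err)
	}
	fmt.Println(lastSummaryTime) // 2015-11-30 08:15:00 +0000 UTC
}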