func Announce(localIP string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) chan (chan bool) {
	dopplerMetaBytes, err := buildDopplerMeta(localIP, config)
	if err != nil {
		panic(err)
	}

	key := fmt.Sprintf("%s/%s/%s/%d", META_ROOT, config.Zone, config.JobName, config.Index)
	logger.Debugf("Starting Health Status Updates to Store: %s", key)

	node := storeadapter.StoreNode{
		Key:   key,
		Value: dopplerMetaBytes,
		TTL:   uint64(ttl.Seconds()),
	}

	// Call Create to make sure the node exists before we return.
	storeAdapter.Create(node)

	status, stopChan, err := storeAdapter.MaintainNode(node)
	if err != nil {
		panic(err)
	}

	// The status channel needs to be drained to maintain the node within the etcd cluster.
	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
func Connect(adapter Adapter, backoffStrategy retrystrategy.RetryStrategy, logger *gosteno.Logger, maxRetries int) error {
	timer := time.NewTimer(backoffStrategy(0))
	defer timer.Stop()

	numberOfTries := 0
	for {
		err := adapter.Connect()
		if err == nil {
			logger.Info("Connected to etcd")
			return nil
		}

		numberOfTries++
		sleepDuration := backoffStrategy(numberOfTries)
		logger.Warnd(map[string]interface{}{
			"error": err.Error(),
		}, fmt.Sprintf("Failed to connect to etcd. Number of tries: %d. Backing off for %v.", numberOfTries, sleepDuration))

		timer.Reset(sleepDuration)
		<-timer.C

		if numberOfTries >= maxRetries {
			return fmt.Errorf("Failed to connect to etcd after %d tries.", numberOfTries)
		}
	}
}
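// Usage sketch (not from the original source): driving Connect with a simple
// exponential backoff. The inline strategy below is a stand-in for whatever
// retrystrategy.RetryStrategy the codebase defines; the call backoffStrategy(0)
// above only requires something callable as func(attempt int) time.Duration.
// The adapter argument is assumed to be any value satisfying the Adapter
// interface that Connect expects.
func exampleConnectWithBackoff(adapter Adapter) {
	backoff := func(attempt int) time.Duration {
		return time.Duration(1<<uint(attempt)) * 100 * time.Millisecond
	}
	if err := Connect(adapter, backoff, gosteno.NewLogger("example"), 5); err != nil {
		// All retries exhausted; surface the error however the caller prefers.
		panic(err)
	}
}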
func connectToNatsServer(c *config.Config, logger *steno.Logger) yagnats.NATSConn {
	var natsClient yagnats.NATSConn
	var err error

	natsServers := c.NatsServers()
	attempts := 3
	for attempts > 0 {
		natsClient, err = yagnats.Connect(natsServers)
		if err == nil {
			break
		}
		attempts--
		time.Sleep(100 * time.Millisecond)
	}

	if err != nil {
		logger.Errorf("Error connecting to NATS: %s\n", err)
		os.Exit(1)
	}

	natsClient.AddClosedCB(func(conn *nats.Conn) {
		logger.Errorf("Close on NATS client. nats.Conn: %+v", *conn)
		os.Exit(1)
	})

	return natsClient
}
func StartHeartbeats(localIp string, ttl time.Duration, config *config.Config, storeAdapter storeadapter.StoreAdapter, logger *gosteno.Logger) (stopChan chan (chan bool)) {
	if len(config.EtcdUrls) == 0 {
		return
	}

	if storeAdapter == nil {
		panic("store adapter is nil")
	}

	logger.Debugf("Starting Health Status Updates to Store: /healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index)

	status, stopChan, err := storeAdapter.MaintainNode(storeadapter.StoreNode{
		Key:   fmt.Sprintf("/healthstatus/doppler/%s/%s/%d", config.Zone, config.JobName, config.Index),
		Value: []byte(localIp),
		TTL:   uint64(ttl.Seconds()),
	})
	if err != nil {
		panic(err)
	}

	// The status channel needs to be drained to keep the node alive in etcd.
	go func() {
		for stat := range status {
			logger.Debugf("Health updates channel pushed %v at time %v", stat, time.Now())
		}
	}()

	return stopChan
}
func (self *executorBBS) batchCompareAndSwapTasks(tasksToCAS [][]models.Task, logger *gosteno.Logger) {
	done := make(chan struct{}, len(tasksToCAS))

	for _, taskPair := range tasksToCAS {
		originalStoreNode := storeadapter.StoreNode{
			Key:   taskSchemaPath(&taskPair[0]),
			Value: taskPair[0].ToJSON(),
		}

		taskPair[1].UpdatedAt = self.timeProvider.Time().UnixNano()
		newStoreNode := storeadapter.StoreNode{
			Key:   taskSchemaPath(&taskPair[1]),
			Value: taskPair[1].ToJSON(),
		}

		go func() {
			err := self.store.CompareAndSwap(originalStoreNode, newStoreNode)
			if err != nil {
				logger.Errord(map[string]interface{}{
					"error": err.Error(),
				}, "runonce.converge.failed-to-compare-and-swap")
			}
			done <- struct{}{}
		}()
	}

	for _ = range tasksToCAS {
		<-done
	}
}
func NewLoggregatorClient(loggregatorAddress string, logger *gosteno.Logger, bufferSize int) LoggregatorClient {
	loggregatorClient := &udpLoggregatorClient{}

	la, err := net.ResolveUDPAddr("udp", loggregatorAddress)
	if err != nil {
		logger.Fatalf("Error resolving loggregator address %s, %s", loggregatorAddress, err)
	}

	connection, err := net.ListenPacket("udp", "")
	if err != nil {
		logger.Fatalf("Error opening local UDP socket: %s", err)
	}

	loggregatorClient.loggregatorAddress = la.IP.String()
	loggregatorClient.sendChannel = make(chan []byte, bufferSize)

	go func() {
		for dataToSend := range loggregatorClient.sendChannel {
			if len(dataToSend) > 0 {
				writeCount, err := connection.WriteTo(dataToSend, la)
				if err != nil {
					logger.Errorf("Writing to loggregator %s failed %s", loggregatorAddress, err)
					continue
				}
				logger.Debugf("Wrote %d bytes to %s", writeCount, loggregatorAddress)
				atomic.AddUint64(&loggregatorClient.sentMessageCount, 1)
				atomic.AddUint64(&loggregatorClient.sentByteCount, uint64(writeCount))
			} else {
				logger.Debugf("Skipped writing of 0 byte message to %s", loggregatorAddress)
			}
		}
	}()

	return loggregatorClient
}
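// Usage sketch (not from the original source): the LoggregatorClient interface
// returned above is assumed to expose Send([]byte), which is how other snippets
// in this listing use it (e.g. lc.Send(dataToProxy)). The address and buffer
// size are illustrative values only.
func exampleEmitToLoggregator() {
	client := NewLoggregatorClient("127.0.0.1:3457", gosteno.NewLogger("example"), 4096)
	client.Send([]byte("hello loggregator"))
}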
func newTokenFetcher(c *config.Config, logger *steno.Logger) token_fetcher.TokenFetcher {
	if c.RoutingApi.AuthDisabled {
		logger.Info("using noop token fetcher")
		return token_fetcher.NewNoOpTokenFetcher()
	}
	tokenFetcher := token_fetcher.NewTokenFetcher(&c.OAuth)
	logger.Info("using uaa token fetcher")
	return tokenFetcher
}
func setRequestXVcapRequestId(request *http.Request, logger *steno.Logger) {
	uuid, err := common.GenerateUUID()
	if err == nil {
		request.Header.Set(router_http.VcapRequestIdHeader, uuid)
		if logger != nil {
			logger.Set(router_http.VcapRequestIdHeader, uuid)
		}
	}
}
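// Usage sketch (not from the original source): tagging an outgoing request with
// a request ID. A nil logger is tolerated by the guard in the function above;
// the header name constant router_http.VcapRequestIdHeader comes from the
// surrounding gorouter code and is assumed to be importable here.
func exampleTagRequest() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	setRequestXVcapRequestId(req, nil)
	fmt.Println(req.Header.Get(router_http.VcapRequestIdHeader))
}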
func makeOutgoingProxy(ipAddress string, config *Config, logger *gosteno.Logger) *trafficcontroller.Proxy {
	authorizer := authorization.NewLogAccessAuthorizer(config.ApiHost, config.SkipCertVerify)

	logger.Debugf("Output Proxy Startup: Number of zones: %v", len(config.Loggregators))
	hashers := makeHashers(config.Loggregators, config.OutgoingPort, logger)

	logger.Debugf("Output Proxy Startup: Number of hashers for the proxy: %v", len(hashers))
	proxy := trafficcontroller.NewProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), hashers, authorizer, logger)
	return proxy
}
func sendMessagesToWebsocket(logMessages []*logmessage.Message, ws *websocket.Conn, logger *gosteno.Logger) {
	for _, message := range logMessages {
		err := ws.WriteMessage(websocket.BinaryMessage, message.GetRawMessage())
		if err != nil {
			logger.Debugf("Dump Sink %s: Error when trying to send data to sink. Requesting close. Err: %v", ws.RemoteAddr(), err)
		} else {
			logger.Debugf("Dump Sink %s: Successfully sent data", ws.RemoteAddr())
		}
	}
}
func makeOutgoingProxy(config *Config, logger *gosteno.Logger) *trafficcontroller.Proxy {
	authorizer := authorization.NewLogAccessAuthorizer(config.ApiHost, config.SkipCertVerify)

	logger.Debugf("Output Proxy Startup: Number of zones: %v", len(config.Loggregators))
	hashers := makeHashers(config.Loggregators, config.LoggregatorOutgoingPort, logger)

	logger.Debugf("Output Proxy Startup: Number of hashers for the proxy: %v", len(hashers))
	proxy := trafficcontroller.NewProxy(hashers, authorizer, logger)
	return proxy
}
func createCrypto(logger *steno.Logger, secret string) *secure.AesGCM {
	// generate secure encryption key using key derivation function (pbkdf2)
	secretPbkdf2 := secure.NewPbkdf2([]byte(secret), 16)

	crypto, err := secure.NewAesGCM(secretPbkdf2)
	if err != nil {
		logger.Errorf("Error creating route service crypto: %s\n", err)
		os.Exit(1)
	}
	return crypto
}
func dumpMessagesFromChannelToWebsocket(dumpChan <-chan *logmessage.Message, ws *websocket.Conn, clientAddress net.Addr, logger *gosteno.Logger) {
	for message := range dumpChan {
		err := websocket.Message.Send(ws, message.GetRawMessage())
		if err != nil {
			logger.Debugf("Dump Sink %s: Error when trying to send data to sink. Requesting close. Err: %v", clientAddress, err)
		} else {
			logger.Debugf("Dump Sink %s: Successfully sent data", clientAddress)
		}
	}
}
func initializeDopplerPool(config *config.Config, logger *gosteno.Logger) (*clientpool.DopplerPool, error) {
	adapter, err := storeAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	if err != nil {
		return nil, err
	}
	err = adapter.Connect()
	if err != nil {
		logger.Warnd(map[string]interface{}{
			"error": err.Error(),
		}, "Failed to connect to etcd")
	}

	preferInZone := func(relativePath string) bool {
		return strings.HasPrefix(relativePath, "/"+config.Zone+"/")
	}

	var tlsConfig *tls.Config
	if config.PreferredProtocol == "tls" {
		c := config.TLSConfig
		tlsConfig, err = listeners.NewTLSConfig(c.CertFile, c.KeyFile, c.CAFile)
		if err != nil {
			return nil, err
		}
		tlsConfig.ServerName = "doppler"
	}

	clientPool := clientpool.NewDopplerPool(logger, func(logger *gosteno.Logger, url string) (clientpool.Client, error) {
		client, err := clientpool.NewClient(logger, url, tlsConfig)
		if err == nil && client.Scheme() != config.PreferredProtocol {
			logger.Warnd(map[string]interface{}{
				"url": url,
			}, "Doppler advertising UDP only")
		}
		return client, err
	})

	onUpdate := func(all map[string]string, preferred map[string]string) {
		clientPool.Set(all, preferred)
	}

	dopplers, err := dopplerservice.NewFinder(adapter, config.PreferredProtocol, preferInZone, onUpdate, logger)
	if err != nil {
		return nil, err
	}
	dopplers.Start()

	onLegacyUpdate := func(all map[string]string, preferred map[string]string) {
		clientPool.SetLegacy(all, preferred)
	}

	legacyDopplers := dopplerservice.NewLegacyFinder(adapter, config.LoggregatorDropsondePort, preferInZone, onLegacyUpdate, logger)
	legacyDopplers.Start()

	return clientPool, nil
}
func NewSyslogSink(appId string, drainUrl string, givenLogger *gosteno.Logger, syslogWriter SyslogWriter) Sink {
	givenLogger.Debugf("Syslog Sink %s: Created for appId [%s]", drainUrl, appId)
	return &SyslogSink{
		appId:            appId,
		drainUrl:         drainUrl,
		logger:           givenLogger,
		sentMessageCount: new(uint64),
		sentByteCount:    new(uint64),
		listenerChannel:  make(chan *logmessage.Message),
		syslogWriter:     syslogWriter,
	}
}
func NewSyslogSink(appId string, drainUrl string, givenLogger *gosteno.Logger, syslogWriter syslogwriter.Writer, errorHandler func(string, string, string), dropsondeOrigin string, metricUpdateChannel chan<- int64) *SyslogSink {
	givenLogger.Debugf("Syslog Sink %s: Created for appId [%s]", drainUrl, appId)
	return &SyslogSink{
		appId:               appId,
		drainUrl:            drainUrl,
		Logger:              givenLogger,
		syslogWriter:        syslogWriter,
		handleSendError:     errorHandler,
		disconnectChannel:   make(chan struct{}),
		dropsondeOrigin:     dropsondeOrigin,
		metricUpdateChannel: metricUpdateChannel,
	}
}
func (r router) Start(logger *gosteno.Logger) {
	dataChan := r.agentListener.Start()

	for {
		dataToProxy := <-dataChan
		appId, err := appid.FromLogMessage(dataToProxy)
		if err != nil {
			logger.Warn(err.Error())
		} else {
			lc := r.lookupLoggregatorClientForAppId(appId)
			lc.Send(dataToProxy)
		}
	}
}
func NewSyslogSink(appId string, drainUrl string, givenLogger *gosteno.Logger, messageDrainBufferSize uint, syslogWriter syslogwriter.Writer, errorHandler func(string, string, string), dropsondeOrigin string) *SyslogSink {
	givenLogger.Debugf("Syslog Sink %s: Created for appId [%s]", drainUrl, appId)
	return &SyslogSink{
		appId:                  appId,
		drainUrl:               drainUrl,
		logger:                 givenLogger,
		messageDrainBufferSize: messageDrainBufferSize,
		syslogWriter:           syslogWriter,
		handleSendError:        errorHandler,
		disconnectChannel:      make(chan struct{}),
		dropsondeOrigin:        dropsondeOrigin,
	}
}
func NewSyslogSink(appId string, drainUrl string, givenLogger *gosteno.Logger, syslogWriter syslogwriter.SyslogWriter, errorChannel chan<- *logmessage.Message) sinks.Sink {
	givenLogger.Debugf("Syslog Sink %s: Created for appId [%s]", drainUrl, appId)
	return &SyslogSink{
		appId:             appId,
		drainUrl:          drainUrl,
		logger:            givenLogger,
		sentMessageCount:  new(uint64),
		sentByteCount:     new(uint64),
		syslogWriter:      syslogWriter,
		errorChannel:      errorChannel,
		disconnectChannel: make(chan struct{}),
	}
}
// NewDropsondeUnmarshallerCollection instantiates a DropsondeUnmarshallerCollection,
// creates the specified number of DropsondeUnmarshaller instances and logs to the
// provided logger.
func NewDropsondeUnmarshallerCollection(logger *gosteno.Logger, size int) *DropsondeUnmarshallerCollection {
	var unmarshallers []*DropsondeUnmarshaller
	for i := 0; i < size; i++ {
		unmarshallers = append(unmarshallers, NewDropsondeUnmarshaller(logger))
	}

	logger.Debugf("dropsondeUnmarshallerCollection: created %v unmarshallers", size)

	return &DropsondeUnmarshallerCollection{
		logger:        logger,
		unmarshallers: unmarshallers,
	}
}
func dial(taskIdentifier string, messageType events.LogMessage_MessageType, logger *gosteno.Logger) (net.Conn, error) {
	var err error
	var connection net.Conn
	for i := 0; i < 10; i++ {
		connection, err = net.Dial("unix", filepath.Join(taskIdentifier, socketName(messageType)))
		if err == nil {
			logger.Debugf("Opened socket %s, %s", messageType, taskIdentifier)
			break
		}
		logger.Debugf("Could not connect to socket %s, %s, retrying: %s", messageType, taskIdentifier, err)
		time.Sleep(100 * time.Millisecond)
	}
	return connection, err
}
func NewLoggregatorClient(loggregatorAddress string, logger *gosteno.Logger, bufferSize int) *udpLoggregatorClient {
	loggregatorClient := &udpLoggregatorClient{make(chan []byte, bufferSize)}

	connection, err := net.Dial("udp", loggregatorAddress)
	if err != nil {
		logger.Fatalf("Error resolving loggregator address %s, %s", loggregatorAddress, err)
		panic(err)
	}

	go func() {
		for {
			dataToSend := <-loggregatorClient.sendChannel
			if len(dataToSend) > 0 {
				writeCount, err := connection.Write(dataToSend)
				if err != nil {
					logger.Errorf("Writing to loggregator %s failed %s", loggregatorAddress, err)
					continue
				}
				logger.Debugf("Wrote %d bytes to %s", writeCount, loggregatorAddress)
			} else {
				logger.Debugf("Skipped writing of 0 byte message to %s", loggregatorAddress)
			}
		}
	}()

	return loggregatorClient
}
func createCrypto(secret string, logger *steno.Logger) *secure.AesGCM {
	secretDecoded, err := base64.StdEncoding.DecodeString(secret)
	if err != nil {
		logger.Errorf("Error decoding route service secret: %s\n", err)
		os.Exit(1)
	}

	crypto, err := secure.NewAesGCM(secretDecoded)
	if err != nil {
		logger.Errorf("Error creating route service crypto: %s\n", err)
		os.Exit(1)
	}
	return crypto
}
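// Usage sketch (not from the original source): generating a secret in the
// base64 form that createCrypto above expects. The 32-byte key length (AES-256)
// is an assumption; secure.NewAesGCM may accept other AES key sizes as well.
func exampleRouteServiceSecret() string {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil { // crypto/rand
		panic(err)
	}
	return base64.StdEncoding.EncodeToString(key)
}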
func New(address string, name string, writer writers.ByteArrayWriter, logger *gosteno.Logger) (*NetworkReader, error) {
	connection, err := net.ListenPacket("udp4", address)
	if err != nil {
		return nil, err
	}
	logger.Infof("Listening on %s", address)

	return &NetworkReader{
		connection:  connection,
		contextName: name,
		writer:      writer,
		logger:      logger,
	}, nil
}
func OverwritingMessageChannel(in <-chan *logmessage.Message, out chan *logmessage.Message, logger *gosteno.Logger) {
	for v := range in {
		select {
		case out <- v:
		default:
			// out's buffer is full: drop the oldest buffered message to make room for the new one.
			<-out
			out <- v
			if logger != nil {
				logger.Warnf("OMC: Reader was too slow. Dropped message.")
			}
		}
	}
	close(out)
}
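// Usage sketch (not from the original source): wiring OverwritingMessageChannel
// between a producer and a possibly slow consumer. The out channel must be
// buffered; with an unbuffered channel the drop-oldest branch above would block
// on <-out with no other sender. The buffer size of 5 is illustrative.
func exampleOverwritingChannel(in chan *logmessage.Message) chan *logmessage.Message {
	out := make(chan *logmessage.Message, 5)
	go OverwritingMessageChannel(in, out, gosteno.NewLogger("example"))
	return out
}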
func (r Router) Start(logger *gosteno.Logger) {
	go r.agentListener.Start()

	for dataToProxy := range r.dataChan {
		appId, err := appid.FromProtobufferMessage(dataToProxy)
		if err != nil {
			logger.Warn(err.Error())
		} else {
			server := r.hasher.GetLoggregatorServerForAppId(appId)
			lc := r.loggregatorClients[server]
			r.Component.Logger.Debugf("Incoming Router: AppId is %v. Using server: %v", appId, server)
			lc.Send(dataToProxy)
		}
	}
}
func setupRouteFetcher(logger *steno.Logger, c *config.Config, registry rregistry.RegistryInterface) *route_fetcher.RouteFetcher {
	clock := clock.NewClock()

	tokenFetcher := newTokenFetcher(logger, clock, c)
	_, err := tokenFetcher.FetchToken(false)
	if err != nil {
		logger.Errorf("Unable to fetch token: %s", err.Error())
		os.Exit(1)
	}

	routingApiUri := fmt.Sprintf("%s:%d", c.RoutingApi.Uri, c.RoutingApi.Port)
	routingApiClient := routing_api.NewClient(routingApiUri)

	routeFetcher := route_fetcher.NewRouteFetcher(steno.NewLogger("router.route_fetcher"), tokenFetcher, registry, c, routingApiClient, 1, clock)
	return routeFetcher
}
func initializeComponent(config metronConfig, instrumentables []instrumentation.Instrumentable, logger *gosteno.Logger) cfcomponent.Component {
	if len(config.NatsHosts) == 0 {
		logger.Warn("Startup: Did not receive a NATS host - not going to register component")
		cfcomponent.DefaultYagnatsClientProvider = func(logger *gosteno.Logger, c *cfcomponent.Config) (yagnats.NATSConn, error) {
			return fakeyagnats.Connect(), nil
		}
	}

	component, err := cfcomponent.NewComponent(logger, "MetronAgent", config.Index, &metronHealthMonitor{}, config.VarzPort, []string{config.VarzUser, config.VarzPass}, instrumentables)
	if err != nil {
		panic(err)
	}

	return component
}
func initializeClientPool(config metronConfig, logger *gosteno.Logger) *clientpool.LoggregatorClientPool {
	adapter := storeAdapterProvider(config.EtcdUrls, config.EtcdMaxConcurrentRequests)
	err := adapter.Connect()
	if err != nil {
		logger.Errorf("Error connecting to ETCD: %v", err)
	}

	inZoneServerAddressList := servicediscovery.NewServerAddressList(adapter, "/healthstatus/doppler/"+config.Zone, logger)
	allZoneServerAddressList := servicediscovery.NewServerAddressList(adapter, "/healthstatus/doppler/", logger)

	go inZoneServerAddressList.Run(time.Duration(config.EtcdQueryIntervalMilliseconds) * time.Millisecond)
	go allZoneServerAddressList.Run(time.Duration(config.EtcdQueryIntervalMilliseconds) * time.Millisecond)

	clientPool := clientpool.NewLoggregatorClientPool(logger, config.LoggregatorDropsondePort, inZoneServerAddressList, allZoneServerAddressList)
	return clientPool
}
func waitOnErrOrSignal(c *config.Config, logger *steno.Logger, errChan <-chan error, router *router.Router) {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1)

	select {
	case err := <-errChan:
		if err != nil {
			logger.Errorf("Error occurred: %s", err.Error())
			os.Exit(1)
		}
	case sig := <-signals:
		go func() {
			for sig := range signals {
				logger.Infod(
					map[string]interface{}{
						"signal": sig.String(),
					},
					"gorouter.signal.ignored",
				)
			}
		}()

		if sig == syscall.SIGUSR1 {
			logger.Infod(
				map[string]interface{}{
					"timeout": (c.DrainTimeout).String(),
				},
				"gorouter.draining",
			)
			router.Drain(c.DrainTimeout)
		}

		stoppingAt := time.Now()
		logger.Info("gorouter.stopping")
		router.Stop()
		logger.Infod(
			map[string]interface{}{
				"took": time.Since(stoppingAt).String(),
			},
			"gorouter.stopped",
		)
	}
}