func initiateFirehoseConnection(tcPort int) (*consumer.Consumer, <-chan *events.Envelope) {
	localIP, _ := localip.LocalIP()
	url := fmt.Sprintf("ws://%s:%d", localIP, tcPort)
	firehoseConnection := consumer.New(url, &tls.Config{InsecureSkipVerify: true}, nil)
	msgChan, _ := firehoseConnection.Firehose("uniqueId", "")
	return firehoseConnection, msgChan
}
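A minimal consumption sketch for the helper above. It assumes the noaa consumer API shown (Close on the consumer, a channel of *events.Envelope) and a traffic controller already listening on the given port; the port value and print format are illustrative only.

// Sketch: drain firehose envelopes until the channel is closed.
conn, msgs := initiateFirehoseConnection(8080) // port is illustrative
defer conn.Close()
for envelope := range msgs {
	fmt.Printf("received %s envelope from %s\n", envelope.GetEventType(), envelope.GetOrigin())
}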
Example #2
func New(deploymentName string, job string, index string, outputWriter writers.EnvelopeWriter) *Tagger {
	ip, _ := localip.LocalIP()
	return &Tagger{
		deploymentName: deploymentName,
		job:            job,
		index:          index,
		ip:             ip,
		outputWriter:   outputWriter,
	}
}
func basicTaggedMessage(envelope events.Envelope) *events.Envelope {
	ip, _ := localip.LocalIP()

	envelope.Deployment = proto.String("test-deployment")
	envelope.Job = proto.String("test-job")
	envelope.Index = proto.String("2")
	envelope.Ip = proto.String(ip)

	return &envelope
}
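A test-style sketch combining the two helpers above (assumed to run inside a Ginkgo/Gomega suite, as the other excerpts on this page do). fakeWriter stands in for any writers.EnvelopeWriter test double and is not part of the original code; an assertion on the Tagger's write-side behaviour is omitted because that method is not shown here.

tagger := New("test-deployment", "test-job", "2", fakeWriter)
Expect(tagger).NotTo(BeNil())

expected := basicTaggedMessage(events.Envelope{Origin: proto.String("fake-origin")})
Expect(expected.GetDeployment()).To(Equal("test-deployment"))
Expect(expected.GetIndex()).To(Equal("2"))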
Example #4
func (c *VcapComponent) Start() error {
	if c.Varz.Type == "" {
		err := errors.New("type is required")
		log.Error("Component type is required", err)
		return err
	}

	c.quitCh = make(chan struct{}, 1)
	c.Varz.StartTime = schema.Time(time.Now())
	guid, err := uuid.GenerateUUID()
	if err != nil {
		return err
	}
	c.Varz.UUID = fmt.Sprintf("%d-%s", c.Varz.Index, guid)

	if c.Varz.Host == "" {
		host, err := localip.LocalIP()
		if err != nil {
			log.Error("error-getting-localIP", err)
			return err
		}

		port, err := localip.LocalPort()
		if err != nil {
			log.Error("error-getting-localPort", err)
			return err
		}

		c.Varz.Host = fmt.Sprintf("%s:%d", host, port)
	}

	if c.Varz.Credentials == nil || len(c.Varz.Credentials) != 2 {
		user, err := uuid.GenerateUUID()
		if err != nil {
			return err
		}
		password, err := uuid.GenerateUUID()
		if err != nil {
			return err
		}

		c.Varz.Credentials = []string{user, password}
	}

	if c.Logger != nil {
		log = c.Logger
	}

	c.Varz.NumCores = runtime.NumCPU()

	procStat = NewProcessStatus()

	c.ListenAndServe()
	return nil
}
Example #5
func (c *Config) Process() {
	var err error

	if c.GoMaxProcs == -1 {
		c.GoMaxProcs = runtime.NumCPU()
	}

	c.Logging.JobName = "gorouter"
	if c.StartResponseDelayInterval > c.DropletStaleThreshold {
		c.DropletStaleThreshold = c.StartResponseDelayInterval
	}

	// To avoid routes getting purged because of an unresponsive NATS server,
	// the NATS client's ping interval is set so that it fails over to the next
	// NATS server before DropletStaleThreshold is hit. The interval is hardcoded
	// to 20 seconds because operators cannot set the values of DropletStaleThreshold
	// and StartResponseDelayInterval.
	// ping_interval = ((DropletStaleThreshold - StartResponseDelayInterval) - minimumRegistrationInterval + (2 * number_of_nats_servers)) / 3
	c.NatsClientPingInterval = 20 * time.Second

	if c.DrainTimeout == 0 || c.DrainTimeout == defaultConfig.EndpointTimeout {
		c.DrainTimeout = c.EndpointTimeout
	}

	c.Ip, err = localip.LocalIP()
	if err != nil {
		panic(err)
	}

	if c.EnableSSL {
		c.CipherSuites = c.processCipherSuites()
		cert, err := tls.LoadX509KeyPair(c.SSLCertPath, c.SSLKeyPath)
		if err != nil {
			panic(err)
		}
		c.SSLCertificate = cert
	}

	if c.RouteServiceSecret != "" {
		c.RouteServiceEnabled = true
	}

	// check if valid load balancing strategy
	validLb := false
	for _, lb := range LoadBalancingStrategies {
		if c.LoadBalance == lb {
			validLb = true
			break
		}
	}
	if !validLb {
		errMsg := fmt.Sprintf("Invalid load balancing algorithm %s. Allowed values are %s", c.LoadBalance, LoadBalancingStrategies)
		panic(errMsg)
	}
}
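A worked instance of the ping-interval formula quoted in the comment above; all numbers are illustrative and do not come from this configuration.

package main

import "fmt"

func main() {
	// Illustrative values: DropletStaleThreshold = 120s, StartResponseDelayInterval = 5s,
	// minimumRegistrationInterval = 20s, 2 NATS servers.
	pingInterval := ((120 - 5) - 20 + (2 * 2)) / 3
	fmt.Println(pingInterval) // 33 seconds; Process() pins NatsClientPingInterval to 20s, below this bound
}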
Example #6
func main() {
	runtime.GOMAXPROCS(4)

	var interval = flag.String("interval", "1s", "Interval for reported results")
	var writeRate = flag.Int("writeRate", 15000, "Number of writes per second to send to doppler")
	var stopAfter = flag.String("stopAfter", "5m", "How long to run the experiment for")
	var sharedSecret string
	flag.StringVar(&sharedSecret, "sharedSecret", "", "Shared secret used by Doppler to verify message validity")
	var dopplerOutgoingPort = flag.Int("dopplerOutgoingPort", 8080, "Outgoing port from doppler")
	var dopplerIncomingDropsondePort = flag.Int("dopplerIncomingDropsondePort", 3457, "Incoming dropsonde port to doppler")
	flag.Parse()

	duration, err := time.ParseDuration(*interval)
	if err != nil {
		log.Fatalf("Invalid duration %s\n", *interval)
	}

	stopAfterDuration, err := time.ParseDuration(*stopAfter)
	if err != nil {
		log.Fatalf("Invalid duration %s\n", *stopAfter)
	}

	reporter := metricsreporter.New(duration, os.Stdout)
	ip, err := localip.LocalIP()
	if err != nil {
		panic(err)
	}

	generator := messagegenerator.NewValueMetricGenerator()
	writer := messagewriter.NewMessageWriter(ip, *dopplerIncomingDropsondePort, sharedSecret, reporter.SentCounter())
	reader := websocketmessagereader.New(fmt.Sprintf("%s:%d", ip, *dopplerOutgoingPort), reporter.ReceivedCounter())
	defer reader.Close()

	writeStrategy := writestrategies.NewConstantWriteStrategy(generator, writer, *writeRate)
	exp := experiment.NewExperiment(reader)
	exp.AddWriteStrategy(writeStrategy)

	exp.Warmup()
	go reporter.Start()
	go exp.Start()

	timer := time.NewTimer(stopAfterDuration)
	<-timer.C
	exp.Stop()
	reporter.Stop()
}
Example #7
func (s *Subscriber) startMessage() ([]byte, error) {
	host, err := localip.LocalIP()
	if err != nil {
		return nil, err
	}

	d := common.RouterStart{
		Id:    s.opts.ID,
		Hosts: []string{host},
		MinimumRegisterIntervalInSeconds: s.opts.MinimumRegisterIntervalInSeconds,
		PruneThresholdInSeconds:          s.opts.PruneThresholdInSeconds,
	}
	message, err := json.Marshal(d)
	if err != nil {
		return nil, err
	}

	return message, nil
}
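For reference, the greeting marshalled by startMessage might look like the line below, assuming the conventional JSON tags on common.RouterStart; the id, host and interval values are illustrative.

{"id":"some-router-guid","hosts":["10.0.16.17"],"minimumRegisterIntervalInSeconds":20,"pruneThresholdInSeconds":120}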
Example #8
import (
	"code.cloudfoundry.org/localip"
	"github.com/nu7hatch/gouuid"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"integration_tests/doppler/helpers"
	. "integration_tests/doppler/helpers"
	"io"
	"net"
	"time"
	"tools/benchmark/metricsreporter"
)

var _ = Describe("MessageLossBenchmark", func() {
	Measure("number of messages through a syslog drain per second", func(b Benchmarker) {
		localIP, err := localip.LocalIP()
		Expect(err).NotTo(HaveOccurred())
		syslogDrainAddress := net.JoinHostPort(localIP, "6547")
		listener, err := net.Listen("tcp", syslogDrainAddress)
		Expect(err).NotTo(HaveOccurred())

		counter := metricsreporter.NewCounter("messages")
		go func() {
			conn, err := listener.Accept()
			Expect(err).NotTo(HaveOccurred())

			defer conn.Close()
			buffer := make([]byte, 1024)
			for {
				conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
				_, err := conn.Read(buffer)
Example #9
	pathToGoStatsdClient, err = gexec.Build("statsd-injector/tools/statsdGoClient")
	Expect(err).NotTo(HaveOccurred())

	command := exec.Command(pathToMetronExecutable, "--config=fixtures/metron.json", "--debug")

	metronSession, err = gexec.Start(command, gexec.NewPrefixedWriter("[o][metron]", GinkgoWriter), gexec.NewPrefixedWriter("[e][metron]", GinkgoWriter))
	Expect(err).ShouldNot(HaveOccurred())

	pathToStatsdInjectorExecutable, err := gexec.Build("statsd-injector")
	Expect(err).NotTo(HaveOccurred())
	command = exec.Command(pathToStatsdInjectorExecutable, "-statsdPort=51162", "-metronPort=51161", "-logLevel=debug")

	statsdInjectorSession, err = gexec.Start(command, gexec.NewPrefixedWriter("[o][statsdInjector]", GinkgoWriter), gexec.NewPrefixedWriter("[e][statsdInjector]", GinkgoWriter))
	Expect(err).ShouldNot(HaveOccurred())

	localIPAddress, _ = localip.LocalIP()

	etcdPort = 5800 + (config.GinkgoConfig.ParallelNode-1)*10
	etcdRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)
	etcdRunner.Start()

	Eventually(metronSession, 1).Should(gbytes.Say("metron started"))
	Eventually(statsdInjectorSession).Should(gbytes.Say("Listening for statsd on host :51162"))

	// Put a key in etcd so that metron doesn't block on getting an event from
	// finder service.
	exec.Command(
		"curl", "-v", "-X", "PUT",
		"localhost:4002/v2/keys/healthstatus/doppler/z1/deployment-name/42",
		"-d", "value=127.0.0.1",
	).Run()
	"github.com/cloudfoundry/loggregatorlib/loggertesthelper"
	"github.com/cloudfoundry/storeadapter"
	"github.com/cloudfoundry/storeadapter/storerunner/etcdstorerunner"
	ginkgoConfig "github.com/onsi/ginkgo/config"
)

var _ = Describe("Announcer", func() {
	var (
		localIP     string
		conf        config.Config
		etcdRunner  *etcdstorerunner.ETCDClusterRunner
		etcdAdapter storeadapter.StoreAdapter
	)

	BeforeSuite(func() {
		localIP, _ = localip.LocalIP()

		etcdPort := 5500 + ginkgoConfig.GinkgoConfig.ParallelNode*10
		etcdRunner = etcdstorerunner.NewETCDClusterRunner(etcdPort, 1, nil)
		etcdRunner.Start()

		etcdAdapter = etcdRunner.Adapter(nil)

		conf = config.Config{
			JobName: "doppler_z1",
			Index:   "0",
			EtcdMaxConcurrentRequests: 10,
			EtcdUrls:                  etcdRunner.NodeURLS(),
			Zone:                      "z1",
			IncomingUDPPort:           1234,
			IncomingTCPPort:           5678,
Example #11
func SetupTrafficcontroller(etcdClientURL string, dopplerPort, metronPort int) (func(), int) {
	By("making sure trafficcontroller was build")
	tcPath := os.Getenv("TRAFFIC_CONTROLLER_BUILD_PATH")
	Expect(tcPath).ToNot(BeEmpty())

	By("starting trafficcontroller")
	tcPort := getPort(trafficcontrollerPortOffset)
	tcConfig := trafficcontrollerConfig.Config{
		Index:   jobIndex,
		JobName: jobName,

		DopplerPort:           uint32(dopplerPort),
		OutgoingDropsondePort: uint32(tcPort),
		MetronHost:            "localhost",
		MetronPort:            metronPort,

		EtcdUrls:                  []string{etcdClientURL},
		EtcdMaxConcurrentRequests: 5,

		SystemDomain:   "vcap.me",
		SkipCertVerify: true,

		ApiHost:         "http://127.0.0.1:65530",
		UaaHost:         "http://127.0.0.1:65531",
		UaaClient:       "bob",
		UaaClientSecret: "yourUncle",
	}

	tcCfgFile, err := ioutil.TempFile("", "trafficcontroller-config")
	Expect(err).ToNot(HaveOccurred())

	err = json.NewEncoder(tcCfgFile).Encode(tcConfig)
	Expect(err).ToNot(HaveOccurred())
	err = tcCfgFile.Close()
	Expect(err).ToNot(HaveOccurred())

	tcCommand := exec.Command(tcPath, "--debug", "--disableAccessControl", "--config", tcCfgFile.Name())
	tcSession, err := gexec.Start(
		tcCommand,
		gexec.NewPrefixedWriter(color("o", "tc", green, cyan), GinkgoWriter),
		gexec.NewPrefixedWriter(color("e", "tc", red, cyan), GinkgoWriter),
	)
	Expect(err).ToNot(HaveOccurred())

	By("waiting for trafficcontroller to listen")
	ip, err := localip.LocalIP()
	Expect(err).ToNot(HaveOccurred())
	Eventually(func() error {
		url := fmt.Sprintf("http://%s:%d", ip, tcPort)
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
		}
		return err
	}, 3).Should(Succeed())

	return func() {
		os.Remove(tcCfgFile.Name())
		tcSession.Kill().Wait()
	}, tcPort
}
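A usage sketch tying this helper back to initiateFirehoseConnection at the top of the page, assuming both live in the same test suite; etcdClientURL, dopplerPort and metronPort are presumed to come from the surrounding setup.

cleanup, tcPort := SetupTrafficcontroller(etcdClientURL, dopplerPort, metronPort)
defer cleanup()

firehoseConn, msgChan := initiateFirehoseConnection(tcPort)
defer firehoseConn.Close()
Eventually(msgChan).Should(Receive())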
Example #12
func initializeDopplerPool(conf *config.Config, batcher *metricbatcher.MetricBatcher, logger *gosteno.Logger) (*eventmarshaller.EventMarshaller, error) {
	adapter, err := storeAdapterProvider(conf)
	if err != nil {
		return nil, err
	}

	backoffStrategy := retrystrategy.Exponential()
	err = backoff.Connect(adapter, backoffStrategy, logger, connectionRetries)
	if err != nil {
		return nil, err
	}

	var protocols []string
	clientPool := make(map[string]clientreader.ClientPool)
	writers := make(map[string]eventmarshaller.BatchChainByteWriter)

	ip, err := localip.LocalIP()
	if err != nil {
		return nil, err
	}

	for protocol := range conf.Protocols {
		proto := string(protocol)
		protocols = append(protocols, proto)
		switch proto {
		case "udp":
			udpCreator := clientpool.NewUDPClientCreator(logger)
			udpWrapper := dopplerforwarder.NewUDPWrapper([]byte(conf.SharedSecret), logger)
			udpPool := clientpool.NewDopplerPool(logger, udpCreator)
			udpForwarder := dopplerforwarder.New(udpWrapper, udpPool, logger)
			clientPool[proto] = udpPool
			writers[proto] = udpForwarder
		case "tcp":
			tcpCreator := clientpool.NewTCPClientCreator(logger, nil)
			tcpWrapper := dopplerforwarder.NewWrapper(logger, proto)
			tcpPool := clientpool.NewDopplerPool(logger, tcpCreator)
			tcpForwarder := dopplerforwarder.New(tcpWrapper, tcpPool, logger)

			tcpBatchInterval := time.Duration(conf.TCPBatchIntervalMilliseconds) * time.Millisecond

			dropCounter := batch.NewDroppedCounter(tcpForwarder, batcher, origin, ip, conf)
			batchWriter, err := batch.NewWriter(
				"tcp",
				tcpForwarder,
				dropCounter,
				conf.TCPBatchSizeBytes,
				tcpBatchInterval,
				logger,
			)
			if err != nil {
				return nil, err
			}
			clientPool[proto] = tcpPool
			writers[proto] = batchWriter
		case "tls":
			c := conf.TLSConfig
			tlsConfig, err := listeners.NewTLSConfig(c.CertFile, c.KeyFile, c.CAFile)
			if err != nil {
				return nil, err
			}
			tlsConfig.ServerName = "doppler"
			tlsCreator := clientpool.NewTCPClientCreator(logger, tlsConfig)
			tlsWrapper := dopplerforwarder.NewWrapper(logger, proto)
			tlsPool := clientpool.NewDopplerPool(logger, tlsCreator)
			tlsForwarder := dopplerforwarder.New(tlsWrapper, tlsPool, logger)
			tcpBatchInterval := time.Duration(conf.TCPBatchIntervalMilliseconds) * time.Millisecond

			dropCounter := batch.NewDroppedCounter(tlsForwarder, batcher, origin, ip, conf)
			batchWriter, err := batch.NewWriter(
				"tls",
				tlsForwarder,
				dropCounter,
				conf.TCPBatchSizeBytes,
				tcpBatchInterval,
				logger,
			)
			if err != nil {
				return nil, err
			}
			clientPool[proto] = tlsPool
			writers[proto] = batchWriter
		}
	}

	finder := dopplerservice.NewFinder(adapter, conf.LoggregatorDropsondePort, conf.Protocols.Strings(), conf.Zone, logger)
	finder.Start()

	marshaller := eventmarshaller.New(batcher, logger)

	go func() {
		for {
			protocol := clientreader.Read(clientPool, conf.Protocols.Strings(), finder.Next())
			logger.Infof("Chose protocol %s from last etcd event, updating writer...", protocol)
			marshaller.SetWriter(writers[protocol])
		}
	}()

	return marshaller, nil
}
	"github.com/gorilla/websocket"
	"github.com/nu7hatch/gouuid"
)

var _ = Describe("Listener test", func() {
	var (
		address     string
		ws          *websocket.Conn
		conn        net.Conn
		receiveChan chan []byte
		err         error
	)

	Context("with TLS Listener config specified", func() {
		BeforeEach(func() {
			ip, _ := localip.LocalIP()
			address = fmt.Sprintf("%s:%d", ip, 8766)
			conn = openTLSConnection(address)

			receiveChan = make(chan []byte, 10)
			ws, _ = AddWSSink(receiveChan, "4567", "/firehose/hose-subcription-a")
		})

		AfterEach(func() {
			receiveChan = nil
			ws.Close()
		})

		It("listens for dropsonde log message on TLS port", func() {
			message := "my-random-tls-message"
			guid, err := uuid.NewV4()
Example #14
func main() {
	flag.Parse()

	config, err := config.ParseConfig(*logLevel, *configFile)
	if err != nil {
		panic(fmt.Errorf("Unable to parse config: %s", err))
	}

	httpsetup.SetInsecureSkipVerify(config.SkipCertVerify)

	ipAddress, err := localip.LocalIP()
	if err != nil {
		panic(fmt.Errorf("Unable to resolve own IP address: %s", err))
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "loggregator trafficcontroller", config.Syslog)
	log.Info("Startup: Setting up the loggregator traffic controller")

	batcher, err := initializeMetrics("LoggregatorTrafficController", net.JoinHostPort(config.MetronHost, strconv.Itoa(config.MetronPort)))
	if err != nil {
		log.Errorf("Error initializing dropsonde: %s", err)
	}

	go func() {
		err := http.ListenAndServe(net.JoinHostPort("localhost", pprofPort), nil)
		if err != nil {
			log.Errorf("Error starting pprof server: %s", err.Error())
		}
	}()

	monitorInterval := time.Duration(config.MonitorIntervalSeconds) * time.Second
	uptimeMonitor := monitor.NewUptime(monitorInterval)
	go uptimeMonitor.Start()
	defer uptimeMonitor.Stop()

	openFileMonitor := monitor.NewLinuxFD(monitorInterval, log)
	go openFileMonitor.Start()
	defer openFileMonitor.Stop()

	etcdAdapter := defaultStoreAdapterProvider(config)
	err = etcdAdapter.Connect()
	if err != nil {
		panic(fmt.Errorf("Unable to connect to ETCD: %s", err))
	}

	logAuthorizer := authorization.NewLogAccessAuthorizer(*disableAccessControl, config.ApiHost)

	uaaClient := uaa_client.NewUaaClient(config.UaaHost, config.UaaClient, config.UaaClientSecret)
	adminAuthorizer := authorization.NewAdminAccessAuthorizer(*disableAccessControl, &uaaClient)

	// TODO: The preferredProtocol of udp tells the finder to pull out the Doppler URLs from the legacy ETCD endpoint.
	// Eventually we'll have a separate websocket client pool
	finder := dopplerservice.NewFinder(etcdAdapter, int(config.DopplerPort), []string{"udp"}, "", log)
	finder.Start()
	// Draining the finder's events channel in order to not block the finder from handling etcd events.
	go func() {
		for {
			finder.Next()
		}
	}()

	var accessMiddleware, legacyAccessMiddleware func(middleware.HttpHandler) *middleware.AccessHandler
	if config.SecurityEventLog != "" {
		accessLog, err := os.OpenFile(config.SecurityEventLog, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
		if err != nil {
			panic(fmt.Errorf("Unable to open access log: %s", err))
		}
		defer func() {
			accessLog.Sync()
			accessLog.Close()
		}()
		accessLogger := accesslogger.New(accessLog, log)
		accessMiddleware = middleware.Access(accessLogger, ipAddress, config.OutgoingDropsondePort, log)
		legacyAccessMiddleware = middleware.Access(accessLogger, ipAddress, config.OutgoingPort, log)
	}

	dopplerCgc := channel_group_connector.NewChannelGroupConnector(finder, newDropsondeWebsocketListener, marshaller.DropsondeLogMessage, batcher, log)
	dopplerHandler := http.Handler(dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, dopplerCgc, dopplerproxy.TranslateFromDropsondePath, "doppler."+config.SystemDomain, log))
	if accessMiddleware != nil {
		dopplerHandler = accessMiddleware(dopplerHandler)
	}
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingDropsondePort), 10)), dopplerHandler)

	legacyCgc := channel_group_connector.NewChannelGroupConnector(finder, newLegacyWebsocketListener, marshaller.LoggregatorLogMessage, batcher, log)
	legacyHandler := http.Handler(dopplerproxy.NewDopplerProxy(logAuthorizer, adminAuthorizer, legacyCgc, dopplerproxy.TranslateFromLegacyPath, "loggregator."+config.SystemDomain, log))
	if legacyAccessMiddleware != nil {
		legacyHandler = legacyAccessMiddleware(legacyHandler)
	}
	startOutgoingProxy(net.JoinHostPort(ipAddress, strconv.FormatUint(uint64(config.OutgoingPort), 10)), legacyHandler)

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")
			return
		}
	}
}
Example #15
func main() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)

	flag.Parse()

	localIp, err := localip.LocalIP()
	if err != nil {
		fatal("Unable to resolve own IP address: %s", err)
	}

	conf, err := config.ParseConfig(*configFile)
	if err != nil {
		fatal("Unable to parse config: %s", err)
	}

	log := logger.NewLogger(*logLevel, *logFilePath, "doppler", conf.Syslog)

	go func() {
		err := http.ListenAndServe(net.JoinHostPort("localhost", pprofPort), nil)
		if err != nil {
			log.Errorf("Error starting pprof server: %s", err.Error())
		}
	}()

	log.Info("Startup: Setting up the doppler server")
	dropsonde.Initialize(conf.MetronAddress, DOPPLER_ORIGIN)
	storeAdapter := NewStoreAdapter(conf)

	doppler, err := New(log, localIp, conf, storeAdapter, conf.MessageDrainBufferSize, DOPPLER_ORIGIN, time.Duration(conf.WebsocketWriteTimeoutSeconds)*time.Second, time.Duration(conf.SinkDialTimeoutSeconds)*time.Second)

	if err != nil {
		fatal("Failed to create doppler: %s", err)
	}

	go doppler.Start()
	log.Info("Startup: doppler server started.")

	killChan := signalmanager.RegisterKillSignalChannel()
	dumpChan := signalmanager.RegisterGoRoutineDumpSignalChannel()

	releaseNodeChan := dopplerservice.Announce(localIp, config.HeartbeatInterval, conf, storeAdapter, log)
	legacyReleaseNodeChan := dopplerservice.AnnounceLegacy(localIp, config.HeartbeatInterval, conf, storeAdapter, log)

	for {
		select {
		case <-dumpChan:
			signalmanager.DumpGoRoutine()
		case <-killChan:
			log.Info("Shutting down")

			stopped := make(chan bool)
			legacyStopped := make(chan bool)
			releaseNodeChan <- stopped
			legacyReleaseNodeChan <- legacyStopped

			doppler.Stop()

			<-stopped
			<-legacyStopped

			return
		}
	}
}