Example 1
Polls a local HTTP service every 50 ms through hystrix.Do and serves the Hystrix metrics stream on port 9090.
package main

import (
	"log"
	"net"
	"net/http"
	"time"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	// Issue a request every 50 milliseconds.
	ticker := time.NewTicker(50 * time.Millisecond)

	// Serve the Hystrix metrics event stream on :9090 so a dashboard can attach.
	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "9090"), hystrixStreamHandler)

	// Wrap each request in a hystrix command so failures trip the circuit.
	for range ticker.C {
		err := hystrix.Do("Localhost 8080", func() error {
			resp, err := http.Get("http://localhost:8080/")

			// Close the body whenever a response came back, even on error.
			if resp != nil {
				resp.Body.Close()
			}

			return err
		}, nil)

		if err != nil {
			log.Printf("Error: %v", err)
		}
	}
}
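The "Localhost 8080" command above runs with hystrix-go's default circuit settings. If they need tuning, hystrix.ConfigureCommand can be called once near the top of main, before the loop; the sketch below uses the real CommandConfig fields, but the values are purely illustrative.

	hystrix.ConfigureCommand("Localhost 8080", hystrix.CommandConfig{
		Timeout:               100, // milliseconds before the command times out
		MaxConcurrentRequests: 10,  // concurrent executions allowed before requests are rejected
		ErrorPercentThreshold: 50,  // error percentage at which the circuit opens
	})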
Example 2
Mounts the Hystrix metrics stream as a /stream route on a gorilla/mux router, next to the application's index route.
func main() {
	// Start collecting Hystrix metrics for the stream endpoint.
	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()

	// Expose the application routes and the metrics stream on the same router.
	router := mux.NewRouter().StrictSlash(true)
	router.HandleFunc("/", index)
	router.HandleFunc("/stream", hystrixStreamHandler.ServeHTTP)

	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", os.Getenv("PORT")), router))
}
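The index handler registered on "/" is defined elsewhere in the project and is not part of this snippet; a minimal, hypothetical stand-in just to make the example self-contained could look like this:

func index(w http.ResponseWriter, r *http.Request) {
	// Placeholder root handler; the real implementation lives elsewhere.
	fmt.Fprintln(w, "ok")
}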
Example 3
Serves the Hystrix metrics stream on port 8222 while a small control API (start, configure, toggleOpen, close, status) listens on port 8221.
func main() {

	// hystrix.ConfigureCommand("my_command", hystrix.CommandConfig{
	// 	Timeout:               100,
	// 	MaxConcurrentRequests: 100,
	// 	ErrorPercentThreshold: 50,
	// })

	// Launch streamHandler for Hystrix dashboard
	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "8222"), hystrixStreamHandler)

	http.HandleFunc("/start", startHandler)
	http.HandleFunc("/configure", configureHandler)
	http.HandleFunc("/toggleOpen", toggleOpenHandler)
	http.HandleFunc("/close", closeHandler)
	http.HandleFunc("/status", statusHandler)
	http.ListenAndServe(":8221", nil)

	output := make(chan bool, 1)
	<-output

}
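The HTTP handlers registered above (startHandler, configureHandler, and so on) are defined elsewhere. For reference, the asynchronous counterpart to hystrix.Do is hystrix.Go, which takes a fallback and reports failures on an error channel; the sketch below follows that pattern, with the command name, the work inside the command, and the log messages all illustrative.

	output := make(chan bool, 1)
	errors := hystrix.Go("my_command", func() error {
		// Work protected by the circuit breaker goes here.
		output <- true
		return nil
	}, func(err error) error {
		// The fallback runs when the command fails, times out, or the circuit is open.
		log.Printf("falling back: %v", err)
		return err
	})

	select {
	case <-output:
		log.Println("command succeeded")
	case err := <-errors:
		log.Printf("command and fallback both failed: %v", err)
	}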
Example 4
An edge server that writes audit and event logs to S3 (optionally producing to Kafka) and exposes the Hystrix metrics stream on port 81.
func main() {
	flag.Parse()

	stats, err := initStatsd(*stats_prefix, os.Getenv("STATSD_HOSTPORT"))
	if err != nil {
		log.Fatalf("Statsd configuration error: %v", err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatalln("Failed to recieve auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)

	auditBucket := awsConnection.Bucket(auditBucketName + "-" + CLOUD_ENV)
	auditBucket.PutBucket(s3.BucketOwnerFull)
	eventBucket := awsConnection.Bucket(eventBucketName + "-" + CLOUD_ENV)
	eventBucket.PutBucket(s3.BucketOwnerFull)

	auditInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge_audit", *logging_dir)
	loggingInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge", *logging_dir)

	auditRotateCoordinator := gologging.NewRotateCoordinator(auditMaxLogLines, auditMaxLogAge)
	loggingRotateCoordinator := gologging.NewRotateCoordinator(maxLogLines, maxLogAge)

	auditLogger, err := gologging.StartS3Logger(
		auditRotateCoordinator,
		auditInfo,
		&DummyNotifierHarness{},
		&uploader.S3UploaderBuilder{
			Bucket:           auditBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: auditInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building audit: %s\n", err)
	}

	spadeEventLogger, err := gologging.StartS3Logger(
		loggingRotateCoordinator,
		loggingInfo,
		BuildSQSNotifierHarness(),
		&uploader.S3UploaderBuilder{
			Bucket:           eventBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: loggingInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building logger: %s\n", err)
	}

	// Initialize loggers.
	// AuditLogger writes to the audit log, for analysis of system success rate.
	// SpadeLogger writes requests to a file for processing by the spade processor.
	// KLogger (Kafka) produces messages for Kafka, currently in dark launch.
	// We allow the klogger to be a no-op in case we boot up with an unresponsive Kafka cluster.
	var logger *request_handler.EventLoggers
	brokerList := ParseBrokerList(*brokers)
	klogger, err := kafka_logger.NewKafkaLogger(*clientId, brokerList)
	if err == nil {
		klogger.(*kafka_logger.KafkaLogger).Init()
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     klogger,
		}
	} else {
		log.Printf("Got Error while building logger: %s + %v\nUsing Nop Logger\n", err, brokerList)
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     &request_handler.NoopLogger{},
		}
	}

	// Trigger close on receipt of SIGINT
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGINT)
	go func() {
		<-sigc
		// Cause flush
		logger.Close()
		os.Exit(0)
	}()

	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "81"), hystrixStreamHandler)

	// setup server and listen
	server := &http.Server{
		Addr: *listen_port,
		Handler: &request_handler.SpadeHandler{
			StatLogger: stats,
			EdgeLogger: logger,
			Assigner:   request_handler.Assigner,
		},
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 20, // 1 MiB
	}
	if err := server.ListenAndServe(); err != nil {
		log.Fatalln(err)
	}
}
Example 5
A rest-layer API whose storage handler is wrapped with restrix, with per-command Hystrix settings and the metrics stream served on port 8081.
func main() {
	index := resource.NewIndex()

	index.Bind("posts", post, restrix.Wrap("posts", mem.NewHandler()), resource.Conf{
		AllowedModes: resource.ReadWrite,
	})

	// Create API HTTP handler for the resource graph
	api, err := rest.NewHandler(index)
	if err != nil {
		log.Fatalf("Invalid API configuration: %s", err)
	}

	// Setup logger
	c := alice.New()
	c = c.Append(xlog.NewHandler(xlog.Config{}))
	c = c.Append(xaccess.NewHandler())
	resource.LoggerLevel = resource.LogLevelDebug
	resource.Logger = func(ctx context.Context, level resource.LogLevel, msg string, fields map[string]interface{}) {
		xlog.FromContext(ctx).OutputF(xlog.Level(level), 2, msg, fields)
	}

	// Bind the API under the root path
	http.Handle("/", c.Then(api))

	// Configure hystrix commands
	hystrix.Configure(map[string]hystrix.CommandConfig{
		"posts.MultiGet": {
			Timeout:               500,
			MaxConcurrentRequests: 200,
			ErrorPercentThreshold: 25,
		},
		"posts.Find": {
			Timeout:               1000,
			MaxConcurrentRequests: 100,
			ErrorPercentThreshold: 25,
		},
		"posts.Insert": {
			Timeout:               1000,
			MaxConcurrentRequests: 50,
			ErrorPercentThreshold: 25,
		},
		"posts.Update": {
			Timeout:               1000,
			MaxConcurrentRequests: 50,
			ErrorPercentThreshold: 25,
		},
		"posts.Delete": {
			Timeout:               1000,
			MaxConcurrentRequests: 10,
			ErrorPercentThreshold: 10,
		},
		"posts.Clear": {
			Timeout:               10000,
			MaxConcurrentRequests: 5,
			ErrorPercentThreshold: 10,
		},
	})

	// Start the metrics stream handler
	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	log.Print("Serving Hystrix metrics on http://localhost:8081")
	go http.ListenAndServe(net.JoinHostPort("", "8081"), hystrixStreamHandler)

	// Inject some fixtures
	fixtures := [][]string{
		{"POST", "/posts", `{"title": "First Post", "body": "This is my first post"}`},
		{"POST", "/posts", `{"title": "Second Post", "body": "This is my second post"}`},
		{"POST", "/posts", `{"title": "Third Post", "body": "This is my third post"}`},
	}
	for _, fixture := range fixtures {
		req, err := http.NewRequest(fixture[0], fixture[1], strings.NewReader(fixture[2]))
		if err != nil {
			log.Fatal(err)
		}
		w := httptest.NewRecorder()
		api.ServeHTTP(w, req)
		if w.Code >= 400 {
			log.Fatalf("Error returned for `%s %s`: %v", fixture[0], fixture[1], w)
		}
	}

	// Serve it
	log.Print("Serving API on http://localhost:8080")
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}
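Once the server is running, requests against the posts resource go through the restrix-wrapped handler, so they should show up on the metrics stream at :8081. A quick way to generate some traffic is to hit the /posts route already used by the fixtures; the client below is only an illustration and assumes the standard io, log, and net/http packages.

	resp, err := http.Get("http://localhost:8080/posts")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	log.Printf("GET /posts -> %d: %s", resp.StatusCode, body)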