Example #1
0
// setupS3Logger wires the S3-backed logger onto the AppContext: it resolves
// AWS credentials, builds the upload pipeline, and installs a signal handler
// that flushes the logger before the process exits.
func (app *AppContext) setupS3Logger() (err error) {
	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		// Log and return instead of log.Fatalln: this function declares an
		// error result, and exiting here would bypass any caller cleanup.
		log.Println("Failed to find AWS credentials in env")
		return err
	}

	awsConnection := s3.New(
		auth,
		getAWSRegion(app.config.aws_region),
	)
	bucket := awsConnection.Bucket(app.config.bucket)

	// Instance metadata is folded into generated S3 key names.
	instanceInfo := keygen.BuildInstanceInfo(
		&keygen.EnvInstanceFetcher{},
		serviceName,
		app.config.logging_dir,
	)

	// Rotate the log when either the line-count or age limit is reached.
	rotateCoordinator := gologging.NewRotateCoordinator(
		app.config.max_log_lines,
		app.config.max_log_age,
	)

	metricsLogger := MetricsLogger{app.metrics}

	// NOTE(review): metricsLogger fills both the notifier and the error-harness
	// slots of StartS3Logger — presumably it implements both interfaces; verify.
	app.s3log, err = gologging.StartS3Logger(
		rotateCoordinator,
		instanceInfo,
		&metricsLogger,
		&uploader.S3UploaderBuilder{
			Bucket: bucket,
			KeyNameGenerator: &KeyNameGenerator{
				Info:   instanceInfo,
				Prefix: app.config.key_prefix,
			},
		},
		&metricsLogger,
		app.config.num_workers,
	)
	if err != nil {
		return err
	}

	// Make sure the logger is flushed when a shutdown signal is received.
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		log.Println("interrupted, closing logger...")
		app.s3log.Close()
		os.Exit(0)
	}()

	return nil
}
Example #2
0
// main drives a basic example: it builds an S3-backed logger and writes
// throttled log lines until the target count is reached, flushing on signals.
func main() {
	flag.Parse()
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatalln("Failed to receive auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)
	bucket := awsConnection.Bucket(targetBucket)
	info := gen.BuildInstanceInfo(&localInstanceFetcher{}, "basic_example", ".")
	// Rotate hourly or after adjustedMaxLines, whichever comes first.
	rotateCoordinator := gologging.NewRotateCoordinator(adjustedMaxLines, time.Hour*1)
	logger, err := gologging.StartS3Logger(
		rotateCoordinator,
		info,
		&stdoutNotifier{},
		&uploader.S3UploaderBuilder{
			Bucket:           bucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{info},
		},
		&stderrNotifier{},
		5,
	)
	if err != nil {
		log.Fatalf("Error building uploader: %s\n ", err)
	}

	i := 0
	now := time.Now()
	// Flush the logger and report the produced rate on shutdown signals.
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		logger.Close()
		fmt.Printf("Produced %f rps\n", float64(i)/(float64(time.Now().Sub(now))/float64(time.Second)))
		os.Exit(0)
	}()

	// Single function-scope defer. The original registered this defer inside
	// the loop below, stacking one deferred closure per iteration; defers run
	// at function return, not at iteration end, so that never fired mid-loop.
	// NOTE(review): recover only catches panics, not OS signals — the sleep
	// branch likely never triggers; confirm the original intent.
	defer func() {
		if r := recover(); r != nil { // means we caught a signal
			time.Sleep(120 * time.Second)
		}
	}()

	// NOTE(review): *rps == 0 would divide by zero here — confirm the flag
	// has a nonzero default or is validated elsewhere.
	x := int(time.Second) / *rps
	for ; i < MAX_LINES_PER_LOG*4; i++ {
		// throttle - there is a better solution to this
		time.Sleep(time.Duration(int(0.8 * float64(x))))
		logger.Log("MOAR! %d", i)
	}
	logger.Close()
}
Example #3
0
// main boots the spade edge service: statsd metrics, AWS/S3 logging for
// audit and event streams, an optional Kafka logger, a hystrix metrics
// stream, and finally the HTTP request-handling server.
func main() {
	flag.Parse()

	stats, err := initStatsd(*stats_prefix, os.Getenv("STATSD_HOSTPORT"))
	if err != nil {
		log.Fatalf("Statsd configuration error: %v", err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatalln("Failed to receive auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)

	// Ensure both buckets exist. Bucket creation is best-effort (it fails if
	// the bucket already exists), so errors are logged rather than fatal.
	auditBucket := awsConnection.Bucket(auditBucketName + "-" + CLOUD_ENV)
	if err := auditBucket.PutBucket(s3.BucketOwnerFull); err != nil {
		log.Printf("Error creating audit bucket (may already exist): %v", err)
	}
	eventBucket := awsConnection.Bucket(eventBucketName + "-" + CLOUD_ENV)
	if err := eventBucket.PutBucket(s3.BucketOwnerFull); err != nil {
		log.Printf("Error creating event bucket (may already exist): %v", err)
	}

	auditInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge_audit", *logging_dir)
	loggingInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge", *logging_dir)

	auditRotateCoordinator := gologging.NewRotateCoordinator(auditMaxLogLines, auditMaxLogAge)
	loggingRotateCoordinator := gologging.NewRotateCoordinator(maxLogLines, maxLogAge)

	auditLogger, err := gologging.StartS3Logger(
		auditRotateCoordinator,
		auditInfo,
		&DummyNotifierHarness{},
		&uploader.S3UploaderBuilder{
			Bucket:           auditBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: auditInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building audit: %s\n", err)
	}

	spadeEventLogger, err := gologging.StartS3Logger(
		loggingRotateCoordinator,
		loggingInfo,
		BuildSQSNotifierHarness(),
		&uploader.S3UploaderBuilder{
			Bucket:           eventBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: loggingInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building logger: %s\n", err)
	}

	// Initialize Loggers.
	// AuditLogger writes to the audit log, for analysis of system success rate.
	// SpadeLogger writes requests to a file for processing by the spade processor.
	// K(afka)Logger writes produces messages for kafka, currently in dark launch.
	// We allow the klogger to be null incase we boot up with an unresponsive kafka cluster.
	var logger *request_handler.EventLoggers
	brokerList := ParseBrokerList(*brokers)
	klogger, err := kafka_logger.NewKafkaLogger(*clientId, brokerList)
	if err == nil {
		klogger.(*kafka_logger.KafkaLogger).Init()
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     klogger,
		}
	} else {
		log.Printf("Got Error while building logger: %s + %v\nUsing Nop Logger\n", err, brokerList)
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     &request_handler.NoopLogger{},
		}
	}

	// Trigger close on receipt of SIGINT
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGINT)
	go func() {
		<-sigc
		// Cause flush
		logger.Close()
		os.Exit(0)
	}()

	// Expose hystrix circuit-breaker metrics as a stream on port 81.
	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "81"), hystrixStreamHandler)

	// setup server and listen
	server := &http.Server{
		Addr: *listen_port,
		Handler: &request_handler.SpadeHandler{
			StatLogger: stats,
			EdgeLogger: logger,
			Assigner:   request_handler.Assigner,
		},
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 20, // 1 MiB (the old "0.5MB" comment was wrong)
	}
	if err := server.ListenAndServe(); err != nil {
		log.Fatalln(err)
	}
}