Example #1
// sendReadyChunksToClient reads from the readyChunks channel for a particular
// client, sending those chunks to the remote system. It is also responsible
// for snapshotting progress and unlocking the associated readers once a chunk
// has been sent successfully.
func (s *Supervisor) sendReadyChunksToClient(client client.Client) {
	backoff := &ExponentialBackoff{Minimum: 50 * time.Millisecond, Maximum: 5000 * time.Millisecond}
	for {
		var readyChunk *readyChunk
		select {
		case <-s.stopRequest:
			return
		case readyChunk = <-s.retryChunks:
			// got a retry chunk; use it
		default:
			// pull from the default readyChunk queue
			select {
			case <-s.stopRequest:
				return
			case readyChunk = <-s.readyChunks:
				// got a chunk
			}
		}

		if readyChunk != nil {
			GlobalStatistics.SetClientStatus(client.Name(), clientStatusSending)
			if err := s.sendChunk(client, readyChunk.Chunk); err != nil {
				grohl.Report(err, grohl.Data{"msg": "failed to send chunk", "resolution": "retrying"})
				GlobalStatistics.SetClientStatus(client.Name(), clientStatusRetrying)

				// Put the chunk back on the queue for someone else to try
				select {
				case <-s.stopRequest:
					return
				case s.retryChunks <- readyChunk:
					// continue
				}

				// Backoff
				select {
				case <-s.stopRequest:
					return
				case <-time.After(backoff.Next()):
					// continue
				}
			} else {
				backoff.Reset()
				GlobalStatistics.IncrementClientLinesSent(client.Name(), len(readyChunk.Chunk))

				// Snapshot progress
				if err := s.acknowledgeChunk(readyChunk.Chunk); err != nil {
					grohl.Report(err, grohl.Data{"msg": "failed to acknowledge progress", "resolution": "skipping"})
				}

				s.readerPool.UnlockAll(readyChunk.LockedReaders)
			}
		}
	}
}
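
The loop above leans on an ExponentialBackoff value with Minimum and Maximum durations, a Next() method that yields the next wait, and Reset(). The project ships its own implementation; the following is only a minimal stand-in inferred from that usage (the doubling policy and the unexported state field are assumptions), shown here to make the backoff behaviour concrete.

// Minimal sketch of an exponential backoff helper matching the calls above.
// Assumes the "time" package is imported; the real buttered-scones type may
// grow its delay differently.
type ExponentialBackoff struct {
	Minimum time.Duration
	Maximum time.Duration
	current time.Duration
}

// Next returns the current delay, then doubles it for the following call,
// never exceeding Maximum.
func (b *ExponentialBackoff) Next() time.Duration {
	if b.current == 0 {
		b.current = b.Minimum
	}
	d := b.current
	b.current *= 2
	if b.current > b.Maximum {
		b.current = b.Maximum
	}
	return d
}

// Reset drops the delay back down so the next Next() starts at Minimum again.
func (b *ExponentialBackoff) Reset() {
	b.current = 0
}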

func main() {
	grohl.AddContext("app", "buttered-scones")

	var configFile string
	flag.StringVar(&configFile, "config", "", "configuration file path")
	flag.Parse()

	if configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	config, err := butteredscones.LoadConfiguration(configFile)
	if err != nil {
		fmt.Printf("error opening configuration file: %s\n", err.Error())
		os.Exit(1)
	}

	clients := make([]client.Client, 0, len(config.Network.Servers))
	for _, server := range config.Network.Servers {
		tlsConfig, err := config.BuildTLSConfig()
		if err != nil {
			fmt.Printf("%s\n", err.Error())
			os.Exit(1)
		}
		tlsConfig.ServerName = server.Name

		options := &lumberjack.ClientOptions{
			Network:           "tcp",
			Address:           server.Addr,
			TLSConfig:         tlsConfig,
			ConnectionTimeout: time.Duration(config.Network.Timeout) * time.Second,
			SendTimeout:       time.Duration(config.Network.Timeout) * time.Second,
		}
		c := lumberjack.NewClient(options)
		clients = append(clients, c)
	}

	// clients := []Client{&StdoutClient{}}

	db, err := bolt.Open(config.State, 0600, &bolt.Options{Timeout: 2 * time.Second})
	if err != nil {
		fmt.Printf("error opening state database: %s\n", err.Error())
		os.Exit(1)
	}
	snapshotter := &butteredscones.BoltSnapshotter{DB: db}

	if config.Statistics.Addr != "" {
		statsServer := &butteredscones.StatisticsServer{
			Statistics: butteredscones.GlobalStatistics,
			Addr:       config.Statistics.Addr,
		}

		go func() {
			err := statsServer.ListenAndServe()
			grohl.Report(err, grohl.Data{"msg": "stats server failed to start"})
		}()
	}

	// Default spool size
	spoolSize := config.Network.SpoolSize
	if spoolSize == 0 {
		spoolSize = 1024
	}

	supervisor := butteredscones.NewSupervisor(config.Files, clients, snapshotter, config.MaxLength)
	supervisor.SpoolSize = spoolSize
	supervisor.GlobRefresh = 15 * time.Second

	supervisor.Start()

	signalCh := make(chan os.Signal, 1)
	// signal.Notify is non-blocking; calling it directly (not in a goroutine)
	// avoids racing the registration against the receive below.
	signal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)

	sig := <-signalCh
	fmt.Printf("Received %s, shutting down cleanly ...\n", sig)
	supervisor.Stop()
	fmt.Printf("Done shutting down\n")
}
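
main only touches a handful of configuration fields (config.State, config.MaxLength, config.Files, config.Network.Servers/Timeout/SpoolSize, config.Statistics.Addr). As a rough guide, a struct shaped like the sketch below would satisfy those accesses; it is inferred from this example alone, so the real butteredscones.Configuration almost certainly carries more fields, JSON tags, and a concrete type for Files.

// Configuration sketch inferred from the field accesses in main above.
// Only the names used there come from the example; everything else is an assumption.
type Configuration struct {
	State     string // path to the BoltDB file passed to bolt.Open
	MaxLength int    // maximum length handed to NewSupervisor

	Network struct {
		Servers []struct {
			Name string // TLS server name set on tlsConfig.ServerName
			Addr string // host:port dialed by the lumberjack client
		}
		Timeout   int // connection and send timeout, in seconds
		SpoolSize int // chunk spool size; main falls back to 1024 when zero
	}

	Statistics struct {
		Addr string // optional listen address for the statistics server
	}

	Files interface{} // per-file watch settings; placeholder type here
}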