Example 1
File: slowdown.go Project: npk/devd
// NewSlowListener creates a SlowListener with specified read and write rates.
func NewSlowListener(listener net.Listener, readrate float64, writerate float64) net.Listener {
	return &SlowListener{
		listener:    listener,
		readbucket:  ratelimit.NewBucketWithRate(readrate, capacity),
		writebucket: ratelimit.NewBucketWithRate(writerate, capacity),
	}
}
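
The SlowListener type itself is not shown here. Below is a minimal sketch of how its Accept might wrap each connection with the two buckets, assuming juju/ratelimit's Reader and Writer helpers; the slowConn type and its fields are illustrative, not necessarily devd's actual implementation:

// Sketch only: a connection whose reads and writes draw from the buckets.
type slowConn struct {
	net.Conn
	r io.Reader
	w io.Writer
}

func (c *slowConn) Read(p []byte) (int, error)  { return c.r.Read(p) }
func (c *slowConn) Write(p []byte) (int, error) { return c.w.Write(p) }

func (l *SlowListener) Accept() (net.Conn, error) {
	conn, err := l.listener.Accept()
	if err != nil {
		return nil, err
	}
	return &slowConn{
		Conn: conn,
		r:    ratelimit.Reader(conn, l.readbucket),
		w:    ratelimit.Writer(conn, l.writebucket),
	}, nil
}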
Example 2
// NewSlowListener creates a SlowListener with specified read and write rates.
// Both the readrate and the writerate are specified in bytes per second. A
// value of 0 disables throttling.
func NewSlowListener(listener net.Listener, readrate uint, writerate uint) net.Listener {
	if readrate == 0 {
		readrate = MaxRate
	}
	if writerate == 0 {
		writerate = MaxRate
	}
	return &SlowListener{
		listener:    listener,
		readbucket:  ratelimit.NewBucketWithRate(float64(readrate), capacity),
		writebucket: ratelimit.NewBucketWithRate(float64(writerate), capacity),
	}
}
Example 3
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
	bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {

	service := &Service{
		Supervisor:           suture.NewSimple("connections.Service"),
		cfg:                  cfg,
		myID:                 myID,
		model:                mdl,
		tlsCfg:               tlsCfg,
		discoverer:           discoverer,
		conns:                make(chan IntermediateConnection),
		bepProtocolName:      bepProtocolName,
		tlsDefaultCommonName: tlsDefaultCommonName,
		lans:                 lans,
		natService:           nat.NewService(myID, cfg),

		listenersMut:   sync.NewRWMutex(),
		listeners:      make(map[string]genericListener),
		listenerTokens: make(map[string]suture.ServiceToken),

		curConMut:         sync.NewMutex(),
		currentConnection: make(map[protocol.DeviceID]Connection),
	}
	cfg.Subscribe(service)

	// The rate variables are in KiB/s in the UI (despite the camel casing
	// of the name). We multiply by 1024 here to get B/s.
	options := service.cfg.Options()
	if options.MaxSendKbps > 0 {
		service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
	}

	if options.MaxRecvKbps > 0 {
		service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
	}

	// There are several moving parts here: one routine per listening address
	// (handled in configuration changes) to handle incoming connections, one
	// routine to periodically attempt outgoing connections, and one routine
	// to do the common handling regardless of whether the connection was
	// incoming or outgoing.

	service.Add(serviceFunc(service.connect))
	service.Add(serviceFunc(service.handle))

	raw := cfg.Raw()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}
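
Both buckets above follow the same recipe: the rate is the configured limit converted to bytes per second, and the capacity allows roughly five seconds of burst. A small helper making that arithmetic explicit (newLimiter is illustrative, not part of the original code):

// newLimiter builds a token bucket for a limit given in KiB/s, with a
// burst capacity worth about five seconds of traffic.
func newLimiter(kibps int) *ratelimit.Bucket {
	rate := float64(1024 * kibps)       // KiB/s -> B/s
	capacity := int64(5 * 1024 * kibps) // ~5 s of burst
	return ratelimit.NewBucketWithRate(rate, capacity)
}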
Example 4
// DoParallelContainerStopBenchmark starts routineNumber goroutines and lets them stop containers, returning the
// latencies of all stop calls in nanoseconds. There is a global rate limit on stop calls per second.
func DoParallelContainerStopBenchmark(client *docker.Client, qps float64, routineNumber int) []int {
	wg := &sync.WaitGroup{}
	ids := GetContainerIDs(client)
	idTable := make([][]string, routineNumber)
	for i := 0; i < len(ids); i++ {
		idTable[i%routineNumber] = append(idTable[i%routineNumber], ids[i])
	}
	wg.Add(routineNumber)
	limiter := ratelimit.NewBucketWithRate(qps, int64(routineNumber))
	latenciesTable := make([][]int, routineNumber)
	for i := 0; i < routineNumber; i++ {
		go func(index int) {
			latencies := []int{}
			for _, id := range idTable[index] {
				limiter.Wait(1)
				start := time.Now()
				StopContainers(client, []string{id})
				RemoveContainers(client, []string{id})
				latencies = append(latencies, int(time.Since(start).Nanoseconds()))
			}
			latenciesTable[index] = latencies
			wg.Done()
		}(i)
	}
	wg.Wait()
	allLatencies := []int{}
	for _, latencies := range latenciesTable {
		allLatencies = append(allLatencies, latencies...)
	}
	return allLatencies
}
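
The throttling pattern here is worth isolating: a single bucket shared by all goroutines, each of which blocks in Wait(1) before making its call. A minimal sketch of the same idea, with doWork standing in for the Docker calls:

// throttled runs workers goroutines, pacing all of them together at
// roughly qps calls per second through one shared bucket. Sketch only.
func throttled(qps float64, workers int, calls int, doWork func()) {
	bucket := ratelimit.NewBucketWithRate(qps, int64(workers))
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < calls; j++ {
				bucket.Wait(1) // block until a token is available
				doWork()
			}
		}()
	}
	wg.Wait()
}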
Example 5
func init() {
	if len(os.Getenv("AWS_REGION")) == 0 {
		logrus.Info("AWS_REGION is not set, skipping init of Route53 provider")
		return
	}

	if len(os.Getenv("AWS_ACCESS_KEY")) == 0 {
		logrus.Info("AWS_ACCESS_KEY is not set, skipping init of Route53 provider")
		return
	}

	if len(os.Getenv("AWS_SECRET_KEY")) == 0 {
		logrus.Info("AWS_SECRET_KEY is not set, skipping init of Route53 provider")
		return
	}

	route53Handler := &Route53Handler{}
	if err := RegisterProvider("route53", route53Handler); err != nil {
		logrus.Fatal("Could not register route53 provider")
	}

	if err := setRegion(); err != nil {
		logrus.Fatalf("Failed to set region: %v", err)
	}

	if err := setHostedZone(); err != nil {
		logrus.Fatalf("Failed to set hosted zone for root domain %s: %v", dns.RootDomainName, err)
	}

	// Throttle Route53 API calls to 5 req/s
	limiter = ratelimit.NewBucketWithRate(5.0, 1)

	logrus.Infof("Configured %s with hosted zone \"%s\" in region \"%s\" ", route53Handler.GetName(), dns.RootDomainName, region.Name)
}
Example 6
func (r *Route53Provider) Init(rootDomainName string) error {
	var region, accessKey, secretKey string
	if region = os.Getenv("AWS_REGION"); len(region) == 0 {
		return fmt.Errorf("AWS_REGION is not set")
	}

	if accessKey = os.Getenv("AWS_ACCESS_KEY"); len(accessKey) == 0 {
		return fmt.Errorf("AWS_ACCESS_KEY is not set")
	}

	if secretKey = os.Getenv("AWS_SECRET_KEY"); len(secretKey) == 0 {
		return fmt.Errorf("AWS_SECRET_KEY is not set")
	}

	// Comply with the API's 5 req/s rate limit. If there are other
	// clients using the same account the AWS SDK will throttle the
	// requests automatically if the global rate limit is exhausted.
	r.limiter = ratelimit.NewBucketWithRate(5.0, 1)

	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	config := aws.NewConfig().WithMaxRetries(route53MaxRetries).
		WithCredentials(creds).
		WithRegion(region)

	r.client = awsRoute53.New(session.New(config))

	if err := r.setHostedZone(rootDomainName); err != nil {
		return err
	}

	logrus.Infof("Configured %s with hosted zone %s in region %s",
		r.GetName(), rootDomainName, region)

	return nil
}
Example 7
func TestReader(t *testing.T) {
	sizes := []int64{0, 1, capacity, blockSize, 4096, 99, 100}
	for _, size := range sizes {
		src := make([]byte, size)
		_, err := rand.Read(src)
		if err != nil {
			t.Errorf("Could not read random data")
		}
		sr := slowReader{
			bytes.NewBuffer(src),
			ratelimit.NewBucketWithRate(1024*1024, capacity),
		}

		dst := make([]byte, size)
		n, err := sr.Read(dst)
		if err != nil {
			t.Errorf("Read error: %s", err)
		}
		if int64(n) != size {
			t.Errorf("Expected %d bytes, got %d", size, n)
		}

		if !bytes.Equal(dst, src) {
			t.Fail()
		}
	}
}
Example 8
// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
	limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst))
	return &tokenBucketRateLimiter{
		limiter: limiter,
		qps:     qps,
	}
}
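
What the returned value does with the bucket is not shown above. A plausible sketch of the wrapper's methods, assuming a RateLimiter interface with a blocking Accept and a non-blocking TryAccept (the interface shape is an assumption; Wait and TakeAvailable are real bucket methods):

type tokenBucketRateLimiter struct {
	limiter *ratelimit.Bucket
	qps     float32
}

// Accept blocks until a token is available.
func (t *tokenBucketRateLimiter) Accept() {
	t.limiter.Wait(1)
}

// TryAccept reports whether a token could be taken without blocking.
func (t *tokenBucketRateLimiter) TryAccept() bool {
	return t.limiter.TakeAvailable(1) == 1
}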
Example 9
func TestWriter(t *testing.T) {
	sizes := []int64{0, 1, capacity, blockSize, 4096, 99, 100}
	for _, size := range sizes {
		b := &bytes.Buffer{}
		sw := slowWriter{b, ratelimit.NewBucketWithRate(1024*1024, capacity)}

		data := make([]byte, size)
		_, err := rand.Read(data)
		if err != nil {
			t.Errorf("Could not read random data")
		}
		n, err := sw.Write(data)
		if err != nil {
			t.Errorf("Write error: %s", err)
		}
		if int64(n) != size {
			t.Errorf("Expected to write %d bytes, wrote %d", size, n)
		}

		if !bytes.Equal(data, b.Bytes()) {
			t.Fail()
		}

	}
}
Example 10
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential.
func DefaultControllerRateLimiter() RateLimiter {
	return NewMaxOfRateLimiter(
		DefaultItemBasedRateLimiter(),
		// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
		&BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
	)
}
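
BucketRateLimiter adapts the juju bucket to the workqueue's RateLimiter interface. A sketch of how that adaptation might look, assuming the interface asks for a per-item delay via a When method (the interface shape is an assumption; Take is a real bucket method that reserves tokens and returns the time to wait for them):

type BucketRateLimiter struct {
	Bucket *ratelimit.Bucket
}

// When reserves one token and returns how long the caller should wait
// before processing item.
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
	return r.Bucket.Take(1)
}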
Example 11
func (d *DNSimpleProvider) Init(rootDomainName string) error {
	var email, apiToken string
	if email = os.Getenv("DNSIMPLE_EMAIL"); len(email) == 0 {
		return fmt.Errorf("DNSIMPLE_EMAIL is not set")
	}

	if apiToken = os.Getenv("DNSIMPLE_TOKEN"); len(apiToken) == 0 {
		return fmt.Errorf("DNSIMPLE_TOKEN is not set")
	}

	d.root = utils.UnFqdn(rootDomainName)
	d.client = api.NewClient(apiToken, email)
	d.limiter = ratelimit.NewBucketWithRate(1.5, 5)

	domains, _, err := d.client.Domains.List()
	if err != nil {
		return fmt.Errorf("Failed to list zones: %v", err)
	}

	found := false
	for _, domain := range domains {
		if domain.Name == d.root {
			found = true
			break
		}
	}

	if !found {
		return fmt.Errorf("Zone for '%s' not found", d.root)
	}

	logrus.Infof("Configured %s with zone '%s'", d.GetName(), d.root)
	return nil
}
Example 12
func TestTokenBucketLimiter(t *testing.T) {
	e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
	for _, n := range []int{1, 2, 100} {
		tb := jujuratelimit.NewBucketWithRate(float64(n), int64(n))
		testLimiter(t, ratelimit.NewTokenBucketLimiter(tb)(e), n)
	}
}
Example 13
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential.
func DefaultControllerRateLimiter() RateLimiter {
	return NewMaxOfRateLimiter(
		NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
		// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
		&BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
	)
}
Example 14
// DoParallelContainerStartBenchmark starts routineNumber goroutines and lets them start containers, returning the
// latencies of all start calls in nanoseconds. There is a global rate limit on start calls per second.
func DoParallelContainerStartBenchmark(client *docker.Client, qps float64, testPeriod time.Duration, routineNumber int) []int {
	wg := &sync.WaitGroup{}
	wg.Add(routineNumber)
	limiter := ratelimit.NewBucketWithRate(qps, int64(routineNumber))
	latenciesTable := make([][]int, routineNumber)
	for i := 0; i < routineNumber; i++ {
		go func(index int) {
			startTime := time.Now()
			latencies := []int{}
			for {
				limiter.Wait(1)
				start := time.Now()
				ids := CreateContainers(client, 1)
				StartContainers(client, ids)
				latencies = append(latencies, int(time.Since(start).Nanoseconds()))
				if time.Since(startTime) >= testPeriod {
					break
				}
			}
			latenciesTable[index] = latencies
			wg.Done()
		}(i)
	}
	wg.Wait()
	allLatencies := []int{}
	for _, latencies := range latenciesTable {
		allLatencies = append(allLatencies, latencies...)
	}
	return allLatencies
}
Example 15
func NewRateLimitReader(rc io.ReadCloser, rate float64, capacity int64) io.ReadCloser {
	var rlr rateLimitReader

	rlr.rc = rc
	rlr.rlr = ratelimit.Reader(rc, ratelimit.NewBucketWithRate(rate, capacity))

	return &rlr
}
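
The rateLimitReader type itself is not shown. A minimal sketch of what it plausibly needs, with Read delegating to the limited reader and Close to the original closer; the field names are taken from the constructor above:

type rateLimitReader struct {
	rc  io.ReadCloser // the original reader, kept so Close still works
	rlr io.Reader     // the rate-limited view of rc
}

func (r *rateLimitReader) Read(p []byte) (int, error) {
	return r.rlr.Read(p)
}

func (r *rateLimitReader) Close() error {
	return r.rc.Close()
}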
Example 16
func factory(ctx context.Context, qps int) loadbalancer.Factory {
	return func(instance string) (endpoint.Endpoint, io.Closer, error) {
		var e endpoint.Endpoint
		e = makeUppercaseProxy(ctx, instance)
		e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e)
		e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e)
		return e, nil, nil
	}
}
Example 17
// New returns an AddService backed by an HTTP server living at the remote
// instance. We expect instance to come from a service discovery system, so
// likely of the form "host:port".
func New(instance string, tracer stdopentracing.Tracer, logger log.Logger) (addsvc.Service, error) {
	if !strings.HasPrefix(instance, "http") {
		instance = "http://" + instance
	}
	u, err := url.Parse(instance)
	if err != nil {
		return nil, err
	}

	// We construct a single ratelimiter middleware, to limit the total outgoing
	// QPS from this client to all methods on the remote instance. We also
	// construct per-endpoint circuitbreaker middlewares to demonstrate how
	// that's done, although they could easily be combined into a single breaker
	// for the entire remote instance, too.

	limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100))

	var sumEndpoint endpoint.Endpoint
	{
		sumEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/sum"),
			addsvc.EncodeHTTPGenericRequest,
			addsvc.DecodeHTTPSumResponse,
			httptransport.SetClientBefore(opentracing.FromHTTPRequest(tracer, "Sum", logger)),
		).Endpoint()
		sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint)
		sumEndpoint = limiter(sumEndpoint)
		sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Sum",
			Timeout: 30 * time.Second,
		}))(sumEndpoint)
	}

	var concatEndpoint endpoint.Endpoint
	{
		concatEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/concat"),
			addsvc.EncodeHTTPGenericRequest,
			addsvc.DecodeHTTPConcatResponse,
			httptransport.SetClientBefore(opentracing.FromHTTPRequest(tracer, "Concat", logger)),
		).Endpoint()
		concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint)
		concatEndpoint = limiter(concatEndpoint)
		concatEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Concat",
			Timeout: 30 * time.Second,
		}))(concatEndpoint)
	}

	return addsvc.Endpoints{
		SumEndpoint:    sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}, nil
}
Example 18
func warningFor(dev protocol.DeviceID, msg string) {
	warningLimitersMut.Lock()
	defer warningLimitersMut.Unlock()
	lim, ok := warningLimiters[dev]
	if !ok {
		lim = ratelimit.NewBucketWithRate(perDeviceWarningRate, 1)
		warningLimiters[dev] = lim
	}
	if lim.TakeAvailable(1) == 1 {
		l.Warnln(msg)
	}
}
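
warningFor leans on package-level state that is not shown. A hedged sketch of the supporting declarations, assuming a standard-library mutex; the map follows directly from the code above, while the mutex type and the rate value are purely illustrative:

var (
	warningLimitersMut sync.Mutex
	warningLimiters    = make(map[protocol.DeviceID]*ratelimit.Bucket)
)

// perDeviceWarningRate caps how often one device can trigger a warning;
// 1.0/60 would allow roughly one warning per minute (value assumed).
const perDeviceWarningRate = 1.0 / 60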
Example 19
File: client.go Project: crezam/kit
// New returns an AddService backed by a gRPC client connection. It is the
// responsibility of the caller to dial, and later close, the connection.
func New(conn *grpc.ClientConn, tracer stdopentracing.Tracer, logger log.Logger) addsvc.Service {
	// We construct a single ratelimiter middleware, to limit the total outgoing
	// QPS from this client to all methods on the remote instance. We also
	// construct per-endpoint circuitbreaker middlewares to demonstrate how
	// that's done, although they could easily be combined into a single breaker
	// for the entire remote instance, too.

	limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100))

	var sumEndpoint endpoint.Endpoint
	{
		sumEndpoint = grpctransport.NewClient(
			conn,
			"Add",
			"Sum",
			addsvc.EncodeGRPCSumRequest,
			addsvc.DecodeGRPCSumResponse,
			pb.SumReply{},
			grpctransport.ClientBefore(opentracing.FromGRPCRequest(tracer, "Sum", logger)),
		).Endpoint()
		sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint)
		sumEndpoint = limiter(sumEndpoint)
		sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Sum",
			Timeout: 30 * time.Second,
		}))(sumEndpoint)
	}

	var concatEndpoint endpoint.Endpoint
	{
		concatEndpoint = grpctransport.NewClient(
			conn,
			"Add",
			"Concat",
			addsvc.EncodeGRPCConcatRequest,
			addsvc.DecodeGRPCConcatResponse,
			pb.ConcatReply{},
			grpctransport.ClientBefore(opentracing.FromGRPCRequest(tracer, "Concat", logger)),
		).Endpoint()
		concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint)
		concatEndpoint = limiter(concatEndpoint)
		concatEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Concat",
			Timeout: 30 * time.Second,
		}))(concatEndpoint)
	}

	return addsvc.Endpoints{
		SumEndpoint:    sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}
}
Example 20
func TestTokenBucketThrottler(t *testing.T) {
	d := time.Duration(0)
	s := func(d0 time.Duration) { d = d0 }

	e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
	e = ratelimit.NewTokenBucketThrottler(jujuratelimit.NewBucketWithRate(1, 1), s)(e)

	// First request should go through with no delay.
	e(context.Background(), struct{}{})
	if want, have := time.Duration(0), d; want != have {
		t.Errorf("want %s, have %s", want, have)
	}

	// Next request should request a ~1s sleep.
	e(context.Background(), struct{}{})
	if want, have, tol := time.Second, d, time.Millisecond; math.Abs(float64(want-have)) > float64(tol) {
		t.Errorf("want %s, have %s", want, have)
	}
}
Example 21
// NewTokenBucketLimiter returns an endpoint.Middleware that acts as a rate
// limiter based on a token-bucket algorithm. Requests that would exceed the
// maximum request rate are simply rejected with an error.
func NewTokenBucketLimiter(options ...TokenBucketLimiterOption) endpoint.Middleware {
	limiter := tokenBucketLimiter{
		rate:     100,
		capacity: 100,
		take:     1,
	}
	for _, option := range options {
		option(&limiter)
	}
	tb := juju.NewBucketWithRate(limiter.rate, limiter.capacity)
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			if tb.TakeAvailable(limiter.take) == 0 {
				return nil, ErrLimited
			}
			return next(ctx, request)
		}
	}
}
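
Applying the middleware is a one-liner; a brief usage sketch (the example function, myEndpoint, and the zero-value request are stand-ins):

func example(myEndpoint endpoint.Endpoint) {
	limited := NewTokenBucketLimiter()(myEndpoint)
	if _, err := limited(context.Background(), struct{}{}); err == ErrLimited {
		// The bucket was empty: the request was rejected, not delayed.
	}
}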
Example 22
// NewTokenBucketThrottler returns an endpoint.Middleware that acts as a
// request throttler based on a token-bucket algorithm. Requests that would
// exceed the maximum request rate are delayed via a parameterized sleep
// function.
func NewTokenBucketThrottler(options ...TokenBucketThrottlerOption) endpoint.Middleware {
	throttler := tokenBucketThrottler{
		tokenBucketLimiter: tokenBucketLimiter{
			rate:     100,
			capacity: 100,
			take:     1,
		},
		sleep: time.Sleep,
	}
	for _, option := range options {
		option(&throttler)
	}
	tb := juju.NewBucketWithRate(throttler.rate, throttler.capacity)
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			throttler.sleep(tb.Take(throttler.take))
			return next(ctx, request)
		}
	}
}
Example 23
func proxyingMiddleware(instances string, ctx context.Context, logger log.Logger) ServiceMiddleware {
	// If instances is empty, don't proxy.
	if instances == "" {
		logger.Log("proxy_to", "none")
		return func(next StringService) StringService { return next }
	}

	// Set some parameters for our client.
	var (
		qps         = 100                    // beyond which we will return an error
		maxAttempts = 3                      // per request, before giving up
		maxTime     = 250 * time.Millisecond // wallclock time, before giving up
	)

	// Otherwise, construct an endpoint for each instance in the list, and add
	// it to a fixed set of endpoints. In a real service, rather than doing this
	// by hand, you'd probably use package sd's support for your service
	// discovery system.
	var (
		instanceList = split(instances)
		subscriber   sd.FixedSubscriber
	)
	logger.Log("proxy_to", fmt.Sprint(instanceList))
	for _, instance := range instanceList {
		var e endpoint.Endpoint
		e = makeUppercaseProxy(ctx, instance)
		e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e)
		e = ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e)
		subscriber = append(subscriber, e)
	}

	// Now, build a single, retrying, load-balancing endpoint out of all of
	// those individual endpoints.
	balancer := lb.NewRoundRobin(subscriber)
	retry := lb.Retry(maxAttempts, maxTime, balancer)

	// And finally, return the ServiceMiddleware, implemented by proxymw.
	return func(next StringService) StringService {
		return proxymw{ctx, next, retry}
	}
}
Example 24
// startPoller creates a new goroutine for a channel.
// TODO: confirm delivery or retry instead (circuitbreaker?)
func (m *messenger) startPoller(q *queue.Queue) {
	logger.Debug("messenger", "Starting new poller goroutine")
	tb := ratelimit.NewBucketWithRate(msnRateLimit, 1)
	for {
		select {
		case <-m.ctx.Done():
			logger.Debug("messenger", "Closing poller")
			return
		default:
			res, err := q.Poll(1, msnPollWaitTime)
			if err != nil {
				if err != queue.ErrTimeout {
					logger.Warn("messenger", "startPoller", "error", err)
				}
				continue
			}
			msg := res[0].(*slack.OutgoingMessage)
			m.rtm.SendMessage(msg)
			tb.Wait(1) // and relax for a bit!
		}
	}
}
Example 25
// New returns an AddService backed by a Thrift server described by the provided
// client. The caller is responsible for constructing the client, and eventually
// closing the underlying transport.
func New(client *thriftadd.AddServiceClient) addsvc.Service {
	// We construct a single ratelimiter middleware, to limit the total outgoing
	// QPS from this client to all methods on the remote instance. We also
	// construct per-endpoint circuitbreaker middlewares to demonstrate how
	// that's done, although they could easily be combined into a single breaker
	// for the entire remote instance, too.

	limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100))

	// Thrift does not currently have tracer bindings, so we skip tracing.

	var sumEndpoint endpoint.Endpoint
	{
		sumEndpoint = addsvc.MakeThriftSumEndpoint(client)
		sumEndpoint = limiter(sumEndpoint)
		sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Sum",
			Timeout: 30 * time.Second,
		}))(sumEndpoint)
	}

	var concatEndpoint endpoint.Endpoint
	{
		concatEndpoint = addsvc.MakeThriftConcatEndpoint(client)
		concatEndpoint = limiter(concatEndpoint)
		concatEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Concat",
			Timeout: 30 * time.Second,
		}))(concatEndpoint)
	}

	return addsvc.Endpoints{
		SumEndpoint:    sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}
}
Example 26
func main() {
	log.SetFlags(log.Lshortfile | log.LstdFlags)

	var dir, extAddress string

	flag.StringVar(&listen, "listen", ":22067", "Protocol listen address")
	flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
	flag.DurationVar(&networkTimeout, "network-timeout", networkTimeout, "Timeout for network operations between the client and the relay.\n\tIf no data is received between the client and the relay in this period of time, the connection is terminated.\n\tFurthermore, if no data is sent between either clients being relayed within this period of time, the session is also terminated.")
	flag.DurationVar(&pingInterval, "ping-interval", pingInterval, "How often pings are sent")
	flag.DurationVar(&messageTimeout, "message-timeout", messageTimeout, "Maximum amount of time we wait for relevant messages to arrive")
	flag.IntVar(&sessionLimitBps, "per-session-rate", sessionLimitBps, "Per session rate limit, in bytes/s")
	flag.IntVar(&globalLimitBps, "global-rate", globalLimitBps, "Global rate limit, in bytes/s")
	flag.BoolVar(&debug, "debug", debug, "Enable debug output")
	flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
	flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
	flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
	flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertising as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")

	flag.Parse()

	if extAddress == "" {
		extAddress = listen
	}

	addr, err := net.ResolveTCPAddr("tcp", extAddress)
	if err != nil {
		log.Fatal(err)
	}

	sessionAddress = addr.IP[:]
	sessionPort = uint16(addr.Port)

	certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Println("Failed to load keypair. Generating one, this might take a while...")
		cert, err = tlsutil.NewCertificate(certFile, keyFile, "relaysrv", 3072)
		if err != nil {
			log.Fatalln("Failed to generate X509 key pair:", err)
		}
	}

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{protocol.ProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
	if debug {
		log.Println("ID:", id)
	}

	if sessionLimitBps > 0 {
		sessionLimiter = ratelimit.NewBucketWithRate(float64(sessionLimitBps), int64(2*sessionLimitBps))
	}
	if globalLimitBps > 0 {
		globalLimiter = ratelimit.NewBucketWithRate(float64(globalLimitBps), int64(2*globalLimitBps))
	}

	if statusAddr != "" {
		go statusService(statusAddr)
	}

	uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", extAddress, id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
	if err != nil {
		log.Fatalln("Failed to construct URI", err)
	}

	log.Println("URI:", uri.String())

	if poolAddrs == defaultPoolAddrs {
		log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
		log.Println("!!  Joining default relay pools, this relay will be available for public use. !!")
		log.Println(`!!      Use the -pools="" command line option to make the relay private.      !!`)
		log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	}

	pools = strings.Split(poolAddrs, ",")
	for _, pool := range pools {
		pool = strings.TrimSpace(pool)
		if len(pool) > 0 {
			go poolHandler(pool, uri)
		}
	}

	listener(listen, tlsCfg)
}
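
The session and global limiters presumably both apply to each relayed stream. A hedged sketch of how two buckets can stack on one reader using only the library's Reader helper; the wrapper is illustrative, not the relay's actual code:

// limitedReader applies a per-session and a global limit to one stream.
// Either bucket may be nil when the corresponding limit is disabled.
func limitedReader(r io.Reader, session, global *ratelimit.Bucket) io.Reader {
	if session != nil {
		r = ratelimit.Reader(r, session)
	}
	if global != nil {
		r = ratelimit.Reader(r, global)
	}
	return r
}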
Example 27
func main() {
	var reset bool
	var showVersion bool
	var doUpgrade bool
	var doUpgradeCheck bool
	var noBrowser bool
	var generateDir string
	var guiAddress string
	var guiAuthentication string
	var guiAPIKey string
	flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
	flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
	flag.BoolVar(&showVersion, "version", false, "Show version")
	flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
	flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
	flag.BoolVar(&noBrowser, "no-browser", false, "Do not start browser")
	flag.StringVar(&generateDir, "generate", "", "Generate key in specified dir")
	flag.StringVar(&guiAddress, "gui-address", "", "Override GUI address")
	flag.StringVar(&guiAuthentication, "gui-authentication", "", "Override GUI authentication. Expects 'username:password'")
	flag.StringVar(&guiAPIKey, "gui-apikey", "", "Override GUI API key")
	flag.IntVar(&logFlags, "logflags", logFlags, "Set log flags")
	flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
	flag.Parse()

	if showVersion {
		fmt.Println(LongVersion)
		return
	}

	l.SetFlags(logFlags)

	if generateDir != "" {
		dir := expandTilde(generateDir)

		info, err := os.Stat(dir)
		l.FatalErr(err)
		if !info.IsDir() {
			l.Fatalln(dir, "is not a directory")
		}

		cert, err := loadCert(dir, "")
		if err == nil {
			l.Warnln("Key exists; will not overwrite.")
			l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
			return
		}

		newCertificate(dir, "")
		cert, err = loadCert(dir, "")
		l.FatalErr(err)
		if err == nil {
			l.Infoln("Node ID:", protocol.NewNodeID(cert.Certificate[0]))
		}
		return
	}

	if doUpgrade || doUpgradeCheck {
		rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
		if err != nil {
			l.Fatalln("Upgrade:", err) // exits 1
		}

		if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
			l.Infof("No upgrade available (current %q >= latest %q).", Version, rel.Tag)
			os.Exit(2)
		}

		l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag)

		if doUpgrade {
			err = upgrade.UpgradeTo(rel, GoArchExtra)
			if err != nil {
				l.Fatalln("Upgrade:", err) // exits 1
			}
			l.Okf("Upgraded to %q", rel.Tag)
			return
		} else {
			return
		}
	}

	var err error
	lockPort, err = getLockPort()
	if err != nil {
		l.Fatalln("Opening lock port:", err)
	}

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	confDir = expandTilde(confDir)

	events.Default.Log(events.Starting, map[string]string{"home": confDir})

	if _, err := os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
		// We are supposed to use the default configuration directory. It
		// doesn't exist. In the past our default has been ~/.syncthing, so if
		// that directory exists we move it to the new default location and
		// continue. We don't much care if this fails at this point, we will
		// be checking that later.

		var oldDefault string
		if runtime.GOOS == "windows" {
			oldDefault = filepath.Join(os.Getenv("AppData"), "Syncthing")
		} else {
			oldDefault = expandTilde("~/.syncthing")
		}
		if _, err := os.Stat(oldDefault); err == nil {
			os.MkdirAll(filepath.Dir(confDir), 0700)
			if err := os.Rename(oldDefault, confDir); err == nil {
				l.Infoln("Moved config dir", oldDefault, "to", confDir)
			}
		}
	}

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir, 0700)
	cert, err = loadCert(confDir, "")
	if err != nil {
		newCertificate(confDir, "")
		cert, err = loadCert(confDir, "")
		l.FatalErr(err)
	}

	myID = protocol.NewNodeID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")
	go saveConfigLoop(cfgFile)

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cf, err := os.Open(cfgFile)
	if err == nil {
		// Read config.xml
		cfg, err = config.Load(cf, myID)
		if err != nil {
			l.Fatalln(err)
		}
		cf.Close()
		myCfg := cfg.GetNodeConfiguration(myID)
		if myCfg == nil || myCfg.Name == "" {
			myName, _ = os.Hostname()
		} else {
			myName = myCfg.Name
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		defaultRepo := filepath.Join(getHomeDir(), "Sync")

		cfg, err = config.Load(nil, myID)
		cfg.Repositories = []config.RepositoryConfiguration{
			{
				ID:        "default",
				Directory: defaultRepo,
				Nodes:     []config.RepositoryNodeConfiguration{{NodeID: myID}},
			},
		}
		cfg.Nodes = []config.NodeConfiguration{
			{
				NodeID:    myID,
				Addresses: []string{"dynamic"},
				Name:      myName,
			},
		}

		port, err := getFreePort("127.0.0.1", 8080)
		l.FatalErr(err)
		cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

		port, err = getFreePort("0.0.0.0", 22000)
		l.FatalErr(err)
		cfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}

		saveConfig()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if reset {
		resetRepositories()
		return
	}

	if len(os.Getenv("STRESTART")) > 0 {
		waitForParentExit()
	}

	if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID.String(),
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	// If the write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	if cfg.Options.MaxSendKbps > 0 {
		rateBucket = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
	}

	// If this is the first time the user runs v0.9, archive the old indexes and config.
	archiveLegacyConfig()

	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}
	m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)

nextRepo:
	for i, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		repo.Directory = expandTilde(repo.Directory)

		fi, err := os.Stat(repo.Directory)
		if m.LocalVersion(repo.ID) > 0 {
			// Safety check. If the cached index contains files but the
			// repository doesn't exist, we have a problem. We would assume
			// that all files have been deleted which might not be the case,
			// so mark it as invalid instead.
			if err != nil || !fi.IsDir() {
				cfg.Repositories[i].Invalid = "repo directory missing"
				continue nextRepo
			}
		} else if os.IsNotExist(err) {
			// If we don't have any files in the index and the directory
			// doesn't exist, try creating it.
			err = os.MkdirAll(repo.Directory, 0700)
		}

		if err != nil {
			// If there was another error or we could not create the
			// directory, the repository is invalid.
			cfg.Repositories[i].Invalid = err.Error()
			continue nextRepo
		}

		m.AddRepo(repo)
	}

	// GUI

	guiCfg := overrideGUIConfig(cfg.GUI, guiAddress, guiAuthentication, guiAPIKey)

	if guiCfg.Enabled && guiCfg.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			var proto = "http"
			if guiCfg.UseTLS {
				proto = "https"
			}

			l.Infof("Starting web GUI on %s://%s:%d/", proto, hostShow, addr.Port)
			err := startGUI(guiCfg, os.Getenv("STGUIASSETS"), m)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			if !noBrowser && cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
				openURL(fmt.Sprintf("%s://%s:%d", proto, hostOpen, addr.Port))
			}
		}
	}

	// Clear out old indexes for other nodes. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, repoCfg := range cfg.Repositories {
		for _, node := range repoCfg.NodeIDs() {
			m.Index(node, repoCfg.ID, nil)
		}
	}

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	m.CleanRepos()
	l.Infoln("Performing initial repository scan")
	m.ScanRepos()

	// Remove all .idx* files that don't belong to an active repo.

	validIndexes := make(map[string]bool)
	for _, repo := range cfg.Repositories {
		dir := expandTilde(repo.Directory)
		id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
		validIndexes[id] = true
	}

	allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
	if err == nil {
		for _, idx := range allIndexes {
			bn := filepath.Base(idx)
			fs := strings.Split(bn, ".")
			if len(fs) > 1 {
				if _, ok := validIndexes[fs[0]]; !ok {
					l.Infoln("Removing old index", bn)
					os.Remove(idx)
				}
			}
		}
	}

	// UPnP

	if cfg.Options.UPnPEnabled {
		setupUPnP()
	}

	// Routine to connect out to configured nodes
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other nodes to synchronize the local
		// repository. Does not run when we are in read only (publish only) mode.
		if repo.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
			m.StartRepoRO(repo.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", repo.ID)
			m.StartRepoRW(repo.ID, cfg.Options.ParallelRequests)
		}
	}

	if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, node := range cfg.Nodes {
		if len(node.Name) > 0 {
			l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
		}
	}

	if cfg.Options.URAccepted > 0 && cfg.Options.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		cfg.Options.URAccepted = 0
	}
	if cfg.Options.URAccepted >= usageReportVersion {
		go usageReportingLoop(m)
		go func() {
			time.Sleep(10 * time.Minute)
			err := sendUsageReport(m)
			if err != nil {
				l.Infoln("Usage report:", err)
			}
		}()
	}

	events.Default.Log(events.StartupComplete, nil)
	go generateEvents()

	<-stop

	l.Okln("Exiting")
}
Example 28
func main() {
	var reset bool
	var showVersion bool
	var doUpgrade bool
	flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
	flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
	flag.BoolVar(&showVersion, "version", false, "Show version")
	flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
	flag.IntVar(&logFlags, "logflags", logFlags, "Set log flags")
	flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
	flag.Parse()

	if showVersion {
		fmt.Println(LongVersion)
		return
	}

	l.SetFlags(logFlags)

	if doUpgrade {
		err := upgrade()
		if err != nil {
			l.Fatalln(err)
		}
		return
	}

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	confDir = expandTilde(confDir)

	events.Default.Log(events.Starting, map[string]string{"home": confDir})

	if _, err := os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
		// We are supposed to use the default configuration directory. It
		// doesn't exist. In the past our default has been ~/.syncthing, so if
		// that directory exists we move it to the new default location and
		// continue. We don't much care if this fails at this point, we will
		// be checking that later.

		var oldDefault string
		if runtime.GOOS == "windows" {
			oldDefault = filepath.Join(os.Getenv("AppData"), "Syncthing")
		} else {
			oldDefault = expandTilde("~/.syncthing")
		}
		if _, err := os.Stat(oldDefault); err == nil {
			os.MkdirAll(filepath.Dir(confDir), 0700)
			if err := os.Rename(oldDefault, confDir); err == nil {
				l.Infoln("Moved config dir", oldDefault, "to", confDir)
			}
		}
	}

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir, 0700)
	cert, err := loadCert(confDir, "")
	if err != nil {
		newCertificate(confDir, "")
		cert, err = loadCert(confDir, "")
		l.FatalErr(err)
	}

	myID = protocol.NewNodeID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")
	go saveConfigLoop(cfgFile)

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cf, err := os.Open(cfgFile)
	if err == nil {
		// Read config.xml
		cfg, err = config.Load(cf, myID)
		if err != nil {
			l.Fatalln(err)
		}
		cf.Close()
	} else {
		l.Infoln("No config file; starting with empty defaults")
		name, _ := os.Hostname()
		defaultRepo := filepath.Join(getHomeDir(), "Sync")
		ensureDir(defaultRepo, 0755)

		cfg, err = config.Load(nil, myID)
		cfg.Repositories = []config.RepositoryConfiguration{
			{
				ID:        "default",
				Directory: defaultRepo,
				Nodes:     []config.NodeConfiguration{{NodeID: myID}},
			},
		}
		cfg.Nodes = []config.NodeConfiguration{
			{
				NodeID:    myID,
				Addresses: []string{"dynamic"},
				Name:      name,
			},
		}

		port, err := getFreePort("127.0.0.1", 8080)
		l.FatalErr(err)
		cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

		port, err = getFreePort("0.0.0.0", 22000)
		l.FatalErr(err)
		cfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}

		saveConfig()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if reset {
		resetRepositories()
		return
	}

	if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	if len(os.Getenv("STRESTART")) > 0 {
		waitForParentExit()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID.String(),
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	// If the write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	if cfg.Options.MaxSendKbps > 0 {
		rateBucket = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
	}

	removeLegacyIndexes()
	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
	if err != nil {
		l.Fatalln("leveldb.OpenFile():", err)
	}
	m := model.NewModel(confDir, &cfg, "syncthing", Version, db)

nextRepo:
	for i, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		repo.Directory = expandTilde(repo.Directory)

		// Safety check. If the cached index contains files but the repository
		// doesn't exist, we have a problem. We would assume that all files
		// have been deleted which might not be the case, so abort instead.

		id := fmt.Sprintf("%x", sha1.Sum([]byte(repo.Directory)))
		idxFile := filepath.Join(confDir, id+".idx.gz")
		if _, err := os.Stat(idxFile); err == nil {
			if fi, err := os.Stat(repo.Directory); err != nil || !fi.IsDir() {
				cfg.Repositories[i].Invalid = "repo directory missing"
				continue nextRepo
			}
		}

		ensureDir(repo.Directory, -1)
		m.AddRepo(repo)
	}

	// GUI
	if cfg.GUI.Enabled && cfg.GUI.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", cfg.GUI.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			var proto = "http"
			if cfg.GUI.UseTLS {
				proto = "https"
			}

			l.Infof("Starting web GUI on %s://%s:%d/", proto, hostShow, addr.Port)
			err := startGUI(cfg.GUI, os.Getenv("STGUIASSETS"), m)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			if cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
				openURL(fmt.Sprintf("%s://%s:%d", proto, hostOpen, addr.Port))
			}
		}
	}

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	m.CleanRepos()
	l.Infoln("Performing initial repository scan")
	m.ScanRepos()

	// Remove all .idx* files that don't belong to an active repo.

	validIndexes := make(map[string]bool)
	for _, repo := range cfg.Repositories {
		dir := expandTilde(repo.Directory)
		id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
		validIndexes[id] = true
	}

	allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
	if err == nil {
		for _, idx := range allIndexes {
			bn := filepath.Base(idx)
			fs := strings.Split(bn, ".")
			if len(fs) > 1 {
				if _, ok := validIndexes[fs[0]]; !ok {
					l.Infoln("Removing old index", bn)
					os.Remove(idx)
				}
			}
		}
	}

	// UPnP

	var externalPort = 0
	if cfg.Options.UPnPEnabled {
		// We seed the random number generator with the node ID to get a
		// repeatable sequence of random external ports.
		externalPort = setupUPnP(rand.NewSource(certSeed(cert.Certificate[0])))
	}

	// Routine to connect out to configured nodes
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other nodes to synchronize the local
		// repository. Does not run when we are in read only (publish only) mode.
		if repo.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
			m.StartRepoRO(repo.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", repo.ID)
			m.StartRepoRW(repo.ID, cfg.Options.ParallelRequests)
		}
	}

	if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
		f, err := os.Create(cpuprof)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, node := range cfg.Nodes {
		if len(node.Name) > 0 {
			l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
		}
	}

	if cfg.Options.URAccepted > 0 && cfg.Options.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		cfg.Options.URAccepted = 0
	}
	if cfg.Options.URAccepted >= usageReportVersion {
		go usageReportingLoop(m)
		go func() {
			time.Sleep(10 * time.Minute)
			err := sendUsageReport(m)
			if err != nil {
				l.Infoln("Usage report:", err)
			}
		}()
	}

	events.Default.Log(events.StartupComplete, nil)
	go generateEvents()

	<-stop

	l.Okln("Exiting")
}
Example 29
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			if debugSuture {
				l.Debugln(line)
			}
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.

	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one if the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	ldb, err := leveldb.OpenFile(dbFile, dbOpts())
	if err != nil && errors.IsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * 60 * time.Second)
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folderCfg.ID)
			m.StartFolderRO(folderCfg.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folderCfg.ID)
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// GUI

	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.

	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// Start discovery

	localPort := addr.Port
	discoverer = discovery(localPort)

	// Start UPnP. The UPnP service will restart global discovery if the
	// external port changes.

	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, localPort)
		mainSvc.Add(upnpSvc)
	}

	connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
	cfg.Subscribe(connectionSvc)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(m, cfg)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()

	l.Okln("Exiting")
	os.Exit(code)
}
Example 30
func main() {
	var reset bool
	var showVersion bool
	var doUpgrade bool
	flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
	flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
	flag.BoolVar(&showVersion, "version", false, "Show version")
	flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
	flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
	flag.Parse()

	if len(os.Getenv("STRESTART")) > 0 {
		// Give the parent process time to exit and release sockets etc.
		time.Sleep(1 * time.Second)
	}

	if showVersion {
		fmt.Println(LongVersion)
		return
	}

	if doUpgrade {
		err := upgrade()
		if err != nil {
			l.Fatalln(err)
		}
		return
	}

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	confDir = expandTilde(confDir)

	if _, err := os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
		// We are supposed to use the default configuration directory. It
		// doesn't exist. In the past our default has been ~/.syncthing, so if
		// that directory exists we move it to the new default location and
		// continue. We don't much care if this fails at this point, we will
		// be checking that later.

		oldDefault := expandTilde("~/.syncthing")
		if _, err := os.Stat(oldDefault); err == nil {
			os.MkdirAll(filepath.Dir(confDir), 0700)
			if err := os.Rename(oldDefault, confDir); err == nil {
				l.Infoln("Moved config dir", oldDefault, "to", confDir)
			}
		}
	}

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir, 0700)
	cert, err := loadCert(confDir)
	if err != nil {
		newCertificate(confDir)
		cert, err = loadCert(confDir)
		l.FatalErr(err)
	}

	myID = certID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")
	go saveConfigLoop(cfgFile)

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cf, err := os.Open(cfgFile)
	if err == nil {
		// Read config.xml
		cfg, err = config.Load(cf, myID)
		if err != nil {
			l.Fatalln(err)
		}
		cf.Close()
	} else {
		l.Infoln("No config file; starting with empty defaults")
		name, _ := os.Hostname()
		defaultRepo := filepath.Join(getHomeDir(), "Sync")
		ensureDir(defaultRepo, 0755)

		cfg, err = config.Load(nil, myID)
		cfg.Repositories = []config.RepositoryConfiguration{
			{
				ID:        "default",
				Directory: defaultRepo,
				Nodes:     []config.NodeConfiguration{{NodeID: myID}},
			},
		}
		cfg.Nodes = []config.NodeConfiguration{
			{
				NodeID:    myID,
				Addresses: []string{"dynamic"},
				Name:      name,
			},
		}

		port, err := getFreePort("127.0.0.1", 8080)
		l.FatalErr(err)
		cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

		port, err = getFreePort("", 22000)
		l.FatalErr(err)
		cfg.Options.ListenAddress = []string{fmt.Sprintf(":%d", port)}

		saveConfig()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if reset {
		resetRepositories()
		return
	}

	if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID,
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	// If the write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	if cfg.Options.MaxSendKbps > 0 {
		rateBucket = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
	}

	m := model.NewModel(confDir, &cfg, "syncthing", Version)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}
		dir := expandTilde(repo.Directory)
		m.AddRepo(repo.ID, dir, repo.Nodes)
	}

	// GUI
	if cfg.GUI.Enabled && cfg.GUI.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", cfg.GUI.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			l.Infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
			err := startGUI(cfg.GUI, m)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			if cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
				openURL(fmt.Sprintf("http://%s:%d", hostOpen, addr.Port))
			}
		}
	}

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	l.Infoln("Populating repository index")
	m.LoadIndexes(confDir)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		dir := expandTilde(repo.Directory)

		// Safety check. If the cached index contains files but the repository
		// doesn't exist, we have a problem. We would assume that all files
		// have been deleted which might not be the case, so abort instead.

		if files, _, _ := m.LocalSize(repo.ID); files > 0 {
			if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
				l.Warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
				l.Fatalf("Ensure that directory is present or remove repository from configuration.")
			}
		}

		// Ensure that repository directories exist for newly configured repositories.
		ensureDir(dir, -1)
	}

	m.CleanRepos()
	m.ScanRepos()
	m.SaveIndexes(confDir)

	// UPnP

	var externalPort = 0
	if cfg.Options.UPnPEnabled {
		// We seed the random number generator with the node ID to get a
		// repeatable sequence of random external ports.
		rand.Seed(certSeed(cert.Certificate[0]))
		externalPort = setupUPnP()
	}

	// Routine to connect out to configured nodes
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other nodes to synchronize the local
		// repository. Does not run when we are in read only (publish only) mode.
		if repo.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
			m.StartRepoRO(repo.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", repo.ID)
			m.StartRepoRW(repo.ID, cfg.Options.ParallelRequests)
		}
	}

	if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
		f, err := os.Create(cpuprof)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	<-stop
}